merge mozilla-inbound to mozilla-central a=merge

Carsten "Tomcat" Book 2016-10-04 11:58:07 +02:00
commit 02f4754fbc
129 changed files with 2594 additions and 1555 deletions


@ -2,7 +2,7 @@
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
<!DOCTYPE html>
<html>
<html dir="">
<head>
<link rel="stylesheet"
type="text/css"


@ -1114,7 +1114,7 @@ const ThreadActor = ActorClassWithSpec(threadSpec, {
let els = Cc["@mozilla.org/eventlistenerservice;1"]
.getService(Ci.nsIEventListenerService);
let targets = els.getEventTargetChainFor(eventTarget);
let targets = els.getEventTargetChainFor(eventTarget, true);
let listeners = [];
for (let target of targets) {
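
For reference, the extra `true` tracks the nsIEventListenerService change later in this commit: the method now takes a boolean selecting the composed event target chain. A minimal usage sketch, assuming chrome privileges and the updated two-argument IDL (`someEventTarget` is a placeholder):

```js
// Sketch only; someEventTarget stands in for any DOM event target.
const els = Cc["@mozilla.org/eventlistenerservice;1"]
              .getService(Ci.nsIEventListenerService);
// Passing true asks for the chain a composed event would take, i.e. one
// that crosses shadow DOM boundaries on its way up to the window.
const targets = els.getEventTargetChainFor(someEventTarget, true);
for (const target of targets) {
  // e.g. inspect each target with els.getListenerInfoFor(target)
}
```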


@ -806,50 +806,7 @@ nsIContent::PreHandleEvent(EventChainPreVisitor& aVisitor)
ShadowRoot* thisShadowRoot = ShadowRoot::FromNode(this);
if (thisShadowRoot) {
// The following events must always be stopped at the root node of the node tree:
// abort
// error
// select
// change
// load
// reset
// resize
// scroll
// selectstart
bool stopEvent = false;
switch (aVisitor.mEvent->mMessage) {
case eImageAbort:
case eLoadError:
case eFormSelect:
case eFormChange:
case eLoad:
case eFormReset:
case eResize:
case eScroll:
case eSelectStart:
stopEvent = true;
break;
case eUnidentifiedEvent:
if (aVisitor.mDOMEvent) {
nsAutoString eventType;
aVisitor.mDOMEvent->GetType(eventType);
if (eventType.EqualsLiteral("abort") ||
eventType.EqualsLiteral("error") ||
eventType.EqualsLiteral("select") ||
eventType.EqualsLiteral("change") ||
eventType.EqualsLiteral("load") ||
eventType.EqualsLiteral("reset") ||
eventType.EqualsLiteral("resize") ||
eventType.EqualsLiteral("scroll")) {
stopEvent = true;
}
}
break;
default:
break;
}
if (stopEvent) {
if (!aVisitor.mEvent->mFlags.mComposed) {
// If we do stop propagation, we still want to propagate
// the event to chrome (nsPIDOMWindow::GetParentTarget()).
// The load event is special in that we don't ever propagate it
@ -907,7 +864,11 @@ nsIContent::PreHandleEvent(EventChainPreVisitor& aVisitor)
}
}
if (parent) {
if (!aVisitor.mEvent->mFlags.mComposedInNativeAnonymousContent &&
IsRootOfNativeAnonymousSubtree() && OwnerDoc() &&
OwnerDoc()->GetWindow()) {
aVisitor.mParentTarget = OwnerDoc()->GetWindow()->GetParentTarget();
} else if (parent) {
aVisitor.mParentTarget = parent;
} else {
aVisitor.mParentTarget = GetComposedDoc();
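
In effect, the hardcoded per-event-name list above is replaced by the event's own composed flag: a non-composed event stops at the shadow root, while a composed one keeps propagating to the host and beyond. A behavioral sketch in page script (assumes the Shadow DOM v0 `createShadowRoot` API this tree ships behind a pref):

```js
// Behavior sketch, not part of the patch.
const host = document.createElement("div");
document.body.appendChild(host);
const shadow = host.createShadowRoot();
const inner = shadow.appendChild(document.createElement("span"));

host.addEventListener("ping", () => console.log("escaped the shadow tree"));

// composed defaults to false: propagation stops at the shadow root.
inner.dispatchEvent(new CustomEvent("ping", { bubbles: true }));
// composed: true crosses the shadow boundary and reaches the host.
inner.dispatchEvent(new CustomEvent("ping", { bubbles: true, composed: true }));
```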


@ -16707,6 +16707,7 @@ class CGEventMethod(CGNativeMember):
e->InitEvent(${eventType}, ${eventInit}.mBubbles, ${eventInit}.mCancelable);
$*{members}
e->SetTrusted(trusted);
e->SetComposed(${eventInit}.mComposed);
$*{holdJS}
return e.forget();
""",


@ -107,6 +107,7 @@ BluetoothLeDeviceEvent::Constructor(
}
e->SetTrusted(trusted);
e->SetComposed(aEventInitDict.mComposed);
return e.forget();
}


@ -2271,8 +2271,10 @@ CanvasRenderingContext2D::SetMozCurrentTransform(JSContext* aCx,
void
CanvasRenderingContext2D::GetMozCurrentTransform(JSContext* aCx,
JS::MutableHandle<JSObject*> aResult,
ErrorResult& aError) const
ErrorResult& aError)
{
EnsureTarget();
MatrixToJSObject(aCx, mTarget ? mTarget->GetTransform() : Matrix(),
aResult, aError);
}
@ -2300,8 +2302,10 @@ CanvasRenderingContext2D::SetMozCurrentTransformInverse(JSContext* aCx,
void
CanvasRenderingContext2D::GetMozCurrentTransformInverse(JSContext* aCx,
JS::MutableHandle<JSObject*> aResult,
ErrorResult& aError) const
ErrorResult& aError)
{
EnsureTarget();
if (!mTarget) {
MatrixToJSObject(aCx, Matrix(), aResult, aError);
return;


@ -353,13 +353,13 @@ public:
void GetMozCurrentTransform(JSContext* aCx,
JS::MutableHandle<JSObject*> aResult,
mozilla::ErrorResult& aError) const;
mozilla::ErrorResult& aError);
void SetMozCurrentTransform(JSContext* aCx,
JS::Handle<JSObject*> aCurrentTransform,
mozilla::ErrorResult& aError);
void GetMozCurrentTransformInverse(JSContext* aCx,
JS::MutableHandle<JSObject*> aResult,
mozilla::ErrorResult& aError) const;
mozilla::ErrorResult& aError);
void SetMozCurrentTransformInverse(JSContext* aCx,
JS::Handle<JSObject*> aCurrentTransform,
mozilla::ErrorResult& aError);


@ -52,6 +52,7 @@ AnimationEvent::Constructor(const GlobalObject& aGlobal,
internalEvent->mPseudoElement = aParam.mPseudoElement;
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -84,6 +84,7 @@ ClipboardEvent::Constructor(const GlobalObject& aGlobal,
e->InitClipboardEvent(aType, aParam.mBubbles, aParam.mCancelable,
clipboardData);
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -60,6 +60,7 @@ CustomEvent::Constructor(const GlobalObject& aGlobal,
JS::Rooted<JS::Value> detail(aGlobal.Context(), aParam.mDetail);
e->InitCustomEvent(aGlobal.Context(), aType, aParam.mBubbles, aParam.mCancelable, detail, aRv);
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -99,7 +99,7 @@ DeviceMotionEvent::Constructor(const GlobalObject& aGlobal,
e->mInterval = aEventInitDict.mInterval;
e->SetTrusted(trusted);
e->SetComposed(aEventInitDict.mComposed);
return e.forget();
}


@ -111,6 +111,7 @@ DragEvent::Constructor(const GlobalObject& aGlobal,
aParam.mDataTransfer);
e->InitializeExtraMouseEventDictionaryMembers(aParam);
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -415,6 +415,7 @@ Event::Constructor(const GlobalObject& aGlobal,
bool trusted = e->Init(t);
e->InitEvent(aType, aParam.mBubbles, aParam.mCancelable);
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}
@ -567,11 +568,14 @@ Event::SetEventType(const nsAString& aEventTypeArg)
mEvent->mSpecifiedEventType =
nsContentUtils::GetEventMessageAndAtom(aEventTypeArg, mEvent->mClass,
&(mEvent->mMessage));
mEvent->SetDefaultComposed();
} else {
mEvent->mSpecifiedEventType = nullptr;
mEvent->mMessage = eUnidentifiedEvent;
mEvent->mSpecifiedEventTypeString = aEventTypeArg;
mEvent->SetComposed(aEventTypeArg);
}
mEvent->SetDefaultComposedInNativeAnonymousContent();
}
void
@ -1167,6 +1171,7 @@ Event::Serialize(IPC::Message* aMsg, bool aSerializeInterfaceType)
IPC::WriteParam(aMsg, Bubbles());
IPC::WriteParam(aMsg, Cancelable());
IPC::WriteParam(aMsg, IsTrusted());
IPC::WriteParam(aMsg, Composed());
// No timestamp serialization for now!
}
@ -1186,8 +1191,12 @@ Event::Deserialize(const IPC::Message* aMsg, PickleIterator* aIter)
bool trusted = false;
NS_ENSURE_TRUE(IPC::ReadParam(aMsg, aIter, &trusted), false);
bool composed = false;
NS_ENSURE_TRUE(IPC::ReadParam(aMsg, aIter, &composed), false);
InitEvent(type, bubbles, cancelable);
SetTrusted(trusted);
SetComposed(composed);
return true;
}


@ -170,6 +170,11 @@ public:
return mEvent->mFlags.mCancelable;
}
bool Composed() const
{
return mEvent->mFlags.mComposed;
}
// xpidl implementation
// void PreventDefault();
@ -274,6 +279,11 @@ protected:
*/
bool IsChrome(JSContext* aCx) const;
void SetComposed(bool aComposed)
{
mEvent->SetComposed(aComposed);
}
mozilla::WidgetEvent* mEvent;
RefPtr<nsPresContext> mPresContext;
nsCOMPtr<EventTarget> mExplicitOriginalTarget;


@ -220,6 +220,7 @@ EventListenerService::GetListenerInfoFor(nsIDOMEventTarget* aEventTarget,
NS_IMETHODIMP
EventListenerService::GetEventTargetChainFor(nsIDOMEventTarget* aEventTarget,
bool aComposed,
uint32_t* aCount,
nsIDOMEventTarget*** aOutArray)
{
@ -227,6 +228,7 @@ EventListenerService::GetEventTargetChainFor(nsIDOMEventTarget* aEventTarget,
*aOutArray = nullptr;
NS_ENSURE_ARG(aEventTarget);
WidgetEvent event(true, eVoidEvent);
event.SetComposed(aComposed);
nsTArray<EventTarget*> targets;
nsresult rv = EventDispatcher::Dispatch(aEventTarget, nullptr, &event,
nullptr, nullptr, nullptr, &targets);


@ -65,6 +65,7 @@ FocusEvent::Constructor(const GlobalObject& aGlobal,
e->InitFocusEvent(aType, aParam.mBubbles, aParam.mCancelable, aParam.mView,
aParam.mDetail, aParam.mRelatedTarget);
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -56,6 +56,7 @@ InputEvent::Constructor(const GlobalObject& aGlobal,
InternalEditorInputEvent* internalEvent = e->mEvent->AsEditorInputEvent();
internalEvent->mIsComposing = aParam.mIsComposing;
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -187,7 +187,7 @@ MouseEvent::Constructor(const GlobalObject& aGlobal,
aParam.mMetaKey, aParam.mButton, aParam.mRelatedTarget);
e->InitializeExtraMouseEventDictionaryMembers(aParam);
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -100,6 +100,7 @@ PointerEvent::Constructor(EventTarget* aOwner,
widgetEvent->buttons = aParam.mButtons;
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -31,6 +31,7 @@ SpeechRecognitionError::Constructor(const GlobalObject& aGlobal,
bool trusted = e->Init(t);
e->InitSpeechRecognitionError(aType, aParam.mBubbles, aParam.mCancelable, aParam.mError, aParam.mMessage);
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -65,7 +65,7 @@ StorageEvent::Constructor(EventTarget* aOwner,
e->mUrl = aEventInitDict.mUrl;
e->mStorageArea = aEventInitDict.mStorageArea;
e->SetTrusted(trusted);
e->SetComposed(aEventInitDict.mComposed);
return e.forget();
}


@ -245,6 +245,7 @@ TouchEvent::Constructor(const GlobalObject& aGlobal,
aParam.mShiftKey, aParam.mMetaKey, touches, targetTouches,
changedTouches);
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -52,6 +52,7 @@ TransitionEvent::Constructor(const GlobalObject& aGlobal,
internalEvent->mPseudoElement = aParam.mPseudoElement;
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -90,6 +90,7 @@ UIEvent::Constructor(const GlobalObject& aGlobal,
e->InitUIEvent(aType, aParam.mBubbles, aParam.mCancelable, aParam.mView,
aParam.mDetail);
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -125,6 +125,7 @@ WheelEvent::Constructor(const GlobalObject& aGlobal,
aParam.mDeltaY, aParam.mDeltaZ, aParam.mDeltaMode);
e->InitializeExtraMouseEventDictionaryMembers(aParam);
e->SetTrusted(trusted);
e->SetComposed(aParam.mComposed);
return e.forget();
}


@ -78,6 +78,7 @@ interface nsIEventListenerService : nsISupports
* event target chain than what this method returns.
*/
void getEventTargetChainFor(in nsIDOMEventTarget aEventTarget,
in boolean composed,
[optional] out unsigned long aCount,
[retval, array, size_is(aCount)] out
nsIDOMEventTarget aOutArray);


@ -35,7 +35,6 @@ support-files =
support-files = pointerevent_pointercancel_touch-manual.html
[test_pointerevent_pointerdown-manual.html]
support-files = pointerevent_pointerdown-manual.html
disabled = should be investigated
[test_pointerevent_pointerenter_does_not_bubble-manual.html]
support-files = pointerevent_pointerenter_does_not_bubble-manual.html
[test_pointerevent_pointerenter_nohover-manual.html]
@ -91,7 +90,6 @@ support-files =
support-files = pointerevent_pointertype_touch-manual.html
[test_pointerevent_pointerup-manual.html]
support-files = pointerevent_pointerup-manual.html
disabled = should be investigated
[test_pointerevent_pointerup_isprimary_same_as_pointerdown-manual.html]
support-files = pointerevent_pointerup_isprimary_same_as_pointerdown-manual.html
[test_pointerevent_pointerup_pointertype-manual.html]


@ -94,7 +94,7 @@ function runTests() {
l2 = document.getElementById("testlevel2");
l3 = document.getElementById("testlevel3");
var textnode = l3.firstChild;
var chain = els.getEventTargetChainFor(textnode, {});
var chain = els.getEventTargetChainFor(textnode, true, {});
ok(chain.length > 3, "Too short event target chain.");
ok(SpecialPowers.compare(chain[0], textnode), "Wrong chain item (1)");
ok(SpecialPowers.compare(chain[1], l3), "Wrong chain item (2)");
@ -167,14 +167,14 @@ function testAllListener() {
els.addListenerForAllEvents(root, allListener, false, true, true);
els.addListenerForAllEvents(root, allListenerTrustedOnly, false, false, true);
l3.dispatchEvent(new Event("testevent", { bubbles: true }));
dispatchTrusted(l3, { bubbles: true });
l3.dispatchEvent(new Event("testevent", { bubbles: true, composed: true }));
dispatchTrusted(l3, { bubbles: true, composed: true });
els.removeListenerForAllEvents(root, allListener, false);
els.removeListenerForAllEvents(root, allListener, false, true);
els.removeListenerForAllEvents(root, allListenerTrustedOnly, false, true);
// make sure removeListenerForAllEvents works.
l3.dispatchEvent(new Event("testevent", { bubbles: true }));
dispatchTrusted(l3, { bubbles: true });
l3.dispatchEvent(new Event("testevent", { bubbles: true, composed : true }));
dispatchTrusted(l3, { bubbles: true, composed: true });
// Test the order of event listeners.
var clickListenerCalled = false;


@ -100,6 +100,7 @@ MediaKeyMessageEvent::Constructor(const GlobalObject& aGlobal,
}
e->mMessageType = aEventInitDict.mMessageType;
e->SetTrusted(trusted);
e->SetComposed(aEventInitDict.mComposed);
return e.forget();
}


@ -43,6 +43,7 @@ public:
bool trusted = e->Init(aOwner);
e->InitEvent(aType, aOptions.mBubbles, aOptions.mCancelable);
e->SetTrusted(trusted);
e->SetComposed(aOptions.mComposed);
e->mNotification = aOptions.mNotification;
e->SetWantsPopupControlCheck(e->IsTrusted());
return e.forget();


@ -27,7 +27,7 @@ function isEventChain(actual, expected, msg) {
// Check to make sure the event chain matches what we get back from nsIEventListenerService.getEventTargetChainFor
if (0 < actual.length) {
var chain = els.getEventTargetChainFor(actual[0]); // Events should be dispatched on actual[0].
var chain = els.getEventTargetChainFor(actual[0], true); // Events should be dispatched on actual[0].
for (var i = 0; i < expected.length; i++) {
ok(SpecialPowers.compare(chain[i], expected[i]), msg + " at " + i + " for nsIEventListenerService");
}
@ -64,7 +64,7 @@ shadowOne.appendChild(elemThree);
elemOne.appendChild(elemTwo);
var eventChain = [];
var customEvent = new CustomEvent("custom", { "bubbles" : true });
var customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemTwo.dispatchEvent(customEvent);
isEventChain(eventChain, [elemTwo, elemFour, elemThree, shadowOne, elemOne], "Event path for test 1 for event dispatched on elemTwo.");
@ -111,7 +111,7 @@ shadowOne.appendChild(elemSix);
elemSix.appendChild(elemThree);
eventChain = [];
customEvent = new CustomEvent("custom", { "bubbles" : true });
customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemOne.dispatchEvent(customEvent);
is(elemOne.getDestinationInsertionPoints().length, 2, "yes");
isEventChain(eventChain, [elemOne, elemThree, elemSix, shadowOne, elemTwo, elemFour, shadowTwo, elemFive], "Event path for test 2 for event dispatched on elemOne.");
@ -159,7 +159,7 @@ elemFour.appendChild(elemFive);
shadowTwo.appendChild(elemSix);
eventChain = [];
customEvent = new CustomEvent("custom", { "bubbles" : true });
customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemTwo.dispatchEvent(customEvent);
isEventChain(eventChain, [elemTwo, elemFive, elemFour, elemSix, shadowTwo, elemThree, shadowOne, elemOne], "Event path for test 3 for event dispatched on elemTwo.");
@ -223,12 +223,12 @@ shadowThree.appendChild(elemThree);
shadowOne.appendChild(elemFive);
eventChain = [];
customEvent = new CustomEvent("custom", { "bubbles" : true });
customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemOne.dispatchEvent(customEvent);
isEventChain(eventChain, [elemOne, elemFive, shadowOne, elemThree, shadowThree, elemTwo, elemFour, elemSix, shadowTwo, elemSeven], "Event path for test 4 for event dispatched on elemOne.");
eventChain = [];
customEvent = new CustomEvent("custom", { "bubbles" : true });
customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemEight.dispatchEvent(customEvent);
isEventChain(eventChain, [elemEight, elemFive, shadowOne, elemSix, shadowTwo, elemSeven], "Event path for test 4 for event dispatched on elemEight.");
@ -281,12 +281,12 @@ elemFour.appendChild(elemFive);
shadowTwo.appendChild(elemSeven);
eventChain = [];
customEvent = new CustomEvent("custom", { "bubbles" : true });
customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemTwo.dispatchEvent(customEvent);
isEventChain(eventChain, [elemTwo, elemSeven, shadowTwo, elemSix, elemFour, shadowOne, elemOne], "Event path for test 5 for event dispatched on elemTwo.");
eventChain = [];
customEvent = new CustomEvent("custom", { "bubbles" : true });
customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemThree.dispatchEvent(customEvent);
isEventChain(eventChain, [elemThree, elemSeven, shadowTwo, elemFive, elemFour, shadowOne, elemOne], "Event path for test 5 for event dispatched on elemThree.");
@ -339,12 +339,12 @@ shadowTwo.appendChild(elemSix);
shadowTwo.appendChild(elemSeven);
eventChain = [];
customEvent = new CustomEvent("custom", { "bubbles" : true });
customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemTwo.dispatchEvent(customEvent);
isEventChain(eventChain, [elemTwo, elemSix, shadowTwo, elemFive, elemFour, shadowOne, elemOne], "Event path for test 6 for event dispatched on elemTwo.");
eventChain = [];
customEvent = new CustomEvent("custom", { "bubbles" : true });
customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemThree.dispatchEvent(customEvent);
isEventChain(eventChain, [elemThree, elemSeven, shadowTwo, elemFive, elemFour, shadowOne, elemOne], "Event path for test 6 for event dispatched on elemThree.");
@ -403,12 +403,12 @@ elemEight.appendChild(elemSix);
elemEight.appendChild(elemSeven);
eventChain = [];
customEvent = new CustomEvent("custom", { "bubbles" : true });
customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemTwo.dispatchEvent(customEvent);
isEventChain(eventChain, [elemTwo, elemSix, elemEight, shadowTwo, elemFive, elemFour, shadowOne, elemOne], "Event path for test 7 for event dispatched on elemTwo.");
eventChain = [];
customEvent = new CustomEvent("custom", { "bubbles" : true });
customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemThree.dispatchEvent(customEvent);
isEventChain(eventChain, [elemThree, elemSeven, elemEight, shadowTwo, elemFive, elemFour, shadowOne, elemOne], "Event path for test 7 for event dispatched on elemThree.");
@ -449,7 +449,7 @@ elemFour.appendChild(elemTwo);
shadowTwo.appendChild(elemThree);
eventChain = [];
customEvent = new CustomEvent("custom", { "bubbles" : true });
customEvent = new CustomEvent("custom", { "bubbles" : true, "composed" : true });
elemThree.dispatchEvent(customEvent);
isEventChain(eventChain, [elemThree, shadowTwo, elemTwo, elemFour, shadowOne, elemOne], "Event path for test 8 for event dispatched on elemThree.");


@ -26,7 +26,7 @@ function isEventChain(actual, expected, msg) {
}
if (0 < actual.length) {
var chain = els.getEventTargetChainFor(actual[0]); // Events should be dispatched on actual[0].
var chain = els.getEventTargetChainFor(actual[0], false); // Events should be dispatched on actual[0].
ok(expected.length < chain.length, "There should be additional chrome event targets.");
}
}


@ -41,6 +41,8 @@ interface Event {
readonly attribute boolean defaultPreventedByChrome;
[ChromeOnly, Pure]
readonly attribute boolean defaultPreventedByContent;
[Pure]
readonly attribute boolean composed;
[Unforgeable, Pure]
readonly attribute boolean isTrusted;
@ -69,4 +71,5 @@ partial interface Event {
dictionary EventInit {
boolean bubbles = false;
boolean cancelable = false;
boolean composed = false;
};
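
Taken together, the new dictionary member and read-only attribute look like this from script (a minimal sketch):

```js
const ev = new Event("my-event", { bubbles: true, composed: true });
console.log(ev.composed);                  // true
console.log(new Event("plain").composed);  // false (the dictionary default)
```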


@ -149,6 +149,7 @@ FetchEvent::Constructor(const GlobalObject& aGlobal,
bool trusted = e->Init(owner);
e->InitEvent(aType, aOptions.mBubbles, aOptions.mCancelable);
e->SetTrusted(trusted);
e->SetComposed(aOptions.mComposed);
e->mRequest = aOptions.mRequest;
e->mClientId = aOptions.mClientId;
e->mIsReload = aOptions.mIsReload;
@ -1141,6 +1142,7 @@ PushEvent::Constructor(mozilla::dom::EventTarget* aOwner,
bool trusted = e->Init(aOwner);
e->InitEvent(aType, aOptions.mBubbles, aOptions.mCancelable);
e->SetTrusted(trusted);
e->SetComposed(aOptions.mComposed);
if(aOptions.mData.WasPassed()){
nsTArray<uint8_t> bytes;
nsresult rv = ExtractBytesFromData(aOptions.mData.Value(), bytes);


@ -77,6 +77,7 @@ public:
bool trusted = e->Init(aOwner);
e->InitEvent(aType, aOptions.mBubbles, aOptions.mCancelable);
e->SetTrusted(trusted);
e->SetComposed(aOptions.mComposed);
return e.forget();
}


@ -4,7 +4,8 @@ This directory contains the harfbuzz source from the 'master' branch of
https://github.com/behdad/harfbuzz.
Current version: 1.3.0 + recent fixes from trunk,
up to 547ddb0721365dca985aef5b759d08718f7c5f82.
up to 547ddb0721365dca985aef5b759d08718f7c5f82
+ the relevant part of https://github.com/behdad/harfbuzz/pull/334.
UPDATING:


@ -382,6 +382,7 @@ hb_glib_get_unicode_funcs (void)
return const_cast<hb_unicode_funcs_t *> (&_hb_glib_unicode_funcs);
}
#if GLIB_CHECK_VERSION(2,31,10)
/**
* hb_glib_blob_create:
*
@ -398,3 +399,4 @@ hb_glib_blob_create (GBytes *gbytes)
g_bytes_ref (gbytes),
(hb_destroy_func_t) g_bytes_unref);
}
#endif


@ -46,9 +46,10 @@ hb_glib_script_from_script (hb_script_t script);
HB_EXTERN hb_unicode_funcs_t *
hb_glib_get_unicode_funcs (void);
#if GLIB_CHECK_VERSION(2,31,10)
HB_EXTERN hb_blob_t *
hb_glib_blob_create (GBytes *gbytes);
#endif
HB_END_DECLS


@ -59,6 +59,15 @@ UNIFIED_SOURCES += [
'hb-warning.cc',
]
if 'gtk' in CONFIG['MOZ_WIDGET_TOOLKIT']:
EXPORTS.harfbuzz += [
'hb-glib.h',
]
UNIFIED_SOURCES += [
'hb-glib.cc',
]
CXXFLAGS += CONFIG['GLIB_CFLAGS']
# We allow warnings for third-party code that can be updated from upstream.
ALLOW_COMPILER_WARNINGS = True


@ -654,7 +654,9 @@ RotatedContentBuffer::BeginPaint(PaintedLayer* aLayer,
&destDTBuffer, &destDTBufferOnWhite);
if (!destDTBuffer ||
(!destDTBufferOnWhite && (bufferFlags & BUFFER_COMPONENT_ALPHA))) {
gfxCriticalError(CriticalLog::DefaultOptions(Factory::ReasonableSurfaceSize(IntSize(destBufferRect.width, destBufferRect.height)))) << "Failed 1 buffer db=" << hexa(destDTBuffer.get()) << " dw=" << hexa(destDTBufferOnWhite.get()) << " for " << destBufferRect.x << ", " << destBufferRect.y << ", " << destBufferRect.width << ", " << destBufferRect.height;
if (Factory::ReasonableSurfaceSize(IntSize(destBufferRect.width, destBufferRect.height))) {
gfxCriticalNote << "Failed 1 buffer db=" << hexa(destDTBuffer.get()) << " dw=" << hexa(destDTBufferOnWhite.get()) << " for " << destBufferRect.x << ", " << destBufferRect.y << ", " << destBufferRect.width << ", " << destBufferRect.height;
}
return result;
}
}
@ -676,7 +678,9 @@ RotatedContentBuffer::BeginPaint(PaintedLayer* aLayer,
&destDTBuffer, &destDTBufferOnWhite);
if (!destDTBuffer ||
(!destDTBufferOnWhite && (bufferFlags & BUFFER_COMPONENT_ALPHA))) {
gfxCriticalError(CriticalLog::DefaultOptions(Factory::ReasonableSurfaceSize(IntSize(destBufferRect.width, destBufferRect.height)))) << "Failed 2 buffer db=" << hexa(destDTBuffer.get()) << " dw=" << hexa(destDTBufferOnWhite.get()) << " for " << destBufferRect.x << ", " << destBufferRect.y << ", " << destBufferRect.width << ", " << destBufferRect.height;
if (Factory::ReasonableSurfaceSize(IntSize(destBufferRect.width, destBufferRect.height))) {
gfxCriticalNote << "Failed 2 buffer db=" << hexa(destDTBuffer.get()) << " dw=" << hexa(destDTBufferOnWhite.get()) << " for " << destBufferRect.x << ", " << destBufferRect.y << ", " << destBufferRect.width << ", " << destBufferRect.height;
}
return result;
}
}


@ -0,0 +1,47 @@
<!DOCTYPE HTML>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width; initial-scale=1.0">
<title>Test pointer events are dispatched once for touch tap</title>
<script type="application/javascript" src="/tests/SimpleTest/paint_listener.js"></script>
<script type="application/javascript" src="apz_test_utils.js"></script>
<script type="application/javascript" src="apz_test_native_event_utils.js"></script>
<script type="application/javascript">
/** Test for Bug 1299195 **/
function runTests() {
let target0 = document.getElementById("target0");
let mouseup_count = 0;
let mousedown_count = 0;
let pointerup_count = 0;
let pointerdown_count = 0;
target0.addEventListener("mouseup", () => {
++mouseup_count;
if (mouseup_count == 2) {
is(mousedown_count, 2, "Double tap with touch should fire 2 mousedown events");
is(mouseup_count, 2, "Double tap with touch should fire 2 mouseup events");
is(pointerdown_count, 2, "Double tap with touch should fire 2 pointerdown events");
is(pointerup_count, 2, "Double tap with touch should fire 2 pointerup events");
subtestDone();
}
});
target0.addEventListener("mousedown", () => {
++mousedown_count;
});
target0.addEventListener("pointerup", () => {
++pointerup_count;
});
target0.addEventListener("pointerdown", () => {
++pointerdown_count;
});
synthesizeNativeTap(document.getElementById('target0'), 100, 100);
synthesizeNativeTap(document.getElementById('target0'), 100, 100);
}
waitUntilApzStable().then(runTests);
</script>
</head>
<body>
<div id="target0" style="width: 200px; height: 200px; background: green"></div>
</body>
</html>


@ -29,6 +29,7 @@ support-files =
helper_touch_action_complex.html
helper_tap_fullzoom.html
helper_bug1162771.html
helper_bug1299195.html
tags = apz
[test_bug982141.html]
[test_bug1151663.html]
@ -63,7 +64,7 @@ skip-if = (toolkit == 'android') || (toolkit == 'cocoa') # wheel events not supp
skip-if = (os == 'android') || (os == 'b2g') # uses wheel events which are not supported on mobile
[test_group_zoom.html]
skip-if = (toolkit != 'android') # only android supports zoom
[test_bug1285070.html]
[test_group_pointerevents.html]
# Windows touch injection doesn't work in automation, but this test can be run locally on a windows touch device.
# On OS X we don't support touch events at all.
skip-if = (toolkit == 'windows') || (toolkit == 'cocoa')


@ -12,7 +12,8 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1285070
<script type="application/javascript">
var subtests = [
{'file': 'helper_bug1285070.html', 'prefs': [["dom.w3c_pointer_events.enabled", true]]}
{'file': 'helper_bug1285070.html', 'prefs': [["dom.w3c_pointer_events.enabled", true]]},
{'file': 'helper_bug1299195.html', 'prefs': [["dom.w3c_pointer_events.enabled", true]]}
];
if (isApzEnabled()) {


@ -189,6 +189,12 @@ ClientLayerManager::CreateReadbackLayer()
void
ClientLayerManager::BeginTransactionWithTarget(gfxContext* aTarget)
{
MOZ_ASSERT(mForwarder, "ClientLayerManager::BeginTransaction without forwarder");
if (!mForwarder->IPCOpen()) {
gfxCriticalNote << "ClientLayerManager::BeginTransaction with IPC channel down. GPU process may have died.";
return;
}
mInTransaction = true;
mTransactionStart = TimeStamp::Now();
@ -273,12 +279,6 @@ ClientLayerManager::EndTransactionInternal(DrawPaintedLayerCallback aCallback,
js::ProfileEntry::Category::GRAPHICS);
if (!mForwarder || !mForwarder->IPCOpen()) {
gfxCriticalError() << "LayerManager::EndTransaction while IPC is dead.";
// Pointless to try to render since the content cannot be sent to the
// compositor. We should not get here in the first place but I suspect
// This is happening during shutdown, tab-switch or some other scenario
// where we already started tearing the resources down but something
// triggered painting anyway.
return false;
}


@ -402,8 +402,9 @@ CreateShadowFor(ClientLayer* aLayer,
CreatedMethod aMethod)
{
PLayerChild* shadow = aMgr->AsShadowForwarder()->ConstructShadowFor(aLayer);
// XXX error handling
MOZ_ASSERT(shadow, "failed to create shadow");
if (!shadow) {
return;
}
aLayer->SetShadow(shadow);
(aMgr->AsShadowForwarder()->*aMethod)(aLayer);


@ -1071,7 +1071,6 @@ TextureClient::CreateForDrawing(TextureForwarder* aAllocator,
gfx::BackendType moz2DBackend = BackendTypeForBackendSelector(aLayersBackend, aSelector);
// also test the validity of aAllocator
MOZ_ASSERT(aAllocator && aAllocator->IPCOpen());
if (!aAllocator || !aAllocator->IPCOpen()) {
return nullptr;
}
@ -1168,7 +1167,6 @@ TextureClient::CreateFromSurface(KnowsCompositor* aAllocator,
TextureAllocationFlags aAllocFlags)
{
// also test the validity of aAllocator
MOZ_ASSERT(aAllocator && aAllocator->GetTextureForwarder()->IPCOpen());
if (!aAllocator || !aAllocator->GetTextureForwarder()->IPCOpen()) {
return nullptr;
}


@ -692,7 +692,9 @@ ShadowLayerForwarder::EndTransaction(InfallibleTArray<EditReply>* aReplies,
AutoTArray<Edit, 10> cset;
size_t nCsets = mTxn->mCset.size() + mTxn->mPaints.size();
MOZ_ASSERT(nCsets > 0 || mTxn->RotationChanged(), "should have bailed by now");
if (nCsets == 0 && !mTxn->RotationChanged()) {
return true;
}
cset.SetCapacity(nCsets);
if (!mTxn->mCset.empty()) {


@ -16,6 +16,7 @@
#include "gfxFT2FontBase.h"
#include "gfxFT2Utils.h"
#include "harfbuzz/hb.h"
#include "harfbuzz/hb-glib.h"
#include "harfbuzz/hb-ot.h"
#include "nsUnicodeProperties.h"
#include "nsUnicodeScriptCodes.h"
@ -1623,7 +1624,7 @@ gfxPangoFontGroup::FindFontForChar(uint32_t aCh, uint32_t aPrevCh,
// https://developer.gnome.org/pango/stable/pango-Scripts-and-Languages.html#PangoScript
const hb_tag_t scriptTag = GetScriptTagForCode(aRunScript);
const PangoScript script =
(const PangoScript)g_unicode_script_from_iso15924(scriptTag);
(const PangoScript)hb_glib_script_from_script(hb_script_from_iso15924_tag(scriptTag));
// Might be nice to call pango_language_includes_script only once for the
// run rather than for each character.


@ -32,6 +32,7 @@ EXPORTS += [
'nsILanguageAtomService.h',
'nsIPlatformCharset.h',
'nsPosixLocale.h',
'nsUConvPropertySearch.h',
'nsWin32Locale.h',
]


@ -1047,7 +1047,7 @@ wasm::GenerateInterruptStub(MacroAssembler& masm)
// Save the stack pointer in a non-volatile register.
masm.mov(sp,r6);
// Align the stack.
masm.ma_and(Imm32(~7), sp, sp);
masm.as_bic(sp, sp, Imm8(7));
// Store resumePC into the return PC stack slot.
masm.loadWasmActivationFromSymbolicAddress(IntArgReg0);


@ -891,11 +891,6 @@ BytecodeEmitter::EmitterScope::enterLexical(BytecodeEmitter* bce, ScopeKind kind
updateFrameFixedSlots(bce, bi);
// Put frame slots in TDZ. Environment slots are poisoned during
// environment creation.
if (!deadZoneFrameSlotRange(bce, firstFrameSlot, frameSlotEnd()))
return false;
// Create and intern the VM scope.
auto createScope = [kind, bindings, firstFrameSlot](ExclusiveContext* cx,
HandleScope enclosing)
@ -915,6 +910,14 @@ BytecodeEmitter::EmitterScope::enterLexical(BytecodeEmitter* bce, ScopeKind kind
if (!appendScopeNote(bce))
return false;
// Put frame slots in TDZ. Environment slots are poisoned during
// environment creation.
//
// This must be done after appendScopeNote to be considered in the extent
// of the scope.
if (!deadZoneFrameSlotRange(bce, firstFrameSlot, frameSlotEnd()))
return false;
return checkEnvironmentChainLength(bce);
}
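
For context, the observable behavior whose bookkeeping moved (the fix records the dead zone within the scope note's extent; it does not change semantics). A plain-JS sketch of frame slots being in the TDZ:

```js
{
  try {
    x;  // |x| is in the temporal dead zone until its |let| is reached
  } catch (e) {
    console.log(e instanceof ReferenceError);  // true
  }
  let x = 1;
}
```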


@ -2841,7 +2841,8 @@ Parser<ParseHandler>::checkFunctionDefinition(HandleAtom funAtom, Node pn, Funct
template <>
bool
Parser<FullParseHandler>::skipLazyInnerFunction(ParseNode* pn, bool tryAnnexB)
Parser<FullParseHandler>::skipLazyInnerFunction(ParseNode* pn, FunctionSyntaxKind kind,
bool tryAnnexB)
{
// When a lazily-parsed function is called, we only fully parse (and emit)
// that function, not any of its nested children. The initial syntax-only
@ -2867,12 +2868,21 @@ Parser<FullParseHandler>::skipLazyInnerFunction(ParseNode* pn, bool tryAnnexB)
// script source.
Rooted<LazyScript*> lazyOuter(context, handler.lazyOuterFunction());
uint32_t userbufBase = lazyOuter->begin() - lazyOuter->column();
return tokenStream.advance(fun->lazyScript()->end() - userbufBase);
if (!tokenStream.advance(fun->lazyScript()->end() - userbufBase))
return false;
if (kind == Statement && fun->isExprBody()) {
if (!MatchOrInsertSemicolonAfterExpression(tokenStream))
return false;
}
return true;
}
template <>
bool
Parser<SyntaxParseHandler>::skipLazyInnerFunction(Node pn, bool tryAnnexB)
Parser<SyntaxParseHandler>::skipLazyInnerFunction(Node pn, FunctionSyntaxKind kind,
bool tryAnnexB)
{
MOZ_CRASH("Cannot skip lazy inner functions when syntax parsing");
}
@ -2970,7 +2980,7 @@ Parser<ParseHandler>::functionDefinition(InHandling inHandling, YieldHandling yi
// functions, which are also lazy. Instead, their free variables and
// source extents are recorded and may be skipped.
if (handler.canSkipLazyInnerFunctions()) {
if (!skipLazyInnerFunction(pn, tryAnnexB))
if (!skipLazyInnerFunction(pn, kind, tryAnnexB))
return null();
return pn;
}


@ -1259,7 +1259,7 @@ class Parser final : private JS::AutoGCRooter, public StrictModeGetter
bool checkFunctionDefinition(HandleAtom funAtom, Node pn, FunctionSyntaxKind kind,
GeneratorKind generatorKind, bool* tryAnnexB);
bool skipLazyInnerFunction(Node pn, bool tryAnnexB);
bool skipLazyInnerFunction(Node pn, FunctionSyntaxKind kind, bool tryAnnexB);
bool innerFunction(Node pn, ParseContext* outerpc, HandleFunction fun,
InHandling inHandling, FunctionSyntaxKind kind,
GeneratorKind generatorKind, bool tryAnnexB,


@ -0,0 +1,6 @@
function f() {
if (0)
function g() x;
else;
}
f();


@ -2736,6 +2736,20 @@ MBinaryBitwiseInstruction::foldsTo(TempAllocator& alloc)
MDefinition*
MBinaryBitwiseInstruction::foldUnnecessaryBitop()
{
// Fold unsigned shift right operator when the second operand is zero and
// the only use is an unsigned modulo. Thus, the expression
// |(x >>> 0) % y| becomes |x % y|.
if (isUrsh() && hasOneDefUse() && getOperand(1)->isConstant()) {
MConstant* constant = getOperand(1)->toConstant();
if (constant->type() == MIRType::Int32 && constant->toInt32() == 0) {
for (MUseDefIterator use(this); use; use++) {
if (use.def()->isMod() && use.def()->toMod()->isUnsigned())
return getOperand(0);
break;
}
}
}
if (specialization_ != MIRType::Int32)
return this;
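
The pattern this folding targets, sketched in JS: the `>>> 0` only forces an unsigned view of the dividend, so once the modulo itself is known to be unsigned the shift carries no extra information and `(x >>> 0) % y` can be rewritten as `x % y`.

```js
// Common idiom this optimizes (hypothetical helper): the shift-by-zero
// coerces hash to uint32 before the remainder is taken.
function bucketIndex(hash, nBuckets) {
  return (hash >>> 0) % nBuckets;
}
```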


@ -7139,23 +7139,51 @@ class MDiv : public MBinaryArithInstruction
class MMod : public MBinaryArithInstruction
{
public:
enum class PossiblyUnsigned {
NotPossible,
LHSPossible,
RHSPossible,
BothPossible,
};
protected:
bool unsigned_;
bool canBeNegativeDividend_;
bool canBePowerOfTwoDivisor_;
bool canBeDivideByZero_;
bool trapOnError_;
PossiblyUnsigned possiblyUnsigned_;
MMod(MDefinition* left, MDefinition* right, MIRType type)
: MBinaryArithInstruction(left, right),
unsigned_(false),
canBeNegativeDividend_(true),
canBePowerOfTwoDivisor_(true),
canBeDivideByZero_(true),
trapOnError_(false)
trapOnError_(false),
possiblyUnsigned_(PossiblyUnsigned::NotPossible)
{
if (type != MIRType::Value)
specialization_ = type;
setResultType(type);
if (left->isUrsh() && left->getOperand(1)->isConstant()) {
MConstant* constant = left->getOperand(1)->toConstant();
if (constant->type() == MIRType::Int32 && constant->toInt32() == 0)
possiblyUnsigned_ = PossiblyUnsigned::LHSPossible;
}
if (right->isUrsh() && right->getOperand(1)->isConstant()) {
MConstant* constant = right->getOperand(1)->toConstant();
if (constant->type() == MIRType::Int32 && constant->toInt32() == 0) {
if (possiblyUnsigned_ == PossiblyUnsigned::NotPossible)
possiblyUnsigned_ = PossiblyUnsigned::RHSPossible;
else
possiblyUnsigned_ = PossiblyUnsigned::BothPossible;
}
}
}
public:
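
The three `PossiblyUnsigned` cases distinguished by the constructor correspond to these source shapes (sketch):

```js
let x = -7, m = 3;
let a = (x >>> 0) % m;          // LHSPossible:  only the dividend is coerced
let b = x % (m >>> 0);          // RHSPossible:  only the divisor is coerced
let c = (x >>> 0) % (m >>> 0);  // BothPossible: both operands are coerced
```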


@ -1561,13 +1561,40 @@ MMod::computeRange(TempAllocator& alloc)
// If either operand is a NaN, the result is NaN. This also conservatively
// handles Infinity cases.
if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds())
if ((possiblyUnsigned_ == PossiblyUnsigned::NotPossible &&
!lhs.hasInt32Bounds()) || !rhs.hasInt32Bounds())
{
return;
}
// If RHS can be zero, the result can be NaN.
if (rhs.lower() <= 0 && rhs.upper() >= 0)
return;
// (x >>> 0) % y is an unsigned modulo operation but the lhs' range is not
// always >= 0. The lhs range assumes a signed integer 32 bit while the
// value is unsigned 32 bit. That breaks the assumption that range >= 0.
if (specialization() == MIRType::Int32) {
switch (possiblyUnsigned_) {
case PossiblyUnsigned::NotPossible:
break;
case PossiblyUnsigned::LHSPossible:
if (rhs.lower() > 0 && !rhs.canHaveFractionalPart())
unsigned_ = true;
break;
case PossiblyUnsigned::RHSPossible:
if (lhs.lower() >= 0 && !lhs.canHaveFractionalPart())
unsigned_ = true;
break;
case PossiblyUnsigned::BothPossible:
if (lhs.lower() >= 0 && !lhs.canHaveFractionalPart())
unsigned_ = true;
else if (rhs.lower() > 0 && !rhs.canHaveFractionalPart())
unsigned_ = true;
break;
}
}
// If both operands are non-negative integers, we can optimize this to an
// unsigned mod.
if (specialization() == MIRType::Int32 && lhs.lower() >= 0 && rhs.lower() > 0 &&


@ -1117,7 +1117,7 @@ Imm8::EncodeTwoImms(uint32_t imm)
}
ALUOp
jit::ALUNeg(ALUOp op, Register dest, Imm32* imm, Register* negDest)
jit::ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm, Register* negDest)
{
// Find an alternate ALUOp to get the job done, and use a different imm.
*negDest = dest;
@ -1149,7 +1149,7 @@ jit::ALUNeg(ALUOp op, Register dest, Imm32* imm, Register* negDest)
case OpTst:
MOZ_ASSERT(dest == InvalidReg);
*imm = Imm32(~imm->value);
*negDest = ScratchRegister;
*negDest = scratch;
return OpBic;
// orr has orn on thumb2 only.
default:
@ -3421,3 +3421,8 @@ Assembler::GetPoolMaxOffset()
}
return AsmPoolMaxOffset;
}
SecondScratchRegisterScope::SecondScratchRegisterScope(MacroAssembler &masm)
: AutoRegisterScope(masm, masm.getSecondScratchReg())
{
}


@ -55,6 +55,11 @@ struct ScratchRegisterScope : public AutoRegisterScope
{ }
};
struct SecondScratchRegisterScope : public AutoRegisterScope
{
explicit SecondScratchRegisterScope(MacroAssembler& masm);
};
static constexpr Register OsrFrameReg = r3;
static constexpr Register ArgumentsRectifierReg = r8;
static constexpr Register CallTempReg0 = r5;
@ -409,7 +414,7 @@ enum VFPOp {
};
// Negate the operation, AND negate the immediate that we were passed in.
ALUOp ALUNeg(ALUOp op, Register dest, Imm32* imm, Register* negDest);
ALUOp ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm, Register* negDest);
bool can_dbl(ALUOp op);
bool condsAreSafe(ALUOp op);


@ -71,7 +71,7 @@ CodeGeneratorARM::visitTestIAndBranch(LTestIAndBranch* test)
MBasicBlock* ifFalse = test->ifFalse();
// Test the operand
masm.ma_cmp(ToRegister(opd), Imm32(0));
masm.as_cmp(ToRegister(opd), Imm8(0));
if (isNextBlock(ifFalse->lir())) {
jumpToBlock(ifTrue, Assembler::NonZero);
@ -91,12 +91,16 @@ CodeGeneratorARM::visitCompare(LCompare* comp)
const LAllocation* right = comp->getOperand(1);
const LDefinition* def = comp->getDef(0);
if (right->isConstant())
masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)));
else if (right->isRegister())
ScratchRegisterScope scratch(masm);
if (right->isConstant()) {
masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)), scratch);
} else if (right->isRegister()) {
masm.ma_cmp(ToRegister(left), ToRegister(right));
else
masm.ma_cmp(ToRegister(left), Operand(ToAddress(right)));
} else {
SecondScratchRegisterScope scratch2(masm);
masm.ma_cmp(ToRegister(left), Operand(ToAddress(right)), scratch, scratch2);
}
masm.ma_mov(Imm32(0), ToRegister(def));
masm.ma_mov(Imm32(1), ToRegister(def), cond);
}
@ -108,12 +112,16 @@ CodeGeneratorARM::visitCompareAndBranch(LCompareAndBranch* comp)
const LAllocation* left = comp->left();
const LAllocation* right = comp->right();
if (right->isConstant())
masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)));
else if (right->isRegister())
ScratchRegisterScope scratch(masm);
if (right->isConstant()) {
masm.ma_cmp(ToRegister(left), Imm32(ToInt32(right)), scratch);
} else if (right->isRegister()) {
masm.ma_cmp(ToRegister(left), ToRegister(right));
else
masm.ma_cmp(ToRegister(left), Operand(ToAddress(right)));
} else {
SecondScratchRegisterScope scratch2(masm);
masm.ma_cmp(ToRegister(left), Operand(ToAddress(right)), scratch, scratch2);
}
emitBranch(cond, comp->ifTrue(), comp->ifFalse());
}
@ -281,8 +289,10 @@ CodeGeneratorARM::visitAddI(LAddI* ins)
const LAllocation* rhs = ins->getOperand(1);
const LDefinition* dest = ins->getDef(0);
ScratchRegisterScope scratch(masm);
if (rhs->isConstant())
masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCC);
masm.ma_add(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch, SetCC);
else if (rhs->isRegister())
masm.ma_add(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), SetCC);
else
@ -315,8 +325,10 @@ CodeGeneratorARM::visitSubI(LSubI* ins)
const LAllocation* rhs = ins->getOperand(1);
const LDefinition* dest = ins->getDef(0);
ScratchRegisterScope scratch(masm);
if (rhs->isConstant())
masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), SetCC);
masm.ma_sub(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch, SetCC);
else if (rhs->isRegister())
masm.ma_sub(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), SetCC);
else
@ -358,13 +370,13 @@ CodeGeneratorARM::visitMulI(LMulI* ins)
int32_t constant = ToInt32(rhs);
if (mul->canBeNegativeZero() && constant <= 0) {
Assembler::Condition bailoutCond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
masm.ma_cmp(ToRegister(lhs), Imm32(0));
masm.as_cmp(ToRegister(lhs), Imm8(0));
bailoutIf(bailoutCond, ins->snapshot());
}
// TODO: move these to ma_mul.
switch (constant) {
case -1:
masm.ma_rsb(ToRegister(lhs), Imm32(0), ToRegister(dest), SetCC);
masm.as_rsb(ToRegister(dest), ToRegister(lhs), Imm8(0), SetCC);
break;
case 0:
masm.ma_mov(Imm32(0), ToRegister(dest));
@ -422,10 +434,11 @@ CodeGeneratorARM::visitMulI(LMulI* ins)
}
if (!handled) {
ScratchRegisterScope scratch(masm);
if (mul->canOverflow())
c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), c);
c = masm.ma_check_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch, c);
else
masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest));
masm.ma_mul(ToRegister(lhs), Imm32(ToInt32(rhs)), ToRegister(dest), scratch);
}
}
}
@ -435,10 +448,12 @@ CodeGeneratorARM::visitMulI(LMulI* ins)
} else {
Assembler::Condition c = Assembler::Overflow;
if (mul->canOverflow())
c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), c);
else
if (mul->canOverflow()) {
ScratchRegisterScope scratch(masm);
c = masm.ma_check_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest), scratch, c);
} else {
masm.ma_mul(ToRegister(lhs), ToRegister(rhs), ToRegister(dest));
}
// Bailout on overflow.
if (mul->canOverflow())
@ -446,7 +461,7 @@ CodeGeneratorARM::visitMulI(LMulI* ins)
if (mul->canBeNegativeZero()) {
Label done;
masm.ma_cmp(ToRegister(dest), Imm32(0));
masm.as_cmp(ToRegister(dest), Imm8(0));
masm.ma_b(&done, Assembler::NotEqual);
// Result is -0 if lhs or rhs is negative.
@ -503,14 +518,16 @@ void
CodeGeneratorARM::divICommon(MDiv* mir, Register lhs, Register rhs, Register output,
LSnapshot* snapshot, Label& done)
{
ScratchRegisterScope scratch(masm);
if (mir->canBeNegativeOverflow()) {
// Handle INT32_MIN / -1;
// The integer division will give INT32_MIN, but we want -(double)INT32_MIN.
// Sets EQ if lhs == INT32_MIN.
masm.ma_cmp(lhs, Imm32(INT32_MIN));
masm.ma_cmp(lhs, Imm32(INT32_MIN), scratch);
// If EQ (LHS == INT32_MIN), sets EQ if rhs == -1.
masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
masm.ma_cmp(rhs, Imm32(-1), scratch, Assembler::Equal);
if (mir->canTruncateOverflow()) {
if (mir->trapOnError()) {
masm.ma_b(wasm::JumpTarget::IntegerOverflow, Assembler::Equal);
@ -530,7 +547,7 @@ CodeGeneratorARM::divICommon(MDiv* mir, Register lhs, Register rhs, Register out
// Handle divide by zero.
if (mir->canBeDivideByZero()) {
masm.ma_cmp(rhs, Imm32(0));
masm.as_cmp(rhs, Imm8(0));
if (mir->canTruncateInfinities()) {
if (mir->trapOnError()) {
masm.ma_b(wasm::JumpTarget::IntegerDivideByZero, Assembler::Equal);
@ -551,9 +568,9 @@ CodeGeneratorARM::divICommon(MDiv* mir, Register lhs, Register rhs, Register out
// Handle negative 0.
if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
Label nonzero;
masm.ma_cmp(lhs, Imm32(0));
masm.as_cmp(lhs, Imm8(0));
masm.ma_b(&nonzero, Assembler::NotEqual);
masm.ma_cmp(rhs, Imm32(0));
masm.as_cmp(rhs, Imm8(0));
MOZ_ASSERT(mir->fallible());
bailoutIf(Assembler::LessThan, snapshot);
masm.bind(&nonzero);
@ -615,7 +632,7 @@ CodeGeneratorARM::visitSoftDivI(LSoftDivI* ins)
// idivmod returns the quotient in r0, and the remainder in r1.
if (!mir->canTruncateRemainder()) {
MOZ_ASSERT(mir->fallible());
masm.ma_cmp(r1, Imm32(0));
masm.as_cmp(r1, Imm8(0));
bailoutIf(Assembler::NonZero, ins->snapshot());
}
@ -687,8 +704,8 @@ CodeGeneratorARM::modICommon(MMod* mir, Register lhs, Register rhs, Register out
// If (Y > 0), we don't set EQ, and we don't trigger LT, so we don't take
// the bailout.
if (mir->canBeDivideByZero() || mir->canBeNegativeDividend()) {
masm.ma_cmp(rhs, Imm32(0));
masm.ma_cmp(lhs, Imm32(0), Assembler::LessThan);
masm.as_cmp(rhs, Imm8(0));
masm.as_cmp(lhs, Imm8(0), Assembler::LessThan);
if (mir->isTruncated()) {
if (mir->trapOnError()) {
masm.ma_b(wasm::JumpTarget::IntegerDivideByZero, Assembler::Equal);
@ -722,7 +739,10 @@ CodeGeneratorARM::visitModI(LModI* ins)
Label done;
modICommon(mir, lhs, rhs, output, ins->snapshot(), done);
masm.ma_smod(lhs, rhs, output);
{
ScratchRegisterScope scratch(masm);
masm.ma_smod(lhs, rhs, output, scratch);
}
// If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0.
if (mir->canBeNegativeDividend()) {
@ -731,9 +751,9 @@ CodeGeneratorARM::visitModI(LModI* ins)
} else {
MOZ_ASSERT(mir->fallible());
// See if X < 0
masm.ma_cmp(output, Imm32(0));
masm.as_cmp(output, Imm8(0));
masm.ma_b(&done, Assembler::NotEqual);
masm.ma_cmp(callTemp, Imm32(0));
masm.as_cmp(callTemp, Imm8(0));
bailoutIf(Assembler::Signed, ins->snapshot());
}
}
@ -756,13 +776,17 @@ CodeGeneratorARM::visitSoftModI(LSoftModI* ins)
MOZ_ASSERT(callTemp.code() > r3.code() && callTemp.code() < r12.code());
masm.ma_mov(lhs, callTemp);
// Prevent INT_MIN % -1;
// The integer division will give INT_MIN, but we want -(double)INT_MIN.
if (mir->canBeNegativeDividend()) {
// Sets EQ if lhs == INT_MIN
masm.ma_cmp(lhs, Imm32(INT_MIN));
// If EQ (LHS == INT_MIN), sets EQ if rhs == -1
masm.ma_cmp(rhs, Imm32(-1), Assembler::Equal);
{
ScratchRegisterScope scratch(masm);
// Sets EQ if lhs == INT_MIN
masm.ma_cmp(lhs, Imm32(INT_MIN), scratch);
// If EQ (LHS == INT_MIN), sets EQ if rhs == -1
masm.ma_cmp(rhs, Imm32(-1), scratch, Assembler::Equal);
}
if (mir->isTruncated()) {
// (INT_MIN % -1)|0 == 0
Label skip;
@ -793,9 +817,9 @@ CodeGeneratorARM::visitSoftModI(LSoftModI* ins)
} else {
MOZ_ASSERT(mir->fallible());
// See if X < 0
masm.ma_cmp(r1, Imm32(0));
masm.as_cmp(r1, Imm8(0));
masm.ma_b(&done, Assembler::NotEqual);
masm.ma_cmp(callTemp, Imm32(0));
masm.as_cmp(callTemp, Imm8(0));
bailoutIf(Assembler::Signed, ins->snapshot());
}
}
@ -811,11 +835,15 @@ CodeGeneratorARM::visitModPowTwoI(LModPowTwoI* ins)
Label fin;
// bug 739870, jbramley has a different sequence that may help with speed
// here.
masm.ma_mov(in, out, SetCC);
masm.ma_b(&fin, Assembler::Zero);
masm.ma_rsb(Imm32(0), out, LeaveCC, Assembler::Signed);
masm.ma_and(Imm32((1 << ins->shift()) - 1), out);
masm.ma_rsb(Imm32(0), out, SetCC, Assembler::Signed);
masm.as_rsb(out, out, Imm8(0), LeaveCC, Assembler::Signed);
{
ScratchRegisterScope scratch(masm);
masm.ma_and(Imm32((1 << ins->shift()) - 1), out, scratch);
}
masm.as_rsb(out, out, Imm8(0), SetCC, Assembler::Signed);
if (mir->canBeNegativeDividend()) {
if (!mir->isTruncated()) {
MOZ_ASSERT(mir->fallible());
@ -835,7 +863,12 @@ CodeGeneratorARM::visitModMaskI(LModMaskI* ins)
Register tmp1 = ToRegister(ins->getTemp(0));
Register tmp2 = ToRegister(ins->getTemp(1));
MMod* mir = ins->mir();
masm.ma_mod_mask(src, dest, tmp1, tmp2, ins->shift());
ScratchRegisterScope scratch(masm);
SecondScratchRegisterScope scratch2(masm);
masm.ma_mod_mask(src, dest, tmp1, tmp2, scratch, scratch2, ins->shift());
if (mir->canBeNegativeDividend()) {
if (!mir->isTruncated()) {
MOZ_ASSERT(mir->fallible());
@ -864,23 +897,26 @@ CodeGeneratorARM::visitBitOpI(LBitOpI* ins)
const LAllocation* lhs = ins->getOperand(0);
const LAllocation* rhs = ins->getOperand(1);
const LDefinition* dest = ins->getDef(0);
ScratchRegisterScope scratch(masm);
// All of these bitops should be either imm32's, or integer registers.
switch (ins->bitop()) {
case JSOP_BITOR:
if (rhs->isConstant())
masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
masm.ma_orr(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest), scratch);
else
masm.ma_orr(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
break;
case JSOP_BITXOR:
if (rhs->isConstant())
masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
masm.ma_eor(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest), scratch);
else
masm.ma_eor(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
break;
case JSOP_BITAND:
if (rhs->isConstant())
masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest));
masm.ma_and(Imm32(ToInt32(rhs)), ToRegister(lhs), ToRegister(dest), scratch);
else
masm.ma_and(ToRegister(rhs), ToRegister(lhs), ToRegister(dest));
break;
@ -918,7 +954,7 @@ CodeGeneratorARM::visitShiftI(LShiftI* ins)
// x >>> 0 can overflow.
masm.ma_mov(lhs, dest);
if (ins->mir()->toUrsh()->fallible()) {
masm.ma_cmp(dest, Imm32(0));
masm.as_cmp(dest, Imm8(0));
bailoutIf(Assembler::LessThan, ins->snapshot());
}
}
@ -930,7 +966,7 @@ CodeGeneratorARM::visitShiftI(LShiftI* ins)
// The shift amounts should be AND'ed into the 0-31 range since arm
// shifts by the lower byte of the register (it will attempt to shift by
// 250 if you ask it to).
masm.ma_and(Imm32(0x1F), ToRegister(rhs), dest);
masm.as_and(dest, ToRegister(rhs), Imm8(0x1F));
switch (ins->bitop()) {
case JSOP_LSH:
@ -943,7 +979,7 @@ CodeGeneratorARM::visitShiftI(LShiftI* ins)
masm.ma_lsr(dest, lhs, dest);
if (ins->mir()->toUrsh()->fallible()) {
// x >>> 0 can overflow.
masm.ma_cmp(dest, Imm32(0));
masm.as_cmp(dest, Imm8(0));
bailoutIf(Assembler::LessThan, ins->snapshot());
}
break;
@ -969,7 +1005,7 @@ CodeGeneratorARM::visitUrshD(LUrshD* ins)
else
masm.ma_mov(lhs, temp);
} else {
masm.ma_and(Imm32(0x1F), ToRegister(rhs), temp);
masm.as_and(temp, ToRegister(rhs), Imm8(0x1F));
masm.ma_lsr(temp, lhs, temp);
}
@ -1119,10 +1155,12 @@ CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch* mir, Register index, Reg
// which ensures we don't attempt to execute the address table.
Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
ScratchRegisterScope scratch(masm);
int32_t cases = mir->numCases();
// Lower value with low value.
masm.ma_sub(index, Imm32(mir->low()), index, SetCC);
masm.ma_rsb(index, Imm32(cases - 1), index, SetCC, Assembler::NotSigned);
masm.ma_sub(index, Imm32(mir->low()), index, scratch, SetCC);
masm.ma_rsb(index, Imm32(cases - 1), index, scratch, SetCC, Assembler::NotSigned);
// Inhibit pools within the following sequence because we are indexing into
// a pc relative table. The region will have one instruction for ma_ldr, one
// for ma_b, and each table case takes one word.
@ -1262,11 +1300,12 @@ void
CodeGeneratorARM::emitRoundDouble(FloatRegister src, Register dest, Label* fail)
{
ScratchDoubleScope scratch(masm);
ScratchRegisterScope scratchReg(masm);
masm.ma_vcvt_F64_I32(src, scratch);
masm.ma_vxfer(scratch, dest);
masm.ma_cmp(dest, Imm32(0x7fffffff));
masm.ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
masm.ma_cmp(dest, Imm32(0x7fffffff), scratchReg);
masm.ma_cmp(dest, Imm32(0x80000000), scratchReg, Assembler::NotEqual);
masm.ma_b(fail, Assembler::Equal);
}
@ -1380,8 +1419,10 @@ CodeGeneratorARM::visitUnbox(LUnbox* unbox)
MUnbox* mir = unbox->mir();
Register type = ToRegister(unbox->type());
ScratchRegisterScope scratch(masm);
if (mir->fallible()) {
masm.ma_cmp(type, Imm32(MIRTypeToTag(mir->type())));
masm.ma_cmp(type, Imm32(MIRTypeToTag(mir->type())), scratch);
bailoutIf(Assembler::NotEqual, unbox->snapshot());
}
}
@ -1583,8 +1624,9 @@ CodeGeneratorARM::visitCompareBitwiseAndBranch(LCompareBitwiseAndBranch* lir)
void
CodeGeneratorARM::visitBitAndAndBranch(LBitAndAndBranch* baab)
{
ScratchRegisterScope scratch(masm);
if (baab->right()->isConstant())
masm.ma_tst(ToRegister(baab->left()), Imm32(ToInt32(baab->right())));
masm.ma_tst(ToRegister(baab->left()), Imm32(ToInt32(baab->right())), scratch);
else
masm.ma_tst(ToRegister(baab->left()), ToRegister(baab->right()));
emitBranch(Assembler::NonZero, baab->ifTrue(), baab->ifFalse());
@ -1606,7 +1648,7 @@ void
CodeGeneratorARM::visitNotI(LNotI* ins)
{
// It is hard to optimize !x, so just do it the basic way for now.
masm.ma_cmp(ToRegister(ins->input()), Imm32(0));
masm.as_cmp(ToRegister(ins->input()), Imm8(0));
masm.emitSet(Assembler::Equal, ToRegister(ins->output()));
}
@ -1617,7 +1659,7 @@ CodeGeneratorARM::visitNotI64(LNotI64* lir)
Register output = ToRegister(lir->output());
masm.ma_orr(input.low, input.high, output);
masm.ma_cmp(output, Imm32(0));
masm.as_cmp(output, Imm8(0));
masm.emitSet(Assembler::Equal, output);
}
@ -1640,7 +1682,7 @@ CodeGeneratorARM::visitNotD(LNotD* ins)
masm.ma_lsr(Imm32(28), dest, dest);
// 28 + 2 = 30
masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
masm.ma_and(Imm32(1), dest);
masm.as_and(dest, dest, Imm8(1));
} else {
masm.as_vmrs(pc);
masm.ma_mov(Imm32(0), dest);
@ -1668,7 +1710,7 @@ CodeGeneratorARM::visitNotF(LNotF* ins)
masm.ma_lsr(Imm32(28), dest, dest);
// 28 + 2 = 30
masm.ma_alu(dest, lsr(dest, 2), dest, OpOrr);
masm.ma_and(Imm32(1), dest);
masm.as_and(dest, dest, Imm8(1));
} else {
masm.as_vmrs(pc);
masm.ma_mov(Imm32(0), dest);
@ -1683,8 +1725,9 @@ CodeGeneratorARM::visitGuardShape(LGuardShape* guard)
Register obj = ToRegister(guard->input());
Register tmp = ToRegister(guard->tempInt());
ScratchRegisterScope scratch(masm);
masm.ma_ldr(DTRAddr(obj, DtrOffImm(ShapedObject::offsetOfShape())), tmp);
masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->shape()));
masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->shape()), scratch);
bailoutIf(Assembler::NotEqual, guard->snapshot());
}
@ -1696,8 +1739,9 @@ CodeGeneratorARM::visitGuardObjectGroup(LGuardObjectGroup* guard)
Register tmp = ToRegister(guard->tempInt());
MOZ_ASSERT(obj != tmp);
ScratchRegisterScope scratch(masm);
masm.ma_ldr(DTRAddr(obj, DtrOffImm(JSObject::offsetOfGroup())), tmp);
masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->group()));
masm.ma_cmp(tmp, ImmGCPtr(guard->mir()->group()), scratch);
Assembler::Condition cond =
guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
@ -1710,8 +1754,10 @@ CodeGeneratorARM::visitGuardClass(LGuardClass* guard)
Register obj = ToRegister(guard->input());
Register tmp = ToRegister(guard->tempInt());
ScratchRegisterScope scratch(masm);
masm.loadObjClass(obj, tmp);
masm.ma_cmp(tmp, Imm32((uint32_t)guard->mir()->getClass()));
masm.ma_cmp(tmp, Imm32((uint32_t)guard->mir()->getClass()), scratch);
bailoutIf(Assembler::NotEqual, guard->snapshot());
}
@ -2130,7 +2176,7 @@ CodeGeneratorARM::visitAsmSelect(LAsmSelect* ins)
MIRType mirType = ins->mir()->type();
Register cond = ToRegister(ins->condExpr());
masm.ma_cmp(cond, Imm32(0));
masm.as_cmp(cond, Imm8(0));
if (mirType == MIRType::Int32) {
Register falseExpr = ToRegister(ins->falseExpr());
@ -2263,16 +2309,19 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
int32_t ptrImm = ptr->toConstant()->toInt32();
MOZ_ASSERT(ptrImm >= 0);
if (isFloat) {
ScratchRegisterScope scratch(masm);
VFPRegister vd(ToFloatRegister(ins->output()));
if (size == 32)
masm.ma_vldr(Address(HeapReg, ptrImm), vd.singleOverlay(), Assembler::Always);
masm.ma_vldr(Address(HeapReg, ptrImm), vd.singleOverlay(), scratch, Assembler::Always);
else
masm.ma_vldr(Address(HeapReg, ptrImm), vd, Assembler::Always);
masm.ma_vldr(Address(HeapReg, ptrImm), vd, scratch, Assembler::Always);
} else {
ScratchRegisterScope scratch(masm);
masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, Imm32(ptrImm),
ToRegister(ins->output()), Offset, Assembler::Always);
ToRegister(ins->output()), scratch, Offset, Assembler::Always);
}
} else {
ScratchRegisterScope scratch(masm);
Register ptrReg = ToRegister(ptr);
if (isFloat) {
FloatRegister output = ToFloatRegister(ins->output());
@ -2285,12 +2334,12 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
masm.append(wasm::BoundsCheck(cmp.getOffset()));
size_t nanOffset = size == 32 ? wasm::NaN32GlobalDataOffset : wasm::NaN64GlobalDataOffset;
masm.ma_vldr(Address(GlobalReg, nanOffset - AsmJSGlobalRegBias), output,
masm.ma_vldr(Address(GlobalReg, nanOffset - AsmJSGlobalRegBias), output, scratch,
Assembler::AboveOrEqual);
cond = Assembler::Below;
}
masm.ma_vldr(output, HeapReg, ptrReg, 0, cond);
masm.ma_vldr(output, HeapReg, ptrReg, scratch, 0, cond);
} else {
Register output = ToRegister(ins->output());
@ -2303,7 +2352,7 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
cond = Assembler::Below;
}
masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, Offset, cond);
masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, output, scratch, Offset, cond);
}
}
}
@ -2322,9 +2371,10 @@ CodeGeneratorARM::emitWasmLoad(T* lir)
// Maybe add the offset.
if (offset || type == Scalar::Int64) {
ScratchRegisterScope scratch(masm);
Register ptrPlusOffset = ToRegister(lir->ptrCopy());
if (offset)
masm.ma_add(Imm32(offset), ptrPlusOffset);
masm.ma_add(Imm32(offset), ptrPlusOffset, scratch);
ptr = ptrPlusOffset;
} else {
MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
@ -2341,7 +2391,7 @@ CodeGeneratorARM::emitWasmLoad(T* lir)
if (type == Scalar::Int64) {
MOZ_ASSERT(INT64LOW_OFFSET == 0);
masm.ma_dataTransferN(IsLoad, 32, /* signed = */ false, HeapReg, ptr, output.low);
masm.ma_add(Imm32(INT64HIGH_OFFSET), ptr);
masm.as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
masm.ma_dataTransferN(IsLoad, 32, isSigned, HeapReg, ptr, output.high);
} else {
masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.low);
@ -2357,7 +2407,7 @@ CodeGeneratorARM::emitWasmLoad(T* lir)
MOZ_ASSERT((byteSize == 4) == output.fpu().isSingle());
ScratchRegisterScope scratch(masm);
masm.ma_add(HeapReg, ptr, scratch);
masm.ma_vldr(Address(scratch, 0), output.fpu());
masm.ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), output.fpu());
} else {
masm.ma_dataTransferN(IsLoad, byteSize * 8, isSigned, HeapReg, ptr, output.gpr());
}
@ -2388,8 +2438,10 @@ CodeGeneratorARM::emitWasmUnalignedLoad(T* lir)
MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
Register ptr = ToRegister(lir->ptrCopy());
if (offset)
masm.ma_add(Imm32(offset), ptr);
if (offset) {
ScratchRegisterScope scratch(masm);
masm.ma_add(Imm32(offset), ptr, scratch);
}
// Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
masm.ma_add(HeapReg, ptr);
@ -2467,7 +2519,8 @@ CodeGeneratorARM::visitWasmAddOffset(LWasmAddOffset* lir)
Register base = ToRegister(lir->base());
Register out = ToRegister(lir->output());
masm.ma_add(base, Imm32(mir->offset()), out, SetCC);
ScratchRegisterScope scratch(masm);
masm.ma_add(base, Imm32(mir->offset()), out, scratch, SetCC);
masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::CarrySet);
}
@ -2486,9 +2539,10 @@ CodeGeneratorARM::emitWasmStore(T* lir)
// Maybe add the offset.
if (offset || type == Scalar::Int64) {
ScratchRegisterScope scratch(masm);
Register ptrPlusOffset = ToRegister(lir->ptrCopy());
if (offset)
masm.ma_add(Imm32(offset), ptrPlusOffset);
masm.ma_add(Imm32(offset), ptrPlusOffset, scratch);
ptr = ptrPlusOffset;
} else {
MOZ_ASSERT(lir->ptrCopy()->isBogusTemp());
@ -2501,16 +2555,16 @@ CodeGeneratorARM::emitWasmStore(T* lir)
Register64 value = ToRegister64(lir->getInt64Operand(lir->ValueIndex));
masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ false, HeapReg, ptr, value.low);
masm.ma_add(Imm32(INT64HIGH_OFFSET), ptr);
masm.as_add(ptr, ptr, Imm8(INT64HIGH_OFFSET));
masm.ma_dataTransferN(IsStore, 32 /* bits */, /* signed */ true, HeapReg, ptr, value.high);
} else {
AnyRegister value = ToAnyRegister(lir->getOperand(lir->ValueIndex));
if (value.isFloat()) {
ScratchRegisterScope scratch(masm);
FloatRegister val = value.fpu();
MOZ_ASSERT((byteSize == 4) == val.isSingle());
ScratchRegisterScope scratch(masm);
masm.ma_add(HeapReg, ptr, scratch);
masm.ma_vstr(val, Address(scratch, 0));
masm.ma_vstr(val, Operand(Address(scratch, 0)).toVFPAddr());
} else {
bool isSigned = type == Scalar::Uint32 || type == Scalar::Int32; // see AsmJSStoreHeap.
Register val = value.gpr();
@ -2543,8 +2597,10 @@ CodeGeneratorARM::emitWasmUnalignedStore(T* lir)
MOZ_ASSERT(offset < wasm::OffsetGuardLimit);
Register ptr = ToRegister(lir->ptrCopy());
if (offset)
masm.ma_add(Imm32(offset), ptr);
if (offset) {
ScratchRegisterScope scratch(masm);
masm.ma_add(Imm32(offset), ptr, scratch);
}
// Add HeapReg to ptr, so we can use base+index addressing in the byte loads.
masm.ma_add(HeapReg, ptr);
@ -2630,8 +2686,9 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
else
masm.storeDouble(vd, addr);
} else {
ScratchRegisterScope scratch(masm);
masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, Imm32(ptrImm),
ToRegister(ins->value()), Offset, Assembler::Always);
ToRegister(ins->value()), scratch, Offset, Assembler::Always);
}
} else {
Register ptrReg = ToRegister(ptr);
@ -2645,14 +2702,16 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
}
if (isFloat) {
ScratchRegisterScope scratch(masm);
FloatRegister value = ToFloatRegister(ins->value());
if (size == 32)
value = value.singleOverlay();
masm.ma_vstr(value, HeapReg, ptrReg, 0, 0, Assembler::Below);
masm.ma_vstr(value, HeapReg, ptrReg, scratch, 0, Assembler::Below);
} else {
ScratchRegisterScope scratch(masm);
Register value = ToRegister(ins->value());
masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, Offset, cond);
masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg, value, scratch, Offset, cond);
}
}
}
@ -2841,13 +2900,17 @@ CodeGeneratorARM::visitAsmJSPassStackArg(LAsmJSPassStackArg* ins)
{
const MAsmJSPassStackArg* mir = ins->mir();
Address dst(StackPointer, mir->spOffset());
ScratchRegisterScope scratch(masm);
SecondScratchRegisterScope scratch2(masm);
if (ins->arg()->isConstant()) {
masm.ma_storeImm(Imm32(ToInt32(ins->arg())), dst);
masm.ma_mov(Imm32(ToInt32(ins->arg())), scratch);
masm.ma_str(scratch, dst, scratch2);
} else {
if (ins->arg()->isGeneralReg())
masm.ma_str(ToRegister(ins->arg()), dst);
masm.ma_str(ToRegister(ins->arg()), dst, scratch);
else
masm.ma_vstr(ToFloatRegister(ins->arg()), dst);
masm.ma_vstr(ToFloatRegister(ins->arg()), dst, scratch);
}
}
@ -2866,7 +2929,7 @@ CodeGeneratorARM::visitUDiv(LUDiv* ins)
// Check for large unsigned result - represent as double.
if (!ins->mir()->isTruncated()) {
MOZ_ASSERT(ins->mir()->fallible());
masm.ma_cmp(output, Imm32(0));
masm.as_cmp(output, Imm8(0));
bailoutIf(Assembler::LessThan, ins->snapshot());
}
@ -2895,12 +2958,15 @@ CodeGeneratorARM::visitUMod(LUMod* ins)
Label done;
generateUDivModZeroCheck(rhs, output, &done, ins->snapshot(), ins->mir());
masm.ma_umod(lhs, rhs, output);
{
ScratchRegisterScope scratch(masm);
masm.ma_umod(lhs, rhs, output, scratch);
}
// Check for large unsigned result - represent as double.
if (!ins->mir()->isTruncated()) {
MOZ_ASSERT(ins->mir()->fallible());
masm.ma_cmp(output, Imm32(0));
masm.as_cmp(output, Imm8(0));
bailoutIf(Assembler::LessThan, ins->snapshot());
}
@ -2916,7 +2982,7 @@ CodeGeneratorARM::generateUDivModZeroCheck(Register rhs, Register output, Label*
if (!mir)
return;
if (mir->canBeDivideByZero()) {
masm.ma_cmp(rhs, Imm32(0));
masm.as_cmp(rhs, Imm8(0));
if (mir->isTruncated()) {
if (mir->trapOnError()) {
masm.ma_b(wasm::JumpTarget::IntegerDivideByZero, Assembler::Equal);
@ -2967,7 +3033,7 @@ CodeGeneratorARM::visitSoftUDivOrMod(LSoftUDivOrMod* ins)
// uidivmod returns the quotient in r0, and the remainder in r1.
if (div && !div->canTruncateRemainder()) {
MOZ_ASSERT(div->fallible());
masm.ma_cmp(r1, Imm32(0));
masm.as_cmp(r1, Imm8(0));
bailoutIf(Assembler::NonZero, ins->snapshot());
}
@ -2975,7 +3041,7 @@ CodeGeneratorARM::visitSoftUDivOrMod(LSoftUDivOrMod* ins)
if ((div && !div->isTruncated()) || (mod && !mod->isTruncated())) {
DebugOnly<bool> isFallible = (div && div->fallible()) || (mod && mod->fallible());
MOZ_ASSERT(isFallible);
masm.ma_cmp(output, Imm32(0));
masm.as_cmp(output, Imm8(0));
bailoutIf(Assembler::LessThan, ins->snapshot());
}
@ -2989,8 +3055,11 @@ CodeGeneratorARM::visitEffectiveAddress(LEffectiveAddress* ins)
Register base = ToRegister(ins->base());
Register index = ToRegister(ins->index());
Register output = ToRegister(ins->output());
ScratchRegisterScope scratch(masm);
masm.as_add(output, base, lsl(index, mir->scale()));
masm.ma_add(Imm32(mir->displacement()), output);
masm.ma_add(Imm32(mir->displacement()), output, scratch);
}
void
@ -2998,14 +3067,17 @@ CodeGeneratorARM::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
{
const MWasmLoadGlobalVar* mir = ins->mir();
unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
ScratchRegisterScope scratch(masm);
if (mir->type() == MIRType::Int32) {
masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()));
masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr), ToRegister(ins->output()), scratch);
} else if (mir->type() == MIRType::Float32) {
VFPRegister vd(ToFloatRegister(ins->output()));
masm.ma_vldr(Address(GlobalReg, addr), vd.singleOverlay());
masm.ma_vldr(Address(GlobalReg, addr), vd.singleOverlay(), scratch);
} else {
MOZ_ASSERT(mir->type() == MIRType::Double);
masm.ma_vldr(Address(GlobalReg, addr), ToFloatRegister(ins->output()));
masm.ma_vldr(Address(GlobalReg, addr), ToFloatRegister(ins->output()), scratch);
}
}
@ -3017,8 +3089,9 @@ CodeGeneratorARM::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
MOZ_ASSERT(mir->type() == MIRType::Int64);
Register64 output = ToOutRegister64(ins);
masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr + INT64LOW_OFFSET), output.low);
masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), output.high);
ScratchRegisterScope scratch(masm);
masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr + INT64LOW_OFFSET), output.low, scratch);
masm.ma_dtr(IsLoad, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), output.high, scratch);
}
void
@ -3027,15 +3100,17 @@ CodeGeneratorARM::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
const MWasmStoreGlobalVar* mir = ins->mir();
MIRType type = mir->value()->type();
ScratchRegisterScope scratch(masm);
unsigned addr = mir->globalDataOffset() - AsmJSGlobalRegBias;
if (type == MIRType::Int32) {
masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value()));
masm.ma_dtr(IsStore, GlobalReg, Imm32(addr), ToRegister(ins->value()), scratch);
} else if (type == MIRType::Float32) {
VFPRegister vd(ToFloatRegister(ins->value()));
masm.ma_vstr(vd.singleOverlay(), Address(GlobalReg, addr));
masm.ma_vstr(vd.singleOverlay(), Address(GlobalReg, addr), scratch);
} else {
MOZ_ASSERT(type == MIRType::Double);
masm.ma_vstr(ToFloatRegister(ins->value()), Address(GlobalReg, addr));
masm.ma_vstr(ToFloatRegister(ins->value()), Address(GlobalReg, addr), scratch);
}
}
@ -3047,8 +3122,9 @@ CodeGeneratorARM::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
MOZ_ASSERT (mir->value()->type() == MIRType::Int64);
Register64 input = ToRegister64(ins->value());
masm.ma_dtr(IsStore, GlobalReg, Imm32(addr + INT64LOW_OFFSET), input.low);
masm.ma_dtr(IsStore, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), input.high);
ScratchRegisterScope scratch(masm);
masm.ma_dtr(IsStore, GlobalReg, Imm32(addr + INT64LOW_OFFSET), input.low, scratch);
masm.ma_dtr(IsStore, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), input.high, scratch);
}
void
@ -3128,6 +3204,7 @@ CodeGeneratorARM::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir)
}
ScratchDoubleScope scratchScope(masm);
ScratchRegisterScope scratchReg(masm);
FloatRegister scratch = scratchScope.uintOverlay();
// ARM conversion instructions clamp the value to ensure it fits within the
@ -3144,8 +3221,8 @@ CodeGeneratorARM::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir)
masm.ma_vxfer(scratch, output);
// int32_t(UINT32_MAX) == -1.
masm.ma_cmp(output, Imm32(-1));
masm.ma_cmp(output, Imm32(0), Assembler::NotEqual);
masm.ma_cmp(output, Imm32(-1), scratchReg);
masm.as_cmp(output, Imm8(0), Assembler::NotEqual);
masm.ma_b(ool->entry(), Assembler::Equal);
masm.bind(ool->rejoin());
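
For reference, the unsigned path above relies on ARM's vcvt saturating out-of-range inputs: a result equal to either clamp bound (UINT32_MAX, which reads back as -1 through an int32_t, or 0) may indicate overflow and is re-checked on the out-of-line path. A minimal C++ sketch of that check (illustrative, not from the patch):

#include <cstdint>

// True when a truncated unsigned result may have been clamped by vcvt and
// therefore needs the slow-path re-check.
static bool maybeClampedUnsigned(uint32_t truncated) {
    int32_t out = int32_t(truncated);
    return out == -1 || out == 0; // int32_t(UINT32_MAX) == -1
}
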
@ -3162,8 +3239,8 @@ CodeGeneratorARM::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir)
MOZ_CRASH("unexpected type in visitWasmTruncateToInt32");
masm.ma_vxfer(scratch, output);
masm.ma_cmp(output, Imm32(INT32_MAX));
masm.ma_cmp(output, Imm32(INT32_MIN), Assembler::NotEqual);
masm.ma_cmp(output, Imm32(INT32_MAX), scratchReg);
masm.ma_cmp(output, Imm32(INT32_MIN), scratchReg, Assembler::NotEqual);
masm.ma_b(ool->entry(), Assembler::Equal);
masm.bind(ool->rejoin());
@ -3199,8 +3276,9 @@ CodeGeneratorARM::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir)
masm.Pop(input);
masm.ma_cmp(output.high, Imm32(0x80000000));
masm.ma_cmp(output.low, Imm32(0x00000000), Assembler::Equal);
ScratchRegisterScope scratch(masm);
masm.ma_cmp(output.high, Imm32(0x80000000), scratch);
masm.as_cmp(output.low, Imm8(0x00000000), Assembler::Equal);
masm.ma_b(ool->entry(), Assembler::Equal);
masm.bind(ool->rejoin());
@ -3336,11 +3414,13 @@ CodeGeneratorARM::visitCopySignF(LCopySignF* ins)
masm.ma_vxfer(lhs, lhsi);
masm.ma_vxfer(rhs, rhsi);
ScratchRegisterScope scratch(masm);
// Clear lhs's sign.
masm.ma_and(Imm32(INT32_MAX), lhsi, lhsi);
masm.ma_and(Imm32(INT32_MAX), lhsi, lhsi, scratch);
// Keep rhs's sign.
masm.ma_and(Imm32(INT32_MIN), rhsi, rhsi);
masm.ma_and(Imm32(INT32_MIN), rhsi, rhsi, scratch);
// Combine.
masm.ma_orr(lhsi, rhsi, rhsi);
@ -3362,11 +3442,13 @@ CodeGeneratorARM::visitCopySignD(LCopySignD* ins)
masm.as_vxfer(lhsi, InvalidReg, lhs, Assembler::FloatToCore, Assembler::Always, 1);
masm.as_vxfer(rhsi, InvalidReg, rhs, Assembler::FloatToCore, Assembler::Always, 1);
ScratchRegisterScope scratch(masm);
// Clear lhs's sign.
masm.ma_and(Imm32(INT32_MAX), lhsi, lhsi);
masm.ma_and(Imm32(INT32_MAX), lhsi, lhsi, scratch);
// Keep rhs's sign.
masm.ma_and(Imm32(INT32_MIN), rhsi, rhsi);
masm.ma_and(Imm32(INT32_MIN), rhsi, rhsi, scratch);
// Combine.
masm.ma_orr(lhsi, rhsi, rhsi);
@ -3680,13 +3762,14 @@ CodeGeneratorARM::visitAsmSelectI64(LAsmSelectI64* lir)
Register64 out = ToOutRegister64(lir);
MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out, "true expr is reused for input");
masm.ma_cmp(cond, Imm32(0));
masm.as_cmp(cond, Imm8(0));
if (falseExpr.low().isRegister()) {
masm.ma_mov(ToRegister(falseExpr.low()), out.low, LeaveCC, Assembler::Equal);
masm.ma_mov(ToRegister(falseExpr.high()), out.high, LeaveCC, Assembler::Equal);
} else {
masm.ma_ldr(ToAddress(falseExpr.low()), out.low, Offset, Assembler::Equal);
masm.ma_ldr(ToAddress(falseExpr.high()), out.high, Offset, Assembler::Equal);
ScratchRegisterScope scratch(masm);
masm.ma_ldr(ToAddress(falseExpr.low()), out.low, scratch, Offset, Assembler::Equal);
masm.ma_ldr(ToAddress(falseExpr.high()), out.high, scratch, Offset, Assembler::Equal);
}
}
@ -3747,8 +3830,8 @@ CodeGeneratorARM::visitTestI64AndBranch(LTestI64AndBranch* lir)
{
Register64 input = ToRegister64(lir->getInt64Operand(0));
masm.ma_cmp(input.high, Imm32(0));
masm.as_cmp(input.high, Imm8(0));
jumpToBlock(lir->ifTrue(), Assembler::NonZero);
masm.ma_cmp(input.low, Imm32(0));
masm.as_cmp(input.low, Imm8(0));
emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
}


@ -70,23 +70,28 @@ MacroAssembler::and32(Register src, Register dest)
void
MacroAssembler::and32(Imm32 imm, Register dest)
{
ma_and(imm, dest, SetCC);
ScratchRegisterScope scratch(*this);
ma_and(imm, dest, scratch, SetCC);
}
void
MacroAssembler::and32(Imm32 imm, const Address& dest)
{
ScratchRegisterScope scratch(*this);
load32(dest, scratch);
ma_and(imm, scratch);
store32(scratch, dest);
SecondScratchRegisterScope scratch2(*this);
ma_ldr(dest, scratch, scratch2);
ma_and(imm, scratch, scratch2);
ma_str(scratch, dest, scratch2);
}
void
MacroAssembler::and32(const Address& src, Register dest)
{
ScratchRegisterScope scratch(*this);
load32(src, scratch);
SecondScratchRegisterScope scratch2(*this);
ma_ldr(src, scratch, scratch2);
ma_and(scratch, dest, SetCC);
}
@ -99,7 +104,8 @@ MacroAssembler::andPtr(Register src, Register dest)
void
MacroAssembler::andPtr(Imm32 imm, Register dest)
{
ma_and(imm, dest);
ScratchRegisterScope scratch(*this);
ma_and(imm, dest, scratch);
}
void
@ -138,16 +144,19 @@ MacroAssembler::or32(Register src, Register dest)
void
MacroAssembler::or32(Imm32 imm, Register dest)
{
ma_orr(imm, dest);
ScratchRegisterScope scratch(*this);
ma_orr(imm, dest, scratch);
}
void
MacroAssembler::or32(Imm32 imm, const Address& dest)
{
ScratchRegisterScope scratch(*this);
load32(dest, scratch);
ma_orr(imm, scratch);
store32(scratch, dest);
SecondScratchRegisterScope scratch2(*this);
ma_ldr(dest, scratch, scratch2);
ma_orr(imm, scratch, scratch2);
ma_str(scratch, dest, scratch2);
}
void
@ -159,7 +168,8 @@ MacroAssembler::orPtr(Register src, Register dest)
void
MacroAssembler::orPtr(Imm32 imm, Register dest)
{
ma_orr(imm, dest);
ScratchRegisterScope scratch(*this);
ma_orr(imm, dest, scratch);
}
void
@ -192,7 +202,8 @@ MacroAssembler::xor32(Register src, Register dest)
void
MacroAssembler::xor32(Imm32 imm, Register dest)
{
ma_eor(imm, dest, SetCC);
ScratchRegisterScope scratch(*this);
ma_eor(imm, dest, scratch, SetCC);
}
void
@ -204,7 +215,8 @@ MacroAssembler::xorPtr(Register src, Register dest)
void
MacroAssembler::xorPtr(Imm32 imm, Register dest)
{
ma_eor(imm, dest);
ScratchRegisterScope scratch(*this);
ma_eor(imm, dest, scratch);
}
// ===============================================================
@ -219,16 +231,19 @@ MacroAssembler::add32(Register src, Register dest)
void
MacroAssembler::add32(Imm32 imm, Register dest)
{
ma_add(imm, dest, SetCC);
ScratchRegisterScope scratch(*this);
ma_add(imm, dest, scratch, SetCC);
}
void
MacroAssembler::add32(Imm32 imm, const Address& dest)
{
ScratchRegisterScope scratch(*this);
load32(dest, scratch);
ma_add(imm, scratch, SetCC);
store32(scratch, dest);
SecondScratchRegisterScope scratch2(*this);
ma_ldr(dest, scratch, scratch2);
ma_add(imm, scratch, scratch2, SetCC);
ma_str(scratch, dest, scratch2);
}
void
@ -240,7 +255,8 @@ MacroAssembler::addPtr(Register src, Register dest)
void
MacroAssembler::addPtr(Imm32 imm, Register dest)
{
ma_add(imm, dest);
ScratchRegisterScope scratch(*this);
ma_add(imm, dest, scratch);
}
void
@ -253,16 +269,20 @@ void
MacroAssembler::addPtr(Imm32 imm, const Address& dest)
{
ScratchRegisterScope scratch(*this);
loadPtr(dest, scratch);
addPtr(imm, scratch);
storePtr(scratch, dest);
SecondScratchRegisterScope scratch2(*this);
ma_ldr(dest, scratch, scratch2);
ma_add(imm, scratch, scratch2);
ma_str(scratch, dest, scratch2);
}
void
MacroAssembler::addPtr(const Address& src, Register dest)
{
ScratchRegisterScope scratch(*this);
load32(src, scratch);
SecondScratchRegisterScope scratch2(*this);
ma_ldr(src, scratch, scratch2);
ma_add(scratch, dest, SetCC);
}
@ -276,15 +296,17 @@ MacroAssembler::add64(Register64 src, Register64 dest)
void
MacroAssembler::add64(Imm32 imm, Register64 dest)
{
ma_add(imm, dest.low, SetCC);
ma_adc(Imm32(0), dest.high, LeaveCC);
ScratchRegisterScope scratch(*this);
ma_add(imm, dest.low, scratch, SetCC);
as_adc(dest.high, dest.high, Imm8(0), LeaveCC);
}
void
MacroAssembler::add64(Imm64 imm, Register64 dest)
{
ma_add(imm.low(), dest.low, SetCC);
ma_adc(imm.hi(), dest.high, LeaveCC);
ScratchRegisterScope scratch(*this);
ma_add(imm.low(), dest.low, scratch, SetCC);
ma_adc(imm.hi(), dest.high, scratch, LeaveCC);
}
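
Both add64 overloads above follow the usual 32-bit carry-chain pattern: an ADDS on the low words sets the carry flag, and an ADC folds it into the high words. The same arithmetic in C++ (illustrative sketch):

#include <cstdint>

static uint64_t add64(uint32_t aLo, uint32_t aHi, uint32_t bLo, uint32_t bHi) {
    uint32_t lo = aLo + bLo;           // ADDS: may wrap, setting the carry flag
    uint32_t carry = lo < aLo ? 1 : 0; // carry out of the low-word addition
    uint32_t hi = aHi + bHi + carry;   // ADC: consumes the carry
    return (uint64_t(hi) << 32) | lo;
}
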
void
@ -308,14 +330,17 @@ MacroAssembler::sub32(Register src, Register dest)
void
MacroAssembler::sub32(Imm32 imm, Register dest)
{
ma_sub(imm, dest, SetCC);
ScratchRegisterScope scratch(*this);
ma_sub(imm, dest, scratch, SetCC);
}
void
MacroAssembler::sub32(const Address& src, Register dest)
{
ScratchRegisterScope scratch(*this);
load32(src, scratch);
SecondScratchRegisterScope scratch2(*this);
ma_ldr(src, scratch, scratch2);
ma_sub(scratch, dest, SetCC);
}
@ -329,22 +354,27 @@ void
MacroAssembler::subPtr(Register src, const Address& dest)
{
ScratchRegisterScope scratch(*this);
loadPtr(dest, scratch);
SecondScratchRegisterScope scratch2(*this);
ma_ldr(dest, scratch, scratch2);
ma_sub(src, scratch);
storePtr(scratch, dest);
ma_str(scratch, dest, scratch2);
}
void
MacroAssembler::subPtr(Imm32 imm, Register dest)
{
ma_sub(imm, dest);
ScratchRegisterScope scratch(*this);
ma_sub(imm, dest, scratch);
}
void
MacroAssembler::subPtr(const Address& addr, Register dest)
{
ScratchRegisterScope scratch(*this);
loadPtr(addr, scratch);
SecondScratchRegisterScope scratch2(*this);
ma_ldr(addr, scratch, scratch2);
ma_sub(scratch, dest);
}
@ -358,8 +388,9 @@ MacroAssembler::sub64(Register64 src, Register64 dest)
void
MacroAssembler::sub64(Imm64 imm, Register64 dest)
{
ma_sub(imm.low(), dest.low, SetCC);
ma_sbc(imm.hi(), dest.high, LeaveCC);
ScratchRegisterScope scratch(*this);
ma_sub(imm.low(), dest.low, scratch, SetCC);
ma_sbc(imm.hi(), dest.high, scratch, LeaveCC);
}
void
@ -388,25 +419,28 @@ MacroAssembler::mul64(Imm64 imm, const Register64& dest)
// + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
// + HIGH(LOW(dest) * LOW(imm)) [carry]
ScratchRegisterScope scratch(*this);
SecondScratchRegisterScope scratch2(*this);
// HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
ma_mov(Imm32(imm.value & 0xFFFFFFFFL), ScratchRegister);
as_mul(dest.high, dest.high, ScratchRegister);
ma_mov(Imm32(imm.value & 0xFFFFFFFFL), scratch);
as_mul(dest.high, dest.high, scratch);
// high:low = LOW(dest) * LOW(imm);
as_umull(secondScratchReg_, ScratchRegister, dest.low, ScratchRegister);
as_umull(scratch2, scratch, dest.low, scratch);
// HIGH(dest) += high;
as_add(dest.high, dest.high, O2Reg(secondScratchReg_));
as_add(dest.high, dest.high, O2Reg(scratch2));
// HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
if (((imm.value >> 32) & 0xFFFFFFFFL) == 5)
as_add(secondScratchReg_, dest.low, lsl(dest.low, 2));
as_add(scratch2, dest.low, lsl(dest.low, 2));
else
MOZ_CRASH("Not supported imm");
as_add(dest.high, dest.high, O2Reg(secondScratchReg_));
as_add(dest.high, dest.high, O2Reg(scratch2));
// LOW(dest) = low;
ma_mov(ScratchRegister, dest.low);
ma_mov(scratch, dest.low);
}
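
The comment block above describes the standard cross-product decomposition for a 64-bit multiply that keeps only the low 64 bits of the product; UMULL supplies the one full 32x32->64 product whose high half is the carry term. In C++ terms (illustrative sketch; the patch additionally special-cases HIGH(imm) == 5 with a shift-and-add):

#include <cstdint>

static uint64_t mul64lo(uint32_t lo, uint32_t hi, uint64_t imm) {
    uint32_t immLo = uint32_t(imm);
    uint32_t immHi = uint32_t(imm >> 32);
    uint64_t cross = uint64_t(lo) * immLo;  // UMULL: full 64-bit product
    uint32_t outHi = hi * immLo             // LOW(HIGH(dest) * LOW(imm))
                   + lo * immHi             // LOW(LOW(dest) * HIGH(imm))
                   + uint32_t(cross >> 32); // HIGH(LOW(dest) * LOW(imm)) [carry]
    return (uint64_t(outHi) << 32) | uint32_t(cross);
}
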
void
@ -420,10 +454,11 @@ MacroAssembler::mul64(Imm64 imm, const Register64& dest, const Register temp)
MOZ_ASSERT(temp != dest.high && temp != dest.low);
// Compute mul64
ma_mul(dest.high, imm.low(), dest.high); // (2)
ma_mul(dest.low, imm.hi(), temp); // (3)
ScratchRegisterScope scratch(*this);
ma_mul(dest.high, imm.low(), dest.high, scratch); // (2)
ma_mul(dest.low, imm.hi(), temp, scratch); // (3)
ma_add(dest.high, temp, temp);
ma_umull(dest.low, imm.low(), dest.high, dest.low); // (4) + (1)
ma_umull(dest.low, imm.low(), dest.high, dest.low, scratch); // (4) + (1)
ma_add(temp, dest.high, dest.high);
}
@ -467,9 +502,12 @@ MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest)
void
MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
{
movePtr(imm, ScratchRegister);
loadDouble(Address(ScratchRegister, 0), ScratchDoubleReg);
mulDouble(ScratchDoubleReg, dest);
ScratchRegisterScope scratch(*this);
ScratchDoubleScope scratchDouble(*this);
movePtr(imm, scratch);
ma_vldr(Operand(Address(scratch, 0)).toVFPAddr(), scratchDouble);
mulDouble(scratchDouble, dest);
}
void
@ -486,10 +524,12 @@ void
MacroAssembler::remainder32(Register rhs, Register srcDest, bool isUnsigned)
{
MOZ_ASSERT(HasIDIV());
ScratchRegisterScope scratch(*this);
if (isUnsigned)
ma_umod(srcDest, rhs, srcDest);
ma_umod(srcDest, rhs, srcDest, scratch);
else
ma_smod(srcDest, rhs, srcDest);
ma_smod(srcDest, rhs, srcDest, scratch);
}
void
@ -514,8 +554,8 @@ MacroAssembler::inc64(AbsoluteAddress dest)
ma_mov(Imm32((int32_t)dest.addr), scratch);
ma_ldrd(EDtrAddr(scratch, EDtrOffImm(0)), r0, r1);
ma_add(Imm32(1), r0, SetCC);
ma_adc(Imm32(0), r1, LeaveCC);
as_add(r0, r0, Imm8(1), SetCC);
as_adc(r1, r1, Imm8(0), LeaveCC);
ma_strd(r0, r1, EDtrAddr(scratch, EDtrOffImm(0)));
ma_ldrd(EDtrAddr(sp, EDtrOffImm(8)), r0, r1, PostIndex);
@ -530,8 +570,8 @@ MacroAssembler::neg32(Register reg)
void
MacroAssembler::neg64(Register64 reg)
{
ma_rsb(Imm32(0), reg.low, SetCC);
ma_rsc(Imm32(0), reg.high);
as_rsb(reg.low, reg.low, Imm8(0), SetCC);
as_rsc(reg.high, reg.high, Imm8(0));
}
void
@ -631,13 +671,13 @@ MacroAssembler::lshift64(Register unmaskedShift, Register64 dest)
// Note: one of the two dest.low shifts will always yield zero due to a negative shift.
ScratchRegisterScope shift(*this);
ma_and(Imm32(0x3f), unmaskedShift, shift);
as_and(shift, unmaskedShift, Imm8(0x3f));
as_mov(dest.high, lsl(dest.high, shift));
ma_sub(shift, Imm32(32), shift);
as_sub(shift, shift, Imm8(32));
as_orr(dest.high, dest.high, lsl(dest.low, shift));
ma_neg(shift, shift);
as_orr(dest.high, dest.high, lsr(dest.low, shift));
ma_and(Imm32(0x3f), unmaskedShift, shift);
as_and(shift, unmaskedShift, Imm8(0x3f));
as_mov(dest.low, lsl(dest.low, shift));
}
@ -710,11 +750,11 @@ MacroAssembler::rshift64Arithmetic(Register unmaskedShift, Register64 dest)
// Note: negative shifts yield zero as a result, except for the signed
// right shift. Therefore we need to test for a negative amount and only
// apply the arithmetic shift when it isn't negative.
ScratchRegisterScope shift(*this);
ma_and(Imm32(0x3f), unmaskedShift, shift);
as_and(shift, unmaskedShift, Imm8(0x3f));
as_mov(dest.low, lsr(dest.low, shift));
ma_rsb(shift, Imm32(32), shift);
as_rsb(shift, shift, Imm8(32));
as_orr(dest.low, dest.low, lsl(dest.high, shift));
ma_neg(shift, shift, SetCC);
ma_b(&proceed, Signed);
@ -722,7 +762,7 @@ MacroAssembler::rshift64Arithmetic(Register unmaskedShift, Register64 dest)
as_orr(dest.low, dest.low, asr(dest.high, shift));
bind(&proceed);
ma_and(Imm32(0x3f), unmaskedShift, shift);
as_and(shift, unmaskedShift, Imm8(0x3f));
as_mov(dest.high, asr(dest.high, shift));
}
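
rshift64Arithmetic cannot use the same fully branch-free trick: the carry of the high word into the low word must itself be an arithmetic shift, and only for a non-negative computed amount, which is what the Signed test and the proceed label implement. An equivalent C++ sketch with the two cases made explicit (illustrative; assumes arithmetic >> on signed values, as on mainstream compilers):

#include <cstdint>

static void rshift64Arithmetic(uint32_t& lo, uint32_t& hi, uint32_t unmaskedShift) {
    uint32_t shift = unmaskedShift & 0x3f;
    if (shift < 32) {
        lo = (lo >> shift) | (shift ? hi << (32 - shift) : 0);
        hi = uint32_t(int32_t(hi) >> shift);
    } else {
        lo = uint32_t(int32_t(hi) >> (shift - 32));
        hi = uint32_t(int32_t(hi) >> 31); // fill with the sign bit
    }
}
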
@ -764,13 +804,13 @@ MacroAssembler::rshift64(Register unmaskedShift, Register64 dest)
// Note: one of the two dest.high shifts will always yield zero due to negative shift.
ScratchRegisterScope shift(*this);
ma_and(Imm32(0x3f), unmaskedShift, shift);
as_and(shift, unmaskedShift, Imm8(0x3f));
as_mov(dest.low, lsr(dest.low, shift));
ma_sub(shift, Imm32(32), shift);
as_sub(shift, shift, Imm8(32));
as_orr(dest.low, dest.low, lsr(dest.high, shift));
ma_neg(shift, shift);
as_orr(dest.low, dest.low, lsl(dest.high, shift));
ma_and(Imm32(0x3f), unmaskedShift, shift);
as_and(shift, unmaskedShift, Imm8(0x3f));
as_mov(dest.high, lsr(dest.high, shift));
}
@ -788,7 +828,8 @@ MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest)
void
MacroAssembler::rotateLeft(Register count, Register input, Register dest)
{
ma_rol(count, input, dest);
ScratchRegisterScope scratch(*this);
ma_rol(count, input, dest, scratch);
}
void
@ -833,35 +874,34 @@ MacroAssembler::rotateLeft64(Register shift, Register64 src, Register64 dest, Re
Label high, done;
ma_mov(src.high, temp);
ma_and(Imm32(0x3f), shift, shift_value);
ma_cmp(shift_value, Imm32(32));
as_and(shift_value, shift, Imm8(0x3f));
as_cmp(shift_value, Imm8(32));
ma_b(&high, GreaterThanOrEqual);
// high = high << shift | low >> 32 - shift
// low = low << shift | high >> 32 - shift
as_mov(dest.high, lsl(src.high, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_orr(dest.high, dest.high, lsr(src.low, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_mov(dest.low, lsl(src.low, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_orr(dest.low, dest.low, lsr(temp, shift_value));
ma_b(&done);
// A 32 - 64 shift is a 0 - 32 shift in the other direction.
bind(&high);
ma_rsb(Imm32(64), shift_value);
as_rsb(shift_value, shift_value, Imm8(64));
as_mov(dest.high, lsr(src.high, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_orr(dest.high, dest.high, lsl(src.low, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_mov(dest.low, lsr(src.low, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_orr(dest.low, dest.low, lsl(temp, shift_value));
bind(&done);
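
Both rotate helpers use the identity noted in the comment above: a rotate-left by an amount in [32, 64) is a rotate-right by 64 minus that amount, so the high branch recomputes the amount and swaps shift directions. At the 64-bit level the whole routine collapses to the usual rotate identity (illustrative sketch):

#include <cstdint>

static uint64_t rotl64(uint64_t v, uint32_t shift) {
    shift &= 0x3f;
    return shift ? (v << shift) | (v >> (64 - shift)) : v;
}
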
@ -924,35 +964,34 @@ MacroAssembler::rotateRight64(Register shift, Register64 src, Register64 dest, R
Label high, done;
ma_mov(src.high, temp);
ma_and(Imm32(0x3f), shift, shift_value);
ma_cmp(shift_value, Imm32(32));
as_and(shift_value, shift, Imm8(0x3f));
as_cmp(shift_value, Imm8(32));
ma_b(&high, GreaterThanOrEqual);
// high = high >> shift | low << 32 - shift
// low = low >> shift | high << 32 - shift
as_mov(dest.high, lsr(src.high, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_orr(dest.high, dest.high, lsl(src.low, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_mov(dest.low, lsr(src.low, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_orr(dest.low, dest.low, lsl(temp, shift_value));
ma_b(&done);
// A 32 - 64 shift is a 0 - 32 shift in the other direction.
bind(&high);
ma_rsb(Imm32(64), shift_value);
as_rsb(shift_value, shift_value, Imm8(64));
as_mov(dest.high, lsl(src.high, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_orr(dest.high, dest.high, lsr(src.low, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_mov(dest.low, lsl(src.low, shift_value));
ma_rsb(Imm32(32), shift_value);
as_rsb(shift_value, shift_value, Imm8(32));
as_orr(dest.low, dest.low, lsr(temp, shift_value));
bind(&done);
@ -971,25 +1010,26 @@ void
MacroAssembler::clz64(Register64 src, Register dest)
{
ScratchRegisterScope scratch(*this);
ma_clz(src.high, scratch);
ma_cmp(scratch, Imm32(32));
as_cmp(scratch, Imm8(32));
ma_mov(scratch, dest, LeaveCC, NotEqual);
ma_clz(src.low, dest, Equal);
ma_add(Imm32(32), dest, LeaveCC, Equal);
as_add(dest, dest, Imm8(32), LeaveCC, Equal);
}
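
clz64 above exploits CLZ returning 32 for a zero word: when the high word is all zeroes, the answer is 32 plus the CLZ of the low word, selected here with predicated moves instead of a branch. Sketch (illustrative; uses the GCC/Clang builtin):

#include <cstdint>

static uint32_t clz32(uint32_t v) { return v ? uint32_t(__builtin_clz(v)) : 32; }

static uint32_t clz64(uint32_t lo, uint32_t hi) {
    uint32_t n = clz32(hi); // 32 iff hi == 0
    return n != 32 ? n : 32 + clz32(lo);
}
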
void
MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero)
{
ma_ctz(src, dest);
ScratchRegisterScope scratch(*this);
ma_ctz(src, dest, scratch);
}
void
MacroAssembler::ctz64(Register64 src, Register dest)
{
Label done, high;
ma_cmp(src.low, Imm32(0));
as_cmp(src.low, Imm8(0));
ma_b(&high, Equal);
ctz32(src.low, dest, /* knownNotZero = */ true);
@ -997,7 +1037,7 @@ MacroAssembler::ctz64(Register64 src, Register dest)
bind(&high);
ctz32(src.high, dest, /* knownNotZero = */ false);
ma_add(Imm32(32), dest);
as_add(dest, dest, Imm8(32));
bind(&done);
}
@ -1007,17 +1047,20 @@ MacroAssembler::popcnt32(Register input, Register output, Register tmp)
{
// Equivalent to GCC output of mozilla::CountPopulation32()
ScratchRegisterScope scratch(*this);
if (input != output)
ma_mov(input, output);
as_mov(tmp, asr(output, 1));
ma_and(Imm32(0x55555555), tmp);
ma_and(Imm32(0x55555555), tmp, scratch);
ma_sub(output, tmp, output);
as_mov(tmp, asr(output, 2));
ma_and(Imm32(0x33333333), output);
ma_and(Imm32(0x33333333), tmp);
ma_mov(Imm32(0x33333333), scratch);
ma_and(scratch, output);
ma_and(scratch, tmp);
ma_add(output, tmp, output);
as_add(output, output, lsr(output, 4));
ma_and(Imm32(0xF0F0F0F), output);
ma_and(Imm32(0xF0F0F0F), output, scratch);
as_add(output, output, lsl(output, 8));
as_add(output, output, lsl(output, 16));
as_mov(output, asr(output, 24));
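
The popcnt32 sequence is the classic SWAR population count (matching mozilla::CountPopulation32, as the comment says): pairwise 2-bit sums, then 4-bit and 8-bit sums, then a multiply-free accumulation into the top byte via shifted adds. Masks like 0x55555555 and 0x33333333 are not encodable ARM immediates, hence the scratch register above. The same steps in C++ (illustrative sketch):

#include <cstdint>

static uint32_t popcnt32(uint32_t x) {
    x = x - ((x >> 1) & 0x55555555);                // 2-bit partial sums
    x = (x & 0x33333333) + ((x >> 2) & 0x33333333); // 4-bit partial sums
    x = (x + (x >> 4)) & 0x0F0F0F0F;                // 8-bit partial sums
    x += x << 8;                                    // fold sums into the top byte
    x += x << 16;
    return x >> 24;
}
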
@ -1058,7 +1101,9 @@ template <class L>
void
MacroAssembler::branch32(Condition cond, Register lhs, Imm32 rhs, L label)
{
ma_cmp(lhs, rhs);
ScratchRegisterScope scratch(*this);
ma_cmp(lhs, rhs, scratch);
ma_b(label, cond);
}
@ -1066,43 +1111,69 @@ void
MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs, Label* label)
{
ScratchRegisterScope scratch(*this);
load32(lhs, scratch);
branch32(cond, scratch, rhs, label);
SecondScratchRegisterScope scratch2(*this);
ma_ldr(lhs, scratch, scratch2);
ma_cmp(scratch, rhs);
ma_b(label, cond);
}
void
MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
{
// branch32 will use ScratchRegister.
AutoRegisterScope scratch(*this, secondScratchReg_);
load32(lhs, scratch);
branch32(cond, scratch, rhs, label);
ScratchRegisterScope scratch(*this);
SecondScratchRegisterScope scratch2(*this);
ma_ldr(lhs, scratch, scratch2);
ma_cmp(scratch, rhs, scratch2);
ma_b(label, cond);
}
void
MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
{
AutoRegisterScope scratch2(*this, secondScratchReg_);
loadPtr(lhs, scratch2); // ma_cmp will use the scratch register.
ma_cmp(scratch2, rhs);
ScratchRegisterScope scratch(*this);
// Load into scratch.
movePtr(ImmWord(uintptr_t(lhs.addr)), scratch);
ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);
ma_cmp(scratch, rhs);
ma_b(label, cond);
}
void
MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
{
AutoRegisterScope scratch2(*this, secondScratchReg_);
loadPtr(lhs, scratch2); // ma_cmp will use the scratch register.
ma_cmp(scratch2, rhs);
ScratchRegisterScope scratch(*this);
SecondScratchRegisterScope scratch2(*this);
// Load into scratch.
movePtr(ImmWord(uintptr_t(lhs.addr)), scratch);
ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);
ma_cmp(scratch, rhs, scratch2);
ma_b(label, cond);
}
void
MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label)
{
// branch32 will use ScratchRegister.
AutoRegisterScope scratch2(*this, secondScratchReg_);
load32(lhs, scratch2);
SecondScratchRegisterScope scratch2(*this);
{
ScratchRegisterScope scratch(*this);
Register base = lhs.base;
uint32_t scale = Imm32::ShiftOf(lhs.scale).value;
// Load lhs into scratch2.
if (lhs.offset != 0) {
ma_add(base, Imm32(lhs.offset), scratch, scratch2);
ma_ldr(DTRAddr(scratch, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
} else {
ma_ldr(DTRAddr(base, DtrRegImmShift(lhs.index, LSL, scale)), scratch2);
}
}
branch32(cond, scratch2, rhs, label);
}
@ -1110,8 +1181,13 @@ void
MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
{
ScratchRegisterScope scratch(*this);
loadPtr(lhs, scratch);
branch32(cond, scratch, rhs, label);
SecondScratchRegisterScope scratch2(*this);
movePtr(lhs, scratch);
ma_ldr(DTRAddr(scratch, DtrOffImm(0)), scratch);
ma_cmp(scratch, rhs, scratch2);
ma_b(label, cond);
}
void
@ -1297,7 +1373,7 @@ MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs, Label*
void
MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs, Label* label)
{
AutoRegisterScope scratch2(*this, secondScratchReg_);
SecondScratchRegisterScope scratch2(*this);
loadPtr(lhs, scratch2);
branchPtr(cond, scratch2, rhs, label);
}
@ -1305,7 +1381,7 @@ MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs, Labe
void
MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs, Label* label)
{
AutoRegisterScope scratch2(*this, secondScratchReg_);
SecondScratchRegisterScope scratch2(*this);
loadPtr(lhs, scratch2);
branchPtr(cond, scratch2, rhs, label);
}
@ -1313,42 +1389,45 @@ MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs, Label
void
MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
{
ScratchRegisterScope scratch(*this);
loadPtr(lhs, scratch);
branchPtr(cond, scratch, rhs, label);
SecondScratchRegisterScope scratch2(*this);
loadPtr(lhs, scratch2);
branchPtr(cond, scratch2, rhs, label);
}
void
MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
{
ScratchRegisterScope scratch(*this);
loadPtr(lhs, scratch);
branchPtr(cond, scratch, rhs, label);
SecondScratchRegisterScope scratch2(*this);
loadPtr(lhs, scratch2);
branchPtr(cond, scratch2, rhs, label);
}
void
MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
{
ScratchRegisterScope scratch(*this);
loadPtr(lhs, scratch);
branchPtr(cond, scratch, rhs, label);
SecondScratchRegisterScope scratch2(*this);
loadPtr(lhs, scratch2);
branchPtr(cond, scratch2, rhs, label);
}
template <typename T>
CodeOffsetJump
inline CodeOffsetJump
MacroAssembler::branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label)
{
ma_cmp(lhs, rhs);
cmpPtr(lhs, rhs);
return jumpWithPatch(label, cond);
}
template <typename T>
CodeOffsetJump
inline CodeOffsetJump
MacroAssembler::branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label)
{
AutoRegisterScope scratch2(*this, secondScratchReg_);
ma_ldr(lhs, scratch2);
ma_cmp(scratch2, rhs);
SecondScratchRegisterScope scratch2(*this);
{
ScratchRegisterScope scratch(*this);
ma_ldr(lhs, scratch2, scratch);
}
cmpPtr(scratch2, rhs);
return jumpWithPatch(label, cond);
}
@ -1391,11 +1470,13 @@ MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src, Register
void
MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
{
ScratchFloat32Scope scratch(*this);
ma_vcvt_F32_I32(src, scratch.sintOverlay());
ma_vxfer(scratch, dest);
ma_cmp(dest, Imm32(0x7fffffff));
ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
ScratchFloat32Scope scratchFloat32(*this);
ScratchRegisterScope scratch(*this);
ma_vcvt_F32_I32(src, scratchFloat32.sintOverlay());
ma_vxfer(scratchFloat32, dest);
ma_cmp(dest, Imm32(0x7fffffff), scratch);
ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::NotEqual);
ma_b(fail, Assembler::Equal);
}
@ -1440,13 +1521,14 @@ MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src, Register d
void
MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
{
ScratchDoubleScope scratch(*this);
FloatRegister scratchSIntReg = scratch.sintOverlay();
ScratchDoubleScope scratchDouble(*this);
FloatRegister scratchSIntReg = scratchDouble.sintOverlay();
ScratchRegisterScope scratch(*this);
ma_vcvt_F64_I32(src, scratchSIntReg);
ma_vxfer(scratchSIntReg, dest);
ma_cmp(dest, Imm32(0x7fffffff));
ma_cmp(dest, Imm32(0x80000000), Assembler::NotEqual);
ma_cmp(dest, Imm32(0x7fffffff), scratch);
ma_cmp(dest, Imm32(0x80000000), scratch, Assembler::NotEqual);
ma_b(fail, Assembler::Equal);
}
@ -1462,14 +1544,15 @@ template <typename T>
void
MacroAssembler::branchSub32(Condition cond, T src, Register dest, Label* label)
{
ma_sub(src, dest, SetCC);
sub32(src, dest);
j(cond, label);
}
void
MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label)
{
ma_sub(rhs, lhs, SetCC);
ScratchRegisterScope scratch(*this);
ma_sub(rhs, lhs, scratch, SetCC);
as_b(label, cond);
}
@ -1481,7 +1564,7 @@ MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs, L label
// x86 likes test foo, foo rather than cmp foo, #0.
// Convert the former into the latter.
if (lhs == rhs && (cond == Zero || cond == NonZero))
ma_cmp(lhs, Imm32(0));
as_cmp(lhs, Imm8(0));
else
ma_tst(lhs, rhs);
ma_b(label, cond);
@ -1492,15 +1575,15 @@ void
MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs, L label)
{
MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
ma_tst(lhs, rhs);
ScratchRegisterScope scratch(*this);
ma_tst(lhs, rhs, scratch);
ma_b(label, cond);
}
void
MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs, Label* label)
{
// branchTest32 will use ScratchRegister.
AutoRegisterScope scratch2(*this, secondScratchReg_);
SecondScratchRegisterScope scratch2(*this);
load32(lhs, scratch2);
branchTest32(cond, scratch2, rhs, label);
}
@ -1508,8 +1591,7 @@ MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs, Labe
void
MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
{
// branchTest32 will use ScratchRegister.
AutoRegisterScope scratch2(*this, secondScratchReg_);
SecondScratchRegisterScope scratch2(*this);
load32(lhs, scratch2);
branchTest32(cond, scratch2, rhs, label);
}
@ -1538,11 +1620,13 @@ void
MacroAssembler::branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
L label)
{
ScratchRegisterScope scratch(*this);
if (cond == Assembler::Zero) {
MOZ_ASSERT(lhs.low == rhs.low);
MOZ_ASSERT(lhs.high == rhs.high);
ma_orr(lhs.low, lhs.high, ScratchRegister);
branchTestPtr(cond, ScratchRegister, ScratchRegister, label);
ma_orr(lhs.low, lhs.high, scratch);
branchTestPtr(cond, scratch, scratch, label);
} else {
MOZ_CRASH("Unsupported condition");
}
@ -1925,25 +2009,31 @@ MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr, JSWhyMag
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& addr)
{
ma_vstr(src, addr);
ScratchRegisterScope scratch(*this);
ma_vstr(src, addr, scratch);
}
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& addr)
{
ScratchRegisterScope scratch(*this);
SecondScratchRegisterScope scratch2(*this);
uint32_t scale = Imm32::ShiftOf(addr.scale).value;
ma_vstr(src, addr.base, addr.index, scale, addr.offset);
ma_vstr(src, addr.base, addr.index, scratch, scratch2, scale, addr.offset);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const Address& addr)
{
ma_vstr(src.asSingle(), addr);
ScratchRegisterScope scratch(*this);
ma_vstr(src.asSingle(), addr, scratch);
}
void
MacroAssembler::storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& addr)
{
ScratchRegisterScope scratch(*this);
SecondScratchRegisterScope scratch2(*this);
uint32_t scale = Imm32::ShiftOf(addr.scale).value;
ma_vstr(src.asSingle(), addr.base, addr.index, scale, addr.offset);
ma_vstr(src.asSingle(), addr.base, addr.index, scratch, scratch2, scale, addr.offset);
}
void

File diff suppressed because it is too large


@ -43,6 +43,11 @@ class MacroAssemblerARM : public Assembler
// address.
Register secondScratchReg_;
public:
Register getSecondScratchReg() const {
return secondScratchReg_;
}
public:
// Higher level tag testing code.
// TODO: Can probably remove the Operand versions.
@ -103,12 +108,10 @@ class MacroAssemblerARM : public Assembler
SBit s, Condition c);
public:
void ma_alu(Register src1, Imm32 imm, Register dest, AutoRegisterScope& scratch,
ALUOp op, SBit s = LeaveCC, Condition c = Always);
void ma_alu(Register src1, Operand2 op2, Register dest, ALUOp op,
SBit s = LeaveCC, Condition c = Always);
void ma_alu(Register src1, Imm32 imm, Register dest,
ALUOp op,
SBit s = LeaveCC, Condition c = Always);
void ma_alu(Register src1, Operand op2, Register dest, ALUOp op,
SBit s = LeaveCC, Condition c = Always);
void ma_nop();
@ -141,7 +144,7 @@ class MacroAssemblerARM : public Assembler
void ma_lsr(Register shift, Register src, Register dst);
void ma_asr(Register shift, Register src, Register dst);
void ma_ror(Register shift, Register src, Register dst);
void ma_rol(Register shift, Register src, Register dst);
void ma_rol(Register shift, Register src, Register dst, AutoRegisterScope& scratch);
// Move not (dest <- ~src)
void ma_mvn(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
@ -157,14 +160,14 @@ class MacroAssemblerARM : public Assembler
void ma_and(Register src1, Register src2, Register dest,
SBit s = LeaveCC, Condition c = Always);
void ma_and(Imm32 imm, Register dest,
void ma_and(Imm32 imm, Register dest, AutoRegisterScope& scratch,
SBit s = LeaveCC, Condition c = Always);
void ma_and(Imm32 imm, Register src1, Register dest,
void ma_and(Imm32 imm, Register src1, Register dest, AutoRegisterScope& scratch,
SBit s = LeaveCC, Condition c = Always);
// Bit clear (dest <- dest & ~imm) or (dest <- src1 & ~src2)
void ma_bic(Imm32 imm, Register dest,
void ma_bic(Imm32 imm, Register dest, AutoRegisterScope& scratch,
SBit s = LeaveCC, Condition c = Always);
// Exclusive or
@ -174,13 +177,12 @@ class MacroAssemblerARM : public Assembler
void ma_eor(Register src1, Register src2, Register dest,
SBit s = LeaveCC, Condition c = Always);
void ma_eor(Imm32 imm, Register dest,
void ma_eor(Imm32 imm, Register dest, AutoRegisterScope& scratch,
SBit s = LeaveCC, Condition c = Always);
void ma_eor(Imm32 imm, Register src1, Register dest,
void ma_eor(Imm32 imm, Register src1, Register dest, AutoRegisterScope& scratch,
SBit s = LeaveCC, Condition c = Always);
// Or
void ma_orr(Register src, Register dest,
SBit s = LeaveCC, Condition c = Always);
@ -188,112 +190,118 @@ class MacroAssemblerARM : public Assembler
void ma_orr(Register src1, Register src2, Register dest,
SBit s = LeaveCC, Condition c = Always);
void ma_orr(Imm32 imm, Register dest,
void ma_orr(Imm32 imm, Register dest, AutoRegisterScope& scratch,
SBit s = LeaveCC, Condition c = Always);
void ma_orr(Imm32 imm, Register src1, Register dest,
void ma_orr(Imm32 imm, Register src1, Register dest, AutoRegisterScope& scratch,
SBit s = LeaveCC, Condition c = Always);
// Arithmetic based ops.
// Add with carry:
void ma_adc(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_adc(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
void ma_adc(Register src, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_adc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
// Add:
void ma_add(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_add(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
void ma_add(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_add(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_add(Register src1, Operand op, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_add(Register src1, Imm32 op, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_add(Register src1, Imm32 op, Register dest, AutoRegisterScope& scratch,
SBit s = LeaveCC, Condition c = Always);
// Subtract with carry:
void ma_sbc(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sbc(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
void ma_sbc(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sbc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
// Subtract:
void ma_sub(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sub(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
void ma_sub(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sub(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sub(Register src1, Operand op, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sub(Register src1, Imm32 op, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_sub(Register src1, Imm32 op, Register dest, AutoRegisterScope& scratch,
SBit s = LeaveCC, Condition c = Always);
// Reverse subtract:
void ma_rsb(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_rsb(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
void ma_rsb(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_rsb(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_rsb(Register src1, Imm32 op2, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_rsb(Register src1, Imm32 op2, Register dest, AutoRegisterScope& scratch,
SBit s = LeaveCC, Condition c = Always);
// Reverse subtract with carry:
void ma_rsc(Imm32 imm, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_rsc(Imm32 imm, Register dest, AutoRegisterScope& scratch, SBit s = LeaveCC, Condition c = Always);
void ma_rsc(Register src1, Register dest, SBit s = LeaveCC, Condition c = Always);
void ma_rsc(Register src1, Register src2, Register dest, SBit s = LeaveCC, Condition c = Always);
// Compares/tests.
// Compare negative (sets condition codes as src1 + src2 would):
void ma_cmn(Register src1, Imm32 imm, Condition c = Always);
void ma_cmn(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c = Always);
void ma_cmn(Register src1, Register src2, Condition c = Always);
void ma_cmn(Register src1, Operand op, Condition c = Always);
// Compare (src1 - src2):
void ma_cmp(Register src1, Imm32 imm, Condition c = Always);
void ma_cmp(Register src1, ImmWord ptr, Condition c = Always);
void ma_cmp(Register src1, ImmGCPtr ptr, Condition c = Always);
void ma_cmp(Register src1, Operand op, Condition c = Always);
void ma_cmp(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c = Always);
void ma_cmp(Register src1, ImmTag tag, Condition c = Always);
void ma_cmp(Register src1, ImmWord ptr, AutoRegisterScope& scratch, Condition c = Always);
void ma_cmp(Register src1, ImmGCPtr ptr, AutoRegisterScope& scratch, Condition c = Always);
void ma_cmp(Register src1, Operand op, AutoRegisterScope& scratch, AutoRegisterScope& scratch2,
Condition c = Always);
void ma_cmp(Register src1, Register src2, Condition c = Always);
// Test for equality, (src1 ^ src2):
void ma_teq(Register src1, Imm32 imm, Condition c = Always);
void ma_teq(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c = Always);
void ma_teq(Register src1, Register src2, Condition c = Always);
void ma_teq(Register src1, Operand op, Condition c = Always);
// Test (src1 & src2):
void ma_tst(Register src1, Imm32 imm, Condition c = Always);
void ma_tst(Register src1, Imm32 imm, AutoRegisterScope& scratch, Condition c = Always);
void ma_tst(Register src1, Register src2, Condition c = Always);
void ma_tst(Register src1, Operand op, Condition c = Always);
// Multiplies. For now, there are only two that we care about.
void ma_mul(Register src1, Register src2, Register dest);
void ma_mul(Register src1, Imm32 imm, Register dest);
Condition ma_check_mul(Register src1, Register src2, Register dest, Condition cond);
Condition ma_check_mul(Register src1, Imm32 imm, Register dest, Condition cond);
void ma_mul(Register src1, Imm32 imm, Register dest, AutoRegisterScope& scratch);
Condition ma_check_mul(Register src1, Register src2, Register dest,
AutoRegisterScope& scratch, Condition cond);
Condition ma_check_mul(Register src1, Imm32 imm, Register dest,
AutoRegisterScope& scratch, Condition cond);
void ma_umull(Register src1, Imm32 imm, Register destHigh, Register destLow, AutoRegisterScope& scratch);
void ma_umull(Register src1, Register src2, Register destHigh, Register destLow);
void ma_umull(Register src1, Imm32 imm, Register destHigh, Register destLow);
// Fast mod: uses scratch registers and thus needs to be in the assembler;
// implicitly assumes that we can overwrite dest at the beginning of the
// sequence.
void ma_mod_mask(Register src, Register dest, Register hold, Register tmp,
int32_t shift);
AutoRegisterScope& scratch, AutoRegisterScope& scratch2, int32_t shift);
// Mod - depends on integer divide instructions being supported.
void ma_smod(Register num, Register div, Register dest);
void ma_umod(Register num, Register div, Register dest);
void ma_smod(Register num, Register div, Register dest, AutoRegisterScope& scratch);
void ma_umod(Register num, Register div, Register dest, AutoRegisterScope& scratch);
// Division - depends on integer divide instructions being supported.
void ma_sdiv(Register num, Register div, Register dest, Condition cond = Always);
void ma_udiv(Register num, Register div, Register dest, Condition cond = Always);
// Misc operations
void ma_clz(Register src, Register dest, Condition cond = Always);
void ma_ctz(Register src, Register dest);
void ma_ctz(Register src, Register dest, AutoRegisterScope& scratch);
// Memory:
// Shortcut for when we know we're transferring 32 bits of data.
void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt,
void ma_dtr(LoadStore ls, Register rn, Imm32 offset, Register rt, AutoRegisterScope& scratch,
Index mode = Offset, Condition cc = Always);
void ma_dtr(LoadStore ls, Register rn, Register rm, Register rt,
Index mode = Offset, Condition cc = Always);
void ma_dtr(LoadStore ls, Register rt, const Address& addr, AutoRegisterScope& scratch,
Index mode, Condition cc);
void ma_str(Register rt, DTRAddr addr, Index mode = Offset, Condition cc = Always);
void ma_str(Register rt, const Address& addr, Index mode = Offset, Condition cc = Always);
void ma_dtr(LoadStore ls, Register rt, const Address& addr, Index mode, Condition cc);
void ma_str(Register rt, const Address& addr, AutoRegisterScope& scratch,
Index mode = Offset, Condition cc = Always);
void ma_ldr(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
void ma_ldr(const Address& addr, Register rt, Index mode = Offset, Condition cc = Always);
void ma_ldr(const Address& addr, Register rt, AutoRegisterScope& scratch,
Index mode = Offset, Condition cc = Always);
void ma_ldrb(DTRAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
void ma_ldrh(EDtrAddr addr, Register rt, Index mode = Offset, Condition cc = Always);
@ -308,16 +316,22 @@ class MacroAssemblerARM : public Assembler
// Specialty for moving N bits of data, where n == 8,16,32,64.
BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
Register rn, Register rm, Register rt,
Register rn, Register rm, Register rt, AutoRegisterScope& scratch,
Index mode = Offset, Condition cc = Always,
unsigned scale = TimesOne);
Scale scale = TimesOne);
BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
Register rn, Imm32 offset, Register rt,
Register rn, Register rm, Register rt,
Index mode = Offset, Condition cc = Always);
BufferOffset ma_dataTransferN(LoadStore ls, int size, bool IsSigned,
Register rn, Imm32 offset, Register rt, AutoRegisterScope& scratch,
Index mode = Offset, Condition cc = Always);
void ma_pop(Register r);
void ma_popn_pc(Imm32 n, AutoRegisterScope& scratch, AutoRegisterScope& scratch2);
void ma_push(Register r);
void ma_push_sp(Register r, AutoRegisterScope& scratch);
void ma_vpop(VFPRegister r);
void ma_vpush(VFPRegister r);
@ -397,17 +411,20 @@ class MacroAssemblerARM : public Assembler
// Transfer (do not coerce) a couple of gpr into a double
void ma_vxfer(Register src1, Register src2, FloatRegister dest, Condition cc = Always);
BufferOffset ma_vdtr(LoadStore ls, const Address& addr, VFPRegister dest, Condition cc = Always);
BufferOffset ma_vdtr(LoadStore ls, const Address& addr, VFPRegister dest, AutoRegisterScope& scratch,
Condition cc = Always);
BufferOffset ma_vldr(VFPAddr addr, VFPRegister dest, Condition cc = Always);
BufferOffset ma_vldr(const Address& addr, VFPRegister dest, Condition cc = Always);
BufferOffset ma_vldr(VFPRegister src, Register base, Register index,
BufferOffset ma_vldr(const Address& addr, VFPRegister dest, AutoRegisterScope& scratch, Condition cc = Always);
BufferOffset ma_vldr(VFPRegister src, Register base, Register index, AutoRegisterScope& scratch,
int32_t shift = defaultShift, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, VFPAddr addr, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, const Address& addr, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, Register base, Register index, int32_t shift,
int32_t offset, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, const Address& addr, AutoRegisterScope& scratch, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, Register base, Register index, AutoRegisterScope& scratch,
AutoRegisterScope& scratch2, int32_t shift, int32_t offset, Condition cc = Always);
BufferOffset ma_vstr(VFPRegister src, Register base, Register index, AutoRegisterScope& scratch,
int32_t shift, Condition cc = Always);
void ma_call(ImmPtr dest);
@ -539,8 +556,9 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_pop(pc);
}
void retn(Imm32 n) {
// pc <- [sp]; sp += n
ma_dtr(IsLoad, sp, n, pc, PostIndex);
ScratchRegisterScope scratch(asMasm());
SecondScratchRegisterScope scratch2(asMasm());
ma_popn_pc(n, scratch, scratch2);
}
void push(Imm32 imm) {
ScratchRegisterScope scratch(asMasm());
@ -557,26 +575,32 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
}
void push(const Address& addr) {
ScratchRegisterScope scratch(asMasm());
ma_ldr(addr, scratch);
SecondScratchRegisterScope scratch2(asMasm());
ma_ldr(addr, scratch, scratch2);
ma_push(scratch);
}
void push(Register reg) {
ma_push(reg);
if (reg == sp) {
ScratchRegisterScope scratch(asMasm());
ma_push_sp(reg, scratch);
} else {
ma_push(reg);
}
}
void push(FloatRegister reg) {
ma_vpush(VFPRegister(reg));
}
void pushWithPadding(Register reg, const Imm32 extraSpace) {
ScratchRegisterScope scratch(asMasm());
Imm32 totSpace = Imm32(extraSpace.value + 4);
ma_dtr(IsStore, sp, totSpace, reg, PreIndex);
ma_dtr(IsStore, sp, totSpace, reg, scratch, PreIndex);
}
void pushWithPadding(Imm32 imm, const Imm32 extraSpace) {
AutoRegisterScope scratch2(asMasm(), secondScratchReg_);
ScratchRegisterScope scratch(asMasm());
SecondScratchRegisterScope scratch2(asMasm());
Imm32 totSpace = Imm32(extraSpace.value + 4);
// ma_dtr may need the scratch register to adjust the stack, so use the
// second scratch register.
ma_mov(imm, scratch2);
ma_dtr(IsStore, sp, totSpace, scratch2, PreIndex);
ma_mov(imm, scratch);
ma_dtr(IsStore, sp, totSpace, scratch, scratch2, PreIndex);
}
void pop(Register reg) {
@ -587,8 +611,9 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
}
void popN(Register reg, Imm32 extraSpace) {
ScratchRegisterScope scratch(asMasm());
Imm32 totSpace = Imm32(extraSpace.value + 4);
ma_dtr(IsLoad, sp, totSpace, reg, PostIndex);
ma_dtr(IsLoad, sp, totSpace, reg, scratch, PostIndex);
}
CodeOffset toggledJump(Label* label);
@ -624,7 +649,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
}
void jump(const Address& addr) {
ScratchRegisterScope scratch(asMasm());
ma_ldr(addr, scratch);
SecondScratchRegisterScope scratch2(asMasm());
ma_ldr(addr, scratch, scratch2);
ma_bx(scratch);
}
void jump(wasm::JumpTarget target) {
@ -638,12 +664,14 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_tst(lhs, rhs);
}
void test32(Register lhs, Imm32 imm) {
ma_tst(lhs, imm);
ScratchRegisterScope scratch(asMasm());
ma_tst(lhs, imm, scratch);
}
void test32(const Address& addr, Imm32 imm) {
ScratchRegisterScope scratch(asMasm());
ma_ldr(addr, scratch);
ma_tst(scratch, imm);
SecondScratchRegisterScope scratch2(asMasm());
ma_ldr(addr, scratch, scratch2);
ma_tst(scratch, imm, scratch2);
}
void testPtr(Register lhs, Register rhs) {
test32(lhs, rhs);
@ -725,7 +753,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void unboxPrivate(const ValueOperand& src, Register dest);
void notBoolean(const ValueOperand& val) {
ma_eor(Imm32(1), val.payloadReg());
as_eor(val.payloadReg(), val.payloadReg(), Imm8(1));
}
// Boxing code.
@ -779,10 +807,12 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
}
void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
if (dest.isFloat())
if (dest.isFloat()) {
loadInt32OrDouble(address, dest.fpu());
else
ma_ldr(address, dest.gpr());
} else {
ScratchRegisterScope scratch(asMasm());
ma_ldr(address, dest.gpr(), scratch);
}
}
void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
@ -839,30 +869,90 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void storeValue(ValueOperand val, const BaseIndex& dest);
void storeValue(JSValueType type, Register reg, BaseIndex dest) {
ScratchRegisterScope scratch(asMasm());
SecondScratchRegisterScope scratch2(asMasm());
int32_t payloadoffset = dest.offset + NUNBOX32_PAYLOAD_OFFSET;
int32_t typeoffset = dest.offset + NUNBOX32_TYPE_OFFSET;
ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
storeValue(type, reg, Address(scratch, dest.offset));
// Store the payload.
if (payloadoffset < 4096 && payloadoffset > -4096)
ma_str(reg, DTRAddr(scratch, DtrOffImm(payloadoffset)));
else
ma_str(reg, Address(scratch, payloadoffset), scratch2);
// Store the type.
if (typeoffset < 4096 && typeoffset > -4096) {
// Encodable as DTRAddr, so only two instructions needed.
ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch2);
ma_str(scratch2, DTRAddr(scratch, DtrOffImm(typeoffset)));
} else {
// Since there are only two scratch registers, the offset must be
// applied early using a third instruction to be safe.
ma_add(Imm32(typeoffset), scratch, scratch2);
ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch2);
ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
}
}
void storeValue(JSValueType type, Register reg, Address dest) {
ma_str(reg, dest);
AutoRegisterScope scratch2(asMasm(), secondScratchReg_);
ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch2);
ma_str(scratch2, Address(dest.base, dest.offset + 4));
ScratchRegisterScope scratch(asMasm());
SecondScratchRegisterScope scratch2(asMasm());
ma_str(reg, dest, scratch2);
ma_mov(ImmTag(JSVAL_TYPE_TO_TAG(type)), scratch);
ma_str(scratch, Address(dest.base, dest.offset + NUNBOX32_TYPE_OFFSET), scratch2);
}
void storeValue(const Value& val, const Address& dest) {
AutoRegisterScope scratch2(asMasm(), secondScratchReg_);
ScratchRegisterScope scratch(asMasm());
SecondScratchRegisterScope scratch2(asMasm());
jsval_layout jv = JSVAL_TO_IMPL(val);
ma_mov(Imm32(jv.s.tag), scratch2);
ma_str(scratch2, ToType(dest));
ma_mov(Imm32(jv.s.tag), scratch);
ma_str(scratch, ToType(dest), scratch2);
if (val.isMarkable())
ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell*>(val.toGCThing())), scratch2);
ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell*>(val.toGCThing())), scratch);
else
ma_mov(Imm32(jv.s.payload.i32), scratch2);
ma_str(scratch2, ToPayload(dest));
ma_mov(Imm32(jv.s.payload.i32), scratch);
ma_str(scratch, ToPayload(dest), scratch2);
}
void storeValue(const Value& val, BaseIndex dest) {
ScratchRegisterScope scratch(asMasm());
SecondScratchRegisterScope scratch2(asMasm());
jsval_layout jv = JSVAL_TO_IMPL(val);
int32_t typeoffset = dest.offset + NUNBOX32_TYPE_OFFSET;
int32_t payloadoffset = dest.offset + NUNBOX32_PAYLOAD_OFFSET;
ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
storeValue(val, Address(scratch, dest.offset));
// Store the type.
if (typeoffset < 4096 && typeoffset > -4096) {
ma_mov(Imm32(jv.s.tag), scratch2);
ma_str(scratch2, DTRAddr(scratch, DtrOffImm(typeoffset)));
} else {
ma_add(Imm32(typeoffset), scratch, scratch2);
ma_mov(Imm32(jv.s.tag), scratch2);
ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
// Restore scratch for the payload store.
ma_alu(dest.base, lsl(dest.index, dest.scale), scratch, OpAdd);
}
// Store the payload, marking if necessary.
if (payloadoffset < 4096 && payloadoffset > -4096) {
if (val.isMarkable())
ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell*>(val.toGCThing())), scratch2);
else
ma_mov(Imm32(jv.s.payload.i32), scratch2);
ma_str(scratch2, DTRAddr(scratch, DtrOffImm(payloadoffset)));
} else {
ma_add(Imm32(payloadoffset), scratch, scratch2);
if (val.isMarkable())
ma_mov(ImmGCPtr(reinterpret_cast<gc::Cell*>(val.toGCThing())), scratch2);
else
ma_mov(Imm32(jv.s.payload.i32), scratch2);
ma_str(scratch2, DTRAddr(scratch, DtrOffImm(0)));
}
}
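
The ±4096 bounds checked in the two storeValue overloads above come from the ARM single-data-transfer encoding: the immediate offset field is 12 bits of magnitude plus an up/down bit, so anything strictly inside (-4096, 4096) folds into a single ldr/str. A hedged standalone version of that predicate (the helper name is illustrative, not the assembler's API):

#include <cstdint>

// True if an offset fits the 12-bit immediate of an ARM word LDR/STR.
// Offsets outside this range force the slow paths above, which first add
// the offset into a scratch register and then store with offset 0.
inline bool fitsInDtrOffImm(int32_t offset) {
    return offset > -4096 && offset < 4096;
}
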
void storeValue(const Address& src, const Address& dest, Register temp) {
load32(ToType(src), temp);
@ -1403,12 +1493,14 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
}
void computeEffectiveAddress(const Address& address, Register dest) {
ma_add(address.base, Imm32(address.offset), dest, LeaveCC);
ScratchRegisterScope scratch(asMasm());
ma_add(address.base, Imm32(address.offset), dest, scratch, LeaveCC);
}
void computeEffectiveAddress(const BaseIndex& address, Register dest) {
ScratchRegisterScope scratch(asMasm());
ma_alu(address.base, lsl(address.index, address.scale), dest, OpAdd, LeaveCC);
if (address.offset)
ma_add(dest, Imm32(address.offset), dest, LeaveCC);
ma_add(dest, Imm32(address.offset), dest, scratch, LeaveCC);
}
void floor(FloatRegister input, Register output, Label* handleNotAnInt);
void floorf(FloatRegister input, Register output, Label* handleNotAnInt);
@ -1422,24 +1514,21 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
// This is the instruction sequence that gcc generated for this
// operation.
ScratchRegisterScope scratch(asMasm());
ma_sub(r, Imm32(0x80000001), scratch);
ma_cmn(scratch, Imm32(3));
SecondScratchRegisterScope scratch2(asMasm());
ma_sub(r, Imm32(0x80000001), scratch, scratch2);
as_cmn(scratch, Imm8(3));
ma_b(handleNotAnInt, Above);
}
void lea(Operand addr, Register dest) {
ma_add(addr.baseReg(), Imm32(addr.disp()), dest);
ScratchRegisterScope scratch(asMasm());
ma_add(addr.baseReg(), Imm32(addr.disp()), dest, scratch);
}
void abiret() {
as_bx(lr);
}
void ma_storeImm(Imm32 c, const Address& dest) {
ma_mov(c, lr);
ma_str(lr, dest);
}
void moveFloat32(FloatRegister src, FloatRegister dest, Condition cc = Always) {
as_vmov(VFPRegister(dest).singleOverlay(), VFPRegister(src).singleOverlay(), cc);
}
@ -1448,9 +1537,10 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
loadPtr(Address(GlobalReg, globalDataOffset - AsmJSGlobalRegBias), dest);
}
void loadWasmPinnedRegsFromTls() {
loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg);
ma_add(Imm32(AsmJSGlobalRegBias), GlobalReg);
ScratchRegisterScope scratch(asMasm());
ma_ldr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg, scratch);
ma_ldr(Address(WasmTlsReg, offsetof(wasm::TlsData, globalData)), GlobalReg, scratch);
ma_add(Imm32(AsmJSGlobalRegBias), GlobalReg, scratch);
}
// Instrumentation for entering and leaving the profiler.
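
The running theme in this header is that every ma_* helper that might need a scratch register now takes an explicit AutoRegisterScope, so scratch usage is declared at the call site instead of hidden inside the assembler. A minimal standalone sketch of that RAII discipline, using made-up names (ScratchPool, ScratchScope) rather than the actual SpiderMonkey classes:

#include <cassert>
#include <cstdint>

// Hypothetical pool tracking which scratch registers are currently claimed.
struct ScratchPool {
    uint32_t claimed = 0;  // bitmask over register codes
};

// RAII scope: claims a register on construction, releases it on destruction,
// and asserts on a double-claim -- the conflict this patch makes visible.
class ScratchScope {
    ScratchPool& pool_;
    uint32_t bit_;
  public:
    ScratchScope(ScratchPool& pool, unsigned code)
      : pool_(pool), bit_(1u << code) {
        assert(!(pool_.claimed & bit_) && "scratch register already in use");
        pool_.claimed |= bit_;
    }
    ~ScratchScope() { pool_.claimed &= ~bit_; }
    ScratchScope(const ScratchScope&) = delete;
    ScratchScope& operator=(const ScratchScope&) = delete;
};

void example(ScratchPool& pool) {
    ScratchScope scratch(pool, 12);   // like ScratchRegisterScope (ip on ARM)
    ScratchScope scratch2(pool, 14);  // like SecondScratchRegisterScope
    // Claiming code 12 again here would assert instead of silently clobbering.
}
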

View File

@ -89,7 +89,8 @@ MoveEmitterARM::tempReg()
masm.Push(spilledReg_);
pushedAtSpill_ = masm.framePushed();
} else {
masm.ma_str(spilledReg_, spillSlot());
ScratchRegisterScope scratch(masm);
masm.ma_str(spilledReg_, spillSlot(), scratch);
}
return spilledReg_;
}
@ -104,38 +105,41 @@ MoveEmitterARM::breakCycle(const MoveOperand& from, const MoveOperand& to,
//
// This case handles (A -> B), which we reach first. We save B, then allow
// the original move to continue.
ScratchRegisterScope scratch(masm);
switch (type) {
case MoveOp::FLOAT32:
if (to.isMemory()) {
VFPRegister temp = ScratchFloat32Reg;
masm.ma_vldr(toAddress(to), temp);
ScratchFloat32Scope scratchFloat32(masm);
masm.ma_vldr(toAddress(to), scratchFloat32, scratch);
                // Since it is uncertain whether the load will be aligned or not,
                // just fill both of them with the same value.
masm.ma_vstr(temp, cycleSlot(slotId, 0));
masm.ma_vstr(temp, cycleSlot(slotId, 4));
masm.ma_vstr(scratchFloat32, cycleSlot(slotId, 0), scratch);
masm.ma_vstr(scratchFloat32, cycleSlot(slotId, 4), scratch);
} else if (to.isGeneralReg()) {
                // Since it is uncertain whether the load will be aligned or not,
                // just fill both of them with the same value.
masm.ma_str(to.reg(), cycleSlot(slotId, 0));
masm.ma_str(to.reg(), cycleSlot(slotId, 4));
masm.ma_str(to.reg(), cycleSlot(slotId, 0), scratch);
masm.ma_str(to.reg(), cycleSlot(slotId, 4), scratch);
} else {
FloatRegister src = to.floatReg();
// Just always store the largest possible size. Currently, this is
// a double. When SIMD is added, two doubles will need to be stored.
masm.ma_vstr(src.doubleOverlay(), cycleSlot(slotId, 0));
masm.ma_vstr(src.doubleOverlay(), cycleSlot(slotId, 0), scratch);
}
break;
case MoveOp::DOUBLE:
if (to.isMemory()) {
ScratchDoubleScope scratch(masm);
masm.ma_vldr(toAddress(to), scratch);
masm.ma_vstr(scratch, cycleSlot(slotId, 0));
ScratchDoubleScope scratchDouble(masm);
masm.ma_vldr(toAddress(to), scratchDouble, scratch);
masm.ma_vstr(scratchDouble, cycleSlot(slotId, 0), scratch);
} else if (to.isGeneralRegPair()) {
ScratchDoubleScope scratch(masm);
masm.ma_vxfer(to.evenReg(), to.oddReg(), scratch);
masm.ma_vstr(scratch, cycleSlot(slotId, 0));
ScratchDoubleScope scratchDouble(masm);
masm.ma_vxfer(to.evenReg(), to.oddReg(), scratchDouble);
masm.ma_vstr(scratchDouble, cycleSlot(slotId, 0), scratch);
} else {
masm.ma_vstr(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0));
masm.ma_vstr(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0), scratch);
}
break;
case MoveOp::INT32:
@ -143,15 +147,15 @@ MoveEmitterARM::breakCycle(const MoveOperand& from, const MoveOperand& to,
      // a non-vfp value
if (to.isMemory()) {
Register temp = tempReg();
masm.ma_ldr(toAddress(to), temp);
masm.ma_str(temp, cycleSlot(0,0));
masm.ma_ldr(toAddress(to), temp, scratch);
masm.ma_str(temp, cycleSlot(0,0), scratch);
} else {
if (to.reg() == spilledReg_) {
// If the destination was spilled, restore it first.
masm.ma_ldr(spillSlot(), spilledReg_);
masm.ma_ldr(spillSlot(), spilledReg_, scratch);
spilledReg_ = InvalidReg;
}
masm.ma_str(to.reg(), cycleSlot(0,0));
masm.ma_str(to.reg(), cycleSlot(0,0), scratch);
}
break;
default:
@ -168,39 +172,42 @@ MoveEmitterARM::completeCycle(const MoveOperand& from, const MoveOperand& to, Mo
//
// This case handles (B -> A), which we reach last. We emit a move from the
// saved value of B, to A.
ScratchRegisterScope scratch(masm);
switch (type) {
case MoveOp::FLOAT32:
MOZ_ASSERT(!to.isGeneralRegPair());
if (to.isMemory()) {
ScratchFloat32Scope scratch(masm);
masm.ma_vldr(cycleSlot(slotId, 0), scratch);
masm.ma_vstr(scratch, toAddress(to));
ScratchFloat32Scope scratchFloat32(masm);
masm.ma_vldr(cycleSlot(slotId, 0), scratchFloat32, scratch);
masm.ma_vstr(scratchFloat32, toAddress(to), scratch);
} else if (to.isGeneralReg()) {
MOZ_ASSERT(type == MoveOp::FLOAT32);
masm.ma_ldr(toAddress(from), to.reg());
masm.ma_ldr(toAddress(from), to.reg(), scratch);
} else {
uint32_t offset = 0;
if ((!from.isMemory()) && from.floatReg().numAlignedAliased() == 1)
offset = sizeof(float);
masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg());
masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg(), scratch);
}
break;
case MoveOp::DOUBLE:
MOZ_ASSERT(!to.isGeneralReg());
if (to.isMemory()) {
ScratchDoubleScope scratch(masm);
masm.ma_vldr(cycleSlot(slotId, 0), scratch);
masm.ma_vstr(scratch, toAddress(to));
ScratchDoubleScope scratchDouble(masm);
masm.ma_vldr(cycleSlot(slotId, 0), scratchDouble, scratch);
masm.ma_vstr(scratchDouble, toAddress(to), scratch);
} else if (to.isGeneralRegPair()) {
MOZ_ASSERT(type == MoveOp::DOUBLE);
ScratchDoubleScope scratch(masm);
masm.ma_vldr(toAddress(from), scratch);
masm.ma_vxfer(scratch, to.evenReg(), to.oddReg());
ScratchDoubleScope scratchDouble(masm);
masm.ma_vldr(toAddress(from), scratchDouble, scratch);
masm.ma_vxfer(scratchDouble, to.evenReg(), to.oddReg());
} else {
uint32_t offset = 0;
if ((!from.isMemory()) && from.floatReg().numAlignedAliased() == 1)
offset = sizeof(float);
masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg());
masm.ma_vldr(cycleSlot(slotId, offset), to.floatReg(), scratch);
}
break;
case MoveOp::INT32:
@ -208,14 +215,14 @@ MoveEmitterARM::completeCycle(const MoveOperand& from, const MoveOperand& to, Mo
MOZ_ASSERT(slotId == 0);
if (to.isMemory()) {
Register temp = tempReg();
masm.ma_ldr(cycleSlot(slotId, 0), temp);
masm.ma_str(temp, toAddress(to));
masm.ma_ldr(cycleSlot(slotId, 0), temp, scratch);
masm.ma_str(temp, toAddress(to), scratch);
} else {
if (to.reg() == spilledReg_) {
// Make sure we don't re-clobber the spilled register later.
spilledReg_ = InvalidReg;
}
masm.ma_ldr(cycleSlot(slotId, 0), to.reg());
masm.ma_ldr(cycleSlot(slotId, 0), to.reg(), scratch);
}
break;
default:
@ -230,6 +237,8 @@ MoveEmitterARM::emitMove(const MoveOperand& from, const MoveOperand& to)
MOZ_ASSERT(!from.isGeneralRegPair());
MOZ_ASSERT(!to.isGeneralRegPair());
ScratchRegisterScope scratch(masm);
if (to.isGeneralReg() && to.reg() == spilledReg_) {
// If the destination is the spilled register, make sure we
// don't re-clobber its value.
@ -240,30 +249,30 @@ MoveEmitterARM::emitMove(const MoveOperand& from, const MoveOperand& to)
if (from.reg() == spilledReg_) {
// If the source is a register that has been spilled, make sure
// to load the source back into that register.
masm.ma_ldr(spillSlot(), spilledReg_);
masm.ma_ldr(spillSlot(), spilledReg_, scratch);
spilledReg_ = InvalidReg;
}
if (to.isMemoryOrEffectiveAddress())
masm.ma_str(from.reg(), toAddress(to));
masm.ma_str(from.reg(), toAddress(to), scratch);
else
masm.ma_mov(from.reg(), to.reg());
} else if (to.isGeneralReg()) {
MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
if (from.isMemory())
masm.ma_ldr(toAddress(from), to.reg());
masm.ma_ldr(toAddress(from), to.reg(), scratch);
else
masm.ma_add(from.base(), Imm32(from.disp()), to.reg());
masm.ma_add(from.base(), Imm32(from.disp()), to.reg(), scratch);
} else {
// Memory to memory gpr move.
Register reg = tempReg();
MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
if (from.isMemory())
masm.ma_ldr(toAddress(from), reg);
masm.ma_ldr(toAddress(from), reg, scratch);
else
masm.ma_add(from.base(), Imm32(from.disp()), reg);
masm.ma_add(from.base(), Imm32(from.disp()), reg, scratch);
MOZ_ASSERT(to.base() != reg);
masm.ma_str(reg, toAddress(to));
masm.ma_str(reg, toAddress(to), scratch);
}
}
@ -274,30 +283,33 @@ MoveEmitterARM::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
MOZ_ASSERT(!from.isGeneralRegPair());
MOZ_ASSERT(!to.isGeneralRegPair());
ScratchRegisterScope scratch(masm);
if (from.isFloatReg()) {
if (to.isFloatReg())
masm.ma_vmov_f32(from.floatReg(), to.floatReg());
else if (to.isGeneralReg())
masm.ma_vxfer(from.floatReg(), to.reg());
else
masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(), toAddress(to));
masm.ma_vstr(VFPRegister(from.floatReg()).singleOverlay(), toAddress(to), scratch);
} else if (from.isGeneralReg()) {
if (to.isFloatReg())
if (to.isFloatReg()) {
masm.ma_vxfer(from.reg(), to.floatReg());
else if (to.isGeneralReg())
} else if (to.isGeneralReg()) {
masm.ma_mov(from.reg(), to.reg());
else
masm.ma_str(from.reg(), toAddress(to));
} else {
masm.ma_str(from.reg(), toAddress(to), scratch);
}
} else if (to.isFloatReg()) {
masm.ma_vldr(toAddress(from), VFPRegister(to.floatReg()).singleOverlay());
masm.ma_vldr(toAddress(from), VFPRegister(to.floatReg()).singleOverlay(), scratch);
} else if (to.isGeneralReg()) {
masm.ma_ldr(toAddress(from), to.reg());
masm.ma_ldr(toAddress(from), to.reg(), scratch);
} else {
// Memory to memory move.
MOZ_ASSERT(from.isMemory());
FloatRegister reg = ScratchFloat32Reg;
masm.ma_vldr(toAddress(from), VFPRegister(reg).singleOverlay());
masm.ma_vstr(VFPRegister(reg).singleOverlay(), toAddress(to));
ScratchFloat32Scope scratchFloat32(masm);
masm.ma_vldr(toAddress(from), scratchFloat32, scratch);
masm.ma_vstr(scratchFloat32, toAddress(to), scratch);
}
}
@ -308,13 +320,15 @@ MoveEmitterARM::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
MOZ_ASSERT(!from.isGeneralReg());
MOZ_ASSERT(!to.isGeneralReg());
ScratchRegisterScope scratch(masm);
if (from.isFloatReg()) {
if (to.isFloatReg())
masm.ma_vmov(from.floatReg(), to.floatReg());
else if (to.isGeneralRegPair())
masm.ma_vxfer(from.floatReg(), to.evenReg(), to.oddReg());
else
masm.ma_vstr(from.floatReg(), toAddress(to));
masm.ma_vstr(from.floatReg(), toAddress(to), scratch);
} else if (from.isGeneralRegPair()) {
if (to.isFloatReg())
masm.ma_vxfer(from.evenReg(), from.oddReg(), to.floatReg());
@ -323,12 +337,12 @@ MoveEmitterARM::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
masm.ma_mov(from.evenReg(), to.evenReg());
masm.ma_mov(from.oddReg(), to.oddReg());
} else {
FloatRegister reg = ScratchDoubleReg;
masm.ma_vxfer(from.evenReg(), from.oddReg(), reg);
masm.ma_vstr(reg, toAddress(to));
ScratchDoubleScope scratchDouble(masm);
masm.ma_vxfer(from.evenReg(), from.oddReg(), scratchDouble);
masm.ma_vstr(scratchDouble, toAddress(to), scratch);
}
} else if (to.isFloatReg()) {
masm.ma_vldr(toAddress(from), to.floatReg());
masm.ma_vldr(toAddress(from), to.floatReg(), scratch);
} else if (to.isGeneralRegPair()) {
MOZ_ASSERT(from.isMemory());
Address src = toAddress(from);
@ -345,9 +359,9 @@ MoveEmitterARM::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
} else {
// Memory to memory move.
MOZ_ASSERT(from.isMemory());
ScratchDoubleScope scratch(masm);
masm.ma_vldr(toAddress(from), scratch);
masm.ma_vstr(scratch, toAddress(to));
ScratchDoubleScope scratchDouble(masm);
masm.ma_vldr(toAddress(from), scratchDouble, scratch);
masm.ma_vstr(scratchDouble, toAddress(to), scratch);
}
}
@ -405,7 +419,9 @@ MoveEmitterARM::finish()
{
assertDone();
if (pushedAtSpill_ != -1 && spilledReg_ != InvalidReg)
masm.ma_ldr(spillSlot(), spilledReg_);
if (pushedAtSpill_ != -1 && spilledReg_ != InvalidReg) {
ScratchRegisterScope scratch(masm);
masm.ma_ldr(spillSlot(), spilledReg_, scratch);
}
masm.freeStack(masm.framePushed() - pushedAtStart_);
}

View File

@ -59,11 +59,12 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
masm.mov(scratchReg, R0.payloadReg());
break;
case JSOP_MUL: {
ScratchRegisterScope scratch(masm);
Assembler::Condition cond = masm.ma_check_mul(R0.payloadReg(), R1.payloadReg(), scratchReg,
Assembler::Overflow);
scratch, Assembler::Overflow);
masm.j(cond, &failure);
masm.ma_cmp(scratchReg, Imm32(0));
masm.as_cmp(scratchReg, Imm8(0));
masm.j(Assembler::Equal, &maybeNegZero);
masm.mov(scratchReg, R0.payloadReg());
@ -72,13 +73,16 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
case JSOP_DIV:
case JSOP_MOD: {
// Check for INT_MIN / -1, it results in a double.
masm.ma_cmp(R0.payloadReg(), Imm32(INT_MIN));
masm.ma_cmp(R1.payloadReg(), Imm32(-1), Assembler::Equal);
masm.j(Assembler::Equal, &failure);
{
ScratchRegisterScope scratch(masm);
masm.ma_cmp(R0.payloadReg(), Imm32(INT_MIN), scratch);
masm.ma_cmp(R1.payloadReg(), Imm32(-1), scratch, Assembler::Equal);
masm.j(Assembler::Equal, &failure);
}
// Check for both division by zero and 0 / X with X < 0 (results in -0).
masm.ma_cmp(R1.payloadReg(), Imm32(0));
masm.ma_cmp(R0.payloadReg(), Imm32(0), Assembler::LessThan);
masm.as_cmp(R1.payloadReg(), Imm8(0));
masm.as_cmp(R0.payloadReg(), Imm8(0), Assembler::LessThan);
masm.j(Assembler::Equal, &failure);
// The call will preserve registers r4-r11. Save R0 and the link
@ -118,17 +122,17 @@ ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
break;
case JSOP_LSH:
// ARM will happily try to shift by more than 0x1f.
masm.ma_and(Imm32(0x1F), R1.payloadReg(), R1.payloadReg());
masm.as_and(R1.payloadReg(), R1.payloadReg(), Imm8(0x1F));
masm.ma_lsl(R1.payloadReg(), R0.payloadReg(), R0.payloadReg());
break;
case JSOP_RSH:
masm.ma_and(Imm32(0x1F), R1.payloadReg(), R1.payloadReg());
masm.as_and(R1.payloadReg(), R1.payloadReg(), Imm8(0x1F));
masm.ma_asr(R1.payloadReg(), R0.payloadReg(), R0.payloadReg());
break;
case JSOP_URSH:
masm.ma_and(Imm32(0x1F), R1.payloadReg(), scratchReg);
masm.as_and(scratchReg, R1.payloadReg(), Imm8(0x1F));
masm.ma_lsr(scratchReg, R0.payloadReg(), scratchReg);
masm.ma_cmp(scratchReg, Imm32(0));
masm.as_cmp(scratchReg, Imm8(0));
if (allowDouble_) {
Label toUint;
masm.j(Assembler::LessThan, &toUint);
@ -196,7 +200,7 @@ ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(0x7fffffff), &failure);
// Compile -x as 0 - x.
masm.ma_rsb(R0.payloadReg(), Imm32(0), R0.payloadReg());
masm.as_rsb(R0.payloadReg(), R0.payloadReg(), Imm8(0));
break;
default:
MOZ_CRASH("Unexpected op");
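
The substitutions of as_cmp/as_and/as_rsb with Imm8 for ma_* calls with Imm32 rely on the ARM "modified immediate" encoding: an 8-bit value rotated right by an even amount fits directly in the instruction, so no scratch register is needed. A standalone predicate for that encoding (illustrative, not the assembler's actual helper):

#include <cstdint>

// Rotate left, avoiding the undefined shift-by-32 case.
inline uint32_t rotl32(uint32_t v, unsigned r) {
    r &= 31;
    return r ? (v << r) | (v >> (32 - r)) : v;
}

// True if v is some 8-bit value rotated right by an even amount, i.e. it
// can be encoded directly in a data-processing instruction such as cmp,
// and, bic, or rsb.
inline bool isArmModifiedImmediate(uint32_t v) {
    for (unsigned ror = 0; ror < 32; ror += 2) {
        if (rotl32(v, ror) <= 0xFF)  // rotating left by ror undoes the ror
            return true;
    }
    return false;
}

// 0, 1, 3, and 0x1F all satisfy this, which is why the rewritten call sites
// above can drop their Imm32 helpers and scratch registers.
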

View File

@ -87,11 +87,14 @@ EmitBaselineTailCallVM(JitCode* target, MacroAssembler& masm, uint32_t argSize)
// Compute frame size.
masm.movePtr(BaselineFrameReg, r0);
masm.ma_add(Imm32(BaselineFrame::FramePointerOffset), r0);
masm.as_add(r0, r0, Imm8(BaselineFrame::FramePointerOffset));
masm.ma_sub(BaselineStackReg, r0);
// Store frame size without VMFunction arguments for GC marking.
masm.ma_sub(r0, Imm32(argSize), r1);
{
ScratchRegisterScope scratch(masm);
masm.ma_sub(r0, Imm32(argSize), r1, scratch);
}
masm.store32(r1, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));
// Push frame descriptor and perform the tail call.
@ -133,7 +136,7 @@ EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32
// Compute stub frame size. We have to add two pointers: the stub reg and
// previous frame pointer pushed by EmitEnterStubFrame.
masm.mov(BaselineFrameReg, reg);
masm.ma_add(Imm32(sizeof(void*) * 2), reg);
masm.as_add(reg, reg, Imm8(sizeof(void*) * 2));
masm.ma_sub(BaselineStackReg, reg);
masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize);
@ -174,7 +177,7 @@ EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch)
// Compute frame size.
masm.mov(BaselineFrameReg, scratch);
masm.ma_add(Imm32(BaselineFrame::FramePointerOffset), scratch);
masm.as_add(scratch, scratch, Imm8(BaselineFrame::FramePointerOffset));
masm.ma_sub(BaselineStackReg, scratch);
masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize()));

View File

@ -174,7 +174,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
// argument is expected by the Jit frame.
//
aasm->as_sub(r4, sp, O2RegImmShift(r1, LSL, 3)); // r4 = sp - argc*8
masm.ma_and(Imm32(~(JitStackAlignment - 1)), r4, r4);
aasm->as_bic(r4, r4, Imm8(JitStackAlignment - 1));
    // r4 is now aligned to the bottom of the list of arguments.
static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
"No need to consider the JitFrameLayout for aligning the stack");
@ -317,7 +317,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
&skipProfilingInstrumentation);
masm.ma_add(framePtr, Imm32(sizeof(void*)), realFramePtr);
masm.as_add(realFramePtr, framePtr, Imm8(sizeof(void*)));
masm.profilerEnterFrame(realFramePtr, scratch);
masm.bind(&skipProfilingInstrumentation);
}
@ -400,7 +400,7 @@ JitRuntime::generateInvalidator(JSContext* cx)
// The old return address should not matter, but we still want the stack to
// be aligned, and there is no good reason to automatically align it with a
// call to setupUnalignedABICall.
masm.ma_and(Imm32(~7), sp, sp);
masm.as_bic(sp, sp, Imm8(7));
masm.startDataTransferM(IsStore, sp, DB, WriteBack);
// We don't have to push everything, but this is likely easier.
// Setting regs_.
@ -412,8 +412,9 @@ JitRuntime::generateInvalidator(JSContext* cx)
// if there are only 16 double registers, then we need to reserve
// space on the stack for the missing 16.
if (FloatRegisters::ActualTotalPhys() != FloatRegisters::TotalPhys) {
ScratchRegisterScope scratch(masm);
int missingRegs = FloatRegisters::TotalPhys - FloatRegisters::ActualTotalPhys();
masm.ma_sub(Imm32(missingRegs * sizeof(double)), sp);
masm.ma_sub(Imm32(missingRegs * sizeof(double)), sp, scratch);
}
masm.startFloatTransferM(IsStore, sp, DB, WriteBack);
@ -434,12 +435,18 @@ JitRuntime::generateInvalidator(JSContext* cx)
masm.passABIArg(r2);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, InvalidationBailout));
masm.ma_ldr(Address(sp, 0), r2);
masm.ma_ldr(Address(sp, sizeOfBailoutInfo), r1);
masm.ma_ldr(DTRAddr(sp, DtrOffImm(0)), r2);
{
ScratchRegisterScope scratch(masm);
masm.ma_ldr(Address(sp, sizeOfBailoutInfo), r1, scratch);
}
// Remove the return address, the IonScript, the register state
    // (InvalidationBailoutStack) and the space that was allocated for the return
// value.
masm.ma_add(sp, Imm32(sizeof(InvalidationBailoutStack) + sizeOfRetval + sizeOfBailoutInfo), sp);
{
ScratchRegisterScope scratch(masm);
masm.ma_add(sp, Imm32(sizeof(InvalidationBailoutStack) + sizeOfRetval + sizeOfBailoutInfo), sp, scratch);
}
// Remove the space that this frame was using before the bailout (computed
// by InvalidationBailout)
masm.ma_add(sp, r1, sp);
@ -475,14 +482,20 @@ JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
// Load the number of |undefined|s to push into r6.
masm.ma_ldr(DTRAddr(sp, DtrOffImm(RectifierFrameLayout::offsetOfCalleeToken())), r1);
masm.ma_and(Imm32(CalleeTokenMask), r1, r6);
{
ScratchRegisterScope scratch(masm);
masm.ma_and(Imm32(CalleeTokenMask), r1, r6, scratch);
}
masm.ma_ldrh(EDtrAddr(r6, EDtrOffImm(JSFunction::offsetOfNargs())), r6);
masm.ma_sub(r6, r8, r2);
// Get the topmost argument.
    masm.ma_alu(sp, lsl(r8, 3), r3, OpAdd); // r3 <- sp + nargs * 8
masm.ma_add(r3, Imm32(sizeof(RectifierFrameLayout)), r3);
{
ScratchRegisterScope scratch(masm);
        masm.ma_alu(sp, lsl(r8, 3), r3, OpAdd); // r3 <- sp + nargs * 8
masm.ma_add(r3, Imm32(sizeof(RectifierFrameLayout)), r3, scratch);
}
{
Label notConstructing;
@ -491,8 +504,8 @@ JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
&notConstructing);
// Add sizeof(Value) to overcome |this|
masm.ma_dataTransferN(IsLoad, 64, true, r3, Imm32(8), r4, Offset);
masm.ma_dataTransferN(IsStore, 64, true, sp, Imm32(-8), r4, PreIndex);
masm.as_extdtr(IsLoad, 64, true, Offset, r4, EDtrAddr(r3, EDtrOffImm(8)));
masm.as_extdtr(IsStore, 64, true, PreIndex, r4, EDtrAddr(sp, EDtrOffImm(-8)));
// Include the newly pushed newTarget value in the frame size
// calculated below.
@ -506,8 +519,8 @@ JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
{
Label undefLoopTop;
masm.bind(&undefLoopTop);
masm.ma_dataTransferN(IsStore, 64, true, sp, Imm32(-8), r4, PreIndex);
masm.ma_sub(r2, Imm32(1), r2, SetCC);
masm.as_extdtr(IsStore, 64, true, PreIndex, r4, EDtrAddr(sp, EDtrOffImm(-8)));
masm.as_sub(r2, r2, Imm8(1), SetCC);
masm.ma_b(&undefLoopTop, Assembler::NonZero);
}
@ -516,15 +529,15 @@ JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
{
Label copyLoopTop;
masm.bind(&copyLoopTop);
masm.ma_dataTransferN(IsLoad, 64, true, r3, Imm32(-8), r4, PostIndex);
masm.ma_dataTransferN(IsStore, 64, true, sp, Imm32(-8), r4, PreIndex);
masm.as_extdtr(IsLoad, 64, true, PostIndex, r4, EDtrAddr(r3, EDtrOffImm(-8)));
masm.as_extdtr(IsStore, 64, true, PreIndex, r4, EDtrAddr(sp, EDtrOffImm(-8)));
masm.ma_sub(r8, Imm32(1), r8, SetCC);
masm.as_sub(r8, r8, Imm8(1), SetCC);
masm.ma_b(&copyLoopTop, Assembler::NotSigned);
}
// translate the framesize from values into bytes
masm.ma_add(r6, Imm32(1), r6);
masm.as_add(r6, r6, Imm8(1));
masm.ma_lsl(Imm32(3), r6, r6);
// Construct sizeDescriptor.
@ -551,7 +564,10 @@ JitRuntime::generateArgumentsRectifier(JSContext* cx, void** returnAddrOut)
// return address
// Remove the rectifier frame.
masm.ma_dtr(IsLoad, sp, Imm32(12), r4, PostIndex);
{
ScratchRegisterScope scratch(masm);
masm.ma_dtr(IsLoad, sp, Imm32(12), r4, scratch, PostIndex);
}
// arg1
// ...
@ -600,12 +616,14 @@ PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass, Register spArg)
masm.transferReg(Register::FromCode(i));
masm.finishDataTransfer();
ScratchRegisterScope scratch(masm);
    // Since our data structures for stack inspection are compile-time fixed,
// if there are only 16 double registers, then we need to reserve
// space on the stack for the missing 16.
if (FloatRegisters::ActualTotalPhys() != FloatRegisters::TotalPhys) {
int missingRegs = FloatRegisters::TotalPhys - FloatRegisters::ActualTotalPhys();
masm.ma_sub(Imm32(missingRegs * sizeof(double)), sp);
masm.ma_sub(Imm32(missingRegs * sizeof(double)), sp, scratch);
}
masm.startFloatTransferM(IsStore, sp, DB, WriteBack);
for (uint32_t i = 0; i < FloatRegisters::ActualTotalPhys(); i++)
@ -658,8 +676,12 @@ GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
// Sp % 8 == 0
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, Bailout));
masm.ma_ldr(Address(sp, 0), r2);
masm.ma_add(sp, Imm32(sizeOfBailoutInfo), sp);
masm.ma_ldr(DTRAddr(sp, DtrOffImm(0)), r2);
{
ScratchRegisterScope scratch(masm);
masm.ma_add(sp, Imm32(sizeOfBailoutInfo), sp, scratch);
}
// Common size of a bailout frame.
uint32_t bailoutFrameSize = 0
+ sizeof(void*) // frameClass
@ -675,9 +697,11 @@ GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
// sizeof(uint32_t) for the tableOffset that was pushed onto the stack
// sizeof(uintptr_t) for the snapshotOffset;
// alignment to round the uintptr_t up to a multiple of 8 bytes.
masm.ma_add(sp, Imm32(bailoutFrameSize+12), sp);
ScratchRegisterScope scratch(masm);
masm.ma_add(sp, Imm32(bailoutFrameSize+12), sp, scratch);
masm.as_add(sp, sp, O2Reg(r4));
} else {
ScratchRegisterScope scratch(masm);
uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
masm.ma_add(Imm32(// The frame that was added when we entered the most
// recent function.
@ -687,7 +711,7 @@ GenerateBailoutThunk(JSContext* cx, MacroAssembler& masm, uint32_t frameClass)
+ sizeof(void*)
// Everything else that was pushed on the stack.
+ bailoutFrameSize)
, sp);
, sp, scratch);
}
// Jump to shared bailout tail. The BailoutInfo pointer has to be in r2.
@ -777,7 +801,8 @@ JitRuntime::generateVMWrapper(JSContext* cx, const VMFunction& f)
if (f.explicitArgs) {
argsBase = r5;
regs.take(argsBase);
masm.ma_add(sp, Imm32(ExitFrameLayout::SizeWithFooter()), argsBase);
ScratchRegisterScope scratch(masm);
masm.ma_add(sp, Imm32(ExitFrameLayout::SizeWithFooter()), argsBase, scratch);
}
// Reserve space for the outparameter.
@ -1155,7 +1180,10 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// Going into the conditionals, we will have:
// FrameDescriptor.size in scratch1
// FrameDescriptor.type in scratch2
masm.ma_and(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1, scratch2);
{
ScratchRegisterScope asmScratch(masm);
masm.ma_and(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1, scratch2, asmScratch);
}
masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
// Handling of each case is dependent on FrameDescriptor.type
@ -1201,7 +1229,7 @@ JitRuntime::generateProfilerExitFrameTailStub(JSContext* cx)
// Store return frame in lastProfilingFrame.
// scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
masm.ma_add(StackPointer, scratch1, scratch2);
masm.ma_add(scratch2, Imm32(JitFrameLayout::Size()), scratch2);
masm.as_add(scratch2, scratch2, Imm8(JitFrameLayout::Size()));
masm.storePtr(scratch2, lastProfilingFrame);
masm.ret();
}
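
Several hunks above unpack Ion frame descriptors: ma_and masks off the low type bits and rshiftPtr recovers the size. A sketch of that packing, with illustrative widths (the real constants live in the Jit frame layout headers):

#include <cstdint>

constexpr uint32_t FRAMETYPE_BITS = 4;              // assumed width
constexpr uint32_t FRAMESIZE_SHIFT = FRAMETYPE_BITS;

inline uint32_t MakeFrameDescriptor(uint32_t size, uint32_t type) {
    return (size << FRAMESIZE_SHIFT) | type;
}

inline uint32_t DescriptorType(uint32_t desc) {
    return desc & ((1u << FRAMETYPE_BITS) - 1);     // the ma_and above
}

inline uint32_t DescriptorSize(uint32_t desc) {
    return desc >> FRAMESIZE_SHIFT;                 // the rshiftPtr above
}
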

View File

@ -2743,7 +2743,7 @@ CrossAxisPositionTracker::
aAxisTracker.IsCrossAxisReversed()),
mPackingSpaceRemaining(0),
mNumPackingSpacesRemaining(0),
mAlignContent(aReflowInput.mStylePosition->ComputedAlignContent())
mAlignContent(aReflowInput.mStylePosition->mAlignContent)
{
MOZ_ASSERT(aFirstLine, "null first line pointer");
@ -4067,7 +4067,7 @@ nsFlexContainerFrame::DoFlexLayout(nsPresContext* aPresContext,
const auto justifyContent = IsLegacyBox(aReflowInput.mStyleDisplay,
mStyleContext) ?
ConvertLegacyStyleToJustifyContent(StyleXUL()) :
aReflowInput.mStylePosition->ComputedJustifyContent();
aReflowInput.mStylePosition->mJustifyContent;
for (FlexLine* line = lines.getFirst(); line; line = line->getNext()) {
// Main-Axis Alignment - Flexbox spec section 9.5

View File

@ -4075,7 +4075,7 @@ nsGridContainerFrame::Tracks::InitializeItemBaselines(
// For this purpose, the 'start', 'end', 'flex-start', and 'flex-end'
// values of 'align-self' are treated as either 'self-start' or
// 'self-end', whichever they end up equivalent to.
auto alignContent = child->StylePosition()->ComputedAlignContent();
auto alignContent = child->StylePosition()->mAlignContent;
alignContent &= ~NS_STYLE_ALIGN_FLAG_BITS;
if (alignContent == NS_STYLE_ALIGN_BASELINE ||
alignContent == NS_STYLE_ALIGN_LAST_BASELINE) {
@ -4677,8 +4677,8 @@ nsGridContainerFrame::Tracks::AlignJustifyContent(
}
const bool isAlign = mAxis == eLogicalAxisBlock;
auto valueAndFallback = isAlign ? aStyle->ComputedAlignContent() :
aStyle->ComputedJustifyContent();
auto valueAndFallback = isAlign ? aStyle->mAlignContent :
aStyle->mJustifyContent;
bool overflowSafe;
auto alignment = ::GetAlignJustifyValue(valueAndFallback, aWM, isAlign,
&overflowSafe);

View File

@ -1,11 +1,11 @@
<!DOCTYPE HTML>
<html class="reftest-wait">
<body style="background:white;">
<body style="background:white;"
onload="document.getElementById('v').poster = ''; setTimeout(function(){document.documentElement.className = '';}, 0);">
<!-- Test if we show the video frame after removing a valid poster. -->
<video src="black140x100.ogv"
preload="auto"
id="v"
onload="document.getElementById('v').poster = ''; setTimeout(function(){document.documentElement.className = '';}, 0);"
poster="blue250x200.png"></video>
</body>
</html>

View File

@ -1,11 +1,11 @@
<!DOCTYPE HTML>
<html class="reftest-wait">
<body style="background:white;">
<body style="background:white;"
onload="document.getElementById('v').poster = 'red140x100.png'; setTimeout(function(){document.documentElement.className = '';}, 0);">
<!-- Test that the poster frame changes when you change the poster attribute. -->
<video src="black140x100.ogv"
preload="none"
id="v"
onload="document.getElementById('v').poster = 'red140x100.png'; setTimeout(function(){document.documentElement.className = '';}, 0);"
poster="blue250x200.png"></video>
</body>
</html>

View File

@ -1,11 +1,11 @@
<!DOCTYPE HTML>
<html class="reftest-wait">
<body style="background:white;">
<body style="background:white;"
onload="document.getElementById('v').poster = ''; setTimeout(function(){document.documentElement.className = '';}, 0);">
<!-- Test if we show the video frame after removing a valid poster. -->
<video src="black140x100.webm"
preload="auto"
id="v"
onload="document.getElementById('v').poster = ''; setTimeout(function(){document.documentElement.className = '';}, 0);"
poster="blue250x200.png"></video>
</body>
</html>

View File

@ -1,11 +1,11 @@
<!DOCTYPE HTML>
<html class="reftest-wait">
<body style="background:white;">
<body style="background:white;"
onload="document.getElementById('v').poster = 'red140x100.png'; setTimeout(function(){document.documentElement.className = '';}, 0);">
<!-- Test that the poster frame changes when you change the poster attribute. -->
<video src="black140x100.webm"
preload="none"
id="v"
onload="document.getElementById('v').poster = 'red140x100.png'; setTimeout(function(){document.documentElement.className = '';}, 0);"
poster="blue250x200.png"></video>
</body>
</html>

View File

@ -604,8 +604,7 @@ Gecko_AtomEqualsUTF8(nsIAtom* aAtom, const char* aString, uint32_t aLength)
{
  // XXXbholley: We should be able to do this without converting; I just can't
  // find the right thing to call.
nsAutoString atomStr;
aAtom->ToString(atomStr);
nsDependentAtomString atomStr(aAtom);
NS_ConvertUTF8toUTF16 inStr(nsDependentCSubstring(aString, aLength));
return atomStr.Equals(inStr);
}
@ -615,8 +614,7 @@ Gecko_AtomEqualsUTF8IgnoreCase(nsIAtom* aAtom, const char* aString, uint32_t aLe
{
  // XXXbholley: We should be able to do this without converting; I just can't
  // find the right thing to call.
nsAutoString atomStr;
aAtom->ToString(atomStr);
nsDependentAtomString atomStr(aAtom);
NS_ConvertUTF8toUTF16 inStr(nsDependentCSubstring(aString, aLength));
return nsContentUtils::EqualsIgnoreASCIICase(atomStr, inStr);
}

View File

@ -4371,7 +4371,7 @@ nsComputedDOMStyle::DoGetAlignContent()
{
RefPtr<nsROCSSPrimitiveValue> val = new nsROCSSPrimitiveValue;
nsAutoString str;
auto align = StylePosition()->ComputedAlignContent();
auto align = StylePosition()->mAlignContent;
nsCSSValue::AppendAlignJustifyValueToString(align & NS_STYLE_ALIGN_ALL_BITS, str);
auto fallback = align >> NS_STYLE_ALIGN_ALL_SHIFT;
if (fallback) {
@ -4387,7 +4387,7 @@ nsComputedDOMStyle::DoGetAlignItems()
{
RefPtr<nsROCSSPrimitiveValue> val = new nsROCSSPrimitiveValue;
nsAutoString str;
auto align = StylePosition()->ComputedAlignItems();
auto align = StylePosition()->mAlignItems;
nsCSSValue::AppendAlignJustifyValueToString(align, str);
val->SetString(str);
return val.forget();
@ -4409,7 +4409,7 @@ nsComputedDOMStyle::DoGetJustifyContent()
{
RefPtr<nsROCSSPrimitiveValue> val = new nsROCSSPrimitiveValue;
nsAutoString str;
auto justify = StylePosition()->ComputedJustifyContent();
auto justify = StylePosition()->mJustifyContent;
nsCSSValue::AppendAlignJustifyValueToString(justify & NS_STYLE_JUSTIFY_ALL_BITS, str);
auto fallback = justify >> NS_STYLE_JUSTIFY_ALL_SHIFT;
if (fallback) {

View File

@ -1692,7 +1692,7 @@ nsStylePosition::UsedAlignSelf(nsStyleContext* aParent) const
return mAlignSelf;
}
if (MOZ_LIKELY(aParent)) {
auto parentAlignItems = aParent->StylePosition()->ComputedAlignItems();
auto parentAlignItems = aParent->StylePosition()->mAlignItems;
MOZ_ASSERT(!(parentAlignItems & NS_STYLE_ALIGN_LEGACY),
"align-items can't have 'legacy'");
return parentAlignItems;

View File

@ -1707,27 +1707,12 @@ struct MOZ_NEEDS_MEMMOVABLE_MEMBERS nsStylePosition
return nsChangeHint(0);
}
/**
* Return the computed value for 'align-content'.
*/
uint16_t ComputedAlignContent() const { return mAlignContent; }
/**
* Return the computed value for 'align-items'.
*/
uint8_t ComputedAlignItems() const { return mAlignItems; }
/**
* Return the used value for 'align-self' given our parent StyleContext
* aParent (or null for the root).
*/
uint8_t UsedAlignSelf(nsStyleContext* aParent) const;
/**
* Return the computed value for 'justify-content'.
*/
uint16_t ComputedJustifyContent() const { return mJustifyContent; }
/**
* Return the computed value for 'justify-items' given our parent StyleContext
* aParent (or null for the root).
@ -1755,15 +1740,17 @@ struct MOZ_NEEDS_MEMMOVABLE_MEMBERS nsStylePosition
nsStyleCoord mGridAutoRowsMax; // [reset] coord, percent, enum, calc, flex
uint8_t mGridAutoFlow; // [reset] enumerated. See nsStyleConsts.h
mozilla::StyleBoxSizing mBoxSizing; // [reset] see nsStyleConsts.h
private:
friend class nsRuleNode;
uint16_t mAlignContent; // [reset] fallback value in the high byte
uint8_t mAlignItems; // [reset] see nsStyleConsts.h
public:
uint8_t mAlignSelf; // [reset] see nsStyleConsts.h
private:
uint16_t mJustifyContent; // [reset] fallback value in the high byte
private:
friend class nsRuleNode;
// mJustifyItems should only be read via ComputedJustifyItems(), which
// lazily resolves its "auto" value. nsRuleNode needs direct access so
// it can set mJustifyItems' value when populating this struct.
uint8_t mJustifyItems; // [reset] see nsStyleConsts.h
public:
uint8_t mJustifySelf; // [reset] see nsStyleConsts.h

View File

@ -879,9 +879,15 @@ nsSVGIntegrationUtils::PaintMaskAndClipPath(const PaintFramesParams& aParams)
offsetToUserSpace, true);
}
context.PushGroupForBlendBack(gfxContentType::COLOR_ALPHA,
opacityApplied ? 1.0 : opacity,
maskSurface, maskTransform);
if (aParams.layerManager->GetRoot()->GetContentFlags() & Layer::CONTENT_COMPONENT_ALPHA) {
context.PushGroupAndCopyBackground(gfxContentType::COLOR_ALPHA,
opacityApplied ? 1.0 : opacity,
maskSurface, maskTransform);
} else {
context.PushGroupForBlendBack(gfxContentType::COLOR_ALPHA,
opacityApplied ? 1.0 : opacity,
maskSurface, maskTransform);
}
}
/* If this frame has only a trivial clipPath, set up cairo's clipping now so

View File

@ -283,17 +283,11 @@ nsSVGMaskFrame::GetMaskForMaskedFrame(gfxContext* aContext,
}
// Create alpha channel mask for output
RefPtr<DrawTarget> destMaskDT =
Factory::CreateDrawTarget(BackendType::CAIRO, maskSurfaceSize,
SurfaceFormat::A8);
if (!destMaskDT) {
RefPtr<DataSourceSurface> destMaskSurface =
Factory::CreateDataSourceSurface(maskSurfaceSize, SurfaceFormat::A8);
if (!destMaskSurface) {
return nullptr;
}
RefPtr<SourceSurface> destMaskSnapshot = destMaskDT->Snapshot();
if (!destMaskSnapshot) {
return nullptr;
}
RefPtr<DataSourceSurface> destMaskSurface = destMaskSnapshot->GetDataSurface();
DataSourceSurface::MappedSurface destMap;
if (!destMaskSurface->Map(DataSourceSurface::MapType::WRITE, &destMap)) {
return nullptr;

View File

@ -62,7 +62,7 @@ public final class HtmlAttributes implements Attributes {
private @Auto AttributeName[] names;
private @Auto String[] values; // XXX perhaps make this @NoLength?
// CPPONLY: private @Auto int[] lines; // XXX perhaps make this @NoLength?
// [NOCPP[

View File

@ -817,6 +817,8 @@ public class Tokenizer implements Locator {
}
@Inline private void appendCharRefBuf(char c) {
// CPPONLY: assert charRefBufLen < charRefBuf.length:
// CPPONLY: "RELEASE: Attempted to overrun charRefBuf!";
charRefBuf[charRefBufLen++] = c;
}
@ -850,7 +852,13 @@ public class Tokenizer implements Locator {
* @param c
* the UTF-16 code unit to append
*/
private void appendStrBuf(char c) {
@Inline private void appendStrBuf(char c) {
// CPPONLY: assert strBufLen < strBuf.length: "Previous buffer length insufficient.";
// CPPONLY: if (strBufLen == strBuf.length) {
// CPPONLY: if (!EnsureBufferSpace(1)) {
// CPPONLY: assert false: "RELEASE: Unable to recover from buffer reallocation failure";
// CPPONLY: } // TODO: Add telemetry when outer if fires but inner does not
// CPPONLY: }
strBuf[strBufLen++] = c;
}
@ -951,14 +959,15 @@ public class Tokenizer implements Locator {
}
private void appendStrBuf(@NoLength char[] buffer, int offset, int length) {
int reqLen = strBufLen + length;
if (strBuf.length < reqLen) {
char[] newBuf = new char[reqLen + (reqLen >> 1)];
System.arraycopy(strBuf, 0, newBuf, 0, strBuf.length);
strBuf = newBuf;
}
int newLen = strBufLen + length;
// CPPONLY: assert newLen <= strBuf.length: "Previous buffer length insufficient.";
// CPPONLY: if (strBuf.length < newLen) {
// CPPONLY: if (!EnsureBufferSpace(length)) {
// CPPONLY: assert false: "RELEASE: Unable to recover from buffer reallocation failure";
// CPPONLY: } // TODO: Add telemetry when outer if fires but inner does not
// CPPONLY: }
System.arraycopy(buffer, offset, strBuf, strBufLen, length);
strBufLen = reqLen;
strBufLen = newLen;
}
/**

View File

@ -221,12 +221,6 @@ nsHtml5Tokenizer::emitOrAppendCharRefBuf(int32_t returnState)
}
}
void
nsHtml5Tokenizer::appendStrBuf(char16_t c)
{
strBuf[strBufLen++] = c;
}
nsString*
nsHtml5Tokenizer::strBufToString()
{
@ -250,14 +244,15 @@ nsHtml5Tokenizer::emitStrBuf()
void
nsHtml5Tokenizer::appendStrBuf(char16_t* buffer, int32_t offset, int32_t length)
{
int32_t reqLen = strBufLen + length;
if (strBuf.length < reqLen) {
jArray<char16_t,int32_t> newBuf = jArray<char16_t,int32_t>::newJArray(reqLen + (reqLen >> 1));
nsHtml5ArrayCopy::arraycopy(strBuf, newBuf, strBuf.length);
strBuf = newBuf;
int32_t newLen = strBufLen + length;
MOZ_ASSERT(newLen <= strBuf.length, "Previous buffer length insufficient.");
if (MOZ_UNLIKELY(strBuf.length < newLen)) {
if (MOZ_UNLIKELY(!EnsureBufferSpace(length))) {
MOZ_CRASH("Unable to recover from buffer reallocation failure");
}
}
nsHtml5ArrayCopy::arraycopy(buffer, offset, strBuf, strBufLen, length);
strBufLen = reqLen;
strBufLen = newLen;
}
void

View File

@ -158,6 +158,7 @@ class nsHtml5Tokenizer
private:
inline void appendCharRefBuf(char16_t c)
{
MOZ_RELEASE_ASSERT(charRefBufLen < charRefBuf.length, "Attempted to overrun charRefBuf!");
charRefBuf[charRefBufLen++] = c;
}
@ -179,7 +180,17 @@ class nsHtml5Tokenizer
strBufLen = 0;
}
void appendStrBuf(char16_t c);
inline void appendStrBuf(char16_t c)
{
MOZ_ASSERT(strBufLen < strBuf.length, "Previous buffer length insufficient.");
if (MOZ_UNLIKELY(strBufLen == strBuf.length)) {
if (MOZ_UNLIKELY(!EnsureBufferSpace(1))) {
MOZ_CRASH("Unable to recover from buffer reallocation failure");
}
}
strBuf[strBufLen++] = c;
}
protected:
nsString* strBufToString();
private:

View File

@ -955,8 +955,8 @@ nsHtml5TreeBuilder::elementPopped(int32_t aNamespace, nsIAtom* aName, nsIContent
void
nsHtml5TreeBuilder::accumulateCharacters(const char16_t* aBuf, int32_t aStart, int32_t aLength)
{
MOZ_ASSERT(charBufferLen + aLength <= charBuffer.length,
"About to memcpy past the end of the buffer!");
MOZ_RELEASE_ASSERT(charBufferLen + aLength <= charBuffer.length,
"About to memcpy past the end of the buffer!");
memcpy(charBuffer + charBufferLen, aBuf + aStart, sizeof(char16_t) * aLength);
charBufferLen += aLength;
}
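
Both the Java tokenizer and its translated C++ twin now follow the same append discipline: assert that a prior EnsureBufferSpace call left enough room, and in release builds either recover or crash loudly rather than overrun. A self-contained sketch of that pattern (names are illustrative, not the Gecko parser API):

#include <cassert>
#include <cstdlib>
#include <cstring>
#include <vector>

struct CharBuffer {
    std::vector<char16_t> data;
    size_t len = 0;

    // Stand-in for EnsureBufferSpace; the real one grows geometrically and
    // reports allocation failure instead of always succeeding.
    bool ensureSpace(size_t extra) {
        if (len + extra > data.size())
            data.resize(len + extra);
        return true;
    }

    void append(const char16_t* src, size_t n) {
        assert(len + n <= data.size() && "Previous buffer length insufficient.");
        if (len + n > data.size() && !ensureSpace(n))
            abort();  // "Unable to recover from buffer reallocation failure"
        std::memcpy(data.data() + len, src, sizeof(char16_t) * n);
        len += n;
    }
};
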

View File

@ -3,3 +3,13 @@
*OPT.OBJ/
*DBG.OBJ/
*DBG.OBJD/
*.bak
*.out
*.rej
*.patch
GPATH
GRTAGS
GTAGS
#*
.#*
.ycm_extra_conf.py*

View File

@ -1 +1 @@
233a44e96b22
e7553afc7665

View File

@ -45,11 +45,6 @@ queue.filter(task => {
queue.map(task => {
if (task.collection == "asan") {
// Disable LSan on BoGo runs, for now.
if (task.tests == "bogo") {
task.env.ASAN_OPTIONS = "detect_leaks=0";
}
// CRMF and FIPS tests still leak, unfortunately.
if (task.tests == "crmf" || task.tests == "fips") {
task.env.ASAN_OPTIONS = "detect_leaks=0";
@ -68,6 +63,10 @@ queue.map(task => {
}
}
// Enable TLS 1.3 for every task.
task.env = task.env || {};
task.env.NSS_ENABLE_TLS_1_3 = "1";
return task;
});

View File

@ -10,4 +10,3 @@
*/
#error "Do not include this header file."

View File

@ -48,6 +48,12 @@ TEST_F(DERIntegerDecodingTest, DecodeLong130) {
TestGetInteger(130, der, sizeof(der));
}
TEST_F(DERIntegerDecodingTest, DecodeLong130Padded) {
unsigned char der[sizeof(long) * 2] = {0};
der[sizeof(der) - 1] = {0x82};
TestGetInteger(130, der, sizeof(der));
}
TEST_F(DERIntegerDecodingTest, DecodeLong0) {
unsigned char der[] = {0x00};
TestGetInteger(0, der, sizeof(der));
@ -63,6 +69,12 @@ TEST_F(DERIntegerDecodingTest, DecodeLongMinus1) {
TestGetInteger(-1, der, sizeof(der));
}
TEST_F(DERIntegerDecodingTest, DecodeLongMinus1Padded) {
unsigned char der[sizeof(long) * 2];
memset(der, 0xFF, sizeof(der));
TestGetInteger(-1, der, sizeof(der));
}
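
The new Padded tests feed integers whose leading bytes are redundant sign padding (0x00 for non-negative, 0xFF for negative) and expect the same decoded value. A toy decoder showing why that works (illustrative, not the NSS implementation):

#include <cstddef>

long DecodeTwosComplement(const unsigned char* der, size_t len) {
    unsigned long acc = (der[0] & 0x80) ? ~0UL : 0UL;  // seed with sign bits
    for (size_t i = 0; i < len; ++i)
        acc = (acc << 8) | der[i];  // redundant sign bytes shift out harmlessly
    return static_cast<long>(acc);
}

// A sizeof(long)*2 buffer of {0x00, ..., 0x82} still decodes to 130, and an
// all-0xFF buffer to -1, matching what TestGetInteger asserts.
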
TEST_F(DERIntegerDecodingTest, DecodeLongMax) {
unsigned char der[sizeof(long)];
GetDerLongMax(der, sizeof(long));

View File

@ -15,6 +15,7 @@
#include <iostream>
#include <map>
#include <memory>
#include <queue>
#include <string>
#include <typeinfo>
@ -25,6 +26,8 @@ class ConfigEntryBase {
ConfigEntryBase(const std::string& name, const std::string& type)
: name_(name), type_(type) {}
virtual ~ConfigEntryBase() {}
const std::string& type() const { return type_; }
virtual bool Parse(std::queue<const char*>* args) = 0;
@ -62,7 +65,8 @@ class Config {
template <typename T>
void AddEntry(const std::string& name, T init) {
entries_[name] = new ConfigEntry<T>(name, init);
entries_[name] = std::unique_ptr<ConfigEntryBase>(
new ConfigEntry<T>(name, init));
}
Status ParseArgs(int argc, char** argv);
@ -77,12 +81,12 @@ class Config {
private:
static std::string XformFlag(const std::string& arg);
std::map<std::string, ConfigEntryBase*> entries_;
std::map<std::string, std::unique_ptr<ConfigEntryBase>> entries_;
const ConfigEntryBase* entry(const std::string& key) const {
auto e = entries_.find(key);
if (e == entries_.end()) return nullptr;
return e->second;
return e->second.get();
}
};
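
The Config change swaps raw ConfigEntryBase* ownership for std::unique_ptr, so entries are freed when the map is. A minimal sketch of that owning-registry pattern (names are illustrative):

#include <map>
#include <memory>
#include <string>
#include <utility>

struct Entry {
    virtual ~Entry() = default;  // virtual dtor so unique_ptr deletes correctly
};

class Registry {
    std::map<std::string, std::unique_ptr<Entry>> entries_;
  public:
    template <typename T, typename... Args>
    void add(const std::string& name, Args&&... args) {
        entries_[name] =
            std::unique_ptr<Entry>(new T(std::forward<Args>(args)...));
    }
    const Entry* find(const std::string& name) const {
        auto it = entries_.find(name);
        return it == entries_.end() ? nullptr : it->second.get();
    }
};
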

View File

@ -11,9 +11,9 @@
"UnknownCurve":"Draft version mismatch (NSS=15, BoGo=14)",
"*MissingKeyShare*":"Draft version mismatch (NSS=15, BoGo=14)",
"SecondClientHelloWrongCurve":"Draft version mismatch (NSS=15, BoGo=14)",
"SendHelloRetryRequest":"Expects CurveX25519",
"SendHelloRetryRequest-2":"Expects CurveX25519",
"KeyUpdate":"KeyUpdate Unimplemented",
"*HelloRetryRequest*":"Draft version mismatch (NSS=15, BoGo=14)",
"*PartialEncryptedExtensionsWithServerHello*":"Draft version mismatch (NSS=15, BoGo=14)",
"*KeyUpdate*":"KeyUpdate Unimplemented",
"ClientAuth-NoFallback-TLS13":"Disagreement about alerts. Bug 1294975",
"ClientAuth-SHA1-Fallback":"Disagreement about alerts. Bug 1294975",
"SendWarningAlerts-TLS13":"NSS needs to trigger on warning alerts",

View File

@ -18,6 +18,8 @@
#include "nsskeys.h"
bool exitCodeUnimplemented = false;
std::string FormatError(PRErrorCode code) {
return std::string(":") + PORT_ErrorToName(code) + ":" + ":" +
PORT_ErrorToString(code);
@ -109,7 +111,11 @@ class TestAgent {
if (cfg_.get<std::string>("key-file") != "") {
key_ = ReadPrivateKey(cfg_.get<std::string>("key-file"));
if (!key_) exit(89); // Temporary to handle our inability to handle ECDSA
if (!key_) {
        // Temporary workaround for our inability to handle ECDSA.
exitCodeUnimplemented = true;
return false;
}
}
if (cfg_.get<std::string>("cert-file") != "") {
cert_ = ReadCertificate(cfg_.get<std::string>("cert-file"));
@ -122,11 +128,6 @@ class TestAgent {
std::cerr << "Couldn't configure server cert\n";
return false;
}
rv = SSL_ConfigServerSessionIDCache(1024, 0, 0, ".");
if (rv != SECSuccess) {
std::cerr << "Couldn't configure session cache\n";
return false;
}
} else {
// Client.
@ -273,43 +274,67 @@ std::unique_ptr<const Config> ReadConfig(int argc, char** argv) {
case Config::kOK:
break;
case Config::kUnknownFlag:
exit(89);
break;
exitCodeUnimplemented = true;
default:
exit(1);
return nullptr;
}
  // The std::move is needed to convert to std::unique_ptr<const Config>.
return std::move(cfg);
}
void RunCycle(std::unique_ptr<const Config>& cfg) {
bool RunCycle(std::unique_ptr<const Config>& cfg) {
std::unique_ptr<TestAgent> agent(TestAgent::Create(*cfg));
if (!agent) {
exit(1);
return agent && agent->DoExchange() == SECSuccess;
}
int GetExitCode(bool success) {
if (exitCodeUnimplemented) {
return 89;
}
SECStatus rv = agent->DoExchange();
if (rv) {
exit(1);
if (success) {
return 0;
}
return 1;
}
int main(int argc, char** argv) {
std::unique_ptr<const Config> cfg = ReadConfig(argc, argv);
SECStatus rv = NSS_NoDB_Init(nullptr);
if (rv != SECSuccess) return 1;
rv = NSS_SetDomesticPolicy();
if (rv != SECSuccess) return 1;
// Run a single test cycle.
RunCycle(cfg);
if (cfg->get<bool>("resume")) {
std::cout << "Resuming" << std::endl;
RunCycle(cfg);
if (!cfg) {
return GetExitCode(false);
}
exit(0);
if (cfg->get<bool>("server")) {
if (SSL_ConfigServerSessionIDCache(1024, 0, 0, ".") != SECSuccess) {
std::cerr << "Couldn't configure session cache\n";
return 1;
}
}
if (NSS_NoDB_Init(nullptr) != SECSuccess) {
return 1;
}
// Run a single test cycle.
bool success = RunCycle(cfg);
if (success && cfg->get<bool>("resume")) {
std::cout << "Resuming" << std::endl;
success = RunCycle(cfg);
}
SSL_ClearSessionCache();
if (cfg->get<bool>("server")) {
SSL_ShutdownServerSessionIDCache();
}
if (NSS_Shutdown() != SECSuccess) {
success = false;
}
return GetExitCode(success);
}

View File

@ -33,6 +33,10 @@ ifdef NSS_SSL_ENABLE_ZLIB
include $(CORE_DEPTH)/coreconf/zlib.mk
endif
ifndef NSS_ENABLE_TLS_1_3
NSS_DISABLE_TLS_1_3=1
endif
ifdef NSS_DISABLE_TLS_1_3
# Run parameterized tests only, for which we can easily exclude TLS 1.3
CPPSRCS := $(filter-out $(shell grep -l '^TEST_F' $(CPPSRCS)), $(CPPSRCS))

View File

@ -68,6 +68,9 @@ SECStatus SSLInt_UpdateSSLv2ClientRandom(PRFileDesc *fd, uint8_t *rnd,
return rv;
}
// Ensure we don't overrun hs.client_random.
rnd_len = PR_MIN(SSL3_RANDOM_LENGTH, rnd_len);
// Zero the client_random struct.
PORT_Memset(&ss->ssl3.hs.client_random, 0, SSL3_RANDOM_LENGTH);

View File

@ -18,51 +18,26 @@
namespace nss_test {
// This is a 1-RTT ClientHello with ECDHE and DHE.
// This is a 1-RTT ClientHello with ECDHE.
const static uint8_t kCannedTls13ClientHello[] = {
0x01, 0x00, 0x01, 0xfc, 0x03, 0x04, 0x77, 0x5c, 0x3a, 0xd8, 0x3f, 0x43,
0x63, 0x98, 0xfa, 0x68, 0xfb, 0x01, 0x39, 0xff, 0x7c, 0x1a, 0x51, 0xa7,
0x92, 0xda, 0x97, 0xf5, 0x15, 0x78, 0xb3, 0xbb, 0x26, 0xa7, 0xed, 0x6f,
0x69, 0x71, 0x00, 0x00, 0x2a, 0xc0, 0x2b, 0xc0, 0x2f, 0xcc, 0xa9, 0xcc,
0xa8, 0x13, 0x01, 0xc0, 0x09, 0xc0, 0x13, 0xc0, 0x14, 0x00, 0x9e, 0xcc,
0xaa, 0x00, 0x33, 0x00, 0x32, 0x00, 0x39, 0x00, 0x38, 0x00, 0x16, 0x00,
0x13, 0x00, 0x2f, 0x00, 0x35, 0x00, 0x0a, 0x00, 0x05, 0x00, 0x04, 0x01,
0x00, 0x01, 0xa9, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x09, 0x00, 0x00, 0x06,
0x01, 0x00, 0x00, 0xcf, 0x03, 0x03, 0x6c, 0xb3, 0x46, 0x81, 0xc8, 0x1a,
0xf9, 0xd2, 0x05, 0x97, 0x48, 0x7c, 0xa8, 0x31, 0x03, 0x1c, 0x06, 0xa8,
0x62, 0xb1, 0x90, 0xd6, 0x21, 0x44, 0x7f, 0xc1, 0x9b, 0x87, 0x3e, 0xad,
0x91, 0x85, 0x00, 0x00, 0x06, 0x13, 0x01, 0x13, 0x03, 0x13, 0x02, 0x01,
0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x09, 0x00, 0x00, 0x06,
0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0xff, 0x01, 0x00, 0x01, 0x00, 0x00,
0x0a, 0x00, 0x0a, 0x00, 0x08, 0x00, 0x17, 0x00, 0x18, 0x00, 0x19, 0x01,
0x00, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, 0xff, 0x02, 0x00, 0x02, 0x00,
0x0f, 0x00, 0x28, 0x01, 0x4b, 0x01, 0x49, 0x00, 0x17, 0x00, 0x41, 0x04,
0xbf, 0x31, 0xb4, 0x29, 0x96, 0xf4, 0xe6, 0x4a, 0xe3, 0xea, 0x87, 0x05,
0x38, 0x0e, 0x68, 0x02, 0xbc, 0x4a, 0x5d, 0x90, 0xed, 0xe7, 0xaa, 0x8e,
0xb8, 0x42, 0x84, 0xaa, 0x3a, 0x4f, 0x2b, 0xe3, 0x52, 0x9a, 0x9a, 0x76,
0xab, 0xf8, 0x2e, 0x59, 0xea, 0xcd, 0x2b, 0x2f, 0x03, 0x18, 0xd2, 0x0c,
0xc9, 0x07, 0x15, 0xca, 0xe6, 0x61, 0xf7, 0x79, 0x9f, 0xfe, 0xc5, 0x10,
0x40, 0x9e, 0x38, 0x33, 0x01, 0x00, 0x01, 0x00, 0xd8, 0x80, 0x1f, 0x06,
0x9a, 0xbb, 0xf7, 0xbb, 0xd4, 0x5c, 0x75, 0x1d, 0x8e, 0x09, 0x27, 0xad,
0x08, 0xb8, 0x16, 0x0f, 0x4f, 0x50, 0x79, 0xe1, 0x7e, 0xd4, 0x3b, 0xc0,
0x57, 0xcc, 0x00, 0x5e, 0x28, 0xd8, 0xb3, 0x16, 0x7f, 0x36, 0x48, 0x75,
0x8d, 0x03, 0xa4, 0x71, 0x86, 0x06, 0xf0, 0xe7, 0x57, 0x47, 0x35, 0xf0,
0x04, 0xfb, 0xf7, 0x6c, 0x7a, 0xdd, 0x05, 0x93, 0x53, 0x16, 0x12, 0x49,
0xbe, 0x35, 0x67, 0x47, 0x6e, 0x3a, 0x91, 0xef, 0x50, 0x09, 0x14, 0x98,
0x8b, 0x83, 0xc4, 0x62, 0x77, 0xf3, 0x57, 0x53, 0x3f, 0xf4, 0x82, 0xc0,
0x70, 0x25, 0x19, 0x9d, 0x93, 0xe2, 0xb9, 0x7b, 0xb4, 0x83, 0x31, 0xef,
0xd8, 0x3b, 0xd5, 0x25, 0x70, 0x64, 0x29, 0xa2, 0xc2, 0xc5, 0x73, 0x9a,
0xfe, 0x27, 0xca, 0xc0, 0x55, 0x34, 0x91, 0x95, 0x05, 0xbf, 0x5e, 0x54,
0x4d, 0x95, 0x43, 0x3d, 0x54, 0x6a, 0x89, 0x0b, 0x5e, 0xab, 0x08, 0x7b,
0xf8, 0x38, 0x0a, 0x56, 0x51, 0x9d, 0xbc, 0xdd, 0x46, 0xa9, 0xfc, 0x95,
0xe9, 0x75, 0x1c, 0xc8, 0x18, 0x7f, 0xed, 0xa9, 0xca, 0xb6, 0x5e, 0x77,
0x63, 0x33, 0xb1, 0xb5, 0x68, 0xce, 0xa5, 0x98, 0xec, 0x8c, 0x34, 0x98,
0x1c, 0xa9, 0xa5, 0x84, 0xec, 0xe6, 0xba, 0x0b, 0x11, 0xbf, 0x40, 0xa5,
0xf0, 0x3c, 0xd5, 0xd3, 0xac, 0x2f, 0x46, 0xed, 0xab, 0xc0, 0xc1, 0x78,
0x3f, 0x18, 0x64, 0x5b, 0xff, 0x31, 0xeb, 0x74, 0x06, 0x92, 0x42, 0x1e,
0x90, 0xf7, 0xea, 0xa5, 0x02, 0x33, 0x8e, 0x01, 0xe3, 0xfa, 0x70, 0x82,
0xe5, 0xe7, 0x67, 0x8b, 0x96, 0x20, 0x13, 0x2e, 0x65, 0x86, 0xab, 0x28,
0xc8, 0x1b, 0xfe, 0xb4, 0x98, 0xed, 0xa4, 0xa0, 0xee, 0xf9, 0x53, 0x74,
0x30, 0xac, 0x79, 0x2d, 0xf2, 0x92, 0xd0, 0x5e, 0x10, 0xd7, 0xb9, 0x41,
0x00, 0x0d, 0x00, 0x18, 0x00, 0x16, 0x04, 0x01, 0x05, 0x01, 0x06, 0x01,
0x02, 0x01, 0x04, 0x03, 0x05, 0x03, 0x06, 0x03, 0x02, 0x03, 0x05, 0x02,
0x04, 0x02, 0x02, 0x02, 0x00, 0x15, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
0x0a, 0x00, 0x12, 0x00, 0x10, 0x00, 0x17, 0x00, 0x18, 0x00, 0x19, 0x01,
0x00, 0x01, 0x01, 0x01, 0x02, 0x01, 0x03, 0x01, 0x04, 0x00, 0x28, 0x00,
0x47, 0x00, 0x45, 0x00, 0x17, 0x00, 0x41, 0x04, 0x86, 0x4a, 0xb9, 0xdc,
0x6a, 0x38, 0xa7, 0xce, 0xe7, 0xc2, 0x4f, 0xa6, 0x28, 0xb9, 0xdc, 0x65,
0xbf, 0x73, 0x47, 0x3c, 0x9c, 0x65, 0x8c, 0x47, 0x6d, 0x57, 0x22, 0x8a,
0xc2, 0xb3, 0xc6, 0x80, 0x72, 0x86, 0x08, 0x86, 0x8f, 0x52, 0xc5, 0xcb,
0xbf, 0x2a, 0xb5, 0x59, 0x64, 0xcc, 0x0c, 0x49, 0x95, 0x36, 0xe4, 0xd9,
0x2f, 0xd4, 0x24, 0x66, 0x71, 0x6f, 0x5d, 0x70, 0xe2, 0xa0, 0xea, 0x26,
0x00, 0x2b, 0x00, 0x03, 0x02, 0x7f, 0x10, 0x00, 0x0d, 0x00, 0x20, 0x00,
0x1e, 0x04, 0x03, 0x05, 0x03, 0x06, 0x03, 0x02, 0x03, 0x08, 0x04, 0x08,
0x05, 0x08, 0x06, 0x04, 0x01, 0x05, 0x01, 0x06, 0x01, 0x02, 0x01, 0x04,
0x02, 0x05, 0x02, 0x06, 0x02, 0x02, 0x02};
const static uint8_t kCannedTls13ServerHello[] = {
0x03, 0x04, 0x21, 0x12, 0xa7, 0xa7, 0x0d, 0x85, 0x8b, 0xb8, 0x0c, 0xbb,

View File

@ -303,12 +303,14 @@ static const TlsSignatureScheme kDummySignatureSchemesParamsArr[] = {
static const auto kDummySignatureSchemesParams =
::testing::ValuesIn(kDummySignatureSchemesParamsArr);
#ifndef NSS_DISABLE_TLS_1_3
static TlsSignatureScheme kSignatureSchemesParamsArr[] = {
kTlsSignatureRsaPkcs1Sha256, kTlsSignatureRsaPkcs1Sha384,
kTlsSignatureRsaPkcs1Sha512, kTlsSignatureEcdsaSecp256r1Sha256,
kTlsSignatureEcdsaSecp384r1Sha384, kTlsSignatureRsaPssSha256,
kTlsSignatureRsaPssSha384, kTlsSignatureRsaPssSha512,
};
#endif
INSTANTIATE_CIPHER_TEST_P(RC4, Stream, V10ToV12, kDummyNamedGroupParams,
kDummySignatureSchemesParams,
@ -363,6 +365,7 @@ INSTANTIATE_CIPHER_TEST_P(
TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA);
#ifndef NSS_DISABLE_TLS_1_3
INSTANTIATE_CIPHER_TEST_P(TLS13, All, V13,
::testing::ValuesIn(kFasterDHEGroups),
::testing::ValuesIn(kSignatureSchemesParamsArr),
@ -372,6 +375,7 @@ INSTANTIATE_CIPHER_TEST_P(TLS13AllGroups, All, V13,
::testing::ValuesIn(kAllDHEGroups),
::testing::Values(kTlsSignatureEcdsaSecp384r1Sha384),
TLS_AES_256_GCM_SHA384);
#endif
// Fields are: version, cipher suite, bulk cipher name, secretKeySize
struct SecStatusParams {

View File

@ -5,6 +5,7 @@
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "ssl.h"
#include "ssl3prot.h"
#include "sslerr.h"
#include "sslproto.h"
@ -181,6 +182,16 @@ class TlsExtensionTest13 : public TlsExtensionTestBase,
TlsExtensionTest13()
: TlsExtensionTestBase(TlsConnectTestBase::ToMode(GetParam()),
SSL_LIBRARY_VERSION_TLS_1_3) {}
void ConnectWithReplacementVersionList(uint16_t version) {
DataBuffer versions_buf;
size_t index = versions_buf.Write(0, 2, 1);
versions_buf.Write(index, version, 2);
client_->SetPacketFilter(new TlsExtensionReplacer(
ssl_tls13_supported_versions_xtn, versions_buf));
ConnectExpectFail();
}
};
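For context on the two Write calls above: the draft supported_versions extension body is a one-byte list length followed by big-endian two-byte version codes, and ConnectWithReplacementVersionList rebuilds it with a single entry. A free-standing sketch of the same layout (hypothetical helper, not part of this patch):

#include <cstdint>
#include <vector>

// Build a supported_versions extension body holding one version code:
// [len=2][version_hi][version_lo].
std::vector<uint8_t> OneVersionList(uint16_t version) {
  std::vector<uint8_t> body;
  body.push_back(2);                                     // list length in bytes
  body.push_back(static_cast<uint8_t>(version >> 8));    // version, high byte
  body.push_back(static_cast<uint8_t>(version & 0xff));  // version, low byte
  return body;
}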
class TlsExtensionTest13Stream : public TlsExtensionTestBase {
@ -491,42 +502,6 @@ TEST_P(TlsExtensionTest13, EmptyClientKeyShare) {
kTlsAlertHandshakeFailure);
}
TEST_P(TlsExtensionTest13, DropDraftVersion) {
EnsureTlsSetup();
client_->SetVersionRange(SSL_LIBRARY_VERSION_TLS_1_2,
SSL_LIBRARY_VERSION_TLS_1_3);
server_->SetVersionRange(SSL_LIBRARY_VERSION_TLS_1_2,
SSL_LIBRARY_VERSION_TLS_1_3);
client_->SetPacketFilter(
new TlsExtensionDropper(ssl_tls13_draft_version_xtn));
ConnectExpectFail();
// This will still fail (we can't just modify ClientHello without consequence)
// but the error is discovered later.
EXPECT_EQ(SSL_ERROR_DECRYPT_ERROR_ALERT, client_->error_code());
EXPECT_EQ(SSL_ERROR_BAD_HANDSHAKE_HASH_VALUE, server_->error_code());
}
TEST_P(TlsExtensionTest13, DropDraftVersionAndFail) {
EnsureTlsSetup();
// Since this is set up as TLS 1.3 only, expect the handshake to fail rather
// than just falling back to TLS 1.2.
client_->SetPacketFilter(
new TlsExtensionDropper(ssl_tls13_draft_version_xtn));
ConnectExpectFail();
EXPECT_EQ(SSL_ERROR_PROTOCOL_VERSION_ALERT, client_->error_code());
EXPECT_EQ(SSL_ERROR_UNSUPPORTED_VERSION, server_->error_code());
}
TEST_P(TlsExtensionTest13, ModifyDraftVersionAndFail) {
EnsureTlsSetup();
// As above, dropping back to 1.2 fails.
client_->SetPacketFilter(
new TlsExtensionDamager(ssl_tls13_draft_version_xtn, 1));
ConnectExpectFail();
EXPECT_EQ(SSL_ERROR_PROTOCOL_VERSION_ALERT, client_->error_code());
EXPECT_EQ(SSL_ERROR_UNSUPPORTED_VERSION, server_->error_code());
}
// These tests only work in stream mode because the client sends a
// cleartext alert which causes a MAC error on the server. With
// stream this causes handshake failure but with datagram, the
@ -712,6 +687,43 @@ TEST_F(TlsExtensionTest13Stream, ResumeBogusAuthModes) {
server_->CheckErrorCode(SSL_ERROR_BAD_MAC_READ);
}
// In these tests, we replace the client's supported_versions list so
// that it advertises only TLS 1.2, pushing the server toward a TLS 1.2
// downgrade (a standalone sketch of the replacement filter follows
// these tests).
// 1. Both sides only support TLS 1.3, so we get a protocol version
// error.
TEST_P(TlsExtensionTest13, RemoveTls13FromVersionList) {
ConnectWithReplacementVersionList(SSL_LIBRARY_VERSION_TLS_1_2);
client_->CheckErrorCode(SSL_ERROR_PROTOCOL_VERSION_ALERT);
server_->CheckErrorCode(SSL_ERROR_UNSUPPORTED_VERSION);
}
// 2. Server supports 1.2 and 1.3; the client only enables TLS 1.3
// (and hence only 1.3 cipher suites) but advertises 1.2, so no
// cipher suites can be negotiated.
TEST_P(TlsExtensionTest13, RemoveTls13FromVersionListServerV12) {
server_->SetVersionRange(SSL_LIBRARY_VERSION_TLS_1_2,
SSL_LIBRARY_VERSION_TLS_1_3);
ConnectWithReplacementVersionList(SSL_LIBRARY_VERSION_TLS_1_2);
client_->CheckErrorCode(SSL_ERROR_NO_CYPHER_OVERLAP);
server_->CheckErrorCode(SSL_ERROR_NO_CYPHER_OVERLAP);
}
// 3. Server supports 1.2 and 1.3, client supports 1.2 and 1.3
// but advertises only 1.2 (because the filter rewrote its version list).
TEST_P(TlsExtensionTest13, RemoveTls13FromVersionListBothV12) {
client_->SetVersionRange(SSL_LIBRARY_VERSION_TLS_1_2,
SSL_LIBRARY_VERSION_TLS_1_3);
server_->SetVersionRange(SSL_LIBRARY_VERSION_TLS_1_2,
SSL_LIBRARY_VERSION_TLS_1_3);
ConnectWithReplacementVersionList(SSL_LIBRARY_VERSION_TLS_1_2);
#ifndef TLS_1_3_DRAFT_VERSION
client_->CheckErrorCode(SSL_ERROR_RX_MALFORMED_SERVER_HELLO);
server_->CheckErrorCode(SSL_ERROR_ILLEGAL_PARAMETER_ALERT);
#else
client_->CheckErrorCode(SSL_ERROR_DECRYPT_ERROR_ALERT);
server_->CheckErrorCode(SSL_ERROR_BAD_HANDSHAKE_HASH_VALUE);
#endif
}
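For readers unfamiliar with the harness, TlsExtensionReplacer swaps one extension's body as records pass through. A self-contained sketch of that idea over a raw serialized extensions block (illustrative stand-in; the real filter class lives in the shared test library):

#include <cstddef>
#include <cstdint>
#include <vector>

// Walk a serialized extensions block (2-byte type, 2-byte length, body)
// and replace the body of `target_type` with `new_body`.
std::vector<uint8_t> ReplaceExtension(const std::vector<uint8_t>& exts,
                                      uint16_t target_type,
                                      const std::vector<uint8_t>& new_body) {
  std::vector<uint8_t> out;
  size_t i = 0;
  while (i + 4 <= exts.size()) {
    uint16_t type = static_cast<uint16_t>((exts[i] << 8) | exts[i + 1]);
    size_t len = static_cast<size_t>((exts[i + 2] << 8) | exts[i + 3]);
    if (i + 4 + len > exts.size()) break;  // malformed block; stop copying
    bool replace = (type == target_type);
    const uint8_t* body = replace ? new_body.data() : exts.data() + i + 4;
    size_t body_len = replace ? new_body.size() : len;
    out.push_back(exts[i]);  // extension type, unchanged
    out.push_back(exts[i + 1]);
    out.push_back(static_cast<uint8_t>(body_len >> 8));  // rewritten length
    out.push_back(static_cast<uint8_t>(body_len & 0xff));
    out.insert(out.end(), body, body + body_len);
    i += 4 + len;  // advance past the original extension
  }
  return out;
}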
INSTANTIATE_TEST_CASE_P(ExtensionStream, TlsExtensionTestGeneric,
::testing::Combine(TlsConnectTestBase::kTlsModesStream,
TlsConnectTestBase::kTlsVAll));

View File

@ -421,7 +421,6 @@ TEST_F(TlsConnectTest, TestTls13ResumptionTwice) {
EXPECT_TRUE(client_->cipher_suite(&resumed_suite));
EXPECT_EQ(original_suite, resumed_suite);
// TODO(ekr@rtfm.com): This will change when we fix bug 1257047.
ASSERT_EQ(initialTicket, c2->extension());
ASSERT_NE(initialTicket, c2->extension());
}
} // namespace nss_test

View File

@ -6,6 +6,7 @@
#include "secerr.h"
#include "ssl.h"
#include "ssl3prot.h"
#include "sslerr.h"
#include "sslproto.h"
@ -17,26 +18,6 @@
namespace nss_test {
// Set the version number in the ClientHello.
class TlsInspectorClientHelloVersionSetter : public TlsHandshakeFilter {
public:
TlsInspectorClientHelloVersionSetter(uint16_t version) : version_(version) {}
virtual PacketFilter::Action FilterHandshake(const HandshakeHeader& header,
const DataBuffer& input,
DataBuffer* output) {
if (header.handshake_type() == kTlsHandshakeClientHello) {
*output = input;
output->Write(0, version_, 2);
return CHANGE;
}
return KEEP;
}
private:
uint16_t version_;
};
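The filter deleted here is still used by later tests in this patch, so it has presumably moved into shared test code. It works because client_version occupies the first two bytes of the ClientHello handshake body, so overwriting offset 0 is enough. A minimal standalone equivalent (hypothetical names, not this patch's code):

#include <cstddef>
#include <cstdint>

// Overwrite the 2-byte client_version at the start of a ClientHello
// handshake body (the bytes just after the 4-byte handshake header).
// Returns false if the body is too short to carry a version.
bool SetClientHelloVersion(uint8_t* body, size_t len, uint16_t version) {
  if (len < 2) return false;
  body[0] = static_cast<uint8_t>(version >> 8);    // high byte
  body[1] = static_cast<uint8_t>(version & 0xff);  // low byte
  return true;
}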
TEST_P(TlsConnectStream, ServerNegotiateTls10) {
uint16_t minver, maxver;
client_->GetVersionRange(&minver, &maxver);
@ -67,6 +48,7 @@ TEST_P(TlsConnectGeneric, ServerNegotiateTls12) {
SSL_LIBRARY_VERSION_TLS_1_2);
Connect();
}
#ifndef TLS_1_3_DRAFT_VERSION
// Test the ServerRandom version hack from
// [draft-ietf-tls-tls13-11 Section 6.3.1.1].
@ -91,6 +73,7 @@ TEST_F(DtlsConnectTest, TestDtlsVersion11) {
EXPECT_EQ(SSL_ERROR_UNSUPPORTED_VERSION, server_->error_code());
}
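The "ServerRandom version hack" referenced above is the downgrade sentinel: a server that negotiates an older version overwrites the trailing bytes of ServerHello.random with a fixed marker that the client checks. A client-side sketch using the sentinel value from the final RFC 8446 (the draft-11 mechanism is the same idea, so treat the constant as illustrative):

#include <cstdint>
#include <cstring>

// RFC 8446 sentinel a server writes into the last 8 bytes of its
// random when it downgrades to TLS 1.2 ("DOWNGRD" || 0x01).
static const uint8_t kDowngradeTls12[8] = {0x44, 0x4f, 0x57, 0x4e,
                                           0x47, 0x52, 0x44, 0x01};

// True if a 32-byte server random carries the TLS 1.2 downgrade marker.
bool SawDowngradeSentinel(const uint8_t server_random[32]) {
  return std::memcmp(server_random + 24, kDowngradeTls12, 8) == 0;
}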
// Disabled as long as we have a draft version.
TEST_F(TlsConnectTest, TestDowngradeDetectionToTls12) {
EnsureTlsSetup();
client_->SetPacketFilter(
@ -138,6 +121,7 @@ TEST_F(TlsConnectTest, TestFallbackFromTls13) {
ConnectExpectFail();
ASSERT_EQ(SSL_ERROR_RX_MALFORMED_SERVER_HELLO, client_->error_code());
}
#endif
// The TLS v1.3 spec section C.4 states that 'Implementations MUST NOT send or
// accept any records with a version less than { 3, 0 }'. Thus we will not
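That floor can be enforced directly on the record header; a minimal guard (hypothetical helper, not this patch's code):

#include <cstdint>

// Reject records whose ProtocolVersion is below {3, 0} (SSL 3.0).
// `hdr` points at the 5-byte record header: type, major, minor, len_hi, len_lo.
bool RecordVersionAcceptable(const uint8_t hdr[5]) {
  return hdr[1] >= 3;  // major version byte; {3, 0} is the floor
}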
@ -178,13 +162,8 @@ TEST_P(TlsConnectStream, ConnectTls10AndServerRenegotiateHigher) {
server_->ResetPreliminaryInfo();
server_->StartRenegotiate();
Handshake();
if (test_version < SSL_LIBRARY_VERSION_TLS_1_3) {
client_->CheckErrorCode(SSL_ERROR_UNSUPPORTED_VERSION);
server_->CheckErrorCode(SSL_ERROR_ILLEGAL_PARAMETER_ALERT);
} else {
client_->CheckErrorCode(SSL_ERROR_HANDSHAKE_UNEXPECTED_ALERT);
server_->CheckErrorCode(SSL_ERROR_RENEGOTIATION_NOT_ALLOWED);
}
client_->CheckErrorCode(SSL_ERROR_UNSUPPORTED_VERSION);
server_->CheckErrorCode(SSL_ERROR_ILLEGAL_PARAMETER_ALERT);
}
TEST_P(TlsConnectStream, ConnectTls10AndClientRenegotiateHigher) {
@ -210,13 +189,8 @@ TEST_P(TlsConnectStream, ConnectTls10AndClientRenegotiateHigher) {
server_->ResetPreliminaryInfo();
client_->StartRenegotiate();
Handshake();
if (test_version < SSL_LIBRARY_VERSION_TLS_1_3) {
client_->CheckErrorCode(SSL_ERROR_UNSUPPORTED_VERSION);
server_->CheckErrorCode(SSL_ERROR_ILLEGAL_PARAMETER_ALERT);
} else {
client_->CheckErrorCode(SSL_ERROR_HANDSHAKE_UNEXPECTED_ALERT);
server_->CheckErrorCode(SSL_ERROR_RENEGOTIATION_NOT_ALLOWED);
}
client_->CheckErrorCode(SSL_ERROR_UNSUPPORTED_VERSION);
server_->CheckErrorCode(SSL_ERROR_ILLEGAL_PARAMETER_ALERT);
}
TEST_F(TlsConnectTest, Tls13RejectsRehandshakeClient) {
@ -257,4 +231,62 @@ TEST_P(TlsConnectGeneric, AlertBeforeServerHello) {
CheckConnected();
}
class Tls13NoSupportedVersions : public TlsConnectStreamTls12 {
protected:
void Run(uint16_t overwritten_client_version, uint16_t max_server_version) {
client_->SetVersionRange(SSL_LIBRARY_VERSION_TLS_1_2,
SSL_LIBRARY_VERSION_TLS_1_2);
server_->SetVersionRange(SSL_LIBRARY_VERSION_TLS_1_2, max_server_version);
client_->SetPacketFilter(
new TlsInspectorClientHelloVersionSetter(overwritten_client_version));
auto capture =
new TlsInspectorRecordHandshakeMessage(kTlsHandshakeServerHello);
server_->SetPacketFilter(capture);
ConnectExpectFail();
client_->CheckErrorCode(SSL_ERROR_DECRYPT_ERROR_ALERT);
server_->CheckErrorCode(SSL_ERROR_BAD_HANDSHAKE_HASH_VALUE);
const DataBuffer& server_hello = capture->buffer();
ASSERT_GT(server_hello.len(), 2U);
uint32_t ver;
ASSERT_TRUE(server_hello.Read(0, 2, &ver));
ASSERT_EQ(static_cast<uint32_t>(SSL_LIBRARY_VERSION_TLS_1_2), ver);
}
};
// If we offer a 1.3 ClientHello w/o supported_versions, the server should
// negotiate 1.2.
TEST_F(Tls13NoSupportedVersions,
Tls13ClientHelloWithoutSupportedVersionsServer12) {
Run(SSL_LIBRARY_VERSION_TLS_1_3, SSL_LIBRARY_VERSION_TLS_1_2);
}
TEST_F(Tls13NoSupportedVersions,
Tls13ClientHelloWithoutSupportedVersionsServer13) {
Run(SSL_LIBRARY_VERSION_TLS_1_3, SSL_LIBRARY_VERSION_TLS_1_3);
}
TEST_F(Tls13NoSupportedVersions,
Tls14ClientHelloWithoutSupportedVersionsServer13) {
Run(SSL_LIBRARY_VERSION_TLS_1_3 + 1, SSL_LIBRARY_VERSION_TLS_1_3);
}
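Why do all three cases above pin the ServerHello at TLS 1.2? Without supported_versions the server falls back to legacy version negotiation, which by design tops out below 1.3. A sketch of that selection rule (my reconstruction, using raw version codes; 0x0303 is TLS 1.2):

#include <algorithm>
#include <cstdint>

// Legacy version negotiation: pick the highest version both sides
// support, but never TLS 1.3+ (those require supported_versions).
// Returns 0 if there is no overlap.
uint16_t NegotiateLegacyVersion(uint16_t client_version,
                                uint16_t server_min, uint16_t server_max) {
  uint16_t cap = std::min<uint16_t>(server_max, 0x0303);  // clamp to TLS 1.2
  uint16_t chosen = std::min(client_version, cap);
  return chosen >= server_min ? chosen : 0;
}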
// Offer 1.3 but with ClientHello.legacy_version == TLS 1.4. This
// causes a bad MAC error when we read EncryptedExtensions: the filter
// rewrites the ClientHello after the client has hashed it, so client
// and server derive different handshake traffic keys.
TEST_F(TlsConnectStreamTls13, Tls14ClientHelloWithSupportedVersions) {
client_->SetPacketFilter(new TlsInspectorClientHelloVersionSetter(
SSL_LIBRARY_VERSION_TLS_1_3 + 1));
auto capture =
new TlsInspectorRecordHandshakeMessage(kTlsHandshakeServerHello);
server_->SetPacketFilter(capture);
ConnectExpectFail();
client_->CheckErrorCode(SSL_ERROR_BAD_MAC_READ);
server_->CheckErrorCode(SSL_ERROR_BAD_MAC_READ);
const DataBuffer& server_hello = capture->buffer();
ASSERT_GT(server_hello.len(), 2U);
uint32_t ver;
ASSERT_TRUE(server_hello.Read(0, 2, &ver));
// This way we don't need to change this check with each new draft version.
ASSERT_LT(static_cast<uint32_t>(SSL_LIBRARY_VERSION_TLS_1_2), ver);
}
} // namespace nss_test

View File

@ -142,11 +142,6 @@ bool TlsAgent::EnsureTlsSetup(PRFileDesc* modelSocket) {
EXPECT_EQ(SECSuccess, rv);
if (rv != SECSuccess) return false;
// Needs to be set before configuring server certs.
rv = SSL_OptionSet(ssl_fd_, SSL_NO_STEP_DOWN, PR_TRUE);
EXPECT_EQ(SECSuccess, rv);
if (rv != SECSuccess) return false;
if (role_ == SERVER) {
EXPECT_TRUE(ConfigServerCert(name_, true));

Some files were not shown because too many files have changed in this diff