Merge mozilla-central and inbound

This commit is contained in:
Ed Morley 2014-08-13 17:03:24 +01:00
commit 9e41c131c5
124 changed files with 4505 additions and 686 deletions

View File

@ -1683,8 +1683,6 @@ pref("media.gmp-manager.certs.1.commonName", "aus4.mozilla.org");
pref("media.gmp-manager.certs.2.issuerName", "CN=Thawte SSL CA,O=\"Thawte, Inc.\",C=US");
pref("media.gmp-manager.certs.2.commonName", "aus4.mozilla.org");
// Delete HTTP cache v2 data of users that didn't opt-in manually
pref("browser.cache.auto_delete_cache_version", 1);
// Play with different values of the decay time and get telemetry,
// 0 means to randomize (and persist) the experiment value in users' profiles,
// -1 means no experiment is run and we use the preferred value for frecency (6h)

View File

@ -16,6 +16,7 @@ Implement HTML5 sandbox attribute for IFRAMEs - general tests
SimpleTest.expectAssertions(0, 1);
SimpleTest.waitForExplicitFinish();
SimpleTest.requestCompleteLog();
// a postMessage handler that is used by sandboxed iframes without
// 'allow-same-origin' to communicate pass/fail back to this main page.

View File

@ -58,7 +58,8 @@ public:
duration.value(),
framesCopied,
buffer.forget(),
aChannels));
aChannels,
aSampleRate));
// Remove the frames we just pushed into the queue and loop if there is
// more to be done.

View File

@ -71,10 +71,12 @@ public:
int64_t aDuration,
uint32_t aFrames,
AudioDataValue* aData,
uint32_t aChannels)
uint32_t aChannels,
uint32_t aRate)
: MediaData(AUDIO_SAMPLES, aOffset, aTime, aDuration)
, mFrames(aFrames)
, mChannels(aChannels)
, mRate(aRate)
, mAudioData(aData)
{
MOZ_COUNT_CTOR(AudioData);
@ -92,6 +94,7 @@ public:
const uint32_t mFrames;
const uint32_t mChannels;
const uint32_t mRate;
// At least one of mAudioBuffer/mAudioData must be non-null.
// mChannels channels, each with mFrames frames
nsRefPtr<SharedBuffer> mAudioBuffer;

View File

@ -2802,7 +2802,8 @@ MediaDecoderStateMachine::DropAudioUpToSeekTarget(AudioData* aSample)
duration.value(),
frames,
audioData.forget(),
channels));
channels,
audio->mRate));
AudioQueue().PushFront(data.forget());
return NS_OK;

View File

@ -260,7 +260,7 @@ AppleMP3Reader::AudioSampleCallback(UInt32 aNumBytes,
AudioData *audio = new AudioData(mDecoder->GetResource()->Tell(),
time, duration, numFrames,
reinterpret_cast<AudioDataValue *>(decoded.forget()),
mAudioChannels);
mAudioChannels, mAudioSampleRate);
mAudioQueue.Push(audio);
mCurrentAudioFrame += numFrames;

View File

@ -62,10 +62,9 @@ public:
AMR_AUDIO_FRAME,
UNKNOWN // FrameType not set
};
nsresult SwapInFrameData(nsTArray<uint8_t>& aData)
void SwapInFrameData(nsTArray<uint8_t>& aData)
{
mFrameData.SwapElements(aData);
return NS_OK;
}
nsresult SwapOutFrameData(nsTArray<uint8_t>& aData)
{

View File

@ -143,8 +143,7 @@ OmxVideoTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
videoData->SetFrameType((outFlags & OMXCodecWrapper::BUFFER_SYNC_FRAME) ?
EncodedFrame::AVC_I_FRAME : EncodedFrame::AVC_P_FRAME);
}
rv = videoData->SwapInFrameData(buffer);
NS_ENSURE_SUCCESS(rv, rv);
videoData->SwapInFrameData(buffer);
videoData->SetTimeStamp(outTimeStampUs);
aData.AppendEncodedFrame(videoData);
}
@ -187,8 +186,7 @@ OmxAudioTrackEncoder::AppendEncodedFrames(EncodedFrameContainer& aContainer)
MOZ_ASSERT(false, "audio codec not supported");
}
audiodata->SetTimeStamp(outTimeUs);
rv = audiodata->SwapInFrameData(frameData);
NS_ENSURE_SUCCESS(rv, rv);
audiodata->SwapInFrameData(frameData);
aContainer.AppendEncodedFrame(audiodata);
}

View File

@ -173,7 +173,6 @@ VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData)
vpx_codec_iter_t iter = nullptr;
EncodedFrame::FrameType frameType = EncodedFrame::VP8_P_FRAME;
nsTArray<uint8_t> frameData;
nsresult rv;
const vpx_codec_cx_pkt_t *pkt = nullptr;
while ((pkt = vpx_codec_get_cx_data(mVPXContext, &iter)) != nullptr) {
switch (pkt->kind) {
@ -212,8 +211,7 @@ VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData)
videoData->SetDuration(
(uint64_t)FramesToUsecs(pkt->data.frame.duration, mTrackRate).value());
}
rv = videoData->SwapInFrameData(frameData);
NS_ENSURE_SUCCESS(rv, rv);
videoData->SwapInFrameData(frameData);
VP8LOG("GetEncodedPartitions TimeStamp %lld Duration %lld\n",
videoData->GetTimeStamp(), videoData->GetDuration());
VP8LOG("frameType %d\n", videoData->GetFrameType());

View File

@ -193,7 +193,8 @@ public:
aDuration,
uint32_t(frames.value()),
samples,
mChannelCount);
mChannelCount,
mSampleRate);
}
private:

View File

@ -381,6 +381,10 @@ MP4Reader::ReadMetadata(MediaInfo* aInfo,
NS_ENSURE_TRUE(mAudio.mDecoder != nullptr, NS_ERROR_FAILURE);
nsresult rv = mAudio.mDecoder->Init();
NS_ENSURE_SUCCESS(rv, rv);
// Decode one audio frame to detect potentially incorrect channels count or
// sampling rate from demuxer.
Decode(kAudio);
}
if (HasVideo()) {
@ -585,7 +589,15 @@ MP4Reader::Output(TrackType aTrack, MediaData* aSample)
switch (aTrack) {
case kAudio: {
MOZ_ASSERT(aSample->mType == MediaData::AUDIO_SAMPLES);
AudioQueue().Push(static_cast<AudioData*>(aSample));
AudioData* audioData = static_cast<AudioData*>(aSample);
AudioQueue().Push(audioData);
if (audioData->mChannels != mInfo.mAudio.mChannels ||
audioData->mRate != mInfo.mAudio.mRate) {
LOG("MP4Reader::Output change of sampling rate:%d->%d",
mInfo.mAudio.mRate, audioData->mRate);
mInfo.mAudio.mRate = audioData->mRate;
mInfo.mAudio.mChannels = audioData->mChannels;
}
break;
}
case kVideo: {

View File

@ -133,7 +133,10 @@ PlatformDecoderModule::Create()
#endif
#ifdef MOZ_FFMPEG
if (sFFmpegDecoderEnabled) {
return FFmpegRuntimeLinker::CreateDecoderModule();
nsAutoPtr<PlatformDecoderModule> m(FFmpegRuntimeLinker::CreateDecoderModule());
if (m) {
return m.forget();
}
}
#endif
#ifdef MOZ_APPLEMEDIA

View File

@ -240,7 +240,7 @@ AppleATDecoder::SampleCallback(uint32_t aNumBytes,
AudioBufferList decBuffer;
decBuffer.mNumberBuffers = 1;
decBuffer.mBuffers[0].mNumberChannels = mConfig.channel_count;
decBuffer.mBuffers[0].mNumberChannels = mOutputFormat.mChannelsPerFrame;
decBuffer.mBuffers[0].mDataByteSize = decodedSize;
decBuffer.mBuffers[0].mData = decoded.get();
@ -271,7 +271,9 @@ AppleATDecoder::SampleCallback(uint32_t aNumBytes,
break;
}
const int rate = mConfig.samples_per_second;
const int rate = mOutputFormat.mSampleRate;
const int channels = mOutputFormat.mChannelsPerFrame;
int64_t time = FramesToUsecs(mCurrentAudioFrame, rate).value();
int64_t duration = FramesToUsecs(numFrames, rate).value();
@ -281,7 +283,7 @@ AppleATDecoder::SampleCallback(uint32_t aNumBytes,
AudioData *audio = new AudioData(mSamplePosition,
time, duration, numFrames,
reinterpret_cast<AudioDataValue *>(decoded.forget()),
rate);
channels, rate);
mCallback->Output(audio);
mHaveOutput = true;
@ -299,30 +301,30 @@ AppleATDecoder::SampleCallback(uint32_t aNumBytes,
void
AppleATDecoder::SetupDecoder()
{
AudioStreamBasicDescription inputFormat, outputFormat;
AudioStreamBasicDescription inputFormat;
// Fill in the input format description from the stream.
AppleUtils::GetProperty(mStream,
kAudioFileStreamProperty_DataFormat, &inputFormat);
// Fill in the output format manually.
PodZero(&outputFormat);
outputFormat.mFormatID = kAudioFormatLinearPCM;
outputFormat.mSampleRate = inputFormat.mSampleRate;
outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
PodZero(&mOutputFormat);
mOutputFormat.mFormatID = kAudioFormatLinearPCM;
mOutputFormat.mSampleRate = inputFormat.mSampleRate;
mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
outputFormat.mBitsPerChannel = 32;
outputFormat.mFormatFlags =
mOutputFormat.mBitsPerChannel = 32;
mOutputFormat.mFormatFlags =
kLinearPCMFormatFlagIsFloat |
0;
#else
# error Unknown audio sample type
#endif
// Set up the decoder so it gives us one sample per frame
outputFormat.mFramesPerPacket = 1;
outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame
= outputFormat.mChannelsPerFrame * outputFormat.mBitsPerChannel / 8;
mOutputFormat.mFramesPerPacket = 1;
mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame
= mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;
OSStatus rv = AudioConverterNew(&inputFormat, &outputFormat, &mConverter);
OSStatus rv = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
if (rv) {
LOG("Error %d constructing AudioConverter", rv);
mConverter = nullptr;

View File

@ -52,6 +52,7 @@ private:
uint64_t mCurrentAudioFrame;
int64_t mSamplePosition;
bool mHaveOutput;
AudioStreamBasicDescription mOutputFormat;
void SetupDecoder();
void SubmitSample(nsAutoPtr<mp4_demuxer::MP4Sample> aSample);

View File

@ -180,7 +180,8 @@ EMEAACDecoder::Decoded(const nsTArray<int16_t>& aPCM,
duration.value(),
numFrames,
audioData.forget(),
aChannels));
aChannels,
aRate));
#ifdef LOG_SAMPLE_DECODE
LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",

View File

@ -36,9 +36,6 @@ FFmpegAACDecoder<LIBAV_VER>::Init()
static AudioDataValue*
CopyAndPackAudio(AVFrame* aFrame, uint32_t aNumChannels, uint32_t aNumSamples)
{
// These are the only two valid AAC packet sizes.
NS_ASSERTION(aNumSamples == 960 || aNumSamples == 1024,
"Should have exactly one AAC audio packet.");
MOZ_ASSERT(aNumChannels <= MAX_CHANNELS);
nsAutoArrayPtr<AudioDataValue> audio(
@ -93,13 +90,14 @@ FFmpegAACDecoder<LIBAV_VER>::DecodePacket(MP4Sample* aSample)
"Only one audio packet should be received at a time.");
uint32_t numChannels = mCodecContext->channels;
uint32_t samplingRate = mCodecContext->sample_rate;
nsAutoArrayPtr<AudioDataValue> audio(
CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples));
nsAutoPtr<AudioData> data(
new AudioData(packet.pos, aSample->composition_timestamp, aSample->duration,
mFrame->nb_samples, audio.forget(), numChannels));
mFrame->nb_samples, audio.forget(), numChannels, samplingRate));
mCallback->Output(data.forget());

View File

@ -99,6 +99,9 @@ FFmpegRuntimeLinker::Bind(const char* aLibName, uint32_t Version)
/* static */ PlatformDecoderModule*
// Creates the FFmpeg-backed PlatformDecoderModule. Returns nullptr if the
// FFmpeg library cannot be linked at runtime; otherwise returns the module
// produced by the linked library's factory. Caller takes ownership of the
// returned pointer.
FFmpegRuntimeLinker::CreateDecoderModule()
{
// Ensure the FFmpeg shared library is loaded and its symbols resolved
// before touching sLib; bail out gracefully if linking fails.
if (!Link()) {
return nullptr;
}
PlatformDecoderModule* module = sLib->Factory();
return module;
}

View File

@ -115,8 +115,13 @@ GonkAudioDecoderManager::CreateAudioData(int64_t aStreamOffset, AudioData **v) {
if (!duration.isValid()) {
return NS_ERROR_UNEXPECTED;
}
*v = new AudioData(aStreamOffset, timeUs, duration.value(), frames, buffer.forget(),
mAudioChannels);
*v = new AudioData(aStreamOffset,
timeUs,
duration.value(),
frames,
buffer.forget(),
mAudioChannels,
mAudioRate);
ReleaseAudioBuffer();
return NS_OK;
}

View File

@ -263,7 +263,8 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset,
duration,
numFrames,
audioData.forget(),
mAudioChannels);
mAudioChannels,
mAudioRate);
#ifdef LOG_SAMPLE_DECODE
LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",

View File

@ -434,7 +434,8 @@ nsresult OggReader::DecodeVorbis(ogg_packet* aPacket) {
duration,
frames,
buffer.forget(),
channels));
channels,
mVorbisState->mInfo.rate));
mDecodedAudioFrames += frames;
@ -550,7 +551,8 @@ nsresult OggReader::DecodeOpus(ogg_packet* aPacket) {
endTime - startTime,
frames,
buffer.forget(),
channels));
channels,
mOpusState->mRate));
mDecodedAudioFrames += frames;

View File

@ -243,7 +243,8 @@ bool WaveReader::DecodeAudioData()
static_cast<int64_t>(readSizeTime * USECS_PER_S),
static_cast<int32_t>(frames),
sampleBuffer.forget(),
mChannels));
mChannels,
mSampleRate));
return true;
}

View File

@ -613,11 +613,12 @@ bool WebMReader::DecodeAudioPacket(nestegg_packet* aPacket, int64_t aOffset)
total_frames += frames;
AudioQueue().Push(new AudioData(aOffset,
time.value(),
duration.value(),
frames,
buffer.forget(),
mChannels));
time.value(),
duration.value(),
frames,
buffer.forget(),
mChannels,
rate));
mAudioFrames += frames;
if (vorbis_synthesis_read(&mVorbisDsp, frames) != 0) {
return false;
@ -738,11 +739,12 @@ bool WebMReader::DecodeAudioPacket(nestegg_packet* aPacket, int64_t aOffset)
return false;
};
AudioQueue().Push(new AudioData(mDecoder->GetResource()->Tell(),
time.value(),
duration.value(),
frames,
buffer.forget(),
mChannels));
time.value(),
duration.value(),
frames,
buffer.forget(),
mChannels,
rate));
mAudioFrames += frames;
#else

View File

@ -650,7 +650,8 @@ WMFReader::DecodeAudioData()
duration,
numFrames,
pcmSamples.forget(),
mAudioChannels));
mAudioChannels,
mAudioRate));
#ifdef LOG_SAMPLE_DECODE
DECODER_LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",

View File

@ -2676,9 +2676,15 @@ nsGlobalWindow::SetNewDocument(nsIDocument* aDocument,
if (!aState) {
if (reUseInnerWindow) {
if (newInnerWindow->mDoc != aDocument) {
newInnerWindow->mDoc = aDocument;
// The storage objects contain the URL of the window. We have to
// recreate them when the innerWindow is reused.
newInnerWindow->mLocalStorage = nullptr;
newInnerWindow->mSessionStorage = nullptr;
if (newInnerWindow->IsDOMBinding()) {
WindowBinding::ClearCachedDocumentValue(cx, newInnerWindow);
} else {
@ -11428,11 +11434,15 @@ nsGlobalWindow::Observe(nsISupports* aSubject, const char* aTopic,
// Clone the storage event included in the observer notification. We want
// to dispatch clones rather than the original event.
ErrorResult error;
nsRefPtr<StorageEvent> newEvent =
CloneStorageEvent(fireMozStorageChanged ?
NS_LITERAL_STRING("MozStorageChanged") :
NS_LITERAL_STRING("storage"),
event);
event, error);
if (error.Failed()) {
return error.ErrorCode();
}
newEvent->SetTrusted(true);
@ -11528,7 +11538,8 @@ nsGlobalWindow::Observe(nsISupports* aSubject, const char* aTopic,
already_AddRefed<StorageEvent>
nsGlobalWindow::CloneStorageEvent(const nsAString& aType,
const nsRefPtr<StorageEvent>& aEvent)
const nsRefPtr<StorageEvent>& aEvent,
ErrorResult& aRv)
{
MOZ_ASSERT(IsInnerWindow());
@ -11540,7 +11551,26 @@ nsGlobalWindow::CloneStorageEvent(const nsAString& aType,
aEvent->GetOldValue(dict.mOldValue);
aEvent->GetNewValue(dict.mNewValue);
aEvent->GetUrl(dict.mUrl);
dict.mStorageArea = aEvent->GetStorageArea();
nsRefPtr<DOMStorage> storageArea = aEvent->GetStorageArea();
MOZ_ASSERT(storageArea);
nsRefPtr<DOMStorage> storage;
if (storageArea->GetType() == DOMStorage::LocalStorage) {
storage = GetLocalStorage(aRv);
} else {
MOZ_ASSERT(storageArea->GetType() == DOMStorage::SessionStorage);
storage = GetSessionStorage(aRv);
}
if (aRv.Failed() || !storage) {
return nullptr;
}
MOZ_ASSERT(storage);
MOZ_ASSERT(storage->IsForkOf(storageArea));
dict.mStorageArea = storage;
nsRefPtr<StorageEvent> event = StorageEvent::Constructor(this, aType, dict);
return event.forget();

View File

@ -1386,7 +1386,8 @@ protected:
// Inner windows only.
already_AddRefed<mozilla::dom::StorageEvent>
CloneStorageEvent(const nsAString& aType,
const nsRefPtr<mozilla::dom::StorageEvent>& aEvent);
const nsRefPtr<mozilla::dom::StorageEvent>& aEvent,
mozilla::ErrorResult& aRv);
// Outer windows only.
nsDOMWindowList* GetWindowList();

View File

@ -24,6 +24,7 @@ skip-if = buildapp == 'mulet'
[test_bug989665.html]
[test_bug999456.html]
[test_bug1022229.html]
[test_bug1043106.html]
[test_clearTimeoutIntervalNoArg.html]
[test_consoleEmptyStack.html]
[test_constructor-assignment.html]

View File

@ -0,0 +1,43 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=1043106
-->
<head>
<meta charset="utf-8">
<title>Test for Bug 1043106</title>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1043106">Mozilla Bug 1043106</a>
<iframe id="iframe"></iframe>
<script type="application/javascript">
// The storage object (localStorage or sessionStorage) that the current
// sub-test expects the "storage" event's storageArea to be.
var storage;
window.addEventListener("storage", function (event) {
// FIX: the original called ok(event.storageArea, storage, msg), which only
// checks truthiness and silently passes `storage` as the test name.
// is() performs the intended equality comparison against the expected
// storage object.
is(event.storageArea, storage, "The storageArea is correct");
runTests();
}, false);
// Each entry writes via the iframe using the named storage object and
// expects the resulting "storage" event on this window to carry it.
var tests = [ { key: 'localStorage', storage: localStorage },
{ key: 'sessionStorage', storage: sessionStorage } ];
function runTests() {
if (!tests.length) {
SimpleTest.finish();
return;
}
var t = tests.shift();
storage = t.storage;
var ifr = document.getElementById("iframe");
// The same-origin iframe performs the write, which fires a "storage"
// event on this (observing) window.
ifr.src = "data:text/html,<script>" + t.key + ".setItem(\"a\",\"b\");</" + "script>";
}
SimpleTest.waitForExplicitFinish();
runTests();
</script>
</body>
</html>

View File

@ -7,6 +7,7 @@
#include "ContentEventHandler.h"
#include "IMEContentObserver.h"
#include "mozilla/AsyncEventDispatcher.h"
#include "mozilla/AutoRestore.h"
#include "mozilla/EventStateManager.h"
#include "mozilla/IMEStateManager.h"
#include "mozilla/TextComposition.h"
@ -87,6 +88,7 @@ IMEContentObserver::IMEContentObserver()
, mIsSelectionChangeEventPending(false)
, mSelectionChangeCausedOnlyByComposition(false)
, mIsPositionChangeEventPending(false)
, mIsFlushingPendingNotifications(false)
{
#ifdef DEBUG
TestMergingTextChangeData();
@ -429,12 +431,14 @@ class TextChangeEvent : public nsRunnable
{
public:
TextChangeEvent(IMEContentObserver* aDispatcher,
const IMEContentObserver::TextChangeData& aData)
IMEContentObserver::TextChangeData& aData)
: mDispatcher(aDispatcher)
, mData(aData)
{
MOZ_ASSERT(mDispatcher);
MOZ_ASSERT(mData.mStored);
// Reset mStored because this now consumes the data.
aData.mStored = false;
}
NS_IMETHOD Run()
@ -962,27 +966,73 @@ IMEContentObserver::MaybeNotifyIMEOfPositionChange()
FlushMergeableNotifications();
}
// Runnable that re-invokes IMEContentObserver::FlushMergeableNotifications()
// on a later turn of the event loop. Used when dispatching the pending
// notifications caused new pending changes, so flushing must be retried
// asynchronously rather than recursively.
class AsyncMergeableNotificationsFlusher : public nsRunnable
{
public:
AsyncMergeableNotificationsFlusher(IMEContentObserver* aIMEContentObserver)
: mIMEContentObserver(aIMEContentObserver)
{
MOZ_ASSERT(mIMEContentObserver);
}
NS_IMETHOD Run()
{
mIMEContentObserver->FlushMergeableNotifications();
return NS_OK;
}
private:
// Strong reference: keeps the observer alive until Run() executes.
nsRefPtr<IMEContentObserver> mIMEContentObserver;
};
void
IMEContentObserver::FlushMergeableNotifications()
{
// If we're in handling an edit action, this method will be called later.
// If this is already detached from the widget, this doesn't need to notify
// anything.
if (mIsEditorInTransaction || !mWidget) {
return;
}
// Notifying something may cause nested call of this method. For example,
// when somebody notified one of the notifications may dispatch query content
// event. Then, it causes flushing layout which may cause another layout
// change notification.
if (mIsFlushingPendingNotifications) {
// So, if this is already called, this should do nothing.
return;
}
AutoRestore<bool> flusing(mIsFlushingPendingNotifications);
mIsFlushingPendingNotifications = true;
// NOTE: Reset each pending flag because sending notification may cause
// another change.
if (mTextChangeData.mStored) {
nsContentUtils::AddScriptRunner(new TextChangeEvent(this, mTextChangeData));
mTextChangeData.mStored = false;
}
if (mIsSelectionChangeEventPending) {
mIsSelectionChangeEventPending = false;
nsContentUtils::AddScriptRunner(
new SelectionChangeEvent(this, mSelectionChangeCausedOnlyByComposition));
mIsSelectionChangeEventPending = false;
}
if (mIsPositionChangeEventPending) {
nsContentUtils::AddScriptRunner(new PositionChangeEvent(this));
mIsPositionChangeEventPending = false;
nsContentUtils::AddScriptRunner(new PositionChangeEvent(this));
}
// If notifications may cause new change, we should notify them now.
if (mTextChangeData.mStored ||
mIsSelectionChangeEventPending ||
mIsPositionChangeEventPending) {
nsRefPtr<AsyncMergeableNotificationsFlusher> asyncFlusher =
new AsyncMergeableNotificationsFlusher(this);
NS_DispatchToCurrentThread(asyncFlusher);
}
}

View File

@ -38,6 +38,8 @@ class IMEContentObserver MOZ_FINAL : public nsISelectionListener
, public nsSupportsWeakReference
, public nsIEditorObserver
{
friend class AsyncMergeableNotificationsFlusher;
public:
IMEContentObserver();
@ -219,6 +221,7 @@ private:
bool mIsSelectionChangeEventPending;
bool mSelectionChangeCausedOnlyByComposition;
bool mIsPositionChangeEventPending;
bool mIsFlushingPendingNotifications;
};
} // namespace mozilla

View File

@ -126,6 +126,12 @@ public:
bool IsPrivate() const { return mIsPrivate; }
bool IsSessionOnly() const { return mIsSessionOnly; }
// Returns true if this storage object and aOther are views ("forks") of the
// same underlying storage area, i.e. they share the same cache object.
// aOther must be non-null (asserted).
bool IsForkOf(const DOMStorage* aOther) const
{
MOZ_ASSERT(aOther);
return mCache == aOther->mCache;
}
private:
~DOMStorage();

View File

@ -39,13 +39,8 @@
#include "jit/none/BaseMacroAssembler-none.h"
namespace JSC { typedef MacroAssemblerNone MacroAssembler; }
#elif WTF_CPU_ARM_THUMB2
#include "assembler/assembler/MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssembler; }
#elif WTF_CPU_ARM_TRADITIONAL
#include "assembler/assembler/MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssembler; }
#elif JS_CODEGEN_ARM
// Merged with the jit backend support.
#elif WTF_CPU_MIPS
#include "assembler/assembler/MacroAssemblerMIPS.h"

View File

@ -1,100 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2009 University of Szeged
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#include "assembler/wtf/Platform.h"
#if ENABLE_ASSEMBLER && WTF_CPU_ARM_TRADITIONAL
#include "assembler/assembler/MacroAssemblerARM.h"
#if (WTF_OS_LINUX || WTF_OS_ANDROID) && !defined(JS_ARM_SIMULATOR)
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <elf.h>
#include <stdio.h>
// lame check for kernel version
// see bug 586550
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
#include <asm/procinfo.h>
#else
#include <asm/hwcap.h>
#endif
#endif
namespace JSC {
static bool isVFPPresent()
{
#ifdef JS_ARM_SIMULATOR
return true;
#else
#if WTF_OS_LINUX
int fd = open("/proc/self/auxv", O_RDONLY);
if (fd > 0) {
Elf32_auxv_t aux;
while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
if (aux.a_type == AT_HWCAP) {
close(fd);
return aux.a_un.a_val & HWCAP_VFP;
}
}
close(fd);
}
#endif
#if defined(__GNUC__) && defined(__VFP_FP__)
return true;
#endif
#ifdef WTF_OS_ANDROID
FILE *fp = fopen("/proc/cpuinfo", "r");
if (!fp)
return false;
char buf[1024];
fread(buf, sizeof(char), sizeof(buf), fp);
fclose(fp);
if (strstr(buf, "vfp"))
return true;
#endif
return false;
#endif // JS_ARM_SIMULATOR
}
const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent();
}
#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

View File

@ -1,51 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2008 Apple Inc.
* Copyright (C) 2009, 2010 University of Szeged
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#ifndef assembler_assembler_MacroAssemblerARM_h
#define assembler_assembler_MacroAssemblerARM_h
#include "assembler/wtf/Platform.h"
#if ENABLE_ASSEMBLER && WTF_CPU_ARM_TRADITIONAL
namespace JSC {
class MacroAssemblerARM {
public:
static bool supportsFloatingPoint() { return s_isVFPPresent; }
static const bool s_isVFPPresent;
};
}
#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
#endif /* assembler_assembler_MacroAssemblerARM_h */

View File

@ -1,49 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Copyright (C) 2009 Apple Inc. All rights reserved.
* Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ***** END LICENSE BLOCK ***** */
#ifndef assembler_assembler_MacroAssemblerARMv7_h
#define assembler_assembler_MacroAssemblerARMv7_h
#include "assembler/wtf/Platform.h"
#if ENABLE(ASSEMBLER)
namespace JSC {
class MacroAssemblerARMv7 {
public:
static bool supportsFloatingPoint() { return true; }
};
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
#endif /* assembler_assembler_MacroAssemblerARMv7_h */

View File

@ -298,12 +298,16 @@ private:
OP2_UCOMISD_VsdWsd = 0x2E,
OP2_MOVMSKPD_EdVd = 0x50,
OP2_ADDSD_VsdWsd = 0x58,
OP2_ADDPS_VpsWps = 0x58,
OP2_MULSD_VsdWsd = 0x59,
OP2_MULPS_VpsWps = 0x59,
OP2_CVTSS2SD_VsdEd = 0x5A,
OP2_CVTSD2SS_VsdEd = 0x5A,
OP2_SUBSD_VsdWsd = 0x5C,
OP2_SUBPS_VpsWps = 0x5C,
OP2_MINSD_VsdWsd = 0x5D,
OP2_DIVSD_VsdWsd = 0x5E,
OP2_DIVPS_VpsWps = 0x5E,
OP2_MAXSD_VsdWsd = 0x5F,
OP2_SQRTSD_VsdWsd = 0x51,
OP2_SQRTSS_VssWss = 0x51,
@ -328,7 +332,10 @@ private:
OP2_MOVZX_GvEw = 0xB7,
OP2_XADD_EvGv = 0xC1,
OP2_PEXTRW_GdUdIb = 0xC5,
OP2_SHUFPS_VpsWpsIb = 0xC6
OP2_SHUFPS_VpsWpsIb = 0xC6,
OP2_PXORDQ_VdqWdq = 0xEF,
OP2_PSUBD_VdqWdq = 0xFA,
OP2_PADDD_VdqWdq = 0xFE
} TwoByteOpcodeID;
typedef enum {
@ -661,6 +668,124 @@ public:
m_formatter.twoByteOp(OP2_XADD_EvGv, srcdest, base, index, scale, offset);
}
// Emit `paddd` (packed 32-bit integer add, encoding 66 0F FE per Intel SDM):
// register-to-register form.
void paddd_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("paddd %s, %s", nameFPReg(src), nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PADDD_VdqWdq, (RegisterID)dst, (RegisterID)src);
}
// `paddd` with a base+offset memory source operand.
void paddd_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("paddd %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PADDD_VdqWdq, (RegisterID)dst, base, offset);
}
// `paddd` with an absolute-address memory source operand.
void paddd_mr(const void* address, XMMRegisterID dst)
{
spew("paddd %p, %s",
address, nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PADDD_VdqWdq, (RegisterID)dst, address);
}
// Emit `psubd` (packed 32-bit integer subtract, encoding 66 0F FA per Intel
// SDM): register-to-register form.
void psubd_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("psubd %s, %s", nameFPReg(src), nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PSUBD_VdqWdq, (RegisterID)dst, (RegisterID)src);
}
// `psubd` with a base+offset memory source operand.
void psubd_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("psubd %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PSUBD_VdqWdq, (RegisterID)dst, base, offset);
}
// `psubd` with an absolute-address memory source operand.
void psubd_mr(const void* address, XMMRegisterID dst)
{
spew("psubd %p, %s",
address, nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PSUBD_VdqWdq, (RegisterID)dst, address);
}
// Emit `addps` (packed single-precision float add, encoding 0F 58 — no
// operand-size prefix, unlike the 66-prefixed integer ops above):
// register-to-register form.
void addps_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("addps %s, %s",
nameFPReg(src), nameFPReg(dst));
m_formatter.twoByteOp(OP2_ADDPS_VpsWps, (RegisterID)dst, (RegisterID)src);
}
// `addps` with a base+offset memory source operand.
void addps_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("addps %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.twoByteOp(OP2_ADDPS_VpsWps, (RegisterID)dst, base, offset);
}
// `addps` with an absolute-address memory source operand.
void addps_mr(const void* address, XMMRegisterID dst)
{
spew("addps %p, %s",
address, nameFPReg(dst));
m_formatter.twoByteOp(OP2_ADDPS_VpsWps, (RegisterID)dst, address);
}
// Emit `subps` (packed single-precision float subtract, encoding 0F 5C):
// register-to-register form.
void subps_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("subps %s, %s",
nameFPReg(src), nameFPReg(dst));
m_formatter.twoByteOp(OP2_SUBPS_VpsWps, (RegisterID)dst, (RegisterID)src);
}
// `subps` with a base+offset memory source operand.
void subps_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("subps %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.twoByteOp(OP2_SUBPS_VpsWps, (RegisterID)dst, base, offset);
}
// `subps` with an absolute-address memory source operand.
void subps_mr(const void* address, XMMRegisterID dst)
{
spew("subps %p, %s",
address, nameFPReg(dst));
m_formatter.twoByteOp(OP2_SUBPS_VpsWps, (RegisterID)dst, address);
}
// Emit `mulps` (packed single-precision float multiply, encoding 0F 59):
// register-to-register form.
void mulps_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("mulps %s, %s",
nameFPReg(src), nameFPReg(dst));
m_formatter.twoByteOp(OP2_MULPS_VpsWps, (RegisterID)dst, (RegisterID)src);
}
// `mulps` with a base+offset memory source operand.
void mulps_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("mulps %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.twoByteOp(OP2_MULPS_VpsWps, (RegisterID)dst, base, offset);
}
// `mulps` with an absolute-address memory source operand.
void mulps_mr(const void* address, XMMRegisterID dst)
{
spew("mulps %p, %s",
address, nameFPReg(dst));
m_formatter.twoByteOp(OP2_MULPS_VpsWps, (RegisterID)dst, address);
}
// Emit `divps` (packed single-precision float divide, encoding 0F 5E):
// register-to-register form.
void divps_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("divps %s, %s",
nameFPReg(src), nameFPReg(dst));
m_formatter.twoByteOp(OP2_DIVPS_VpsWps, (RegisterID)dst, (RegisterID)src);
}
// `divps` with a base+offset memory source operand.
void divps_mr(int offset, RegisterID base, XMMRegisterID dst)
{
spew("divps %s0x%x(%s), %s",
PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameFPReg(dst));
m_formatter.twoByteOp(OP2_DIVPS_VpsWps, (RegisterID)dst, base, offset);
}
// `divps` with an absolute-address memory source operand.
void divps_mr(const void* address, XMMRegisterID dst)
{
spew("divps %p, %s",
address, nameFPReg(dst));
m_formatter.twoByteOp(OP2_DIVPS_VpsWps, (RegisterID)dst, address);
}
void andl_rr(RegisterID src, RegisterID dst)
{
spew("andl %s, %s",
@ -2590,6 +2715,14 @@ public:
m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
}
void pxor_rr(XMMRegisterID src, XMMRegisterID dst)
{
spew("pxor %s, %s",
nameFPReg(src), nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_PXORDQ_VdqWdq, (RegisterID)dst, (RegisterID)src);
}
void pshufd_irr(uint32_t mask, XMMRegisterID src, XMMRegisterID dst)
{
JS_ASSERT(mask < 256);
@ -2932,6 +3065,40 @@ public:
m_formatter.twoByteOp(OP2_MOVAPD_VsdWsd, (RegisterID)dst, (RegisterID)src);
}
#ifdef WTF_CPU_X86_64
JmpSrc movaps_ripr(XMMRegisterID dst)
{
spew("movaps ?(%%rip), %s",
nameFPReg(dst));
m_formatter.twoByteRipOp(OP2_MOVAPS_VsdWsd, (RegisterID)dst, 0);
return JmpSrc(m_formatter.size());
}
JmpSrc movdqa_ripr(XMMRegisterID dst)
{
spew("movdqa ?(%%rip), %s",
nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteRipOp(OP2_MOVDQ_VdqWdq, (RegisterID)dst, 0);
return JmpSrc(m_formatter.size());
}
#else
void movaps_mr(const void* address, XMMRegisterID dst)
{
spew("movaps %p, %s",
address, nameFPReg(dst));
m_formatter.twoByteOp(OP2_MOVAPS_VsdWsd, (RegisterID)dst, address);
}
void movdqa_mr(const void* address, XMMRegisterID dst)
{
spew("movdqa %p, %s",
address, nameFPReg(dst));
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_MOVDQ_VdqWdq, (RegisterID)dst, address);
}
#endif // WTF_CPU_X86_64
void movdqu_rm(XMMRegisterID src, int offset, RegisterID base)
{
spew("movdqu %s, %s0x%x(%s)",
@ -3346,6 +3513,19 @@ public:
m_formatter.floatConstant(f);
}
void int32x4Constant(const int32_t s[4])
{
spew(".int32x4 (%d %d %d %d)", s[0], s[1], s[2], s[3]);
MOZ_ASSERT(m_formatter.isAligned(16));
m_formatter.int32x4Constant(s);
}
void float32x4Constant(const float f[4])
{
spew(".float32x4 (%f %f %f %f)", f[0], f[1], f[2], f[3]);
MOZ_ASSERT(m_formatter.isAligned(16));
m_formatter.float32x4Constant(f);
}
void int64Constant(int64_t i)
{
spew(".quad %lld", (long long)i);
@ -4030,12 +4210,30 @@ private:
m_buffer.putIntUnchecked(u.u32);
}
void int32x4Constant(const int32_t s[4])
{
for (size_t i = 0; i < 4; ++i)
int32Constant(s[i]);
}
void float32x4Constant(const float s[4])
{
for (size_t i = 0; i < 4; ++i)
floatConstant(s[i]);
}
void int64Constant(int64_t i)
{
m_buffer.ensureSpace(sizeof(int64_t));
m_buffer.putInt64Unchecked(i);
}
void int32Constant(int32_t i)
{
m_buffer.ensureSpace(sizeof(int32_t));
m_buffer.putIntUnchecked(i);
}
// Administrative methods:
size_t size() const { return m_buffer.size(); }

View File

@ -417,7 +417,7 @@ TypedObjectMemory(HandleValue v)
template<typename V>
JSObject *
js::Create(JSContext *cx, typename V::Elem *data)
js::CreateSimd(JSContext *cx, typename V::Elem *data)
{
typedef typename V::Elem Elem;
Rooted<TypeDescr*> typeDescr(cx, &V::GetTypeDescr(*cx->global()));
@ -433,8 +433,8 @@ js::Create(JSContext *cx, typename V::Elem *data)
return result;
}
template JSObject *js::Create<Float32x4>(JSContext *cx, Float32x4::Elem *data);
template JSObject *js::Create<Int32x4>(JSContext *cx, Int32x4::Elem *data);
template JSObject *js::CreateSimd<Float32x4>(JSContext *cx, Float32x4::Elem *data);
template JSObject *js::CreateSimd<Int32x4>(JSContext *cx, Int32x4::Elem *data);
namespace js {
template<typename T>
@ -608,7 +608,7 @@ CoercedFunc(JSContext *cx, unsigned argc, Value *vp)
}
RetElem *coercedResult = reinterpret_cast<RetElem *>(result);
RootedObject obj(cx, Create<Out>(cx, coercedResult));
RootedObject obj(cx, CreateSimd<Out>(cx, coercedResult));
if (!obj)
return false;
@ -653,7 +653,7 @@ FuncWith(JSContext *cx, unsigned argc, Value *vp)
result[i] = OpWith<Elem>::apply(i, withAsBool, val[i]);
}
RootedObject obj(cx, Create<V>(cx, result));
RootedObject obj(cx, CreateSimd<V>(cx, result));
if (!obj)
return false;
@ -712,7 +712,7 @@ FuncShuffle(JSContext *cx, unsigned argc, Value *vp)
result[i] = val2[(maskArg >> (i * SELECT_SHIFT)) & SELECT_MASK];
}
RootedObject obj(cx, Create<V>(cx, result));
RootedObject obj(cx, CreateSimd<V>(cx, result));
if (!obj)
return false;
@ -740,7 +740,7 @@ Int32x4BinaryScalar(JSContext *cx, unsigned argc, Value *vp)
for (unsigned i = 0; i < 4; i++)
result[i] = Op::apply(val[i], bits);
RootedObject obj(cx, Create<Int32x4>(cx, result));
RootedObject obj(cx, CreateSimd<Int32x4>(cx, result));
if (!obj)
return false;
@ -764,7 +764,7 @@ FuncConvert(JSContext *cx, unsigned argc, Value *vp)
for (unsigned i = 0; i < Vret::lanes; i++)
result[i] = RetElem(val[i]);
RootedObject obj(cx, Create<Vret>(cx, result));
RootedObject obj(cx, CreateSimd<Vret>(cx, result));
if (!obj)
return false;
@ -783,7 +783,7 @@ FuncConvertBits(JSContext *cx, unsigned argc, Value *vp)
return ErrorBadArgs(cx);
RetElem *val = TypedObjectMemory<RetElem *>(args[0]);
RootedObject obj(cx, Create<Vret>(cx, val));
RootedObject obj(cx, CreateSimd<Vret>(cx, val));
if (!obj)
return false;
@ -805,7 +805,7 @@ FuncZero(JSContext *cx, unsigned argc, Value *vp)
for (unsigned i = 0; i < Vret::lanes; i++)
result[i] = RetElem(0);
RootedObject obj(cx, Create<Vret>(cx, result));
RootedObject obj(cx, CreateSimd<Vret>(cx, result));
if (!obj)
return false;
@ -831,7 +831,7 @@ FuncSplat(JSContext *cx, unsigned argc, Value *vp)
for (unsigned i = 0; i < Vret::lanes; i++)
result[i] = arg;
RootedObject obj(cx, Create<Vret>(cx, result));
RootedObject obj(cx, CreateSimd<Vret>(cx, result));
if (!obj)
return false;
@ -854,7 +854,7 @@ Int32x4Bool(JSContext *cx, unsigned argc, Value *vp)
for (unsigned i = 0; i < Int32x4::lanes; i++)
result[i] = args[i].toBoolean() ? 0xFFFFFFFF : 0x0;
RootedObject obj(cx, Create<Int32x4>(cx, result));
RootedObject obj(cx, CreateSimd<Int32x4>(cx, result));
if (!obj)
return false;
@ -882,7 +882,7 @@ Float32x4Clamp(JSContext *cx, unsigned argc, Value *vp)
result[i] = result[i] > upperLimit[i] ? upperLimit[i] : result[i];
}
RootedObject obj(cx, Create<Float32x4>(cx, result));
RootedObject obj(cx, CreateSimd<Float32x4>(cx, result));
if (!obj)
return false;
@ -917,7 +917,7 @@ Int32x4Select(JSContext *cx, unsigned argc, Value *vp)
orInt[i] = Or<int32_t>::apply(tr[i], fr[i]);
float *result = reinterpret_cast<float *>(orInt);
RootedObject obj(cx, Create<Float32x4>(cx, result));
RootedObject obj(cx, CreateSimd<Float32x4>(cx, result));
if (!obj)
return false;

View File

@ -164,7 +164,7 @@ struct Int32x4 {
};
template<typename V>
JSObject *Create(JSContext *cx, typename V::Elem *data);
JSObject *CreateSimd(JSContext *cx, typename V::Elem *data);
#define DECLARE_SIMD_FLOAT32X4_FUNCTION(Name, Func, Operands, Flags, MIRId) \
extern bool \

View File

@ -274,7 +274,8 @@ class GCRuntime
bool triggerGC(JS::gcreason::Reason reason);
bool triggerZoneGC(Zone *zone, JS::gcreason::Reason reason);
void maybeGC(Zone *zone);
bool maybeGC(Zone *zone);
void maybePeriodicFullGC();
void minorGC(JS::gcreason::Reason reason);
void minorGC(JSContext *cx, JS::gcreason::Reason reason);
void gcIfNeeded(JSContext *cx);
@ -487,7 +488,7 @@ class GCRuntime
void getNextZoneGroup();
void endMarkingZoneGroup();
void beginSweepingZoneGroup();
bool releaseObservedTypes();
bool shouldReleaseObservedTypes();
void endSweepingZoneGroup();
bool sweepPhase(SliceBudget &sliceBudget);
void endSweepPhase(JSGCInvocationKind gckind, bool lastGC);
@ -567,7 +568,6 @@ class GCRuntime
bool chunkAllocationSinceLastGC;
int64_t nextFullGCTime;
int64_t lastGCTime;
int64_t jitReleaseTime;
JSGCMode mode;
@ -589,6 +589,12 @@ class GCRuntime
*/
volatile uintptr_t isNeeded;
/* Incremented at the start of every major GC. */
uint64_t majorGCNumber;
/* The major GC number at which to release observed type information. */
uint64_t jitReleaseNumber;
/* Incremented on every GC slice. */
uint64_t number;
@ -624,6 +630,9 @@ class GCRuntime
/* Whether any sweeping will take place in the separate GC helper thread. */
bool sweepOnBackgroundThread;
/* Whether observed type information is being released in the current GC. */
bool releaseObservedTypes;
/* Whether any black->gray edges were found during marking. */
bool foundBlackGrayEdges;

View File

@ -0,0 +1,83 @@
setJitCompilerOption("baseline.usecount.trigger", 10);
setJitCompilerOption("ion.usecount.trigger", 20);
function join_check() {
var lengthWasCalled = false;
var obj = {"0": "", "1": ""};
Object.defineProperty(obj, "length", {
get : function(){ lengthWasCalled = true; return 2; },
enumerable : true,
configurable : true
});
var res = Array.prototype.join.call(obj, { toString: function () {
if (lengthWasCalled)
return "good";
else
return "bad";
}})
assertEq(res, "good");
}
function split(i) {
var x = (i + "->" + i).split("->");
assertEq(x[0], "" + i);
return i;
}
function join(i) {
var x = [i, i].join("->");
assertEq(x, i + "->" + i);
return i;
}
function split_join(i) {
var x = (i + "-" + i).split("-").join("->");
assertEq(x, i + "->" + i);
return i;
}
function split_join_2(i) {
var x = (i + "-" + i).split("-");
x.push("" + i);
var res = x.join("->");
assertEq(res, i + "->" + i + "->" + i);
return i;
}
function resumeHere() { bailout(); }
function split_join_3(i) {
var x = (i + "-" + i).split("-");
resumeHere();
var res = x.join("->");
assertEq(res, i + "->" + i);
return i;
}
function trip(i) {
if (i == 99)
assertEq(myjoin.arguments[1][0], "" + i)
}
function myjoin(i, x) {
trip(i);
return x.join("->");
}
function split_join_4(i) {
var x = (i + "-" + i).split("-");
var res = myjoin(i, x);
assertEq(res, i + "->" + i);
return i;
}
for (var i = 0; i < 100; ++i) {
join_check(i);
split(i);
join(i);
split_join(i);
split_join_2(i);
split_join_3(i);
split_join_4(i);
}

View File

@ -13,6 +13,7 @@
#include "jit/IonAnalysis.h"
#include "jit/IonLinker.h"
#include "jit/IonSpewer.h"
#include "jit/JitcodeMap.h"
#ifdef JS_ION_PERF
# include "jit/PerfSpewer.h"
#endif
@ -244,6 +245,21 @@ BaselineCompiler::compile()
if (script->compartment()->debugMode())
baselineScript->setDebugMode();
// Register a native => bytecode mapping entry for this script if needed.
if (cx->runtime()->jitRuntime()->isNativeToBytecodeMapEnabled(cx->runtime())) {
IonSpew(IonSpew_Profiling, "Added JitcodeGlobalEntry for baseline script %s:%d (%p)",
script->filename(), script->lineno(), baselineScript);
JitcodeGlobalEntry::BaselineEntry entry;
entry.init(code->raw(), code->raw() + code->instructionsSize(), script);
JitcodeGlobalTable *globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
if (!globalTable->addEntry(entry))
return Method_Error;
// Mark the jitcode as having a bytecode map.
code->setHasBytecodeMap();
}
script->setBaselineScript(cx, baselineScript);
return Method_Compiled;

View File

@ -9,6 +9,8 @@
#include "mozilla/DebugOnly.h"
#include "jit/IonLinker.h"
#include "jit/JitcodeMap.h"
#include "jit/PerfSpewer.h"
#include "jit/IonFrames-inl.h"
@ -659,6 +661,11 @@ jit::RecompileOnStackBaselineScriptsForDebugMode(JSContext *cx, JSCompartment *c
MinorGC(cx->runtime(), JS::gcreason::EVICT_NURSERY);
#endif
// When the profiler is enabled, we need to suppress sampling from here until
// the end of the function, since the basline jit scripts are in a state of
// flux.
AutoSuppressProfilerSampling suppressProfilerSampling(cx);
// Try to recompile all the scripts. If we encounter an error, we need to
// roll back as if none of the compilations happened, so that we don't
// crash.

View File

@ -446,6 +446,7 @@ BaselineScript::Destroy(FreeOp *fop, BaselineScript *script)
*/
JS_ASSERT(fop->runtime()->gc.nursery.isEmpty());
#endif
fop->delete_(script);
}
@ -690,6 +691,27 @@ BaselineScript::nativeCodeForPC(JSScript *script, jsbytecode *pc, PCMappingSlotI
jsbytecode *
BaselineScript::pcForReturnOffset(JSScript *script, uint32_t nativeOffset)
{
return pcForNativeOffset(script, nativeOffset, true);
}
jsbytecode *
BaselineScript::pcForReturnAddress(JSScript *script, uint8_t *nativeAddress)
{
JS_ASSERT(script->baselineScript() == this);
JS_ASSERT(nativeAddress >= method_->raw());
JS_ASSERT(nativeAddress < method_->raw() + method_->instructionsSize());
return pcForReturnOffset(script, uint32_t(nativeAddress - method_->raw()));
}
jsbytecode *
BaselineScript::pcForNativeOffset(JSScript *script, uint32_t nativeOffset)
{
return pcForNativeOffset(script, nativeOffset, false);
}
jsbytecode *
BaselineScript::pcForNativeOffset(JSScript *script, uint32_t nativeOffset, bool isReturn)
{
JS_ASSERT(script->baselineScript() == this);
JS_ASSERT(nativeOffset < method_->instructionsSize());
@ -707,14 +729,19 @@ BaselineScript::pcForReturnOffset(JSScript *script, uint32_t nativeOffset)
i--;
PCMappingIndexEntry &entry = pcMappingIndexEntry(i);
JS_ASSERT(nativeOffset >= entry.nativeOffset);
JS_ASSERT_IF(isReturn, nativeOffset >= entry.nativeOffset);
CompactBufferReader reader(pcMappingReader(i));
jsbytecode *curPC = script->offsetToPC(entry.pcOffset);
uint32_t curNativeOffset = entry.nativeOffset;
JS_ASSERT(script->containsPC(curPC));
JS_ASSERT(curNativeOffset <= nativeOffset);
JS_ASSERT_IF(isReturn, nativeOffset >= curNativeOffset);
// In the raw native-lookup case, the native code address can occur
// before the start of ops. Associate those with bytecode offset 0.
if (!isReturn && (curNativeOffset > nativeOffset))
return script->code();
while (true) {
// If the high bit is set, the native offset relative to the
@ -723,22 +750,28 @@ BaselineScript::pcForReturnOffset(JSScript *script, uint32_t nativeOffset)
if (b & 0x80)
curNativeOffset += reader.readUnsigned();
if (curNativeOffset == nativeOffset)
if (isReturn ? (nativeOffset == curNativeOffset) : (nativeOffset <= curNativeOffset))
return curPC;
// If this is a raw native lookup (not jsop return addresses), then
// the native address may lie in-between the last delta-entry in
// a pcMappingIndexEntry, and the next pcMappingIndexEntry.
if (!isReturn && !reader.more())
return curPC;
curPC += GetBytecodeLength(curPC);
}
MOZ_ASSUME_UNREACHABLE("Invalid pc");
MOZ_ASSUME_UNREACHABLE("Bad baseline jitcode address");
}
jsbytecode *
BaselineScript::pcForReturnAddress(JSScript *script, uint8_t *nativeAddress)
BaselineScript::pcForNativeAddress(JSScript *script, uint8_t *nativeAddress)
{
JS_ASSERT(script->baselineScript() == this);
JS_ASSERT(nativeAddress >= method_->raw());
JS_ASSERT(nativeAddress < method_->raw() + method_->instructionsSize());
return pcForReturnOffset(script, uint32_t(nativeAddress - method_->raw()));
return pcForNativeOffset(script, uint32_t(nativeAddress - method_->raw()));
}
void

View File

@ -315,9 +315,17 @@ struct BaselineScript
void copyPCMappingEntries(const CompactBufferWriter &entries);
uint8_t *nativeCodeForPC(JSScript *script, jsbytecode *pc, PCMappingSlotInfo *slotInfo = nullptr);
jsbytecode *pcForReturnOffset(JSScript *script, uint32_t nativeOffset);
jsbytecode *pcForReturnAddress(JSScript *script, uint8_t *nativeAddress);
jsbytecode *pcForNativeAddress(JSScript *script, uint8_t *nativeAddress);
jsbytecode *pcForNativeOffset(JSScript *script, uint32_t nativeOffset);
private:
jsbytecode *pcForNativeOffset(JSScript *script, uint32_t nativeOffset, bool isReturn);
public:
// Toggle debug traps (used for breakpoints and step mode) in the script.
// If |pc| is nullptr, toggle traps for all ops in the script. Else, only
// toggle traps at |pc|.

View File

@ -26,6 +26,7 @@
#include "jit/IonLinker.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/IonSpewer.h"
#include "jit/JitcodeMap.h"
#include "jit/Lowering.h"
#include "jit/MIRGenerator.h"
#include "jit/MoveEmitter.h"
@ -120,7 +121,7 @@ CodeGeneratorShared::addCache(LInstruction *lir, size_t cacheIndex)
cache->setIdempotent();
OutOfLineUpdateCache *ool = new(alloc()) OutOfLineUpdateCache(lir, cacheIndex);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, mir))
return false;
// OOL-specific state depends on the type of cache.
@ -184,7 +185,7 @@ CodeGenerator::visitValueToInt32(LValueToInt32 *lir)
Label fails;
if (lir->mode() == LValueToInt32::TRUNCATE) {
OutOfLineCode *oolDouble = oolTruncateDouble(temp, output);
OutOfLineCode *oolDouble = oolTruncateDouble(temp, output, lir->mir());
if (!oolDouble)
return false;
@ -691,7 +692,7 @@ CodeGenerator::visitTestOAndBranch(LTestOAndBranch *lir)
"Objects which can't emulate undefined should have been constant-folded");
OutOfLineTestObject *ool = new(alloc()) OutOfLineTestObject();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Label *truthy = getJumpLabelForBranch(lir->ifTruthy());
@ -714,7 +715,7 @@ CodeGenerator::visitTestVAndBranch(LTestVAndBranch *lir)
// object.
if (lir->mir()->operandMightEmulateUndefined() && input->mightBeType(MIRType_Object)) {
ool = new(alloc()) OutOfLineTestObject();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
}
@ -1317,7 +1318,7 @@ bool
CodeGenerator::visitInterruptCheckImplicit(LInterruptCheckImplicit *lir)
{
OutOfLineInterruptCheckImplicit *ool = new(alloc()) OutOfLineInterruptCheckImplicit(current, lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
lir->setOolEntry(ool->entry());
@ -1408,6 +1409,15 @@ CodeGenerator::visitStart(LStart *lir)
return true;
}
bool
CodeGenerator::visitPcOffset(LPcOffset *lir)
{
if (!addNativeToBytecodeEntry(lir->mir()->trackedSite()))
return false;
return true;
}
bool
CodeGenerator::visitReturn(LReturn *lir)
{
@ -2043,7 +2053,7 @@ CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO *lir)
{
#ifdef JSGC_GENERATIONAL
OutOfLineCallPostWriteBarrier *ool = new(alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Register temp = ToTempRegisterOrInvalid(lir->temp());
@ -2069,7 +2079,7 @@ CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV *lir)
{
#ifdef JSGC_GENERATIONAL
OutOfLineCallPostWriteBarrier *ool = new(alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object());
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Register temp = ToTempRegisterOrInvalid(lir->temp());
@ -2749,7 +2759,6 @@ CodeGenerator::visitArraySplice(LArraySplice *lir)
return callVM(ArraySpliceDenseInfo, lir);
}
bool
CodeGenerator::visitBail(LBail *lir)
{
@ -2985,7 +2994,7 @@ CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed *lir)
const void *limitAddr = GetIonContext()->runtime->addressOfJitStackLimit();
CheckOverRecursedFailure *ool = new(alloc()) CheckOverRecursedFailure(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
// Conditional forward (unlikely) branch to failure.
@ -3072,7 +3081,7 @@ CodeGenerator::visitCheckOverRecursedPar(LCheckOverRecursedPar *lir)
// Conditional forward (unlikely) branch to failure.
CheckOverRecursedFailure *ool = new(alloc()) CheckOverRecursedFailure(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
masm.branchPtr(Assembler::BelowOrEqual, StackPointer, tempReg, ool->entry());
@ -3405,6 +3414,14 @@ CodeGenerator::generateBody()
resetOsiPointRegs(iter->safepoint());
#endif
if (iter->mirRaw()) {
// Only add instructions that have a tracked inline script tree.
if (iter->mirRaw()->trackedSite().hasTree()) {
if (!addNativeToBytecodeEntry(iter->mirRaw()->trackedSite()))
return false;
}
}
if (!iter->accept(this))
return false;
@ -3540,7 +3557,7 @@ CodeGenerator::visitNewArray(LNewArray *lir)
return visitNewArrayCallVM(lir);
OutOfLineNewArray *ool = new(alloc()) OutOfLineNewArray(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry());
@ -3710,7 +3727,7 @@ CodeGenerator::visitNewObject(LNewObject *lir)
return visitNewObjectVMCall(lir);
OutOfLineNewObject *ool = new(alloc()) OutOfLineNewObject(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
bool initFixedSlots = ShouldInitFixedSlots(lir, templateObject);
@ -3923,6 +3940,9 @@ bool
CodeGenerator::emitAllocateGCThingPar(LInstruction *lir, Register objReg, Register cxReg,
Register tempReg1, Register tempReg2, JSObject *templateObj)
{
JS_ASSERT(lir->mirRaw());
JS_ASSERT(lir->mirRaw()->isInstruction());
gc::AllocKind allocKind = templateObj->tenuredGetAllocKind();
#ifdef JSGC_FJGENERATIONAL
OutOfLineCode *ool = oolCallVM(NewGCThingParInfo, lir,
@ -3931,7 +3951,7 @@ CodeGenerator::emitAllocateGCThingPar(LInstruction *lir, Register objReg, Regist
return false;
#else
OutOfLineNewGCThingPar *ool = new(alloc()) OutOfLineNewGCThingPar(lir, allocKind, objReg, cxReg);
if (!ool || !addOutOfLineCode(ool))
if (!ool || !addOutOfLineCode(ool, lir->mirRaw()->toInstruction()))
return false;
#endif
@ -4826,7 +4846,7 @@ CodeGenerator::visitIsNullOrLikeUndefined(LIsNullOrLikeUndefined *lir)
Label *notNullOrLikeUndefined;
if (lir->mir()->operandMightEmulateUndefined()) {
ool = new(alloc()) OutOfLineTestObjectWithLabels();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
nullOrLikeUndefined = ool->label1();
notNullOrLikeUndefined = ool->label2();
@ -4910,7 +4930,7 @@ CodeGenerator::visitIsNullOrLikeUndefinedAndBranch(LIsNullOrLikeUndefinedAndBran
OutOfLineTestObject *ool = nullptr;
if (lir->cmpMir()->operandMightEmulateUndefined()) {
ool = new(alloc()) OutOfLineTestObject();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->cmpMir()))
return false;
}
@ -4959,7 +4979,7 @@ CodeGenerator::visitEmulatesUndefined(LEmulatesUndefined *lir)
MOZ_ASSERT(op == JSOP_EQ || op == JSOP_NE, "Strict equality should have been folded");
OutOfLineTestObjectWithLabels *ool = new(alloc()) OutOfLineTestObjectWithLabels();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Label *emulatesUndefined = ool->label1();
@ -4993,7 +5013,7 @@ CodeGenerator::visitEmulatesUndefinedAndBranch(LEmulatesUndefinedAndBranch *lir)
MOZ_ASSERT(op == JSOP_EQ || op == JSOP_NE, "Strict equality should have been folded");
OutOfLineTestObject *ool = new(alloc()) OutOfLineTestObject();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->cmpMir()))
return false;
Label *equal;
@ -5497,7 +5517,7 @@ CodeGenerator::visitNotO(LNotO *lir)
"This should be constant-folded if the object can't emulate undefined.");
OutOfLineTestObjectWithLabels *ool = new(alloc()) OutOfLineTestObjectWithLabels();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Label *ifEmulatesUndefined = ool->label1();
@ -5536,7 +5556,7 @@ CodeGenerator::visitNotV(LNotV *lir)
// object.
if (lir->mir()->operandMightEmulateUndefined() && operand->mightBeType(MIRType_Object)) {
ool = new(alloc()) OutOfLineTestObjectWithLabels();
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
ifTruthy = ool->label1();
ifFalsy = ool->label2();
@ -5758,7 +5778,7 @@ bool
CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT *lir)
{
OutOfLineStoreElementHole *ool = new(alloc()) OutOfLineStoreElementHole(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Register elements = ToRegister(lir->elements());
@ -5783,7 +5803,7 @@ bool
CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV *lir)
{
OutOfLineStoreElementHole *ool = new(alloc()) OutOfLineStoreElementHole(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
Register elements = ToRegister(lir->elements());
@ -6110,6 +6130,18 @@ CodeGenerator::visitArrayConcat(LArrayConcat *lir)
return callVM(ArrayConcatDenseInfo, lir);
}
typedef JSString *(*ArrayJoinFn)(JSContext *, HandleObject, HandleString);
static const VMFunction ArrayJoinInfo = FunctionInfo<ArrayJoinFn>(jit::ArrayJoin);
bool
CodeGenerator::visitArrayJoin(LArrayJoin *lir)
{
pushArg(ToRegister(lir->separator()));
pushArg(ToRegister(lir->array()));
return callVM(ArrayJoinInfo, lir);
}
typedef JSObject *(*GetIteratorObjectFn)(JSContext *, HandleObject, uint32_t);
static const VMFunction GetIteratorObjectInfo = FunctionInfo<GetIteratorObjectFn>(GetIteratorObject);
@ -6564,6 +6596,13 @@ CodeGenerator::generate()
gen->info().script()->filename(),
gen->info().script()->lineno());
// Initialize native code table with an entry to the start of
// top-level script.
InlineScriptTree *tree = gen->info().inlineScriptTree();
jsbytecode *startPC = tree->script()->code();
if (!addNativeToBytecodeEntry(BytecodeSite(tree, startPC)))
return false;
if (!snapshots_.init())
return false;
@ -6618,22 +6657,74 @@ CodeGenerator::generate()
if (!generatePrologue())
return false;
// Reset native => bytecode map table with top-level script and startPc.
if (!addNativeToBytecodeEntry(BytecodeSite(tree, startPC)))
return false;
if (!generateBody())
return false;
// Reset native => bytecode map table with top-level script and startPc.
if (!addNativeToBytecodeEntry(BytecodeSite(tree, startPC)))
return false;
if (!generateEpilogue())
return false;
// Reset native => bytecode map table with top-level script and startPc.
if (!addNativeToBytecodeEntry(BytecodeSite(tree, startPC)))
return false;
if (!generateInvalidateEpilogue())
return false;
#if defined(JS_ION_PERF)
// Note the end of the inline code and start of the OOL code.
perfSpewer_.noteEndInlineCode(masm);
#endif
// native => bytecode entries for OOL code will be added
// by CodeGeneratorShared::generateOutOfLineCode
if (!generateOutOfLineCode())
return false;
// Add terminal entry.
if (!addNativeToBytecodeEntry(BytecodeSite(tree, startPC)))
return false;
// Dump Native to bytecode entries to spew.
dumpNativeToBytecodeEntries();
return !masm.oom();
}
struct AutoDiscardIonCode
{
JSContext *cx;
types::RecompileInfo *recompileInfo;
IonScript *ionScript;
bool keep;
AutoDiscardIonCode(JSContext *cx, types::RecompileInfo *recompileInfo)
: cx(cx), recompileInfo(recompileInfo), ionScript(nullptr), keep(false) {}
~AutoDiscardIonCode() {
if (keep)
return;
// Use js_free instead of IonScript::Destroy: the cache list and
// backedge list are still uninitialized.
if (ionScript)
js_free(ionScript);
recompileInfo->compilerOutput(cx->zone()->types)->invalidate();
}
void keepIonCode() {
keep = true;
}
};
bool
CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
{
@ -6678,6 +6769,8 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
if (executionMode == ParallelExecution)
AddPossibleCallees(cx, graph.mir(), callTargets);
AutoDiscardIonCode discardIonCode(cx, &recompileInfo);
IonScript *ionScript =
IonScript::New(cx, recompileInfo,
graph.totalSlotCount(), scriptFrameSize,
@ -6687,10 +6780,9 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
cacheList_.length(), runtimeData_.length(),
safepoints_.size(), callTargets.length(),
patchableBackedges_.length(), optimizationLevel);
if (!ionScript) {
recompileInfo.compilerOutput(cx->zone()->types)->invalidate();
if (!ionScript)
return false;
}
discardIonCode.ionScript = ionScript;
// Lock the runtime against interrupt callbacks during the link.
// We don't want an interrupt request to protect the code for the script
@ -6714,23 +6806,38 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
JitCode *code = (executionMode == SequentialExecution)
? linker.newCodeForIonScript(cx)
: linker.newCode<CanGC>(cx, ION_CODE);
if (!code) {
// Use js_free instead of IonScript::Destroy: the cache list and
// backedge list are still uninitialized.
js_free(ionScript);
recompileInfo.compilerOutput(cx->zone()->types)->invalidate();
if (!code)
return false;
// Encode native to bytecode map if profiling is enabled.
if (isNativeToBytecodeMapEnabled()) {
// Generate native-to-bytecode main table.
if (!generateCompactNativeToBytecodeMap(cx, code))
return false;
uint8_t *ionTableAddr = ((uint8_t *) nativeToBytecodeMap_) + nativeToBytecodeTableOffset_;
JitcodeIonTable *ionTable = (JitcodeIonTable *) ionTableAddr;
// Construct the IonEntry that will go into the global table.
JitcodeGlobalEntry::IonEntry entry;
if (!ionTable->makeIonEntry(cx, code, nativeToBytecodeScriptListLength_,
nativeToBytecodeScriptList_, entry))
{
return false;
}
// Add entry to the global table.
JitcodeGlobalTable *globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
if (!globalTable->addEntry(entry)) {
// Memory may have been allocated for the entry.
entry.destroy();
return false;
}
// Mark the jitcode as having a bytecode map.
code->setHasBytecodeMap();
}
ionScript->setMethod(code);
ionScript->setSkipArgCheckEntryOffset(getSkipArgCheckEntryOffset());
// If SPS is enabled, mark IonScript as having been instrumented with SPS
if (sps_.enabled())
ionScript->setHasSPSInstrumentation();
SetIonScript(script, executionMode, ionScript);
if (cx->runtime()->spsProfiler.enabled()) {
const char *filename = script->filename();
if (filename == nullptr)
@ -6744,6 +6851,15 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
js_free(buf);
}
ionScript->setMethod(code);
ionScript->setSkipArgCheckEntryOffset(getSkipArgCheckEntryOffset());
// If SPS is enabled, mark IonScript as having been instrumented with SPS
if (sps_.enabled())
ionScript->setHasSPSInstrumentation();
SetIonScript(script, executionMode, ionScript);
// In parallel execution mode, when we first compile a script, we
// don't know that its potential callees are compiled, so set a
// flag warning that the callees may not be fully compiled.
@ -6844,6 +6960,9 @@ CodeGenerator::link(JSContext *cx, types::CompilerConstraintList *constraints)
if (IonScriptCounts *counts = extractScriptCounts())
script->addIonCounts(counts);
// Make sure that AutoDiscardIonCode does not free the relevant info.
discardIonCode.keepIonCode();
return true;
}
@ -6875,7 +6994,7 @@ CodeGenerator::visitUnboxFloatingPoint(LUnboxFloatingPoint *lir)
// Out-of-line path to convert int32 to double or bailout
// if this instruction is fallible.
OutOfLineUnboxFloatingPoint *ool = new(alloc()) OutOfLineUnboxFloatingPoint(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
FloatRegister resultReg = ToFloatRegister(result);
@ -7673,7 +7792,7 @@ CodeGenerator::visitTypeOfV(LTypeOfV *lir)
// The input may be a callable object (result is "function") or may
// emulate undefined (result is "undefined"). Use an OOL path.
ool = new(alloc()) OutOfLineTypeOfV(lir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mir()))
return false;
masm.branchTestObject(Assembler::Equal, tag, ool->entry());

View File

@ -68,6 +68,7 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitParameter(LParameter *lir);
bool visitCallee(LCallee *lir);
bool visitStart(LStart *lir);
bool visitPcOffset(LPcOffset *lir);
bool visitReturn(LReturn *ret);
bool visitDefVar(LDefVar *lir);
bool visitDefFun(LDefFun *lir);
@ -251,6 +252,7 @@ class CodeGenerator : public CodeGeneratorSpecific
bool visitArrayPushV(LArrayPushV *lir);
bool visitArrayPushT(LArrayPushT *lir);
bool visitArrayConcat(LArrayConcat *lir);
bool visitArrayJoin(LArrayJoin *lir);
bool visitLoadTypedArrayElement(LLoadTypedArrayElement *lir);
bool visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole *lir);
bool visitStoreTypedArrayElement(LStoreTypedArrayElement *lir);

View File

@ -68,6 +68,11 @@ class CompactBufferReader
uint32_t b1 = readByte();
return b0 | (b1 << 8);
}
uint32_t readNativeEndianUint32_t() {
// Must be at 4-byte boundary
JS_ASSERT(uintptr_t(buffer_) % sizeof(uint32_t) == 0);
return *reinterpret_cast<const uint32_t *>(buffer_);
}
uint32_t readUnsigned() {
return readVariableLength();
}
@ -93,6 +98,10 @@ class CompactBufferReader
MOZ_ASSERT(start < end_);
MOZ_ASSERT(buffer_ < end_);
}
const uint8_t *currentPosition() const {
return buffer_;
}
};
class CompactBufferWriter
@ -140,6 +149,15 @@ class CompactBufferWriter
writeByte(value & 0xFF);
writeByte(value >> 8);
}
void writeNativeEndianUint32_t(uint32_t value) {
// Must be at 4-byte boundary
JS_ASSERT(length() % sizeof(uint32_t) == 0);
writeFixedUint32_t(0);
if (oom())
return;
uint8_t *endPtr = buffer() + length();
reinterpret_cast<uint32_t *>(endPtr)[-1] = value;
}
size_t length() const {
return buffer_.length();
}

View File

@ -83,6 +83,9 @@ class InlineScriptTree {
bool isOutermostCaller() const {
return caller_ == nullptr;
}
bool hasCaller() const {
return caller_ != nullptr;
}
InlineScriptTree *outermostCaller() {
if (isOutermostCaller())
return this;
@ -97,12 +100,27 @@ class InlineScriptTree {
return script_;
}
InlineScriptTree *children() const {
bool hasChildren() const {
return children_ != nullptr;
}
InlineScriptTree *firstChild() const {
JS_ASSERT(hasChildren());
return children_;
}
bool hasNextCallee() const {
return nextCallee_ != nullptr;
}
InlineScriptTree *nextCallee() const {
JS_ASSERT(hasNextCallee());
return nextCallee_;
}
unsigned depth() const {
if (isOutermostCaller())
return 1;
return 1 + caller_->depth();
}
};
class BytecodeSite {
@ -119,7 +137,14 @@ class BytecodeSite {
BytecodeSite(InlineScriptTree *tree, jsbytecode *pc)
: tree_(tree), pc_(pc)
{}
{
JS_ASSERT(tree_ != nullptr);
JS_ASSERT(pc_ != nullptr);
}
bool hasTree() const {
return tree_ != nullptr;
}
InlineScriptTree *tree() const {
return tree_;
@ -128,6 +153,10 @@ class BytecodeSite {
jsbytecode *pc() const {
return pc_;
}
JSScript *script() const {
return tree_ ? tree_->script() : nullptr;
}
};

View File

@ -27,6 +27,7 @@
#include "jit/IonBuilder.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/IonSpewer.h"
#include "jit/JitcodeMap.h"
#include "jit/JitCommon.h"
#include "jit/JitCompartment.h"
#include "jit/LICM.h"
@ -165,7 +166,8 @@ JitRuntime::JitRuntime()
functionWrappers_(nullptr),
osrTempData_(nullptr),
ionCodeProtected_(false),
ionReturnOverride_(MagicValue(JS_ARG_POISON))
ionReturnOverride_(MagicValue(JS_ARG_POISON)),
jitcodeGlobalTable_(nullptr)
{
}
@ -177,6 +179,10 @@ JitRuntime::~JitRuntime()
// Note: The interrupt lock is not taken here, as JitRuntime is only
// destroyed along with its containing JSRuntime.
js_delete(ionAlloc_);
// By this point, the jitcode global table should be empty.
JS_ASSERT_IF(jitcodeGlobalTable_, jitcodeGlobalTable_->empty());
js_delete(jitcodeGlobalTable_);
}
bool
@ -289,6 +295,10 @@ JitRuntime::initialize(JSContext *cx)
return false;
}
jitcodeGlobalTable_ = cx->new_<JitcodeGlobalTable>();
if (!jitcodeGlobalTable_)
return false;
return true;
}
@ -761,6 +771,12 @@ JitCode::finalize(FreeOp *fop)
// to read the contents of the pool we are releasing references in.
JS_ASSERT(fop->runtime()->currentThreadOwnsInterruptLock());
// If this jitcode has a bytecode map, de-register it.
if (hasBytecodeMap_) {
JS_ASSERT(fop->runtime()->jitRuntime()->hasJitcodeGlobalTable());
fop->runtime()->jitRuntime()->getJitcodeGlobalTable()->removeEntry(raw());
}
// Buffer can be freed at any time hereafter. Catch use-after-free bugs.
// Don't do this if the Ion code is protected, as the signal handler will
// deadlock trying to reacquire the interrupt lock.

View File

@ -1277,6 +1277,9 @@ IonBuilder::traverseBytecode()
}
#endif
if (isNativeToBytecodeMapEnabled())
current->add(MPcOffset::New(alloc()));
// Nothing in inspectOpcode() is allowed to advance the pc.
JSOp op = JSOp(*pc);
if (!inspectOpcode(op))

View File

@ -675,6 +675,7 @@ class IonBuilder : public MIRGenerator
InliningStatus inlineArrayPopShift(CallInfo &callInfo, MArrayPopShift::Mode mode);
InliningStatus inlineArrayPush(CallInfo &callInfo);
InliningStatus inlineArrayConcat(CallInfo &callInfo);
InliningStatus inlineArrayJoin(CallInfo &callInfo);
InliningStatus inlineArraySplice(CallInfo &callInfo);
// Math natives.

View File

@ -16,6 +16,7 @@
#include "jit/Ion.h"
#include "jit/IonLinker.h"
#include "jit/IonSpewer.h"
#include "jit/JitcodeMap.h"
#include "jit/Lowering.h"
#ifdef JS_ION_PERF
# include "jit/PerfSpewer.h"
@ -429,6 +430,22 @@ IonCache::linkAndAttachStub(JSContext *cx, MacroAssembler &masm, StubAttacher &a
attachStub(masm, attacher, code);
// Add entry to native => bytecode mapping for this stub if needed.
if (cx->runtime()->jitRuntime()->isNativeToBytecodeMapEnabled(cx->runtime())) {
JitcodeGlobalEntry::IonCacheEntry entry;
entry.init(code->raw(), code->raw() + code->instructionsSize(), rejoinAddress());
// Add entry to the global table.
JitcodeGlobalTable *globalTable = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
if (!globalTable->addEntry(entry)) {
entry.destroy();
return false;
}
// Mark the jitcode as having a bytecode map.
code->setHasBytecodeMap();
}
return true;
}

View File

@ -213,6 +213,9 @@ class IonCache
profilerLeavePc_ = pc;
}
// Get the address at which IC rejoins the mainline jitcode.
virtual void *rejoinAddress() = 0;
virtual void emitInitialJump(MacroAssembler &masm, AddCacheState &addState) = 0;
virtual void bindInitialJump(MacroAssembler &masm, AddCacheState &addState) = 0;
virtual void updateBaseAddress(JitCode *code, MacroAssembler &masm);
@ -398,6 +401,10 @@ class RepatchIonCache : public IonCache
// Update the labels once the code is finalized.
void updateBaseAddress(JitCode *code, MacroAssembler &masm);
virtual void *rejoinAddress() MOZ_OVERRIDE {
return rejoinLabel().raw();
}
};
//
@ -496,6 +503,10 @@ class DispatchIonCache : public IonCache
// Fix up the first stub pointer once the code is finalized.
void updateBaseAddress(JitCode *code, MacroAssembler &masm);
virtual void *rejoinAddress() MOZ_OVERRIDE {
return rejoinLabel_.raw();
}
};
// Define the cache kind and pre-declare data structures used for calling inline

View File

@ -44,6 +44,8 @@ class JitCode : public gc::BarrieredCell<JitCode>
uint8_t kind_ : 3; // jit::CodeKind, for the memory reporters.
bool invalidated_ : 1; // Whether the code object has been invalidated.
// This is necessary to prevent GC tracing.
bool hasBytecodeMap_ : 1; // Whether the code object has been registered with
// native=>bytecode mapping tables.
#if JS_BITS_PER_WORD == 32
// Ensure JitCode is gc::Cell aligned.
@ -66,7 +68,8 @@ class JitCode : public gc::BarrieredCell<JitCode>
preBarrierTableBytes_(0),
headerSize_(headerSize),
kind_(kind),
invalidated_(false)
invalidated_(false),
hasBytecodeMap_(false)
{
MOZ_ASSERT(CodeKind(kind_) == kind);
MOZ_ASSERT(headerSize_ == headerSize);
@ -89,6 +92,9 @@ class JitCode : public gc::BarrieredCell<JitCode>
uint8_t *raw() const {
return code_;
}
uint8_t *rawEnd() const {
return code_ + insnSize_;
}
size_t instructionsSize() const {
return insnSize_;
}
@ -98,6 +104,10 @@ class JitCode : public gc::BarrieredCell<JitCode>
invalidated_ = true;
}
void setHasBytecodeMap() {
hasBytecodeMap_ = true;
}
void togglePreBarriers(bool enabled);
// If this JitCode object has been, effectively, corrupted due to

View File

@ -19,6 +19,7 @@
#include "jit/Ion.h"
#include "jit/IonMacroAssembler.h"
#include "jit/IonSpewer.h"
#include "jit/JitcodeMap.h"
#include "jit/JitCompartment.h"
#include "jit/ParallelFunctions.h"
#include "jit/PcScriptCache.h"
@ -112,6 +113,7 @@ JitFrameIterator::JitFrameIterator(IonJSFrameLayout *fp, ExecutionMode mode)
mode_(mode),
kind_(Kind_FrameIterator)
{
verifyReturnAddressUsingNativeToBytecodeMap();
}
IonBailoutIterator *
@ -328,6 +330,9 @@ JitFrameIterator::operator++()
type_ = JitFrame_BaselineStub;
returnAddressToFp_ = current()->returnAddress();
current_ = prev;
verifyReturnAddressUsingNativeToBytecodeMap();
return *this;
}
@ -2229,6 +2234,80 @@ JitFrameIterator::dump() const
fputc('\n', stderr);
}
#ifdef DEBUG
bool
JitFrameIterator::verifyReturnAddressUsingNativeToBytecodeMap()
{
JS_ASSERT(returnAddressToFp_ != nullptr);
// Only handle Ion frames for now.
if (type_ != JitFrame_IonJS && type_ != JitFrame_BaselineJS)
return true;
JSRuntime *rt = js::TlsPerThreadData.get()->runtimeIfOnOwnerThread();
// Don't verify on non-main-thread.
if (!rt)
return true;
// Don't verify if sampling is being suppressed.
if (!rt->isProfilerSamplingEnabled())
return true;
if (rt->isHeapMinorCollecting())
return true;
JitRuntime *jitrt = rt->jitRuntime();
// Look up and print bytecode info for the native address.
JitcodeGlobalEntry entry;
if (!jitrt->getJitcodeGlobalTable()->lookup(returnAddressToFp_, &entry))
return true;
IonSpew(IonSpew_Profiling, "Found nativeToBytecode entry for %p: %p - %p",
returnAddressToFp_, entry.nativeStartAddr(), entry.nativeEndAddr());
JitcodeGlobalEntry::BytecodeLocationVector location;
uint32_t depth = UINT32_MAX;
if (!entry.callStackAtAddr(rt, returnAddressToFp_, location, &depth))
return false;
JS_ASSERT(depth > 0 && depth != UINT32_MAX);
JS_ASSERT(location.length() == depth);
IonSpew(IonSpew_Profiling, "Found bytecode location of depth %d:", depth);
for (size_t i = 0; i < location.length(); i++) {
IonSpew(IonSpew_Profiling, " %s:%d - %d",
location[i].script->filename(), location[i].script->lineno(),
(int) (location[i].pc - location[i].script->code()));
}
if (type_ == JitFrame_IonJS) {
// Create an InlineFrameIterator here and verify the mapped info against the iterator info.
InlineFrameIterator inlineFrames(GetJSContextFromJitCode(), this);
for (size_t idx = 0; idx < location.length(); idx++) {
JS_ASSERT(idx < location.length());
JS_ASSERT_IF(idx < location.length() - 1, inlineFrames.more());
IonSpew(IonSpew_Profiling, "Match %d: ION %s:%d(%d) vs N2B %s:%d(%d)",
(int)idx,
inlineFrames.script()->filename(),
inlineFrames.script()->lineno(),
inlineFrames.pc() - inlineFrames.script()->code(),
location[idx].script->filename(),
location[idx].script->lineno(),
location[idx].pc - location[idx].script->code());
JS_ASSERT(inlineFrames.script() == location[idx].script);
if (inlineFrames.more())
++inlineFrames;
}
}
return true;
}
#endif // DEBUG
IonJSFrameLayout *
InvalidationBailoutStack::fp() const
{

View File

@ -250,6 +250,7 @@ jit::CheckLogging()
" range Range Analysis\n"
" unroll Loop unrolling\n"
" logs C1 and JSON visualization logging\n"
" profiling Profiling-related information\n"
" all Everything\n"
"\n"
" bl-aborts Baseline compiler abort messages\n"
@ -304,6 +305,8 @@ jit::CheckLogging()
EnableChannel(IonSpew_CacheFlush);
if (ContainsFlag(env, "logs"))
EnableIonDebugLogging();
if (ContainsFlag(env, "profiling"))
EnableChannel(IonSpew_Profiling);
if (ContainsFlag(env, "all"))
LoggingBits = uint32_t(-1);

View File

@ -56,6 +56,8 @@ namespace jit {
_(Safepoints) \
/* Debug info about Pools*/ \
_(Pools) \
/* Profiling-related information */ \
_(Profiling) \
/* Debug info about the I$ */ \
_(CacheFlush) \
\

View File

@ -7,6 +7,7 @@
#ifndef jit_IonTypes_h
#define jit_IonTypes_h
#include "mozilla/HashFunctions.h"
#include "mozilla/TypedEnum.h"
#include "jstypes.h"
@ -232,6 +233,111 @@ static const uint32_t VECTOR_SCALE_BITS = 2;
static const uint32_t VECTOR_SCALE_SHIFT = ELEMENT_TYPE_BITS + ELEMENT_TYPE_SHIFT;
static const uint32_t VECTOR_SCALE_MASK = (1 << VECTOR_SCALE_BITS) - 1;
// A 128-bit SIMD constant: four int32 lanes or four float lanes.  The class
// also serves as a HashPolicy (Lookup/hash/match) for hash tables keyed on
// SIMD constants.
class SimdConstant {
  public:
    enum Type {
        Int32x4,
        Float32x4,
        Undefined = -1
    };

  private:
    Type type_;
    union {
        int32_t i32x4[4];
        float f32x4[4];
    } u;

    bool defined() const {
        return type_ != Undefined;
    }

    // Set all four integer lanes and tag the constant as Int32x4.
    void fillInt32x4(int32_t x, int32_t y, int32_t z, int32_t w)
    {
        type_ = Int32x4;
        u.i32x4[0] = x;
        u.i32x4[1] = y;
        u.i32x4[2] = z;
        u.i32x4[3] = w;
    }

    // Set all four float lanes and tag the constant as Float32x4.
    void fillFloat32x4(float x, float y, float z, float w)
    {
        type_ = Float32x4;
        u.f32x4[0] = x;
        u.f32x4[1] = y;
        u.f32x4[2] = z;
        u.f32x4[3] = w;
    }

  public:
    // Doesn't have a default constructor, as it would prevent it from being
    // included in unions.

    static SimdConstant CreateX4(int32_t x, int32_t y, int32_t z, int32_t w) {
        SimdConstant cst;
        cst.fillInt32x4(x, y, z, w);
        return cst;
    }
    static SimdConstant CreateX4(int32_t *array) {
        SimdConstant cst;
        cst.fillInt32x4(array[0], array[1], array[2], array[3]);
        return cst;
    }
    static SimdConstant CreateX4(float x, float y, float z, float w) {
        SimdConstant cst;
        cst.fillFloat32x4(x, y, z, w);
        return cst;
    }
    static SimdConstant CreateX4(float *array) {
        SimdConstant cst;
        cst.fillFloat32x4(array[0], array[1], array[2], array[3]);
        return cst;
    }

    // Number of lanes; both supported kinds are 4-wide.
    uint32_t length() const {
        JS_ASSERT(defined());
        switch(type_) {
          case Int32x4:
          case Float32x4:
            return 4;
          case Undefined:
            break;
        }
        MOZ_CRASH("Unexpected SIMD kind");
    }

    Type type() const {
        JS_ASSERT(defined());
        return type_;
    }

    const int32_t *asInt32x4() const {
        JS_ASSERT(defined() && type_ == Int32x4);
        return u.i32x4;
    }

    const float *asFloat32x4() const {
        JS_ASSERT(defined() && type_ == Float32x4);
        return u.f32x4;
    }

    bool operator==(const SimdConstant &rhs) const {
        JS_ASSERT(defined() && rhs.defined());
        if (type() != rhs.type())
            return false;
        // The factory methods always fill every lane, so comparing the whole
        // union byte-wise is well-defined.
        return memcmp(&u, &rhs.u, sizeof(u)) == 0;
    }

    // SimdConstant is a HashPolicy
    typedef SimdConstant Lookup;
    static HashNumber hash(const SimdConstant &val) {
        // Hash only the lane payload.  Hashing sizeof(SimdConstant) bytes
        // starting at &val.u would read past the end of the object, because
        // the union does not sit at offset zero (type_ precedes it).
        return mozilla::HashBytes(&val.u, sizeof(val.u));
    }
    static bool match(const SimdConstant &lhs, const SimdConstant &rhs) {
        return lhs == rhs;
    }
};
// The ordering of this enumeration is important: Anything < Value is a
// specialized type. Furthermore, anything < String has trivial conversion to
// a number.

View File

@ -56,6 +56,7 @@ typedef void (*EnterJitCode)(void *code, unsigned argc, Value *argv, Interpreter
size_t numStackValues, Value *vp);
class IonBuilder;
class JitcodeGlobalTable;
// ICStubSpace is an abstraction for allocation policy and storage for stub data.
// There are two kinds of stubs: optimized stubs and fallback stubs (the latter
@ -232,6 +233,9 @@ class JitRuntime
// their callee.
js::Value ionReturnOverride_;
// Global table of jitcode native address => bytecode address mappings.
JitcodeGlobalTable *jitcodeGlobalTable_;
private:
JitCode *generateExceptionTailStub(JSContext *cx);
JitCode *generateBailoutTailStub(JSContext *cx);
@ -381,6 +385,23 @@ class JitRuntime
JS_ASSERT(!v.isMagic());
ionReturnOverride_ = v;
}
bool hasJitcodeGlobalTable() const {
return jitcodeGlobalTable_ != nullptr;
}
JitcodeGlobalTable *getJitcodeGlobalTable() {
JS_ASSERT(hasJitcodeGlobalTable());
return jitcodeGlobalTable_;
}
bool isNativeToBytecodeMapEnabled(JSRuntime *rt) {
#ifdef DEBUG
return true;
#else // DEBUG
return rt->spsProfiler.enabled();
#endif // DEBUG
}
};
class JitZone

View File

@ -258,6 +258,12 @@ class JitFrameIterator
void dump() const;
inline BaselineFrame *baselineFrame() const;
#ifdef DEBUG
bool verifyReturnAddressUsingNativeToBytecodeMap();
#else
inline bool verifyReturnAddressUsingNativeToBytecodeMap() { return true; }
#endif
};
class IonJSFrameLayout;

702
js/src/jit/JitcodeMap.cpp Normal file
View File

@ -0,0 +1,702 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/JitcodeMap.h"
#include "mozilla/DebugOnly.h"
#include "jit/BaselineJIT.h"
#include "jit/IonSpewer.h"
#include "js/Vector.h"
namespace js {
namespace jit {
// Reconstruct the inline call stack for a native address inside this Ion
// compilation.  Appends one BytecodeLocation per frame (innermost first) to
// |results| and reports the inline depth through |depth|.  Returns false only
// on OOM while appending to |results|.
bool
JitcodeGlobalEntry::IonEntry::callStackAtAddr(JSRuntime *rt, void *ptr,
                                              BytecodeLocationVector &results,
                                              uint32_t *depth) const
{
    JS_ASSERT(containsPointer(ptr));
    // Translate the absolute pointer into an offset from the start of the
    // compiled code, then find the region covering that offset.
    uint32_t ptrOffset = reinterpret_cast<uint8_t *>(ptr) -
                         reinterpret_cast<uint8_t *>(nativeStartAddr());
    uint32_t regionIdx = regionTable()->findRegionEntry(ptrOffset);
    JS_ASSERT(regionIdx < regionTable()->numRegions());
    JitcodeRegionEntry region = regionTable()->regionEntry(regionIdx);
    *depth = region.scriptDepth();

    JitcodeRegionEntry::ScriptPcIterator locationIter = region.scriptPcIterator();
    JS_ASSERT(locationIter.hasMore());
    bool first = true;
    while (locationIter.hasMore()) {
        uint32_t scriptIdx, pcOffset;

        locationIter.readNext(&scriptIdx, &pcOffset);
        // For the first entry pushed (innermost frame), the pcOffset is obtained
        // from the delta-run encodings.  Outer frames use the pcOffset stored
        // directly in the script/pc stack.
        if (first) {
            pcOffset = region.findPcOffset(ptrOffset, pcOffset);
            first = false;
        }

        JSScript *script = getScript(scriptIdx);
        jsbytecode *pc = script->offsetToPC(pcOffset);
        if (!results.append(BytecodeLocation(script, pc)))
            return false;
    }

    return true;
}
// Release the heap data owned by this entry: the compacted region table
// payload and, for multi-script entries, the script list allocation.
void
JitcodeGlobalEntry::IonEntry::destroy()
{
    // The region table is stored at the tail of the compacted data,
    // which means the start of the region table is a pointer to
    // the _middle_ of the memory space allocated for it.
    //
    // When freeing it, obtain the payload start pointer first.
    if (regionTable_)
        js_free((void*) (regionTable_->payloadStart()));
    regionTable_ = nullptr;

    // Single tag is just pointer-to-jsscript, no memory to free.
    ScriptListTag tag = scriptListTag();
    if (tag > Single)
        js_free(scriptListPointer());
    scriptList_ = 0;
}
// Map a native address within this baseline compilation back to its script/pc
// location.  Baseline entries always report a depth of 1 (a single frame).
// Returns false only on OOM while appending to |results|.
bool
JitcodeGlobalEntry::BaselineEntry::callStackAtAddr(JSRuntime *rt, void *ptr,
                                                   BytecodeLocationVector &results,
                                                   uint32_t *depth) const
{
    JS_ASSERT(containsPointer(ptr));
    JS_ASSERT(script_->hasBaselineScript());

    // Baseline keeps its own native=>pc table; delegate the address lookup.
    jsbytecode *pc = script_->baselineScript()->pcForNativeAddress(script_, (uint8_t*) ptr);
    if (!results.append(BytecodeLocation(script_, pc)))
        return false;

    *depth = 1;

    return true;
}
// IC stub code has no bytecode mapping of its own: forward the query to the
// Ion entry covering the stub's rejoin address in the mainline jitcode.
bool
JitcodeGlobalEntry::IonCacheEntry::callStackAtAddr(JSRuntime *rt, void *ptr,
                                                   BytecodeLocationVector &results,
                                                   uint32_t *depth) const
{
    JS_ASSERT(containsPointer(ptr));

    // There must exist an entry for the rejoin addr if this entry exists.
    JitRuntime *jitrt = rt->jitRuntime();
    JitcodeGlobalEntry entry;
    jitrt->getJitcodeGlobalTable()->lookupInfallible(rejoinAddr(), &entry);
    JS_ASSERT(entry.isIon());

    return entry.callStackAtAddr(rt, rejoinAddr(), results, depth);
}
// Three-way comparison of two raw addresses: negative if |a| precedes |b|,
// positive if |a| follows |b|, zero if they are equal.
static int ComparePointers(const void *a, const void *b) {
    const uint8_t *lhs = reinterpret_cast<const uint8_t *>(a);
    const uint8_t *rhs = reinterpret_cast<const uint8_t *>(b);
    if (lhs == rhs)
        return 0;
    return (lhs < rhs) ? -1 : 1;
}
// Comparator for the global table's ordered tree: entries are ordered by the
// start address of their native code range.
/* static */ int
JitcodeGlobalEntry::compare(const JitcodeGlobalEntry &ent1, const JitcodeGlobalEntry &ent2)
{
    // Both parts of compare cannot be a query.
    JS_ASSERT(!(ent1.isQuery() && ent2.isQuery()));

    // Ensure no overlaps for non-query lookups: real entries in the tree
    // cover disjoint native ranges.
    JS_ASSERT_IF(!ent1.isQuery() && !ent2.isQuery(), !ent1.overlapsWith(ent2));

    return ComparePointers(ent1.nativeStartAddr(), ent2.nativeStartAddr());
}
// Search the table for the entry whose native code range contains |ptr|.
// On success the entry is copied into |*result| and true is returned.
bool
JitcodeGlobalTable::lookup(void *ptr, JitcodeGlobalEntry *result)
{
    JS_ASSERT(result);

    // Probe the tree with a query entry wrapping the raw pointer.
    return tree_.contains(JitcodeGlobalEntry::MakeQuery(ptr), result);
}
void
JitcodeGlobalTable::lookupInfallible(void *ptr, JitcodeGlobalEntry *result)
{
mozilla::DebugOnly<bool> success = lookup(ptr, result);
JS_ASSERT(success);
}
// Insert a new native-range entry into the table.  Returns false on OOM.
bool
JitcodeGlobalTable::addEntry(const JitcodeGlobalEntry &entry)
{
    // Should only add Main entries for now; query entries are never stored.
    JS_ASSERT(entry.isIon() || entry.isBaseline() || entry.isIonCache());
    return tree_.insert(entry);
}
void
JitcodeGlobalTable::removeEntry(void *startAddr)
{
JitcodeGlobalEntry query = JitcodeGlobalEntry::MakeQuery(startAddr);
tree_.remove(query);
}
// Serialize a region header: the region's starting native offset
// (variable-length) followed by the script depth (one byte).  The byte
// layout must stay in sync with ReadHead.
/* static */ void
JitcodeRegionEntry::WriteHead(CompactBufferWriter &writer,
                              uint32_t nativeOffset, uint8_t scriptDepth)
{
    writer.writeUnsigned(nativeOffset);
    writer.writeByte(scriptDepth);
}
// Decode a region header previously written by WriteHead: native offset
// first, then the one-byte script depth.
/* static */ void
JitcodeRegionEntry::ReadHead(CompactBufferReader &reader,
                             uint32_t *nativeOffset, uint8_t *scriptDepth)
{
    *nativeOffset = reader.readUnsigned();
    *scriptDepth = reader.readByte();
}
// Serialize one (scriptIdx, pcOffset) pair of the script/pc stack, both as
// variable-length unsigned values.  Must mirror ReadScriptPc.
/* static */ void
JitcodeRegionEntry::WriteScriptPc(CompactBufferWriter &writer,
                                  uint32_t scriptIdx, uint32_t pcOffset)
{
    writer.writeUnsigned(scriptIdx);
    writer.writeUnsigned(pcOffset);
}
// Decode one (scriptIdx, pcOffset) pair previously written by WriteScriptPc.
/* static */ void
JitcodeRegionEntry::ReadScriptPc(CompactBufferReader &reader,
                                 uint32_t *scriptIdx, uint32_t *pcOffset)
{
    *scriptIdx = reader.readUnsigned();
    *pcOffset = reader.readUnsigned();
}
// Encode one (nativeDelta, pcDelta) step of a delta run, choosing the
// smallest of four formats (1 to 4 bytes, little-endian).  The low bits of
// the first byte tag the format; ReadDelta dispatches on them and must
// decode symmetrically.  Negative pcDeltas only fit the 3- and 4-byte forms.
/* static */ void
JitcodeRegionEntry::WriteDelta(CompactBufferWriter &writer,
                               uint32_t nativeDelta, int32_t pcDelta)
{
    if (pcDelta >= 0) {
        // 1 and 2-byte formats possible.

        //  NNNN-BBB0
        if (pcDelta <= ENC1_PC_DELTA_MAX && nativeDelta <= ENC1_NATIVE_DELTA_MAX) {
            uint8_t encVal = ENC1_MASK_VAL | (pcDelta << ENC1_PC_DELTA_SHIFT) |
                             (nativeDelta << ENC1_NATIVE_DELTA_SHIFT);
            writer.writeByte(encVal);
            return;
        }

        //  NNNN-NNNN BBBB-BB01
        if (pcDelta <= ENC2_PC_DELTA_MAX && nativeDelta <= ENC2_NATIVE_DELTA_MAX) {
            uint16_t encVal = ENC2_MASK_VAL | (pcDelta << ENC2_PC_DELTA_SHIFT) |
                              (nativeDelta << ENC2_NATIVE_DELTA_SHIFT);
            writer.writeByte(encVal & 0xff);
            writer.writeByte((encVal >> 8) & 0xff);
            return;
        }
    }

    //  NNNN-NNNN NNNB-BBBB BBBB-B011
    if (pcDelta >= ENC3_PC_DELTA_MIN && pcDelta <= ENC3_PC_DELTA_MAX &&
        nativeDelta <= ENC3_NATIVE_DELTA_MAX)
    {
        uint32_t encVal = ENC3_MASK_VAL |
                          ((pcDelta << ENC3_PC_DELTA_SHIFT) & ENC3_PC_DELTA_MASK) |
                          (nativeDelta << ENC3_NATIVE_DELTA_SHIFT);
        writer.writeByte(encVal & 0xff);
        writer.writeByte((encVal >> 8) & 0xff);
        writer.writeByte((encVal >> 16) & 0xff);
        return;
    }

    //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
    if (pcDelta >= ENC4_PC_DELTA_MIN && pcDelta <= ENC4_PC_DELTA_MAX &&
        nativeDelta <= ENC4_NATIVE_DELTA_MAX)
    {
        uint32_t encVal = ENC4_MASK_VAL |
                          ((pcDelta << ENC4_PC_DELTA_SHIFT) & ENC4_PC_DELTA_MASK) |
                          (nativeDelta << ENC4_NATIVE_DELTA_SHIFT);
        writer.writeByte(encVal & 0xff);
        writer.writeByte((encVal >> 8) & 0xff);
        writer.writeByte((encVal >> 16) & 0xff);
        writer.writeByte((encVal >> 24) & 0xff);
        return;
    }

    // Should never get here.
    MOZ_CRASH("pcDelta/nativeDelta values are too large to encode.");
}
// Decode one delta pair written by WriteDelta, dispatching on the tag bits
// of the first byte.  The 3- and 4-byte forms carry signed pcDeltas, which
// are sign-extended by hand after extraction.
/* static */ void
JitcodeRegionEntry::ReadDelta(CompactBufferReader &reader,
                              uint32_t *nativeDelta, int32_t *pcDelta)
{
    // NB:
    // It's possible to get nativeDeltas with value 0 in two cases:
    //
    // 1. The last region's run.  This is because the region table's start
    // must be 4-byte aligned, and we must insert padding bytes to align the
    // payload section before emitting the table.
    //
    // 2. A zero-offset nativeDelta with a negative pcDelta.
    //
    // So if nativeDelta is zero, then pcDelta must be <= 0.

    //  NNNN-BBB0
    const uint32_t firstByte = reader.readByte();
    if ((firstByte & ENC1_MASK) == ENC1_MASK_VAL) {
        uint32_t encVal = firstByte;
        *nativeDelta = encVal >> ENC1_NATIVE_DELTA_SHIFT;
        *pcDelta = (encVal & ENC1_PC_DELTA_MASK) >> ENC1_PC_DELTA_SHIFT;
        JS_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
        return;
    }

    //  NNNN-NNNN BBBB-BB01
    const uint32_t secondByte = reader.readByte();
    if ((firstByte & ENC2_MASK) == ENC2_MASK_VAL) {
        uint32_t encVal = firstByte | secondByte << 8;
        *nativeDelta = encVal >> ENC2_NATIVE_DELTA_SHIFT;
        *pcDelta = (encVal & ENC2_PC_DELTA_MASK) >> ENC2_PC_DELTA_SHIFT;
        JS_ASSERT(*pcDelta != 0);
        JS_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
        return;
    }

    //  NNNN-NNNN NNNB-BBBB BBBB-B011
    const uint32_t thirdByte = reader.readByte();
    if ((firstByte & ENC3_MASK) == ENC3_MASK_VAL) {
        uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16;
        *nativeDelta = encVal >> ENC3_NATIVE_DELTA_SHIFT;

        uint32_t pcDeltaU = (encVal & ENC3_PC_DELTA_MASK) >> ENC3_PC_DELTA_SHIFT;
        // Fix sign if necessary: values above the positive max carry the
        // sign bit of the packed field.
        if (pcDeltaU > ENC3_PC_DELTA_MAX)
            pcDeltaU |= ~ENC3_PC_DELTA_MAX;
        *pcDelta = pcDeltaU;

        JS_ASSERT(*pcDelta != 0);
        JS_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
        return;
    }

    //  NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
    JS_ASSERT((firstByte & ENC4_MASK) == ENC4_MASK_VAL);
    const uint32_t fourthByte = reader.readByte();
    uint32_t encVal = firstByte | secondByte << 8 | thirdByte << 16 | fourthByte << 24;
    *nativeDelta = encVal >> ENC4_NATIVE_DELTA_SHIFT;

    uint32_t pcDeltaU = (encVal & ENC4_PC_DELTA_MASK) >> ENC4_PC_DELTA_SHIFT;
    // fix sign if necessary
    if (pcDeltaU > ENC4_PC_DELTA_MAX)
        pcDeltaU |= ~ENC4_PC_DELTA_MAX;
    *pcDelta = pcDeltaU;

    JS_ASSERT(*pcDelta != 0);
    JS_ASSERT_IF(*nativeDelta == 0, *pcDelta <= 0);
}
// Return how many consecutive NativeToBytecode entries, starting at |entry|,
// can be folded into a single delta-run region: they must share the same
// inline site (tree), have deltas small enough to encode, and number at most
// MAX_RUN_LENGTH.
/* static */ uint32_t
JitcodeRegionEntry::ExpectedRunLength(const CodeGeneratorShared::NativeToBytecode *entry,
                                      const CodeGeneratorShared::NativeToBytecode *end)
{
    JS_ASSERT(entry < end);

    // We always use the first entry, so runLength starts at 1
    uint32_t runLength = 1;

    uint32_t curNativeOffset = entry->nativeOffset.offset();
    uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

    for (auto nextEntry = entry + 1; nextEntry != end; nextEntry += 1) {
        // If the next run moves to a different inline site, stop the run.
        if (nextEntry->tree != entry->tree)
            break;

        uint32_t nextNativeOffset = nextEntry->nativeOffset.offset();
        uint32_t nextBytecodeOffset = nextEntry->tree->script()->pcToOffset(nextEntry->pc);
        JS_ASSERT(nextNativeOffset >= curNativeOffset);

        uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
        int32_t bytecodeDelta = int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);

        // If deltas are too large (very unlikely), stop the run.
        if (!IsDeltaEncodeable(nativeDelta, bytecodeDelta))
            break;

        runLength++;

        // If the run has grown to its maximum length, stop the run.
        if (runLength == MAX_RUN_LENGTH)
            break;

        curNativeOffset = nextNativeOffset;
        curBytecodeOffset = nextBytecodeOffset;
    }

    return runLength;
}
// Debug-only helper that hex-dumps each chunk of bytes appended to a
// CompactBufferWriter between successive spewAndAdvance() calls.  In
// non-DEBUG builds it compiles away to a no-op.
struct JitcodeMapBufferWriteSpewer
{
#ifdef DEBUG
    CompactBufferWriter *writer;
    uint32_t startPos;

    // Maximum number of bytes rendered per spew line; longer chunks are
    // truncated to fit the on-stack text buffer.
    static const uint32_t DumpMaxBytes = 50;

    JitcodeMapBufferWriteSpewer(CompactBufferWriter &w)
      : writer(&w), startPos(writer->length())
    {}

    void spewAndAdvance(const char *name) {
        uint32_t curPos = writer->length();
        const uint8_t *start = writer->buffer() + startPos;
        const uint8_t *end = writer->buffer() + curPos;
        const char *MAP = "0123456789ABCDEF";

        uint32_t bytes = end - start;
        // Clamp the byte count to DumpMaxBytes: the loop below writes three
        // chars per byte into |buffer|, so an unclamped count would overrun
        // the stack buffer.
        if (bytes > DumpMaxBytes)
            bytes = DumpMaxBytes;

        char buffer[DumpMaxBytes * 3];
        for (uint32_t i = 0; i < bytes; i++) {
            buffer[i*3] = MAP[(start[i] >> 4) & 0xf];
            buffer[i*3 + 1] = MAP[(start[i] >> 0) & 0xf];
            buffer[i*3 + 2] = ' ';
        }

        // NUL-terminate over the final trailing space.  Guard the empty-chunk
        // case, which would otherwise index buffer[-1].
        if (bytes > 0)
            buffer[bytes*3 - 1] = '\0';
        else
            buffer[0] = '\0';

        IonSpew(IonSpew_Profiling, "%s@%d[%d bytes] - %s", name, int(startPos), int(bytes), buffer);

        // Move to the end of the current buffer.
        startPos = writer->length();
    }
#else // !DEBUG
    JitcodeMapBufferWriteSpewer(CompactBufferWriter &w) {}
    void spewAndAdvance(const char *name) {}
#endif // DEBUG
};
// Write a run, starting at the given NativeToBytecode entry, into the given
// buffer writer.  A run consists of a region header (native offset + script
// depth), the script/pc pair stack for the innermost frame and its callers,
// then one delta-encoded step per remaining entry.  Returns false on writer
// OOM.
/* static */ bool
JitcodeRegionEntry::WriteRun(CompactBufferWriter &writer,
                             JSScript **scriptList, uint32_t scriptListSize,
                             uint32_t runLength, const CodeGeneratorShared::NativeToBytecode *entry)
{
    JS_ASSERT(runLength > 0);
    JS_ASSERT(runLength <= MAX_RUN_LENGTH);

    // Calculate script depth.
    JS_ASSERT(entry->tree->depth() <= 0xff);
    uint8_t scriptDepth = entry->tree->depth();

    uint32_t regionNativeOffset = entry->nativeOffset.offset();

    JitcodeMapBufferWriteSpewer spewer(writer);

    // Write the head info.
    IonSpew(IonSpew_Profiling, "  Head Info: nativeOffset=%d scriptDepth=%d",
            int(regionNativeOffset), int(scriptDepth));
    WriteHead(writer, regionNativeOffset, scriptDepth);
    spewer.spewAndAdvance("  ");

    // Write each script/pc pair, walking outward from the innermost inline
    // frame to the outermost caller.
    {
        InlineScriptTree *curTree = entry->tree;
        jsbytecode *curPc = entry->pc;
        for (uint8_t i = 0; i < scriptDepth; i++) {
            // Find the index of the script within the list.
            // NB: scriptList is guaranteed to contain curTree->script()
            uint32_t scriptIdx = 0;
            for (; scriptIdx < scriptListSize; scriptIdx++) {
                if (scriptList[scriptIdx] == curTree->script())
                    break;
            }
            JS_ASSERT(scriptIdx < scriptListSize);

            uint32_t pcOffset = curTree->script()->pcToOffset(curPc);

            IonSpew(IonSpew_Profiling, "  Script/PC %d: scriptIdx=%d pcOffset=%d",
                    int(i), int(scriptIdx), int(pcOffset));
            WriteScriptPc(writer, scriptIdx, pcOffset);
            spewer.spewAndAdvance("  ");

            JS_ASSERT_IF(i < scriptDepth - 1, curTree->hasCaller());
            curPc = curTree->callerPc();
            curTree = curTree->caller();
        }
    }

    // Start writing runs.
    uint32_t curNativeOffset = entry->nativeOffset.offset();
    uint32_t curBytecodeOffset = entry->tree->script()->pcToOffset(entry->pc);

    IonSpew(IonSpew_Profiling, "  Writing Delta Run from nativeOffset=%d bytecodeOffset=%d",
            int(curNativeOffset), int(curBytecodeOffset));

    // Skip first entry because it is implicit in the header.  Start at subsequent entry.
    for (uint32_t i = 1; i < runLength; i++) {
        JS_ASSERT(entry[i].tree == entry->tree);

        uint32_t nextNativeOffset = entry[i].nativeOffset.offset();
        uint32_t nextBytecodeOffset = entry[i].tree->script()->pcToOffset(entry[i].pc);
        JS_ASSERT(nextNativeOffset >= curNativeOffset);

        uint32_t nativeDelta = nextNativeOffset - curNativeOffset;
        int32_t bytecodeDelta = int32_t(nextBytecodeOffset) - int32_t(curBytecodeOffset);
        JS_ASSERT(IsDeltaEncodeable(nativeDelta, bytecodeDelta));

        IonSpew(IonSpew_Profiling, "  RunEntry native: %d-%d [%d] bytecode: %d-%d [%d]",
                int(curNativeOffset), int(nextNativeOffset), int(nativeDelta),
                int(curBytecodeOffset), int(nextBytecodeOffset), int(bytecodeDelta));
        WriteDelta(writer, nativeDelta, bytecodeDelta);

        // Spew the bytecode in these ranges.
        if (curBytecodeOffset < nextBytecodeOffset) {
            IonSpewStart(IonSpew_Profiling, "  OPS: ");
            uint32_t curBc = curBytecodeOffset;
            while (curBc < nextBytecodeOffset) {
                jsbytecode *pc = entry[i].tree->script()->offsetToPC(curBc);
                JSOp op = JSOp(*pc);
                IonSpewCont(IonSpew_Profiling, "%s ", js_CodeName[op]);
                curBc += GetBytecodeLength(pc);
            }
            IonSpewFin(IonSpew_Profiling);
        }
        spewer.spewAndAdvance("  ");

        curNativeOffset = nextNativeOffset;
        curBytecodeOffset = nextBytecodeOffset;
    }

    if (writer.oom())
        return false;

    return true;
}
// Decode this region's header in place, caching interior pointers to the
// script/pc stack and to the delta run that follows it within the compacted
// data.
void
JitcodeRegionEntry::unpack()
{
    CompactBufferReader reader(data_, end_);
    ReadHead(reader, &nativeOffset_, &scriptDepth_);
    JS_ASSERT(scriptDepth_ > 0);

    scriptPcStack_ = reader.currentPosition();
    // Skip past script/pc stack; only its start position is cached.
    for (unsigned i = 0; i < scriptDepth_; i++) {
        uint32_t scriptIdx, pcOffset;
        ReadScriptPc(reader, &scriptIdx, &pcOffset);
    }

    deltaRun_ = reader.currentPosition();
}
// Walk this region's delta run, starting from (nativeOffset(), startPcOffset),
// and return the bytecode pc offset in effect at |queryNativeOffset|.
uint32_t
JitcodeRegionEntry::findPcOffset(uint32_t queryNativeOffset, uint32_t startPcOffset) const
{
    DeltaIterator iter = deltaIterator();
    uint32_t curNativeOffset = nativeOffset();
    uint32_t curPcOffset = startPcOffset;
    while (iter.hasMore()) {
        uint32_t nativeDelta;
        int32_t pcDelta;
        iter.readNext(&nativeDelta, &pcDelta);

        // The start address of the next delta-run entry is counted towards
        // the current delta-run entry, because return addresses should
        // associate with the bytecode op prior (the call) not the op after.
        if (queryNativeOffset <= curNativeOffset + nativeDelta)
            break;
        curNativeOffset += nativeDelta;
        curPcOffset += pcDelta;
    }
    return curPcOffset;
}
// Initialize |out| for jitcode |code| covering |numScripts| scripts, picking
// the most compact script-list representation: a direct pointer for a single
// script, a compact multi-script form for small counts (presumably
// tag-packed; see IonEntry::init — TODO confirm), or a heap-allocated
// SizedScriptList otherwise.  Returns false on OOM.
bool
JitcodeIonTable::makeIonEntry(JSContext *cx, JitCode *code,
                              uint32_t numScripts, JSScript **scripts,
                              JitcodeGlobalEntry::IonEntry &out)
{
    typedef JitcodeGlobalEntry::IonEntry::SizedScriptList SizedScriptList;

    JS_ASSERT(numScripts > 0);

    if (numScripts == 1) {
        out.init(code->raw(), code->raw() + code->instructionsSize(), scripts[0], this);
        return true;
    }

    if (numScripts < uint32_t(JitcodeGlobalEntry::IonEntry::Multi)) {
        out.init(code->raw(), code->raw() + code->instructionsSize(), numScripts, scripts, this);
        return true;
    }

    // Create SizedScriptList
    void *mem = cx->malloc_(SizedScriptList::AllocSizeFor(numScripts));
    if (!mem)
        return false;
    SizedScriptList *scriptList = new (mem) SizedScriptList(numScripts, scripts);
    out.init(code->raw(), code->raw() + code->instructionsSize(), scriptList, this);
    return true;
}
// Return the index of the region covering |nativeOffset|.  Regions are open
// at their starting offset and closed at their ending offset, so a query
// landing exactly on a region boundary resolves to the earlier region
// (return addresses must map to the call's own bytecode).  Uses a linear
// scan for short tables and a binary search otherwise.
uint32_t
JitcodeIonTable::findRegionEntry(uint32_t nativeOffset) const
{
    static const uint32_t LINEAR_SEARCH_THRESHOLD = 8;
    uint32_t regions = numRegions();
    JS_ASSERT(regions > 0);

    // For small region lists, just search linearly.
    if (regions <= LINEAR_SEARCH_THRESHOLD) {
        JitcodeRegionEntry previousEntry = regionEntry(0);
        for (uint32_t i = 1; i < regions; i++) {
            JitcodeRegionEntry nextEntry = regionEntry(i);
            JS_ASSERT(nextEntry.nativeOffset() >= previousEntry.nativeOffset());

            // See note in binary-search code below about why we use '<=' here instead of
            // '<'.  Short explanation: regions are closed at their ending addresses,
            // and open at their starting addresses.
            if (nativeOffset <= nextEntry.nativeOffset())
                return i-1;

            previousEntry = nextEntry;
        }
        // If nothing found, assume it falls within last region.
        return regions - 1;
    }

    // For larger ones, binary search the region table.
    uint32_t idx = 0;
    uint32_t count = regions;
    while (count > 1) {
        uint32_t step = count/2;
        uint32_t mid = idx + step;
        JitcodeRegionEntry midEntry = regionEntry(mid);

        // A region memory range is closed at its ending address, not starting
        // address.  This is because the return address for calls must associate
        // with the call's bytecode PC, not the PC of the bytecode operator after
        // the call.
        //
        // So a query is < an entry if the query nativeOffset is <= the start address
        // of the entry, and a query is >= an entry if the query nativeOffset is > the
        // start address of an entry.
        if (nativeOffset <= midEntry.nativeOffset()) {
            // Target entry is below midEntry.
            count = step;
        } else { // if (nativeOffset > midEntry.nativeOffset())
            // Target entry is at midEntry or above.
            idx = mid;
            count -= step;
        }
    }
    return idx;
}
// Serialize the native-to-bytecode map for a single Ion compile into |writer|.
// The output layout is: a sequence of variable-length encoded "runs" (the
// payload), optional zero padding to a 4-byte boundary, then the fixed-size
// region table (numRegions followed by one backwards offset per run).
// On success, *tableOffsetOut receives the byte offset of the region table
// within the buffer and *numRegionsOut the number of runs written.
// Returns false on OOM.
/* static */ bool
JitcodeIonTable::WriteIonTable(CompactBufferWriter &writer,
                               JSScript **scriptList, uint32_t scriptListSize,
                               const CodeGeneratorShared::NativeToBytecode *start,
                               const CodeGeneratorShared::NativeToBytecode *end,
                               uint32_t *tableOffsetOut, uint32_t *numRegionsOut)
{
    JS_ASSERT(tableOffsetOut != nullptr);
    JS_ASSERT(numRegionsOut != nullptr);
    JS_ASSERT(writer.length() == 0);
    JS_ASSERT(scriptListSize > 0);

    IonSpew(IonSpew_Profiling, "Writing native to bytecode map for %s:%d (%d entries)",
            scriptList[0]->filename(), scriptList[0]->lineno(),
            int(end - start));

    IonSpew(IonSpew_Profiling, "  ScriptList of size %d", int(scriptListSize));
    for (uint32_t i = 0; i < scriptListSize; i++) {
        IonSpew(IonSpew_Profiling, "  Script %d - %s:%d",
                int(i), scriptList[i]->filename(), int(scriptList[i]->lineno()));
    }

    // Write out runs first. Keep a vector tracking the positive offsets from payload
    // start to the run.
    const CodeGeneratorShared::NativeToBytecode *curEntry = start;
    js::Vector<uint32_t, 32, SystemAllocPolicy> runOffsets;

    while (curEntry != end) {
        // Calculate the length of the next run.
        uint32_t runLength = JitcodeRegionEntry::ExpectedRunLength(curEntry, end);
        JS_ASSERT(runLength > 0);
        JS_ASSERT(runLength <= (end - curEntry));
        IonSpew(IonSpew_Profiling, "  Run at entry %d, length %d, buffer offset %d",
                int(curEntry - start), int(runLength), int(writer.length()));

        // Store the offset of the run.
        if (!runOffsets.append(writer.length()))
            return false;

        // Encode the run.
        if (!JitcodeRegionEntry::WriteRun(writer, scriptList, scriptListSize, runLength, curEntry))
            return false;

        curEntry += runLength;
    }

    // Done encoding regions. About to start table. Ensure we are aligned to 4 bytes
    // since table is composed of uint32_t values.
    // Note: padding of exactly sizeof(uint32_t) means we are already aligned,
    // so no padding bytes are needed in that case.
    uint32_t padding = sizeof(uint32_t) - (writer.length() % sizeof(uint32_t));
    if (padding == sizeof(uint32_t))
        padding = 0;
    IonSpew(IonSpew_Profiling, "  Padding %d bytes after run @%d",
            int(padding), int(writer.length()));
    for (uint32_t i = 0; i < padding; i++)
        writer.writeByte(0);

    // Now at start of table.
    uint32_t tableOffset = writer.length();

    // The table being written at this point will be accessed directly via uint32_t
    // pointers, so all writes below use native endianness.

    // Write out numRegions
    IonSpew(IonSpew_Profiling, "  Writing numRuns=%d", int(runOffsets.length()));
    writer.writeNativeEndianUint32_t(runOffsets.length());

    // Write out region offset table. The offsets in |runOffsets| are currently forward
    // offsets from the beginning of the buffer. We convert them to backwards offsets
    // from the start of the table before writing them into their table entries.
    for (uint32_t i = 0; i < runOffsets.length(); i++) {
        IonSpew(IonSpew_Profiling, "  Run %d offset=%d backOffset=%d @%d",
                int(i), int(runOffsets[i]), int(tableOffset - runOffsets[i]), int(writer.length()));
        writer.writeNativeEndianUint32_t(tableOffset - runOffsets[i]);
    }

    // Check for OOM only once at the end; CompactBufferWriter latches failures.
    if (writer.oom())
        return false;

    *tableOffsetOut = tableOffset;
    *numRegionsOut = runOffsets.length();
    return true;
}
} // namespace jit
} // namespace js

865
js/src/jit/JitcodeMap.h Normal file
View File

@ -0,0 +1,865 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_JitcodeMap_h
#define jit_JitcodeMap_h
#include "ds/SplayTree.h"
#include "jit/CompactBuffer.h"
#include "jit/CompileInfo.h"
#include "jit/shared/CodeGenerator-shared.h"
namespace js {
namespace jit {
/*
* The Ion jitcode map implements tables to allow mapping from addresses in ion jitcode
* to the list of (JSScript *, jsbytecode *) pairs that are implicitly active in the frame at
* that point in the native code.
*
* To represent this information efficiently, a multi-level table is used.
*
 * At the top level, a global splay-tree of JitcodeGlobalEntry describes the mapping for
* each individual IonCode script generated by compiles. The entries are ordered by their
* nativeStartAddr.
*
* Every entry in the table is of fixed size, but there are different entry types,
* distinguished by the kind field.
*/
class JitcodeIonTable;
class JitcodeRegionEntry;
// A single entry in the global jitcode table. Each entry maps a native code
// address range [nativeStartAddr, nativeEndAddr) to enough information to
// recover the (JSScript *, jsbytecode *) pairs active at any address within
// that range. The entry is a fixed-size tagged union over the per-jitcode-kind
// payload structs (IonEntry, BaselineEntry, IonCacheEntry, QueryEntry).
class JitcodeGlobalEntry
{
  public:
    // Discriminator for the union below. INVALID marks a default-initialized
    // entry; Query is only used for tree lookups, never stored.
    enum Kind {
        INVALID = 0,
        Ion,
        Baseline,
        IonCache,
        Query,
        LIMIT
    };
    // Kind values must fit alongside other uses that assume 3 bits.
    JS_STATIC_ASSERT(LIMIT <= 8);

    // One (script, pc) pair in an inline call stack.
    struct BytecodeLocation {
        JSScript *script;
        jsbytecode *pc;
        BytecodeLocation(JSScript *script, jsbytecode *pc) : script(script), pc(pc) {}
    };
    typedef Vector<BytecodeLocation, 0, SystemAllocPolicy> BytecodeLocationVector;

    // Common prefix of every entry variant: the covered native address range
    // and the kind tag. All union members start with a BaseEntry so the tag
    // can be read through |base_| regardless of the active variant.
    struct BaseEntry
    {
        void *nativeStartAddr_;
        void *nativeEndAddr_;
        Kind kind_;

        // Reset to the INVALID state.
        void init() {
            nativeStartAddr_ = nullptr;
            nativeEndAddr_ = nullptr;
            kind_ = INVALID;
        }

        void init(Kind kind, void *nativeStartAddr, void *nativeEndAddr) {
            JS_ASSERT(nativeStartAddr);
            JS_ASSERT(nativeEndAddr);
            JS_ASSERT(kind > INVALID && kind < LIMIT);
            nativeStartAddr_ = nativeStartAddr;
            nativeEndAddr_ = nativeEndAddr;
            kind_ = kind;
        }

        Kind kind() const {
            return kind_;
        }
        void *nativeStartAddr() const {
            return nativeStartAddr_;
        }
        void *nativeEndAddr() const {
            return nativeEndAddr_;
        }

        bool startsBelowPointer(void *ptr) const {
            return ((uint8_t *)nativeStartAddr()) <= ((uint8_t *) ptr);
        }
        bool endsAbovePointer(void *ptr) const {
            return ((uint8_t *)nativeEndAddr()) > ((uint8_t *) ptr);
        }
        // Note the asymmetric comparisons above: the range is treated as
        // [start, end) for containment queries.
        bool containsPointer(void *ptr) const {
            return startsBelowPointer(ptr) && endsAbovePointer(ptr);
        }
    };

    // Entry for Ion main-line jitcode. Stores the list of scripts inlined
    // into the compile (as a tagged pointer) and the packed region table
    // produced by JitcodeIonTable::WriteIonTable.
    struct IonEntry : public BaseEntry
    {
        // Tagged pointer: the low 3 bits hold a ScriptListTag. Tag Single (0)
        // means the pointer is a lone JSScript *; tags 2..6 mean it points at
        // a raw array of that many JSScript *s; tag Multi (7) means it points
        // at a heap-allocated SizedScriptList.
        uintptr_t scriptList_;

        // regionTable_ points to the start of the region table within the
        // packed map for the compile represented by this entry. Since the
        // region table occurs at the tail of the memory region, this pointer
        // points somewhere inside the region memory space, and not to the start
        // of the memory space.
        JitcodeIonTable *regionTable_;

        static const unsigned LowBits = 3;
        static const uintptr_t LowMask = (uintptr_t(1) << LowBits) - 1;

        enum ScriptListTag {
            Single = 0,
            Multi = 7
        };

        // Out-of-line script list used when more than 6 scripts are inlined.
        // |scripts| is a trailing flexible array.
        struct SizedScriptList {
            uint32_t size;
            JSScript *scripts[0];
            SizedScriptList(uint32_t sz, JSScript **scr) : size(sz) {
                for (uint32_t i = 0; i < size; i++)
                    scripts[i] = scr[i];
            }

            // Bytes to allocate for a list holding |nscripts| scripts.
            static uint32_t AllocSizeFor(uint32_t nscripts) {
                return sizeof(SizedScriptList) + (nscripts * sizeof(JSScript *));
            }
        };

        // Initialize with a single script (tag Single).
        void init(void *nativeStartAddr, void *nativeEndAddr,
                  JSScript *script, JitcodeIonTable *regionTable)
        {
            JS_ASSERT((uintptr_t(script) & LowMask) == 0);
            JS_ASSERT(script);
            JS_ASSERT(regionTable);
            BaseEntry::init(Ion, nativeStartAddr, nativeEndAddr);
            scriptList_ = uintptr_t(script);
            regionTable_ = regionTable;
        }

        // Initialize with a raw script array of 1..6 scripts; the count is
        // stored in the tag bits.
        void init(void *nativeStartAddr, void *nativeEndAddr,
                  unsigned numScripts, JSScript **scripts, JitcodeIonTable *regionTable)
        {
            JS_ASSERT((uintptr_t(scripts) & LowMask) == 0);
            JS_ASSERT(numScripts >= 1);
            JS_ASSERT(numScripts <= 6);
            JS_ASSERT(scripts);
            JS_ASSERT(regionTable);
            BaseEntry::init(Ion, nativeStartAddr, nativeEndAddr);
            scriptList_ = uintptr_t(scripts) | numScripts;
            regionTable_ = regionTable;
        }

        // Initialize with a SizedScriptList (more than 6 scripts, tag Multi).
        void init(void *nativeStartAddr, void *nativeEndAddr,
                  SizedScriptList *scripts, JitcodeIonTable *regionTable)
        {
            JS_ASSERT((uintptr_t(scripts) & LowMask) == 0);
            JS_ASSERT(scripts->size > 6);
            JS_ASSERT(scripts);
            JS_ASSERT(regionTable);
            BaseEntry::init(Ion, nativeStartAddr, nativeEndAddr);
            scriptList_ = uintptr_t(scripts) | uintptr_t(Multi);
            regionTable_ = regionTable;
        }

        ScriptListTag scriptListTag() const {
            return static_cast<ScriptListTag>(scriptList_ & LowMask);
        }
        void *scriptListPointer() const {
            return reinterpret_cast<void *>(scriptList_ & ~LowMask);
        }

        JSScript *singleScript() const {
            JS_ASSERT(scriptListTag() == Single);
            return reinterpret_cast<JSScript *>(scriptListPointer());
        }
        JSScript **rawScriptArray() const {
            JS_ASSERT(scriptListTag() < Multi);
            return reinterpret_cast<JSScript **>(scriptListPointer());
        }
        SizedScriptList *sizedScriptList() const {
            JS_ASSERT(scriptListTag() == Multi);
            return reinterpret_cast<SizedScriptList *>(scriptListPointer());
        }

        // Number of scripts in the list, decoded from the tag.
        unsigned numScripts() const {
            ScriptListTag tag = scriptListTag();
            if (tag == Single)
                return 1;

            if (tag < Multi) {
                JS_ASSERT(int(tag) >= 2);
                return static_cast<unsigned>(tag);
            }

            return sizedScriptList()->size;
        }

        JSScript *getScript(unsigned idx) const {
            JS_ASSERT(idx < numScripts());

            ScriptListTag tag = scriptListTag();

            if (tag == Single)
                return singleScript();

            if (tag < Multi) {
                JS_ASSERT(int(tag) >= 2);
                return rawScriptArray()[idx];
            }

            return sizedScriptList()->scripts[idx];
        }

        // Frees owned script-list memory; defined out of line.
        void destroy();

        JitcodeIonTable *regionTable() const {
            return regionTable_;
        }

        // Index of |script| in this entry's script list, or -1 if absent.
        int scriptIndex(JSScript *script) const {
            unsigned count = numScripts();
            for (unsigned i = 0; i < count; i++) {
                if (getScript(i) == script)
                    return i;
            }
            return -1;
        }

        bool callStackAtAddr(JSRuntime *rt, void *ptr, BytecodeLocationVector &results,
                             uint32_t *depth) const;
    };

    // Entry for Baseline jitcode: a single script, no inlining.
    struct BaselineEntry : public BaseEntry
    {
        JSScript *script_;

        void init(void *nativeStartAddr, void *nativeEndAddr, JSScript *script)
        {
            JS_ASSERT(script != nullptr);
            BaseEntry::init(Baseline, nativeStartAddr, nativeEndAddr);
            script_ = script;
        }

        JSScript *script() const {
            return script_;
        }

        void destroy() {}

        bool callStackAtAddr(JSRuntime *rt, void *ptr, BytecodeLocationVector &results,
                             uint32_t *depth) const;
    };

    // Entry for an IonCache stub: queries are forwarded to the entry covering
    // the stub's rejoin address in the main jitcode.
    struct IonCacheEntry : public BaseEntry
    {
        void *rejoinAddr_;

        void init(void *nativeStartAddr, void *nativeEndAddr, void *rejoinAddr)
        {
            JS_ASSERT(rejoinAddr != nullptr);
            BaseEntry::init(IonCache, nativeStartAddr, nativeEndAddr);
            rejoinAddr_ = rejoinAddr;
        }

        void *rejoinAddr() const {
            return rejoinAddr_;
        }

        void destroy() {}

        bool callStackAtAddr(JSRuntime *rt, void *ptr, BytecodeLocationVector &results,
                             uint32_t *depth) const;
    };

    // QueryEntry is never stored in the table, just used for queries
    // where an instance of JitcodeGlobalEntry is required to do tree
    // lookups.
    struct QueryEntry : public BaseEntry
    {
        void init(void *addr) {
            // A query is a degenerate [addr, addr) range.
            BaseEntry::init(Query, addr, addr);
        }
        uint8_t *addr() const {
            return reinterpret_cast<uint8_t *>(nativeStartAddr());
        }
        void destroy() {}
    };

  private:
    union {
        // Shadowing BaseEntry instance to allow access to base fields
        // and type extraction.
        BaseEntry base_;

        // The most common entry type: describing jitcode generated by
        // Ion main-line code.
        IonEntry ion_;

        // Baseline jitcode.
        BaselineEntry baseline_;

        // IonCache stubs.
        IonCacheEntry ionCache_;

        // When doing queries on the SplayTree for particular addresses,
        // the query addresses are represented using a QueryEntry.
        QueryEntry query_;
    };

  public:
    JitcodeGlobalEntry() {
        base_.init();
    }
    JitcodeGlobalEntry(const IonEntry &ion) {
        ion_ = ion;
    }
    JitcodeGlobalEntry(const BaselineEntry &baseline) {
        baseline_ = baseline;
    }
    JitcodeGlobalEntry(const IonCacheEntry &ionCache) {
        ionCache_ = ionCache;
    }
    JitcodeGlobalEntry(const QueryEntry &query) {
        query_ = query;
    }

    // Build a throwaway Query-kind entry for tree lookups on |ptr|.
    static JitcodeGlobalEntry MakeQuery(void *ptr) {
        QueryEntry query;
        query.init(ptr);
        return JitcodeGlobalEntry(query);
    }

    // Dispatch destruction to the active variant.
    void destroy() {
        switch (kind()) {
          case Ion:
            ionEntry().destroy();
            break;
          case Baseline:
            baselineEntry().destroy();
            break;
          case IonCache:
            ionCacheEntry().destroy();
            break;
          case Query:
            queryEntry().destroy();
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Invalid JitcodeGlobalEntry kind.");
        }
    }

    void *nativeStartAddr() const {
        return base_.nativeStartAddr();
    }
    void *nativeEndAddr() const {
        return base_.nativeEndAddr();
    }

    bool startsBelowPointer(void *ptr) const {
        return base_.startsBelowPointer(ptr);
    }
    bool endsAbovePointer(void *ptr) const {
        return base_.endsAbovePointer(ptr);
    }
    bool containsPointer(void *ptr) const {
        return base_.containsPointer(ptr);
    }

    bool overlapsWith(const JitcodeGlobalEntry &entry) const {
        // Catch full containment of |entry| within |this|, and partial overlaps.
        if (containsPointer(entry.nativeStartAddr()) || containsPointer(entry.nativeEndAddr()))
            return true;

        // Catch full containment of |this| within |entry|.
        if (startsBelowPointer(entry.nativeEndAddr()) && endsAbovePointer(entry.nativeStartAddr()))
            return true;

        return false;
    }

    Kind kind() const {
        return base_.kind();
    }

    bool isIon() const {
        return kind() == Ion;
    }
    bool isBaseline() const {
        return kind() == Baseline;
    }
    bool isIonCache() const {
        return kind() == IonCache;
    }
    bool isQuery() const {
        return kind() == Query;
    }

    // Checked variant accessors; each asserts the active kind.
    IonEntry &ionEntry() {
        JS_ASSERT(isIon());
        return ion_;
    }
    BaselineEntry &baselineEntry() {
        JS_ASSERT(isBaseline());
        return baseline_;
    }
    IonCacheEntry &ionCacheEntry() {
        JS_ASSERT(isIonCache());
        return ionCache_;
    }
    QueryEntry &queryEntry() {
        JS_ASSERT(isQuery());
        return query_;
    }

    const IonEntry &ionEntry() const {
        JS_ASSERT(isIon());
        return ion_;
    }
    const BaselineEntry &baselineEntry() const {
        JS_ASSERT(isBaseline());
        return baseline_;
    }
    const IonCacheEntry &ionCacheEntry() const {
        JS_ASSERT(isIonCache());
        return ionCache_;
    }
    const QueryEntry &queryEntry() const {
        JS_ASSERT(isQuery());
        return query_;
    }

    // Read the inline call stack at a given point in the native code and append into
    // the given vector. Innermost (script,pc) pair will be appended first, and
    // outermost appended last.
    //
    // Returns false on memory failure.
    bool callStackAtAddr(JSRuntime *rt, void *ptr, BytecodeLocationVector &results,
                         uint32_t *depth) const
    {
        switch (kind()) {
          case Ion:
            return ionEntry().callStackAtAddr(rt, ptr, results, depth);
          case Baseline:
            return baselineEntry().callStackAtAddr(rt, ptr, results, depth);
          case IonCache:
            return ionCacheEntry().callStackAtAddr(rt, ptr, results, depth);
          default:
            MOZ_ASSUME_UNREACHABLE("Invalid JitcodeGlobalEntry kind.");
        }
        return false;
    }

    // Figure out the number of the (JSScript *, jsbytecode *) pairs that are active
    // at this location.
    uint32_t lookupInlineCallDepth(void *ptr);

    // Compare two global entries.
    static int compare(const JitcodeGlobalEntry &ent1, const JitcodeGlobalEntry &ent2);
};
/*
* Global table of JitcodeGlobalEntry values sorted by native address range.
*/
// Process-global table mapping native jitcode address ranges to
// JitcodeGlobalEntry values. Entries are kept in a splay tree ordered by
// nativeStartAddr (see JitcodeGlobalEntry::compare); tree nodes are
// allocated out of a private LifoAlloc.
class JitcodeGlobalTable
{
  public:
    typedef SplayTree<JitcodeGlobalEntry, JitcodeGlobalEntry> EntryTree;
    typedef Vector<JitcodeGlobalEntry, 0, SystemAllocPolicy> EntryVector;

  private:
    // Chunk size for the tree-node allocator.
    static const size_t LIFO_CHUNK_SIZE = 16 * 1024;
    LifoAlloc treeAlloc_;
    EntryTree tree_;
    EntryVector entries_;

  public:
    JitcodeGlobalTable() : treeAlloc_(LIFO_CHUNK_SIZE), tree_(&treeAlloc_), entries_() {}
    ~JitcodeGlobalTable() {}

    bool empty() const {
        return tree_.empty();
    }

    // Find the entry whose native range contains |ptr|. |lookup| returns
    // false if no entry covers the address; |lookupInfallible| asserts one does.
    bool lookup(void *ptr, JitcodeGlobalEntry *result);
    void lookupInfallible(void *ptr, JitcodeGlobalEntry *result);

    // Kind-specific insertion helpers; all funnel into the private addEntry.
    bool addEntry(const JitcodeGlobalEntry::IonEntry &entry) {
        return addEntry(JitcodeGlobalEntry(entry));
    }
    bool addEntry(const JitcodeGlobalEntry::BaselineEntry &entry) {
        return addEntry(JitcodeGlobalEntry(entry));
    }
    bool addEntry(const JitcodeGlobalEntry::IonCacheEntry &entry) {
        return addEntry(JitcodeGlobalEntry(entry));
    }

    // Remove the entry whose range begins at |startAddr|.
    void removeEntry(void *startAddr);

  private:
    bool addEntry(const JitcodeGlobalEntry &entry);
};
/*
* Container class for main jitcode table.
* The Region table's memory is structured as follows:
*
* +------------------------------------------------+ |
* | Region 1 Run | |
* |------------------------------------------------| |
* | Region 2 Run | |
* | | |
* | | |
* |------------------------------------------------| |
* | Region 3 Run | |
* | | |
* |------------------------------------------------| |-- Payload
* | | |
* | ... | |
* | | |
* |------------------------------------------------| |
* | Region M Run | |
* | | |
* +================================================+ <- RegionTable pointer points here
 * | uint32_t numRegions = M | |
* +------------------------------------------------+ |
* | Region 1 | |
* | uint32_t entryOffset = size(Payload) | |
* +------------------------------------------------+ |
* | | |-- Table
* | ... | |
* | | |
* +------------------------------------------------+ |
* | Region M | |
* | uint32_t entryOffset | |
* +------------------------------------------------+ |
*
* The region table is composed of two sections: a tail section that contains a table of
 * fixed-size entries containing offsets into the head section, and a head section that
* holds a sequence of variable-sized runs. The table in the tail section serves to
* locate the variable-length encoded structures in the head section.
*
 * The entryOffsets in the table indicate the byte offset to subtract from the regionTable
* pointer to arrive at the encoded region in the payload.
*
*
* Variable-length entries in payload
* ----------------------------------
* The entryOffsets in the region table's fixed-sized entries refer to a location within the
* variable-length payload section. This location contains a compactly encoded "run" of
* mappings.
*
* Each run starts by describing the offset within the native code it starts at, and the
* sequence of (JSScript *, jsbytecode *) pairs active at that site. Following that, there
* are a number of variable-length entries encoding (nativeOffsetDelta, bytecodeOffsetDelta)
* pairs for the run.
*
* VarUint32 nativeOffset;
* - The offset from nativeStartAddr in the global table entry at which
* the jitcode for this region starts.
*
* Uint8_t scriptDepth;
* - The depth of inlined scripts for this region.
*
* List<VarUint32> inlineScriptPcStack;
* - We encode (2 * scriptDepth) VarUint32s here. Each pair of uint32s are taken
* as an index into the scriptList in the global table entry, and a pcOffset
* respectively.
*
* List<NativeAndBytecodeDelta> deltaRun;
* - The rest of the entry is a deltaRun that stores a series of variable-length
* encoded NativeAndBytecodeDelta datums.
*/
// Reader/writer for one variable-length "run" in the packed payload section
// (see the layout comment above). A run records a starting native offset,
// the inline (scriptIdx, pcOffset) stack at that point, and a sequence of
// compactly encoded (nativeDelta, pcDelta) pairs.
class JitcodeRegionEntry
{
  private:
    // Upper bound on entries folded into one run when writing.
    static const unsigned MAX_RUN_LENGTH = 100;

  public:
    static void WriteHead(CompactBufferWriter &writer,
                          uint32_t nativeOffset, uint8_t scriptDepth);
    static void ReadHead(CompactBufferReader &reader,
                         uint32_t *nativeOffset, uint8_t *scriptDepth);

    static void WriteScriptPc(CompactBufferWriter &writer, uint32_t scriptIdx, uint32_t pcOffset);
    static void ReadScriptPc(CompactBufferReader &reader, uint32_t *scriptIdx, uint32_t *pcOffset);

    static void WriteDelta(CompactBufferWriter &writer, uint32_t nativeDelta, int32_t pcDelta);
    static void ReadDelta(CompactBufferReader &reader, uint32_t *nativeDelta, int32_t *pcDelta);

    // Given a pointer into an array of NativeToBytecode (and a pointer to the end of the array),
    // compute the number of entries that would be consume by outputting a run starting
    // at this one.
    static uint32_t ExpectedRunLength(const CodeGeneratorShared::NativeToBytecode *entry,
                                      const CodeGeneratorShared::NativeToBytecode *end);

    // Write a run, starting at the given NativeToBytecode entry, into the given buffer writer.
    static bool WriteRun(CompactBufferWriter &writer,
                         JSScript **scriptList, uint32_t scriptListSize,
                         uint32_t runLength, const CodeGeneratorShared::NativeToBytecode *entry);

    // Delta Run entry formats are encoded little-endian:
    //
    //      byte 0
    //      NNNN-BBB0
    //      Single byte format.  nativeDelta in [0, 15], pcDelta in [0, 7]
    //
    static const uint32_t ENC1_MASK = 0x1;
    static const uint32_t ENC1_MASK_VAL = 0x0;

    static const uint32_t ENC1_NATIVE_DELTA_MAX = 0xf;
    static const unsigned ENC1_NATIVE_DELTA_SHIFT = 4;

    static const uint32_t ENC1_PC_DELTA_MASK = 0x0e;
    static const int32_t ENC1_PC_DELTA_MAX = 0x7;
    static const unsigned ENC1_PC_DELTA_SHIFT = 1;

    //      byte 1    byte 0
    //      NNNN-NNNN BBBB-BB01
    //      Two-byte format.  nativeDelta in [0, 255], pcDelta in [0, 63]
    //
    static const uint32_t ENC2_MASK = 0x3;
    static const uint32_t ENC2_MASK_VAL = 0x1;

    static const uint32_t ENC2_NATIVE_DELTA_MAX = 0xff;
    static const unsigned ENC2_NATIVE_DELTA_SHIFT = 8;

    static const uint32_t ENC2_PC_DELTA_MASK = 0x00fc;
    static const int32_t ENC2_PC_DELTA_MAX = 0x3f;
    static const unsigned ENC2_PC_DELTA_SHIFT = 2;

    //      byte 2    byte 1    byte 0
    //      NNNN-NNNN NNNB-BBBB BBBB-B011
    //      Three-byte format.  nativeDelta in [0, 2047], pcDelta in [-512, 511]
    //
    static const uint32_t ENC3_MASK = 0x7;
    static const uint32_t ENC3_MASK_VAL = 0x3;

    static const uint32_t ENC3_NATIVE_DELTA_MAX = 0x7ff;
    static const unsigned ENC3_NATIVE_DELTA_SHIFT = 13;

    static const uint32_t ENC3_PC_DELTA_MASK = 0x001ff8;
    static const int32_t ENC3_PC_DELTA_MAX = 0x1ff;
    static const int32_t ENC3_PC_DELTA_MIN = -ENC3_PC_DELTA_MAX - 1;
    static const unsigned ENC3_PC_DELTA_SHIFT = 3;

    //      byte 3    byte 2    byte 1    byte 0
    //      NNNN-NNNN NNNN-NNNN BBBB-BBBB BBBB-B111
    //      Four-byte format.  nativeDelta in [0, 65535], pcDelta in [-4096, 4095]
    static const uint32_t ENC4_MASK = 0x7;
    static const uint32_t ENC4_MASK_VAL = 0x7;

    static const uint32_t ENC4_NATIVE_DELTA_MAX = 0xffff;
    static const unsigned ENC4_NATIVE_DELTA_SHIFT = 16;

    static const uint32_t ENC4_PC_DELTA_MASK = 0x0000fff8;
    static const int32_t ENC4_PC_DELTA_MAX = 0xfff;
    static const int32_t ENC4_PC_DELTA_MIN = -ENC4_PC_DELTA_MAX - 1;
    static const unsigned ENC4_PC_DELTA_SHIFT = 3;

    // True iff the pair fits in the widest (four-byte) encoding.
    static bool IsDeltaEncodeable(uint32_t nativeDelta, int32_t pcDelta) {
        return (nativeDelta <= ENC4_NATIVE_DELTA_MAX) &&
               (pcDelta >= ENC4_PC_DELTA_MIN) && (pcDelta <= ENC4_PC_DELTA_MAX);
    }

  private:
    const uint8_t *data_;
    const uint8_t *end_;

    // Unpacked state from jitcode entry.
    uint32_t nativeOffset_;
    uint8_t scriptDepth_;
    const uint8_t *scriptPcStack_;
    const uint8_t *deltaRun_;

    // Decode the head and locate the script/pc stack and delta run.
    void unpack();

  public:
    JitcodeRegionEntry(const uint8_t *data, const uint8_t *end)
      : data_(data), end_(end),
        nativeOffset_(0), scriptDepth_(0),
        scriptPcStack_(nullptr), deltaRun_(nullptr)
    {
        JS_ASSERT(data_ < end_);
        unpack();
        JS_ASSERT(scriptPcStack_ < end_);
        JS_ASSERT(deltaRun_ <= end_);
    }

    uint32_t nativeOffset() const {
        return nativeOffset_;
    }
    uint32_t scriptDepth() const {
        return scriptDepth_;
    }

    // Iterates the (scriptIdx, pcOffset) pairs at the head of the run.
    class ScriptPcIterator
    {
      private:
        uint32_t count_;
        const uint8_t *start_;
        const uint8_t *end_;

        uint32_t idx_;
        const uint8_t *cur_;

      public:
        ScriptPcIterator(uint32_t count, const uint8_t *start, const uint8_t *end)
          : count_(count), start_(start), end_(end), idx_(0), cur_(start_)
        {}

        bool hasMore() const
        {
            JS_ASSERT((idx_ == count_) == (cur_ == end_));
            JS_ASSERT((idx_ < count_) == (cur_ < end_));
            return cur_ < end_;
        }

        void readNext(uint32_t *scriptIdxOut, uint32_t *pcOffsetOut)
        {
            JS_ASSERT(scriptIdxOut);
            JS_ASSERT(pcOffsetOut);
            JS_ASSERT(hasMore());

            CompactBufferReader reader(cur_, end_);
            ReadScriptPc(reader, scriptIdxOut, pcOffsetOut);
            cur_ = reader.currentPosition();
            JS_ASSERT(cur_ <= end_);

            idx_++;
            JS_ASSERT_IF(idx_ == count_, cur_ == end_);
        }

        void reset() {
            idx_ = 0;
            cur_ = start_;
        }
    };

    ScriptPcIterator scriptPcIterator() const {
        // End of script+pc sequence is the start of the delta run.
        return ScriptPcIterator(scriptDepth_, scriptPcStack_, deltaRun_);
    }

    // Iterates the encoded (nativeDelta, pcDelta) pairs in the delta run.
    class DeltaIterator {
      private:
        const uint8_t *start_;
        const uint8_t *end_;
        const uint8_t *cur_;

      public:
        DeltaIterator(const uint8_t *start, const uint8_t *end)
          : start_(start), end_(end), cur_(start)
        {}

        bool hasMore() const
        {
            JS_ASSERT(cur_ <= end_);
            return cur_ < end_;
        }

        void readNext(uint32_t *nativeDeltaOut, int32_t *pcDeltaOut)
        {
            JS_ASSERT(nativeDeltaOut != nullptr);
            JS_ASSERT(pcDeltaOut != nullptr);

            JS_ASSERT(hasMore());

            CompactBufferReader reader(cur_, end_);
            ReadDelta(reader, nativeDeltaOut, pcDeltaOut);

            cur_ = reader.currentPosition();
            JS_ASSERT(cur_ <= end_);
        }

        void reset() {
            cur_ = start_;
        }
    };
    DeltaIterator deltaIterator() const {
        return DeltaIterator(deltaRun_, end_);
    }

    // Walk the delta run to find the pc offset active at |queryNativeOffset|,
    // starting the accumulation from |startPcOffset|.
    uint32_t findPcOffset(uint32_t queryNativeOffset, uint32_t startPcOffset) const;
};
// View over the fixed-size region table at the tail of a packed native-to-
// bytecode map. |this| points at the table itself; the variable-length
// payload (the runs) lives at lower addresses, reached by subtracting the
// backwards offsets stored in regionOffsets_.
class JitcodeIonTable
{
  private:
    /* Variable length payload section "below" here. */
    uint32_t numRegions_;
    // Backwards byte offsets from |this| to each region's run; trailing
    // flexible array of numRegions_ entries.
    uint32_t regionOffsets_[0];

    const uint8_t *payloadEnd() const {
        // The payload ends exactly where the table begins.
        return reinterpret_cast<const uint8_t *>(this);
    }

  public:
    JitcodeIonTable(uint32_t numRegions)
      : numRegions_(numRegions)
    {
        for (uint32_t i = 0; i < numRegions; i++)
            regionOffsets_[i] = 0;
    }

    // Build an IonEntry describing |code| using this table; defined out of line.
    bool makeIonEntry(JSContext *cx, JitCode *code, uint32_t numScripts, JSScript **scripts,
                      JitcodeGlobalEntry::IonEntry &out);

    uint32_t numRegions() const {
        return numRegions_;
    }

    uint32_t regionOffset(uint32_t regionIndex) const {
        JS_ASSERT(regionIndex < numRegions());
        return regionOffsets_[regionIndex];
    }

    // Decode region |regionIndex|. Its run spans from its own backwards
    // offset up to the start of the next region's run (or the table for the
    // last region).
    JitcodeRegionEntry regionEntry(uint32_t regionIndex) const {
        const uint8_t *regionStart = payloadEnd() - regionOffset(regionIndex);
        const uint8_t *regionEnd = payloadEnd();
        if (regionIndex < numRegions_ - 1)
            regionEnd -= regionOffset(regionIndex + 1);
        return JitcodeRegionEntry(regionStart, regionEnd);
    }

    // True iff |nativeOffset| falls within region |regionIndex|'s range.
    bool regionContainsOffset(uint32_t regionIndex, uint32_t nativeOffset) {
        JS_ASSERT(regionIndex < numRegions());

        JitcodeRegionEntry ent = regionEntry(regionIndex);
        if (nativeOffset < ent.nativeOffset())
            return false;

        if (regionIndex == numRegions_ - 1)
            return true;

        return nativeOffset < regionEntry(regionIndex + 1).nativeOffset();
    }

    uint32_t findRegionEntry(uint32_t offset) const;

    const uint8_t *payloadStart() const {
        // The beginning of the payload and the beginning of the first region are the same.
        return payloadEnd() - regionOffset(0);
    }

    static bool WriteIonTable(CompactBufferWriter &writer,
                              JSScript **scriptList, uint32_t scriptListSize,
                              const CodeGeneratorShared::NativeToBytecode *start,
                              const CodeGeneratorShared::NativeToBytecode *end,
                              uint32_t *tableOffsetOut, uint32_t *numRegionsOut);
};
} // namespace jit
} // namespace js
#endif /* jit_JitcodeMap_h */

View File

@ -128,6 +128,25 @@ class LMoveGroup : public LInstructionHelper<0, 0, 0>
}
};
// Constructs a SIMD value with 4 components (e.g. int32x4, float32x4).
class LSimdValueX4 : public LInstructionHelper<1, 4, 0>
{
  public:
    LIR_HEADER(SimdValueX4)

    // x, y, z, w are the four lane values, in lane order.
    LSimdValueX4(const LAllocation &x, const LAllocation &y,
                 const LAllocation &z, const LAllocation &w)
    {
        setOperand(0, x);
        setOperand(1, y);
        setOperand(2, z);
        setOperand(3, w);
    }

    MSimdValueX4 *mir() const {
        return mir_->toSimdValueX4();
    }
};
// Extracts an element from a given SIMD int32x4 lane.
class LSimdExtractElementI : public LInstructionHelper<1, 1, 0>
{
@ -166,6 +185,42 @@ class LSimdExtractElementF : public LInstructionHelper<1, 1, 0>
}
};
// Binary SIMD arithmetic operation between two SIMD operands.
// Base class: carries no LIR_HEADER; the type-specific subclasses
// (LSimdBinaryArithIx4, LSimdBinaryArithFx4) provide the opcode.
class LSimdBinaryArith : public LInstructionHelper<1, 2, 0>
{
  public:
    LSimdBinaryArith() {}

    const LAllocation *lhs() {
        return getOperand(0);
    }
    const LAllocation *rhs() {
        return getOperand(1);
    }

    MSimdBinaryArith::Operation operation() const {
        return mir_->toSimdBinaryArith()->operation();
    }
    // Human-readable operation name for LIR spew.
    const char *extraName() const {
        return MSimdBinaryArith::OperationName(operation());
    }
};
// Binary SIMD arithmetic operation between two Int32x4 operands
class LSimdBinaryArithIx4 : public LSimdBinaryArith
{
  public:
    LIR_HEADER(SimdBinaryArithIx4);
    LSimdBinaryArithIx4() : LSimdBinaryArith() {}
};
// Binary SIMD arithmetic operation between two Float32x4 operands
class LSimdBinaryArithFx4 : public LSimdBinaryArith
{
  public:
    LIR_HEADER(SimdBinaryArithFx4);
    LSimdBinaryArithFx4() : LSimdBinaryArith() {}
};
// Constant 32-bit integer.
class LInteger : public LInstructionHelper<1, 0, 0>
{
@ -253,6 +308,26 @@ class LFloat32 : public LInstructionHelper<1, 0, 0>
}
};
// Constant SIMD int32x4
class LInt32x4 : public LInstructionHelper<1, 0, 0>
{
  public:
    LIR_HEADER(Int32x4);

    explicit LInt32x4() {}
    // The constant value comes from the MSimdConstant this lowers.
    const SimdConstant &getValue() const { return mir_->toSimdConstant()->value(); }
};
// Constant SIMD float32x4
class LFloat32x4 : public LInstructionHelper<1, 0, 0>
{
  public:
    LIR_HEADER(Float32x4);

    explicit LFloat32x4() {}
    // The constant value comes from the MSimdConstant this lowers.
    const SimdConstant &getValue() const { return mir_->toSimdConstant()->value(); }
};
// A constant Value.
class LValue : public LInstructionHelper<BOX_PIECES, 0, 0>
{
@ -725,6 +800,10 @@ class LCheckOverRecursed : public LInstructionHelper<0, 0, 0>
LCheckOverRecursed()
{ }
MCheckOverRecursed *mir() const {
return mir_->toCheckOverRecursed();
}
};
class LCheckOverRecursedPar : public LInstructionHelper<0, 1, 1>
@ -744,6 +823,10 @@ class LCheckOverRecursedPar : public LInstructionHelper<0, 1, 1>
const LDefinition *getTempReg() {
return getTemp(0);
}
MCheckOverRecursedPar *mir() const {
return mir_->toCheckOverRecursedPar();
}
};
class LAsmJSInterruptCheck : public LInstructionHelper<0, 0, 1>
@ -804,6 +887,9 @@ class LInterruptCheckImplicit : public LInstructionHelper<0, 0, 0>
void setOolEntry(Label *oolEntry) {
oolEntry_ = oolEntry;
}
MInterruptCheck *mir() const {
return mir_->toInterruptCheck();
}
};
class LInterruptCheckPar : public LInstructionHelper<0, 1, 1>
@ -823,6 +909,9 @@ class LInterruptCheckPar : public LInstructionHelper<0, 1, 1>
const LDefinition *getTempReg() {
return getTemp(0);
}
MInterruptCheckPar *mir() const {
return mir_->toInterruptCheckPar();
}
};
class LDefVar : public LCallInstructionHelper<0, 1, 0>
@ -2767,6 +2856,10 @@ class LAddI : public LBinaryMath<0>
void setRecoversInput() {
recoversInput_ = true;
}
MAdd *mir() const {
return mir_->toAdd();
}
};
// Subtracts two integers, returning an integer value.
@ -2791,6 +2884,9 @@ class LSubI : public LBinaryMath<0>
void setRecoversInput() {
recoversInput_ = true;
}
MSub *mir() const {
return mir_->toSub();
}
};
// Performs an add, sub, mul, or div on two double values.
@ -3133,6 +3229,9 @@ class LValueToInt32 : public LInstructionHelper<1, BOX_PIECES, 2>
JS_ASSERT(mode_ == TRUNCATE);
return mir_->toTruncateToInt32();
}
MInstruction *mir() const {
return mir_->toInstruction();
}
};
// Convert a double to an int32.
@ -3187,6 +3286,10 @@ class LTruncateDToInt32 : public LInstructionHelper<1, 1, 1>
const LDefinition *tempFloat() {
return getTemp(0);
}
MTruncateToInt32 *mir() const {
return mir_->toTruncateToInt32();
}
};
// Convert a float32 to a truncated int32.
@ -3205,6 +3308,10 @@ class LTruncateFToInt32 : public LInstructionHelper<1, 1, 1>
const LDefinition *tempFloat() {
return getTemp(0);
}
MTruncateToInt32 *mir() const {
return mir_->toTruncateToInt32();
}
};
// Convert a boolean value to a string.
@ -3287,6 +3394,17 @@ class LStart : public LInstructionHelper<0, 0, 0>
LIR_HEADER(Start)
};
// No-op instruction that prints nativeOffset, script, pcOffset during codegen.
class LPcOffset : public LInstructionHelper<0, 0, 0>
{
  public:
    LIR_HEADER(PcOffset)

    const MPcOffset *mir() const {
        return mir_->toPcOffset();
    }
};
// Passed the BaselineFrame address in the OsrFrameReg by SideCannon().
// Forwards this object to the LOsrValues for Value materialization.
class LOsrEntry : public LInstructionHelper<1, 0, 0>
@ -4267,6 +4385,27 @@ class LArrayConcat : public LCallInstructionHelper<1, 2, 2>
}
};
// Array.prototype.join lowered as a VM call: joins |array|'s elements into a
// string using |sep| as the separator.
class LArrayJoin : public LCallInstructionHelper<1, 2, 0>
{
  public:
    LIR_HEADER(ArrayJoin)

    LArrayJoin(const LAllocation &array, const LAllocation &sep) {
        setOperand(0, array);
        setOperand(1, sep);
    }

    const MArrayJoin *mir() const {
        return mir_->toArrayJoin();
    }
    const LAllocation *array() {
        return getOperand(0);
    }
    const LAllocation *separator() {
        return getOperand(1);
    }
};
// Load a typed value from a typed array's elements vector.
class LLoadTypedArrayElement : public LInstructionHelper<1, 2, 1>
{

View File

@ -16,8 +16,13 @@
_(Pointer) \
_(Double) \
_(Float32) \
_(SimdValueX4) \
_(Int32x4) \
_(Float32x4) \
_(SimdExtractElementI) \
_(SimdExtractElementF) \
_(SimdBinaryArithIx4) \
_(SimdBinaryArithFx4) \
_(Value) \
_(CloneLiteral) \
_(Parameter) \
@ -151,6 +156,7 @@
_(DoubleToString) \
_(ValueToString) \
_(Start) \
_(PcOffset) \
_(OsrEntry) \
_(OsrValue) \
_(OsrScopeChain) \
@ -200,6 +206,7 @@
_(ArrayPushV) \
_(ArrayPushT) \
_(ArrayConcat) \
_(ArrayJoin) \
_(StoreElementHoleV) \
_(StoreElementHoleT) \
_(LoadTypedArrayElement) \

View File

@ -1674,6 +1674,13 @@ LIRGenerator::visitStart(MStart *start)
return add(lir);
}
bool
LIRGenerator::visitPcOffset(MPcOffset *pcOffset)
{
    // Emit the no-op marker instruction; it only records a native-offset /
    // bytecode-pc pairing during codegen.
    return add(new(alloc()) LPcOffset, pcOffset);
}
bool
LIRGenerator::visitNop(MNop *nop)
{
@ -2758,6 +2765,18 @@ LIRGenerator::visitArrayConcat(MArrayConcat *ins)
return defineReturn(lir, ins) && assignSafepoint(lir, ins);
}
bool
LIRGenerator::visitArrayJoin(MArrayJoin *ins)
{
    // join() takes an object array and a string separator, yields a string.
    JS_ASSERT(ins->type() == MIRType_String);
    JS_ASSERT(ins->array()->type() == MIRType_Object);
    JS_ASSERT(ins->sep()->type() == MIRType_String);

    LArrayJoin *lir =
        new(alloc()) LArrayJoin(useRegisterAtStart(ins->array()),
                                useRegisterAtStart(ins->sep()));
    if (!defineReturn(lir, ins))
        return false;
    return assignSafepoint(lir, ins);
}
bool
LIRGenerator::visitStringSplit(MStringSplit *ins)
{
@ -3638,6 +3657,31 @@ LIRGenerator::visitRecompileCheck(MRecompileCheck *ins)
return assignSafepoint(lir, ins);
}
bool
LIRGenerator::visitSimdValueX4(MSimdValueX4 *ins)
{
    // Gather the four scalar lane operands in order; each must be live in a
    // register at the start of the instruction.
    LAllocation lane[4];
    for (size_t i = 0; i < 4; i++)
        lane[i] = useRegisterAtStart(ins->getOperand(i));

    return define(new(alloc()) LSimdValueX4(lane[0], lane[1], lane[2], lane[3]), ins);
}
bool
LIRGenerator::visitSimdConstant(MSimdConstant *ins)
{
    JS_ASSERT(IsSimdType(ins->type()));

    // Dispatch on the SIMD kind of the constant.
    switch (ins->type()) {
      case MIRType_Int32x4:
        return define(new(alloc()) LInt32x4(), ins);
      case MIRType_Float32x4:
        return define(new(alloc()) LFloat32x4(), ins);
      default:
        MOZ_ASSUME_UNREACHABLE("Unknown SIMD kind when generating constant");
        return false;
    }
}
bool
LIRGenerator::visitSimdExtractElement(MSimdExtractElement *ins)
{
@ -3660,6 +3704,25 @@ LIRGenerator::visitSimdExtractElement(MSimdExtractElement *ins)
return false;
}
bool
LIRGenerator::visitSimdBinaryArith(MSimdBinaryArith *ins)
{
    JS_ASSERT(IsSimdType(ins->type()));

    // Pick the integer or float X4 variant of the LIR node; both are lowered
    // like FPU binary operations.
    switch (ins->type()) {
      case MIRType_Int32x4: {
        LSimdBinaryArithIx4 *lir = new(alloc()) LSimdBinaryArithIx4();
        return lowerForFPU(lir, ins, ins->lhs(), ins->rhs());
      }
      case MIRType_Float32x4: {
        LSimdBinaryArithFx4 *lir = new(alloc()) LSimdBinaryArithFx4();
        return lowerForFPU(lir, ins, ins->lhs(), ins->rhs());
      }
      default:
        MOZ_ASSUME_UNREACHABLE("Unknown SIMD kind when adding values");
        return false;
    }
}
static void
SpewResumePoint(MBasicBlock *block, MInstruction *ins, MResumePoint *resumePoint)
{

View File

@ -141,6 +141,7 @@ class LIRGenerator : public LIRGeneratorSpecific
bool visitFromCharCode(MFromCharCode *ins);
bool visitStringSplit(MStringSplit *ins);
bool visitStart(MStart *start);
bool visitPcOffset(MPcOffset *pcOffset);
bool visitOsrEntry(MOsrEntry *entry);
bool visitNop(MNop *nop);
bool visitLimitedTruncate(MLimitedTruncate *nop);
@ -199,6 +200,7 @@ class LIRGenerator : public LIRGeneratorSpecific
bool visitArrayPopShift(MArrayPopShift *ins);
bool visitArrayPush(MArrayPush *ins);
bool visitArrayConcat(MArrayConcat *ins);
bool visitArrayJoin(MArrayJoin *ins);
bool visitLoadTypedArrayElement(MLoadTypedArrayElement *ins);
bool visitLoadTypedArrayElementHole(MLoadTypedArrayElementHole *ins);
bool visitLoadTypedArrayElementStatic(MLoadTypedArrayElementStatic *ins);
@ -264,6 +266,9 @@ class LIRGenerator : public LIRGeneratorSpecific
bool visitGetDOMMember(MGetDOMMember *ins);
bool visitRecompileCheck(MRecompileCheck *ins);
bool visitSimdExtractElement(MSimdExtractElement *ins);
bool visitSimdBinaryArith(MSimdBinaryArith *ins);
bool visitSimdValueX4(MSimdValueX4 *ins);
bool visitSimdConstant(MSimdConstant *ins);
};
} // namespace jit

View File

@ -42,6 +42,8 @@ IonBuilder::inlineNativeCall(CallInfo &callInfo, JSFunction *target)
return inlineArrayPush(callInfo);
if (native == js::array_concat)
return inlineArrayConcat(callInfo);
if (native == js::array_join)
return inlineArrayJoin(callInfo);
if (native == js::array_splice)
return inlineArraySplice(callInfo);
@ -477,6 +479,29 @@ IonBuilder::inlineArraySplice(CallInfo &callInfo)
return InliningStatus_Inlined;
}
IonBuilder::InliningStatus
IonBuilder::inlineArrayJoin(CallInfo &callInfo)
{
    // Only inline the one-argument, non-constructing form: arr.join(sep).
    if (callInfo.argc() != 1 || callInfo.constructing())
        return InliningStatus_Error;

    // The inferred types must be exact: string result, object |this|,
    // string separator.
    if (getInlineReturnType() != MIRType_String ||
        callInfo.thisArg()->type() != MIRType_Object ||
        callInfo.getArg(0)->type() != MIRType_String)
    {
        return InliningStatus_Error;
    }

    callInfo.setImplicitlyUsedUnchecked();

    MArrayJoin *join = MArrayJoin::New(alloc(), callInfo.thisArg(), callInfo.getArg(0));
    current->add(join);
    current->push(join);
    return InliningStatus_Inlined;
}
IonBuilder::InliningStatus
IonBuilder::inlineArrayPush(CallInfo &callInfo)
{

View File

@ -30,6 +30,7 @@ using namespace js::jit;
using mozilla::NumbersAreIdentical;
using mozilla::IsFloat32Representable;
using mozilla::Maybe;
using mozilla::DebugOnly;
#ifdef DEBUG
size_t MUse::index() const
@ -588,6 +589,39 @@ MConstant::canProduceFloat32() const
return true;
}
// Constant-fold a SimdValueX4 whose four lane operands are all constants into
// a single MSimdConstant. Returns |this| unchanged if any lane operand is not
// a constant.
MDefinition*
MSimdValueX4::foldsTo(TempAllocator &alloc)
{
    // Each operand must have the SIMD type's scalar lane type (this is
    // asserted by the constructor; re-checked here in debug builds).
    DebugOnly<MIRType> scalarType = SimdTypeToScalarType(type());
    for (size_t i = 0; i < 4; ++i) {
        MDefinition *op = getOperand(i);
        if (!op->isConstant())
            return this;
        JS_ASSERT(op->type() == scalarType);
    }

    SimdConstant cst;
    switch (type()) {
      case MIRType_Int32x4: {
        int32_t a[4];
        for (size_t i = 0; i < 4; ++i)
            a[i] = getOperand(i)->toConstant()->value().toInt32();
        cst = SimdConstant::CreateX4(a);
        break;
      }
      case MIRType_Float32x4: {
        float a[4];
        for (size_t i = 0; i < 4; ++i) {
            // toNumber() yields a double; it is narrowed to float here to
            // match the 32-bit lane width of Float32x4.
            a[i] = getOperand(i)->toConstant()->value().toNumber();
        }
        cst = SimdConstant::CreateX4(a);
        break;
      }
      default: MOZ_ASSUME_UNREACHABLE("unexpected type in MSimdValueX4::foldsTo");
    }

    return MSimdConstant::New(alloc, cst, type());
}
MCloneLiteral *
MCloneLiteral::New(TempAllocator &alloc, MDefinition *obj)
{
@ -3421,6 +3455,32 @@ MBoundsCheck::foldsTo(TempAllocator &alloc)
return this;
}
// Pattern-match foo.split(bar).join(baz) and rewrite it into the equivalent
// foo.replace(bar, baz), avoiding materialization of the intermediate array.
MDefinition *
MArrayJoin::foldsTo(TempAllocator &alloc) {
    MDefinition *arr = array();

    // Only the split().join() pattern is folded.
    if (!arr->isStringSplit())
        return this;

    // Tentatively mark this join as recovered-on-bailout, then check whether
    // the split result still has other live uses; if it does, the split (and
    // hence this join) must be kept, so undo the mark and bail out of the
    // fold. NOTE(review): this appears to rely on the recovered mark making
    // this use not count in hasLiveDefUses() — confirm against MDefinition.
    this->setRecoveredOnBailout();
    if (arr->hasLiveDefUses()) {
        this->setNotRecoveredOnBailout();
        return this;
    }

    // We're replacing foo.split(bar).join(baz) by
    // foo.replace(bar, baz).  MStringSplit could be recovered by
    // a bailout.  As we are removing its last use, and its result
    // could be captured by a resume point, this MStringSplit will
    // be executed on the bailout path.
    MDefinition *string = arr->toStringSplit()->string();
    MDefinition *pattern = arr->toStringSplit()->separator();
    MDefinition *replacement = sep();

    setNotRecoveredOnBailout();
    return MStringReplace::New(alloc, string, pattern, replacement);
}
bool
jit::ElementAccessIsDenseNative(MDefinition *obj, MDefinition *id)
{

View File

@ -13,6 +13,7 @@
#define jit_MIR_h
#include "mozilla/Array.h"
#include "mozilla/DebugOnly.h"
#include "jit/CompilerRoot.h"
#include "jit/FixedList.h"
@ -1094,6 +1095,24 @@ class MStart : public MNullaryInstruction
}
};
// No-op MIR instruction marking a bytecode pc; lowered to LPcOffset so the
// code generator can associate native code offsets with bytecode offsets.
class MPcOffset : public MNullaryInstruction
{
  private:
    MPcOffset() {
        // Guard: keeps the instruction alive even though it has no uses.
        setGuard();
    }

  public:
    INSTRUCTION_HEADER(PcOffset)

    static MPcOffset *New(TempAllocator &alloc) {
        return new(alloc) MPcOffset();
    }

    // Reads and writes no state.
    AliasSet getAliasSet() const {
        return AliasSet::None();
    }
};
// Instruction marking on entrypoint for on-stack replacement.
// OSR may occur at loop headers (at JSOP_TRACE).
// There is at most one MOsrEntry per MIRGraph.
@ -1233,6 +1252,77 @@ class MConstant : public MNullaryInstruction
ALLOW_CLONE(MConstant)
};
// Generic constructor of SIMD X4 values: builds a SIMD value from its four
// scalar lane operands (x, y, z, w). The operands must already have the lane
// type matching |type| (e.g. MIRType_Int32 lanes for an Int32x4 result).
class MSimdValueX4 : public MQuaternaryInstruction
{
  protected:
    MSimdValueX4(MIRType type, MDefinition *x, MDefinition *y, MDefinition *z, MDefinition *w)
      : MQuaternaryInstruction(x, y, z, w)
    {
        JS_ASSERT(IsSimdType(type));
        // All four lanes must carry the SIMD type's scalar type.
        mozilla::DebugOnly<MIRType> scalarType = SimdTypeToScalarType(type);
        JS_ASSERT(scalarType == x->type());
        JS_ASSERT(scalarType == y->type());
        JS_ASSERT(scalarType == z->type());
        JS_ASSERT(scalarType == w->type());

        setMovable();
        setResultType(type);
    }

  public:
    INSTRUCTION_HEADER(SimdValueX4)

    static MSimdValueX4 *New(TempAllocator &alloc, MIRType type, MDefinition *x,
                             MDefinition *y, MDefinition *z, MDefinition *w)
    {
        return new(alloc) MSimdValueX4(type, x, y, z, w);
    }

    AliasSet getAliasSet() const {
        return AliasSet::None();
    }

    bool congruentTo(const MDefinition *ins) const {
        return congruentIfOperandsEqual(ins);
    }

    // Folds to an MSimdConstant when all four lane operands are constants.
    MDefinition *foldsTo(TempAllocator &alloc);
};
// A constant SIMD value (int32x4 or float32x4 literal). Movable, and two
// MSimdConstants are congruent iff their SimdConstant payloads compare equal.
class MSimdConstant : public MNullaryInstruction
{
    SimdConstant value_;

  protected:
    MSimdConstant(const SimdConstant &v, MIRType type) : value_(v) {
        JS_ASSERT(IsSimdType(type));
        setResultType(type);
        setMovable();
    }

  public:
    INSTRUCTION_HEADER(SimdConstant);
    static MSimdConstant *New(TempAllocator &alloc, const SimdConstant &v, MIRType type) {
        return new(alloc) MSimdConstant(v, type);
    }

    bool congruentTo(const MDefinition *ins) const {
        if (!ins->isSimdConstant())
            return false;
        return value() == ins->toSimdConstant()->value();
    }

    // The raw constant payload.
    const SimdConstant &value() const {
        return value_;
    }

    AliasSet getAliasSet() const {
        return AliasSet::None();
    }
};
// Extracts a lane element from a given vector type, given by its lane symbol.
class MSimdExtractElement : public MUnaryInstruction
{
@ -1274,6 +1364,63 @@ class MSimdExtractElement : public MUnaryInstruction
}
};
// Binary SIMD arithmetic on two X4 operands of the same SIMD type.
// Int32x4 only supports Add and Sub (asserted in the constructor);
// Float32x4 supports all four operations.
class MSimdBinaryArith : public MBinaryInstruction
{
  public:
    enum Operation {
        Add,
        Sub,
        Mul,
        Div
    };

    // Human-readable name of |op|.
    static const char* OperationName(Operation op) {
        switch (op) {
          case Add: return "Add";
          case Sub: return "Sub";
          case Mul: return "Mul";
          case Div: return "Div";
        }
        MOZ_ASSUME_UNREACHABLE("unexpected operation");
    }

  private:
    Operation operation_;

    MSimdBinaryArith(MDefinition *left, MDefinition *right, Operation op, MIRType type)
      : MBinaryInstruction(left, right), operation_(op)
    {
        JS_ASSERT_IF(type == MIRType_Int32x4, op == Add || op == Sub);
        JS_ASSERT(IsSimdType(type));
        JS_ASSERT(left->type() == right->type());
        JS_ASSERT(left->type() == type);
        setResultType(type);
        setMovable();
        // Add and Mul commute; Sub and Div do not.
        if (op == Add || op == Mul)
            setCommutative();
    }

  public:
    INSTRUCTION_HEADER(SimdBinaryArith);
    static MSimdBinaryArith *NewAsmJS(TempAllocator &alloc, MDefinition *left, MDefinition *right,
                                      Operation op, MIRType t)
    {
        return new(alloc) MSimdBinaryArith(left, right, op, t);
    }

    AliasSet getAliasSet() const {
        return AliasSet::None();
    }

    Operation operation() const { return operation_; }

    bool congruentTo(const MDefinition *ins) const {
        if (!binaryCongruentTo(ins))
            return false;
        return operation_ == ins->toSimdBinaryArith()->operation();
    }
};
// Deep clone a constant JSObject.
class MCloneLiteral
: public MUnaryInstruction,
@ -7166,6 +7313,39 @@ class MArrayConcat
}
};
// MIR for Array.prototype.join(separator): consumes an object |array| and a
// string |sep| and produces a string.
class MArrayJoin
    : public MBinaryInstruction,
      public MixPolicy<ObjectPolicy<0>, StringPolicy<1> >
{
    MArrayJoin(MDefinition *array, MDefinition *sep)
        : MBinaryInstruction(array, sep)
    {
        setResultType(MIRType_String);
    }
  public:
    INSTRUCTION_HEADER(ArrayJoin)
    static MArrayJoin *New(TempAllocator &alloc, MDefinition *array, MDefinition *sep)
    {
        return new (alloc) MArrayJoin(array, sep);
    }
    TypePolicy *typePolicy() {
        return this;
    }
    // Operand 0: the array object being joined.
    MDefinition *array() const {
        return getOperand(0);
    }
    // Operand 1: the separator string.
    MDefinition *sep() const {
        return getOperand(1);
    }
    // The join is performed by a call into the VM.
    bool possiblyCalls() const {
        return true;
    }
    // Depends on (reads) element and object-field state.
    virtual AliasSet getAliasSet() const {
        return AliasSet::Load(AliasSet::Element | AliasSet::ObjectFields);
    }
    // Folds str.split(sep).join(rep) into str.replace(sep, rep).
    MDefinition *foldsTo(TempAllocator &alloc);
};
class MLoadTypedArrayElement
: public MBinaryInstruction
{

View File

@ -76,7 +76,21 @@ class MIRGenerator
}
bool instrumentedProfiling() {
return GetIonContext()->runtime->spsProfiler().enabled();
if (!instrumentedProfilingIsCached_) {
instrumentedProfiling_ = GetIonContext()->runtime->spsProfiler().enabled();
instrumentedProfilingIsCached_ = true;
}
return instrumentedProfiling_;
}
    // Whether codegen should build the native-code-offset => bytecode-offset
    // mapping table: never for asm.js (which has no bytecode), always in
    // DEBUG builds, otherwise only when SPS profiling is enabled.
    bool isNativeToBytecodeMapEnabled() {
        if (compilingAsmJS())
            return false;
#ifdef DEBUG
        return true;
#else
        return instrumentedProfiling();
#endif
    }
// Whether the main thread is trying to cancel this build.
@ -167,6 +181,9 @@ class MIRGenerator
// slots is not compatible with that.
bool modifiesFrameArguments_;
bool instrumentedProfiling_;
bool instrumentedProfilingIsCached_;
#if defined(JS_ION_PERF)
AsmJSPerfSpewer asmJSPerfSpewer_;

View File

@ -34,6 +34,8 @@ MIRGenerator::MIRGenerator(CompileCompartment *compartment, const JitCompileOpti
usesSimdCached_(false),
minAsmJSHeapLength_(AsmJSAllocationGranularity),
modifiesFrameArguments_(false),
instrumentedProfiling_(false),
instrumentedProfilingIsCached_(false),
options(options)
{ }

View File

@ -12,7 +12,10 @@ namespace jit {
#define MIR_OPCODE_LIST(_) \
_(Constant) \
_(SimdValueX4) \
_(SimdConstant) \
_(SimdExtractElement) \
_(SimdBinaryArith) \
_(CloneLiteral) \
_(Parameter) \
_(Callee) \
@ -103,6 +106,7 @@ namespace jit {
_(InitProp) \
_(InitPropGetterSetter) \
_(Start) \
_(PcOffset) \
_(OsrEntry) \
_(Nop) \
_(LimitedTruncate) \
@ -157,6 +161,7 @@ namespace jit {
_(ArrayPopShift) \
_(ArrayPush) \
_(ArrayConcat) \
_(ArrayJoin) \
_(LoadTypedArrayElement) \
_(LoadTypedArrayElementHole) \
_(LoadTypedArrayElementStatic) \

View File

@ -112,7 +112,10 @@ class ParallelSafetyVisitor : public MDefinitionVisitor
// obviously safe for now. We can loosen as we need.
SAFE_OP(Constant)
SAFE_OP(SimdValueX4)
SAFE_OP(SimdConstant)
SAFE_OP(SimdExtractElement)
SAFE_OP(SimdBinaryArith)
UNSAFE_OP(CloneLiteral)
SAFE_OP(Parameter)
SAFE_OP(Callee)
@ -196,6 +199,7 @@ class ParallelSafetyVisitor : public MDefinitionVisitor
UNSAFE_OP(InitProp)
UNSAFE_OP(InitPropGetterSetter)
SAFE_OP(Start)
SAFE_OP(PcOffset)
UNSAFE_OP(OsrEntry)
SAFE_OP(Nop)
SAFE_OP(LimitedTruncate)
@ -287,6 +291,7 @@ class ParallelSafetyVisitor : public MDefinitionVisitor
SAFE_OP(NewCallObjectPar)
SAFE_OP(LambdaPar)
UNSAFE_OP(ArrayConcat)
UNSAFE_OP(ArrayJoin)
UNSAFE_OP(GetDOMProperty)
UNSAFE_OP(GetDOMMember)
UNSAFE_OP(SetDOMProperty)

View File

@ -457,6 +457,44 @@ ArrayConcatDense(JSContext *cx, HandleObject obj1, HandleObject obj2, HandleObje
return &argv[0].toObject();
}
// VM call target for Ion's inlined Array.prototype.join: joins |array|'s
// elements with separator |sep| (the comma atom when |sep| is null).
// Returns nullptr on failure.
JSString *
ArrayJoin(JSContext *cx, HandleObject array, HandleString sep)
{
    // The annotations in this function follow the first steps of join
    // specified in ES5.

    // Step 1
    RootedObject obj(cx, array);
    if (!obj)
        return nullptr;

    AutoCycleDetector detector(cx, obj);
    if (!detector.init())
        return nullptr;

    // NOTE(review): a detected cycle returns nullptr (failure) here rather
    // than producing a string -- confirm this matches the uninlined path.
    if (detector.foundCycle())
        return nullptr;

    // Steps 2 and 3
    uint32_t length;
    if (!GetLengthProperty(cx, obj, &length))
        return nullptr;

    // Steps 4 and 5: materialize the separator as a linear string,
    // defaulting to "," when no separator was supplied.
    RootedLinearString sepstr(cx);
    if (sep) {
        sepstr = sep->ensureLinear(cx);
        if (!sepstr)
            return nullptr;
    } else {
        sepstr = cx->names().comma;
    }

    // Step 6 to 11
    return js::ArrayJoin<false>(cx, obj, sepstr, length);
}
bool
CharCodeAt(JSContext *cx, HandleString str, int32_t index, uint32_t *code)
{

View File

@ -631,6 +631,7 @@ bool ArrayPopDense(JSContext *cx, HandleObject obj, MutableHandleValue rval);
bool ArrayPushDense(JSContext *cx, HandleObject obj, HandleValue v, uint32_t *length);
bool ArrayShiftDense(JSContext *cx, HandleObject obj, MutableHandleValue rval);
JSObject *ArrayConcatDense(JSContext *cx, HandleObject obj1, HandleObject obj2, HandleObject res);
JSString *ArrayJoin(JSContext *cx, HandleObject array, HandleString sep);
bool CharCodeAt(JSContext *cx, HandleString str, int32_t index, uint32_t *code);
JSFlatString *StringFromCharCode(JSContext *cx, int32_t code);

View File

@ -16,28 +16,98 @@
#include "jit/arm/Assembler-arm.h"
#include "jit/RegisterSets.h"
#define HWCAP_USE_HARDFP_ABI (1 << 27)
#if !(defined(ANDROID) || defined(MOZ_B2G)) && !defined(JS_ARM_SIMULATOR)
#define HWCAP_ARMv7 (1 << 28)
#include <asm/hwcap.h>
#if defined(ANDROID) || defined(MOZ_B2G) || defined(JS_ARM_SIMULATOR)
// The Android NDK does not include the hwcap.h kernel header, and it is not
// defined when building the simulator, so inline the header defines we need.
# define HWCAP_VFP (1 << 6)
# define HWCAP_NEON (1 << 12)
# define HWCAP_VFPv3 (1 << 13)
# define HWCAP_VFPv3D16 (1 << 14) /* also set for VFPv4-D16 */
# define HWCAP_VFPv4 (1 << 16)
# define HWCAP_IDIVA (1 << 17)
# define HWCAP_IDIVT (1 << 18)
# define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
# define AT_HWCAP 16
#else
#define HWCAP_VFP (1<<0)
#define HWCAP_VFPv3 (1<<1)
#define HWCAP_VFPv3D16 (1<<2)
#define HWCAP_VFPv4 (1<<3)
#define HWCAP_IDIVA (1<<4)
#define HWCAP_IDIVT (1<<5)
#define HWCAP_NEON (1<<6)
#define HWCAP_ARMv7 (1<<7)
# include <asm/hwcap.h>
# if !defined(HWCAP_IDIVA)
# define HWCAP_IDIVA (1 << 17)
# endif
# if !defined(HWCAP_VFPD32)
# define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
# endif
#endif
// Not part of the HWCAP flag, but we need to know this, and this bit is not
// used so we are using it.
#define HWCAP_ARMv7 (1 << 28)
// Also take a bit to flag the use of the hardfp ABI.
#define HWCAP_USE_HARDFP_ABI (1 << 27)
namespace js {
namespace jit {
// Parse the Linux kernel cpuinfo features. This is also used to parse the
// override features which has some extensions: 'armv7' and 'hardfp'.
//
// |features| is a space- or comma-separated list of feature names; each
// recognized name is folded into the returned HWCAP_* bit mask. When
// |override| is true (ARMHWCAP env var / --arm-hwcap shell flag), unknown
// tokens produce a warning on stderr.
uint32_t
ParseARMCpuFeatures(const char *features, bool override = false)
{
    uint32_t flags = 0;

    // |features| is advanced while scanning; keep the start of the list so
    // the spew below logs the whole input instead of the empty tail.
    const char *featureList = features;

    for (;;) {
        char ch = *features;
        if (!ch) {
            // End of string.
            break;
        }
        if (ch == ' ' || ch == ',') {
            // Skip separator characters.
            features++;
            continue;
        }
        // Find the end of the token.
        const char *end = features + 1;
        for (; ; end++) {
            ch = *end;
            if (!ch || ch == ' ' || ch == ',')
                break;
        }
        size_t count = end - features;
        if (count == 3 && strncmp(features, "vfp", 3) == 0)
            flags |= HWCAP_VFP;
        else if (count == 4 && strncmp(features, "neon", 4) == 0)
            flags |= HWCAP_NEON;
        else if (count == 5 && strncmp(features, "vfpv3", 5) == 0)
            flags |= HWCAP_VFPv3;
        else if (count == 8 && strncmp(features, "vfpv3d16", 8) == 0)
            flags |= HWCAP_VFPv3D16;
        else if (count == 5 && strncmp(features, "vfpv4", 5) == 0)
            flags |= HWCAP_VFPv4;
        else if (count == 5 && strncmp(features, "idiva", 5) == 0)
            flags |= HWCAP_IDIVA;
        else if (count == 5 && strncmp(features, "idivt", 5) == 0)
            flags |= HWCAP_IDIVT;
        else if (count == 6 && strncmp(features, "vfpd32", 6) == 0)
            flags |= HWCAP_VFPD32;
        else if (count == 5 && strncmp(features, "armv7", 5) == 0)
            flags |= HWCAP_ARMv7;
#if defined(JS_ARM_SIMULATOR)
        else if (count == 6 && strncmp(features, "hardfp", 6) == 0)
            flags |= HWCAP_USE_HARDFP_ABI;
#endif
        else if (override)
            fprintf(stderr, "Warning: unexpected ARM feature at: %s\n", features);
        features = end;
    }
    // Fix: log the original list; |features| now points at the trailing NUL,
    // so spewing it would always print an empty string.
    IonSpew(IonSpew_Codegen, "ARM features: '%s'\n flags: 0x%x\n", featureList, flags);
    return flags;
}
// The override flags parsed from the ARMHWCAP environment variable or from the
// --arm-hwcap js shell argument.
static uint32_t armHwCapFlags = 0;
volatile static uint32_t armHwCapFlags = 0;
bool
ParseARMHwCapFlags(const char *armHwCap)
@ -65,6 +135,7 @@ ParseARMHwCapFlags(const char *armHwCap)
" vfpv4 \n"
" idiva \n"
" idivt \n"
" vfpd32 \n"
#if defined(JS_ARM_SIMULATOR)
" hardfp \n"
#endif
@ -74,168 +145,108 @@ ParseARMHwCapFlags(const char *armHwCap)
/*NOTREACHED*/
}
// Canonicalize each token to have a leading and trailing space.
const char *start = armHwCap; // Token start.
for (;;) {
char ch = *start;
if (!ch) {
// End of string.
break;
}
if (ch == ' ' || ch == ',') {
// Skip separator characters.
start++;
continue;
}
// Find the end of the token.
const char *end = start + 1;
for (; ; end++) {
ch = *end;
if (!ch || ch == ' ' || ch == ',')
break;
}
size_t count = end - start;
if (count == 3 && strncmp(start, "vfp", 3) == 0)
flags |= HWCAP_VFP;
else if (count == 5 && strncmp(start, "vfpv3", 5) == 0)
flags |= HWCAP_VFPv3;
else if (count == 8 && strncmp(start, "vfpv3d16", 8) == 0)
flags |= HWCAP_VFPv3D16;
else if (count == 5 && strncmp(start, "vfpv4", 5) == 0)
flags |= HWCAP_VFPv4;
else if (count == 5 && strncmp(start, "idiva", 5) == 0)
flags |= HWCAP_IDIVA;
else if (count == 5 && strncmp(start, "idivt", 5) == 0)
flags |= HWCAP_IDIVT;
else if (count == 4 && strncmp(start, "neon", 4) == 0)
flags |= HWCAP_NEON;
else if (count == 5 && strncmp(start, "armv7", 5) == 0)
flags |= HWCAP_ARMv7;
#if defined(JS_ARM_SIMULATOR)
else if (count == 6 && strncmp(start, "hardfp", 6) == 0)
flags |= HWCAP_USE_HARDFP_ABI;
#endif
else
fprintf(stderr, "Warning: unexpected ARMHWCAP flag at: %s\n", start);
start = end;
}
#ifdef DEBUG
IonSpew(IonSpew_Codegen, "ARMHWCAP: '%s'\n flags: 0x%x\n", armHwCap, flags);
#endif
armHwCapFlags = flags;
armHwCapFlags = ParseARMCpuFeatures(armHwCap, /* override = */ true);
return true;
}
uint32_t GetARMFlags()
{
static bool isSet = false;
static uint32_t flags = 0;
volatile static bool isSet = false;
volatile static uint32_t flags = 0;
if (isSet)
return flags;
const char *env = getenv("ARMHWCAP");
if (ParseARMHwCapFlags(env) || armHwCapFlags) {
isSet = true;
flags = armHwCapFlags;
isSet = true;
return flags;
}
#ifdef JS_CODEGEN_ARM_HARDFP
flags |= HWCAP_USE_HARDFP_ABI;
#endif
#ifdef JS_ARM_SIMULATOR
isSet = true;
flags = HWCAP_ARMv7 | HWCAP_VFP | HWCAP_VFPv3 | HWCAP_VFPv4 | HWCAP_NEON;
return flags;
#else
#if WTF_OS_LINUX
#if defined(WTF_OS_LINUX) || defined(WTF_OS_ANDROID) || defined(MOZ_B2G)
bool readAuxv = false;
int fd = open("/proc/self/auxv", O_RDONLY);
if (fd > 0) {
Elf32_auxv_t aux;
while (read(fd, &aux, sizeof(Elf32_auxv_t))) {
struct { uint32_t a_type; uint32_t a_val; } aux;
while (read(fd, &aux, sizeof(aux))) {
if (aux.a_type == AT_HWCAP) {
close(fd);
flags = aux.a_un.a_val;
isSet = true;
#if defined(__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
// This should really be detected at runtime, but /proc/*/auxv
// doesn't seem to carry the ISA. We could look in /proc/cpuinfo
// as well, but the chances that it will be different from this
// are low.
flags |= HWCAP_ARMv7;
#endif
return flags;
flags = aux.a_val;
readAuxv = true;
break;
}
}
close(fd);
}
#if defined(__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
flags = HWCAP_ARMv7;
#endif
isSet = true;
return flags;
#elif defined(WTF_OS_ANDROID) || defined(MOZ_B2G)
FILE *fp = fopen("/proc/cpuinfo", "r");
if (!fp)
return false;
char buf[1024];
memset(buf, 0, sizeof(buf));
size_t len = fread(buf, sizeof(char), sizeof(buf) - 2, fp);
fclose(fp);
// Canonicalize each token to have a leading and trailing space.
buf[len] = ' ';
buf[len + 1] = '\0';
for (size_t i = 0; i < len; i++) {
char ch = buf[i];
if (!ch)
break;
else if (ch == '\n')
buf[i] = 0x20;
else
buf[i] = ch;
if (!readAuxv) {
// Read the Features if the auxv is not available.
FILE *fp = fopen("/proc/cpuinfo", "r");
if (fp) {
char buf[1024];
memset(buf, 0, sizeof(buf));
size_t len = fread(buf, sizeof(char), sizeof(buf) - 1, fp);
fclose(fp);
buf[len] = '\0';
char *featureList = strstr(buf, "Features");
if (featureList) {
if (char *featuresEnd = strstr(featureList, "\n"))
*featuresEnd = '\0';
flags = ParseARMCpuFeatures(featureList + 8);
}
if (strstr(buf, "ARMv7"))
flags |= HWCAP_ARMv7;
}
}
#endif
if (strstr(buf, " vfp "))
flags |= HWCAP_VFP;
// If compiled to use specialized features then these features can be
// assumed to be present otherwise the compiler would fail to run.
if (strstr(buf, " vfpv3 "))
#ifdef JS_CODEGEN_ARM_HARDFP
// Compiled to use the hardfp ABI.
flags |= HWCAP_USE_HARDFP_ABI;
#endif
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
// Compiled to use VFP instructions so assume VFP support.
flags |= HWCAP_VFP;
#endif
#if defined(__ARM_ARCH_7__) || defined (__ARM_ARCH_7A__)
// Compiled to use ARMv7 instructions so assume the ARMv7 arch.
flags |= HWCAP_ARMv7;
#endif
#endif // JS_ARM_SIMULATOR
// Canonicalize the flags. These rules are also applied to the features
// supplied for simulation.
// The VFPv3 feature is expected when the VFPv3D16 is reported, but add it
// just in case of a kernel difference in feature reporting.
if (flags & HWCAP_VFPv3D16)
flags |= HWCAP_VFPv3;
if (strstr(buf, " vfpv3d16 "))
flags |= HWCAP_VFPv3D16;
if (strstr(buf, " vfpv4 "))
flags |= HWCAP_VFPv4;
if (strstr(buf, " idiva "))
flags |= HWCAP_IDIVA;
if (strstr(buf, " idivt "))
flags |= HWCAP_IDIVT;
if (strstr(buf, " neon "))
flags |= HWCAP_NEON;
// Not part of the HWCAP flag, but we need to know this, and we're not using
// that bit, so... we are using it.
if (strstr(buf, "ARMv7"))
// If VFPv3 or Neon is supported then this must be an ARMv7.
if (flags & (HWCAP_VFPv3 | HWCAP_NEON))
flags |= HWCAP_ARMv7;
#ifdef DEBUG
IonSpew(IonSpew_Codegen, "ARMHWCAP: '%s'\n flags: 0x%x\n", buf, flags);
#endif
// Some old kernels report VFP and not VFPv3, but if ARMv7 then it must be
// VFPv3.
if (flags & HWCAP_VFP && flags & HWCAP_ARMv7)
flags |= HWCAP_VFPv3;
// Older kernels do not implement the HWCAP_VFPD32 flag.
if ((flags & HWCAP_VFPv3) && !(flags & HWCAP_VFPv3D16))
flags |= HWCAP_VFPD32;
IonSpew(IonSpew_Codegen, "ARM HWCAP: 0x%x\n", flags);
isSet = true;
return flags;
#endif
return 0;
#endif // JS_ARM_SIMULATOR
}
bool HasMOVWT()
@ -253,20 +264,12 @@ bool HasVFP()
bool Has32DP()
{
return (GetARMFlags() & HWCAP_VFPv3) && !(GetARMFlags() & HWCAP_VFPv3D16);
}
bool UseConvReg()
{
return Has32DP();
return GetARMFlags() & HWCAP_VFPD32;
}
bool HasIDIV()
{
#if defined HWCAP_IDIVA
return GetARMFlags() & HWCAP_IDIVA;
#else
return false;
#endif
}
// This is defined in the header and inlined when not using the simulator.

View File

@ -180,8 +180,12 @@ CodeGeneratorARM::bailoutIf(Assembler::Condition condition, LSnapshot *snapshot)
// We could not use a jump table, either because all bailout IDs were
// reserved, or a jump table is not optimal for this frame size or
// platform. Whatever, we will generate a lazy bailout.
InlineScriptTree *tree = snapshot->mir()->block()->trackedTree();
OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
if (!addOutOfLineCode(ool))
// All bailout code is associated with the bytecodeSite of the block we are
// bailing out from.
if (!addOutOfLineCode(ool, BytecodeSite(tree, tree->script()->code())))
return false;
masm.ma_b(ool->entry(), condition);
@ -206,10 +210,13 @@ CodeGeneratorARM::bailoutFrom(Label *label, LSnapshot *snapshot)
frameClass_.frameSize() == masm.framePushed());
// On ARM we don't use a bailout table.
InlineScriptTree *tree = snapshot->mir()->block()->trackedTree();
OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot, masm.framePushed());
if (!addOutOfLineCode(ool)) {
// All bailout code is associated with the bytecodeSite of the block we are
// bailing out from.
if (!addOutOfLineCode(ool, BytecodeSite(tree, tree->script()->code())))
return false;
}
masm.retarget(label, ool->entry());
@ -1104,7 +1111,7 @@ CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch *mir, Register index, Reg
if (!ool->addCodeLabel(cl))
return false;
}
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, mir))
return false;
return true;
@ -1253,13 +1260,15 @@ CodeGeneratorARM::emitRoundDouble(FloatRegister src, Register dest, Label *fail)
bool
CodeGeneratorARM::visitTruncateDToInt32(LTruncateDToInt32 *ins)
{
return emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()));
return emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
ins->mir());
}
bool
CodeGeneratorARM::visitTruncateFToInt32(LTruncateFToInt32 *ins)
{
return emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()));
return emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
ins->mir());
}
static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };

View File

@ -227,8 +227,13 @@ class CodeGeneratorARM : public CodeGeneratorShared
public:
// Unimplemented SIMD instructions
bool visitSimdValueX4(LSimdValueX4 *lir) { MOZ_ASSUME_UNREACHABLE("NYI"); }
bool visitInt32x4(LInt32x4 *ins) { MOZ_ASSUME_UNREACHABLE("NYI"); }
bool visitFloat32x4(LFloat32x4 *ins) { MOZ_ASSUME_UNREACHABLE("NYI"); }
bool visitSimdExtractElementI(LSimdExtractElementI *ins) { MOZ_ASSUME_UNREACHABLE("NYI"); }
bool visitSimdExtractElementF(LSimdExtractElementF *ins) { MOZ_ASSUME_UNREACHABLE("NYI"); }
bool visitSimdBinaryArithIx4(LSimdBinaryArithIx4 *lir) { MOZ_ASSUME_UNREACHABLE("NYI"); }
bool visitSimdBinaryArithFx4(LSimdBinaryArithFx4 *lir) { MOZ_ASSUME_UNREACHABLE("NYI"); }
};
typedef CodeGeneratorARM CodeGeneratorSpecific;

View File

@ -1332,6 +1332,16 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void loadPrivate(const Address &address, Register dest);
void loadAlignedInt32x4(const Address &addr, FloatRegister dest) { MOZ_ASSUME_UNREACHABLE("NYI"); }
void storeAlignedInt32x4(FloatRegister src, Address addr) { MOZ_ASSUME_UNREACHABLE("NYI"); }
void loadUnalignedInt32x4(const Address &addr, FloatRegister dest) { MOZ_ASSUME_UNREACHABLE("NYI"); }
void storeUnalignedInt32x4(FloatRegister src, Address addr) { MOZ_ASSUME_UNREACHABLE("NYI"); }
void loadAlignedFloat32x4(const Address &addr, FloatRegister dest) { MOZ_ASSUME_UNREACHABLE("NYI"); }
void storeAlignedFloat32x4(FloatRegister src, Address addr) { MOZ_ASSUME_UNREACHABLE("NYI"); }
void loadUnalignedFloat32x4(const Address &addr, FloatRegister dest) { MOZ_ASSUME_UNREACHABLE("NYI"); }
void storeUnalignedFloat32x4(FloatRegister src, Address addr) { MOZ_ASSUME_UNREACHABLE("NYI"); }
void loadDouble(const Address &addr, FloatRegister dest);
void loadDouble(const BaseIndex &src, FloatRegister dest);

View File

@ -1365,13 +1365,15 @@ CodeGeneratorMIPS::visitRoundF(LRoundF *lir)
bool
CodeGeneratorMIPS::visitTruncateDToInt32(LTruncateDToInt32 *ins)
{
return emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()));
return emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
ins->mir());
}
bool
CodeGeneratorMIPS::visitTruncateFToInt32(LTruncateFToInt32 *ins)
{
return emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()));
return emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
ins->mir());
}
static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };

View File

@ -340,6 +340,14 @@ class AssemblerX86Shared : public AssemblerShared
label->bind(masm.size());
masm.floatConstant(f);
}
    // Bind |label| to the current assembler offset, then emit the int32x4
    // constant's payload there.
    void writeInt32x4Constant(const SimdConstant &v, Label *label) {
        label->bind(masm.size());
        masm.int32x4Constant(v.asInt32x4());
    }
    // Bind |label| to the current assembler offset, then emit the float32x4
    // constant's payload there.
    void writeFloat32x4Constant(const SimdConstant &v, Label *label) {
        label->bind(masm.size());
        masm.float32x4Constant(v.asFloat32x4());
    }
    // Move a 32-bit immediate into a general-purpose register (MOVL).
    void movl(Imm32 imm32, Register dest) {
        masm.movl_i32r(imm32.value, dest.code());
    }
@ -1463,6 +1471,106 @@ class AssemblerX86Shared : public AssemblerShared
JS_ASSERT(HasSSE2());
masm.movd_rr(src.code(), dest.code());
}
    // Packed add of four 32-bit integer lanes: dest += src (SSE2 PADDD).
    // |src| may be an XMM register or a memory operand.
    void paddd(const Operand &src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        switch (src.kind()) {
          case Operand::FPREG:
            masm.paddd_rr(src.fpu(), dest.code());
            break;
          case Operand::MEM_REG_DISP:
            masm.paddd_mr(src.disp(), src.base(), dest.code());
            break;
          case Operand::MEM_ADDRESS32:
            masm.paddd_mr(src.address(), dest.code());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    // Packed subtract of four 32-bit integer lanes: dest -= src (SSE2 PSUBD).
    void psubd(const Operand &src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        switch (src.kind()) {
          case Operand::FPREG:
            masm.psubd_rr(src.fpu(), dest.code());
            break;
          case Operand::MEM_REG_DISP:
            masm.psubd_mr(src.disp(), src.base(), dest.code());
            break;
          case Operand::MEM_ADDRESS32:
            masm.psubd_mr(src.address(), dest.code());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    // Packed single-precision float add: dest += src (ADDPS).
    void addps(const Operand &src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        switch (src.kind()) {
          case Operand::FPREG:
            masm.addps_rr(src.fpu(), dest.code());
            break;
          case Operand::MEM_REG_DISP:
            masm.addps_mr(src.disp(), src.base(), dest.code());
            break;
          case Operand::MEM_ADDRESS32:
            masm.addps_mr(src.address(), dest.code());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    // Packed single-precision float subtract: dest -= src (SUBPS).
    void subps(const Operand &src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        switch (src.kind()) {
          case Operand::FPREG:
            masm.subps_rr(src.fpu(), dest.code());
            break;
          case Operand::MEM_REG_DISP:
            masm.subps_mr(src.disp(), src.base(), dest.code());
            break;
          case Operand::MEM_ADDRESS32:
            masm.subps_mr(src.address(), dest.code());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
    // Packed single-precision float multiply: dest *= src (MULPS).
    void mulps(const Operand &src, FloatRegister dest) {
        JS_ASSERT(HasSSE2());
        switch (src.kind()) {
          case Operand::FPREG:
            masm.mulps_rr(src.fpu(), dest.code());
            break;
          case Operand::MEM_REG_DISP:
            masm.mulps_mr(src.disp(), src.base(), dest.code());
            break;
          case Operand::MEM_ADDRESS32:
            masm.mulps_mr(src.address(), dest.code());
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
        }
    }
// Packed single-precision float divide (DIVPS): dest.f32x4 /= src. SSE2 asserted.
void divps(const Operand &src, FloatRegister dest) {
    JS_ASSERT(HasSSE2());
    // Dispatch on the operand's addressing form.
    if (src.kind() == Operand::FPREG)
        masm.divps_rr(src.fpu(), dest.code());
    else if (src.kind() == Operand::MEM_REG_DISP)
        masm.divps_mr(src.disp(), src.base(), dest.code());
    else if (src.kind() == Operand::MEM_ADDRESS32)
        masm.divps_mr(src.address(), dest.code());
    else
        MOZ_ASSUME_UNREACHABLE("unexpected operand kind");
}
// Bitwise XOR of two 128-bit SIMD registers (PXOR); pxor(r, r) zeroes r.
void pxor(FloatRegister src, FloatRegister dest) {
JS_ASSERT(HasSSE2());
masm.pxor_rr(src.code(), dest.code());
}
void pshufd(uint32_t mask, FloatRegister src, FloatRegister dest) {
JS_ASSERT(HasSSE2());
masm.pshufd_irr(mask, src.code(), dest.code());

View File

@ -8,9 +8,11 @@
#include "mozilla/DebugOnly.h"
#include "jit/CompactBuffer.h"
#include "jit/IonCaches.h"
#include "jit/IonMacroAssembler.h"
#include "jit/IonSpewer.h"
#include "jit/JitcodeMap.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/ParallelFunctions.h"
@ -49,6 +51,12 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph, Mac
pushedArgs_(0),
#endif
lastOsiPointOffset_(0),
nativeToBytecodeMap_(nullptr),
nativeToBytecodeMapSize_(0),
nativeToBytecodeTableOffset_(0),
nativeToBytecodeNumRegions_(0),
nativeToBytecodeScriptList_(nullptr),
nativeToBytecodeScriptListLength_(0),
sps_(&GetIonContext()->runtime->spsProfiler(), &lastNotInlinedPC_),
osrEntryOffset_(0),
skipArgCheckEntryOffset_(0),
@ -123,6 +131,13 @@ CodeGeneratorShared::generateOutOfLineCode()
{
JSScript *topScript = sps_.getPushed();
for (size_t i = 0; i < outOfLineCode_.length(); i++) {
// Add native => bytecode mapping entries for OOL sites.
// Not enabled on asm.js yet since asm doesn't contain bytecode mappings.
if (!gen->compilingAsmJS()) {
if (!addNativeToBytecodeEntry(outOfLineCode_[i]->bytecodeSite()))
return false;
}
if (!gen->alloc().ensureBallast())
return false;
@ -147,20 +162,134 @@ CodeGeneratorShared::generateOutOfLineCode()
}
bool
CodeGeneratorShared::addOutOfLineCode(OutOfLineCode *code)
CodeGeneratorShared::addOutOfLineCode(OutOfLineCode *code, const MInstruction *mir)
{
JS_ASSERT(mir);
return addOutOfLineCode(code, mir->trackedSite());
}
bool
CodeGeneratorShared::addOutOfLineCode(OutOfLineCode *code, const BytecodeSite &site)
{
code->setFramePushed(masm.framePushed());
// If an OOL instruction adds another OOL instruction, then use the original
// instruction's script/pc instead of the basic block's that we're on
// because they're probably not relevant any more.
if (oolIns)
code->setSource(oolIns->script(), oolIns->pc());
else
code->setSource(current ? current->mir()->info().script() : nullptr, lastPC_);
JS_ASSERT_IF(code->script(), code->script()->containsPC(code->pc()));
code->setBytecodeSite(site);
JS_ASSERT_IF(!gen->compilingAsmJS(), code->script()->containsPC(code->pc()));
return outOfLineCode_.append(code);
}
// Record that native code emitted from the current masm offset onward maps to
// the given bytecode site. Consecutive records are coalesced: a repeat of the
// same site extends the previous region in place, and a zero-length previous
// region is overwritten (and possibly merged with its predecessor). Returns
// false only on append (OOM) failure.
bool
CodeGeneratorShared::addNativeToBytecodeEntry(const BytecodeSite &site)
{
// Skip the table entirely if profiling is not enabled.
if (!isNativeToBytecodeMapEnabled())
return true;
JS_ASSERT(site.tree());
JS_ASSERT(site.pc());
InlineScriptTree *tree = site.tree();
jsbytecode *pc = site.pc();
// Offsets are recorded in emission order, so they must be monotone.
uint32_t nativeOffset = masm.currentOffset();
JS_ASSERT_IF(nativeToBytecodeList_.empty(), nativeOffset == 0);
if (!nativeToBytecodeList_.empty()) {
size_t lastIdx = nativeToBytecodeList_.length() - 1;
NativeToBytecode &lastEntry = nativeToBytecodeList_[lastIdx];
JS_ASSERT(nativeOffset >= lastEntry.nativeOffset.offset());
// If the new entry is for the same inlineScriptTree and same
// bytecodeOffset, but the nativeOffset has changed, do nothing.
// The same site just generated some more code.
if (lastEntry.tree == tree && lastEntry.pc == pc) {
IonSpew(IonSpew_Profiling, " => In-place update [%u-%u]",
lastEntry.nativeOffset.offset(), nativeOffset);
return true;
}
// If the new entry is for the same native offset, then update the
// previous entry with the new bytecode site, since the previous
// bytecode site did not generate any native code.
if (lastEntry.nativeOffset.offset() == nativeOffset) {
lastEntry.tree = tree;
lastEntry.pc = pc;
IonSpew(IonSpew_Profiling, " => Overwriting zero-length native region.");
// This overwrite might have made the entry merge-able with a
// previous one. If so, merge it.
if (lastIdx > 0) {
NativeToBytecode &nextToLastEntry = nativeToBytecodeList_[lastIdx - 1];
if (nextToLastEntry.tree == lastEntry.tree && nextToLastEntry.pc == lastEntry.pc) {
IonSpew(IonSpew_Profiling, " => Merging with previous region");
nativeToBytecodeList_.erase(&lastEntry);
}
}
dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
return true;
}
}
// Otherwise, some native code was generated for the previous bytecode site.
// Add a new entry for code that is about to be generated.
NativeToBytecode entry;
entry.nativeOffset = CodeOffsetLabel(nativeOffset);
entry.tree = tree;
entry.pc = pc;
if (!nativeToBytecodeList_.append(entry))
return false;
IonSpew(IonSpew_Profiling, " => Push new entry.");
dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
return true;
}
// DEBUG-only: spew every native->bytecode entry for the compilation's
// outermost script. No-op in release builds.
void
CodeGeneratorShared::dumpNativeToBytecodeEntries()
{
#ifdef DEBUG
InlineScriptTree *topTree = gen->info().inlineScriptTree();
IonSpewStart(IonSpew_Profiling, "Native To Bytecode Entries for %s:%d\n",
topTree->script()->filename(), topTree->script()->lineno());
for (unsigned i = 0; i < nativeToBytecodeList_.length(); i++)
dumpNativeToBytecodeEntry(i);
#endif
}
// DEBUG-only: spew one native->bytecode entry, including the delta to the
// following entry (when it exists and is in the same tree) and the inline
// caller chain. No-op in release builds.
void
CodeGeneratorShared::dumpNativeToBytecodeEntry(uint32_t idx)
{
#ifdef DEBUG
NativeToBytecode &ref = nativeToBytecodeList_[idx];
InlineScriptTree *tree = ref.tree;
JSScript *script = tree->script();
uint32_t nativeOffset = ref.nativeOffset.offset();
unsigned nativeDelta = 0;
unsigned pcDelta = 0;
// Deltas are computed against the next entry; pcDelta only makes sense
// when both entries belong to the same inline script tree.
if (idx + 1 < nativeToBytecodeList_.length()) {
NativeToBytecode *nextRef = &ref + 1;
nativeDelta = nextRef->nativeOffset.offset() - nativeOffset;
if (nextRef->tree == ref.tree)
pcDelta = nextRef->pc - ref.pc;
}
IonSpewStart(IonSpew_Profiling, " %08x [+%-6d] => %-6d [%-4d] {%-10s} (%s:%d",
ref.nativeOffset.offset(),
nativeDelta,
ref.pc - script->code(),
pcDelta,
js_CodeName[JSOp(*ref.pc)],
script->filename(), script->lineno());
// Walk outward through the inline callers for context.
for (tree = tree->caller(); tree; tree = tree->caller()) {
IonSpewCont(IonSpew_Profiling, " <= %s:%d", tree->script()->filename(),
tree->script()->lineno());
}
IonSpewCont(IonSpew_Profiling, ")");
IonSpewFin(IonSpew_Profiling);
#endif
}
// see OffsetOfFrameSlot
static inline int32_t
ToStackIndex(LAllocation *a)
@ -415,6 +544,206 @@ CodeGeneratorShared::encodeSafepoints()
}
}
// Build a deduplicated flat array of every JSScript referenced by this
// compilation's inline script tree (depth-first walk: children, then next
// siblings up the caller chain). On success the array and its length are
// stored in nativeToBytecodeScriptList_ / nativeToBytecodeScriptListLength_.
// Returns false on OOM.
bool
CodeGeneratorShared::createNativeToBytecodeScriptList(JSContext *cx)
{
    js::Vector<JSScript *, 0, SystemAllocPolicy> scriptList;
    InlineScriptTree *tree = gen->info().inlineScriptTree();
    for (;;) {
        // Add the current tree's script if it has not been seen yet.
        // (Linear scan: the script list is expected to be small.)
        bool found = false;
        for (uint32_t i = 0; i < scriptList.length(); i++) {
            if (scriptList[i] == tree->script()) {
                found = true;
                break;
            }
        }
        if (!found) {
            if (!scriptList.append(tree->script()))
                return false;
        }

        // Process rest of tree
        // If children exist, emit children.
        if (tree->hasChildren()) {
            tree = tree->firstChild();
            continue;
        }

        // Otherwise, find the first tree up the chain (including this one)
        // that contains a next sibling.
        while (!tree->hasNextCallee() && tree->hasCaller())
            tree = tree->caller();

        // If we found a sibling, use it.
        if (tree->hasNextCallee()) {
            tree = tree->nextCallee();
            continue;
        }

        // Otherwise, we must have reached the top without finding any siblings.
        JS_ASSERT(tree->isOutermostCaller());
        break;
    }

    // Allocate the flat array. Elements are JSScript *, so size by
    // sizeof(JSScript *); the original sizeof(JSScript **) was a typo that
    // only worked because both pointer types have the same size.
    JSScript **data = (JSScript **) cx->malloc_(scriptList.length() * sizeof(JSScript *));
    if (!data)
        return false;

    for (uint32_t i = 0; i < scriptList.length(); i++)
        data[i] = scriptList[i];

    // Success.
    nativeToBytecodeScriptListLength_ = scriptList.length();
    nativeToBytecodeScriptList_ = data;
    return true;
}
// Serialize the accumulated native->bytecode entries into a compact map
// (payload followed by a JitcodeIonTable) allocated with cx->malloc_, and
// record the buffer, its size, the table offset and region count in the
// corresponding members. Must be called exactly once (asserted below).
// Returns false on OOM or table-write failure.
// NOTE(review): on the failure paths after createNativeToBytecodeScriptList
// succeeds, nativeToBytecodeScriptList_ is not freed here — presumably the
// caller or destructor cleans it up; confirm.
bool
CodeGeneratorShared::generateCompactNativeToBytecodeMap(JSContext *cx, JitCode *code)
{
JS_ASSERT(nativeToBytecodeScriptListLength_ == 0);
JS_ASSERT(nativeToBytecodeScriptList_ == nullptr);
JS_ASSERT(nativeToBytecodeMap_ == nullptr);
JS_ASSERT(nativeToBytecodeMapSize_ == 0);
JS_ASSERT(nativeToBytecodeTableOffset_ == 0);
JS_ASSERT(nativeToBytecodeNumRegions_ == 0);
// Iterate through all nativeToBytecode entries, fix up their masm offsets.
for (unsigned i = 0; i < nativeToBytecodeList_.length(); i++) {
NativeToBytecode &entry = nativeToBytecodeList_[i];
// Fixup code offsets.
entry.nativeOffset = CodeOffsetLabel(masm.actualOffset(entry.nativeOffset.offset()));
}
if (!createNativeToBytecodeScriptList(cx))
return false;
JS_ASSERT(nativeToBytecodeScriptListLength_ > 0);
JS_ASSERT(nativeToBytecodeScriptList_ != nullptr);
CompactBufferWriter writer;
uint32_t tableOffset = 0;
uint32_t numRegions = 0;
if (!JitcodeIonTable::WriteIonTable(
writer, nativeToBytecodeScriptList_, nativeToBytecodeScriptListLength_,
&nativeToBytecodeList_[0],
&nativeToBytecodeList_[0] + nativeToBytecodeList_.length(),
&tableOffset, &numRegions))
{
return false;
}
JS_ASSERT(tableOffset > 0);
JS_ASSERT(numRegions > 0);
// Writer is done, copy it to sized buffer.
uint8_t *data = (uint8_t *) cx->malloc_(writer.length());
if (!data)
return false;
memcpy(data, writer.buffer(), writer.length());
nativeToBytecodeMap_ = data;
nativeToBytecodeMapSize_ = writer.length();
nativeToBytecodeTableOffset_ = tableOffset;
nativeToBytecodeNumRegions_ = numRegions;
// Sanity-check the serialized form (DEBUG builds only; no-op otherwise).
verifyCompactNativeToBytecodeMap(code);
IonSpew(IonSpew_Profiling, "Compact Native To Bytecode Map [%p-%p]",
data, data + nativeToBytecodeMapSize_);
return true;
}
// DEBUG-only consistency check of the compact map just produced by
// generateCompactNativeToBytecodeMap: validates table alignment, region
// count, the monotonic back-offsets of each region, and that every decoded
// script index, pc offset, and native offset stays in bounds. No-op in
// release builds.
void
CodeGeneratorShared::verifyCompactNativeToBytecodeMap(JitCode *code)
{
#ifdef DEBUG
JS_ASSERT(nativeToBytecodeScriptListLength_ > 0);
JS_ASSERT(nativeToBytecodeScriptList_ != nullptr);
JS_ASSERT(nativeToBytecodeMap_ != nullptr);
JS_ASSERT(nativeToBytecodeMapSize_ > 0);
JS_ASSERT(nativeToBytecodeTableOffset_ > 0);
JS_ASSERT(nativeToBytecodeNumRegions_ > 0);
// The pointer to the table must be 4-byte aligned
const uint8_t *tablePtr = nativeToBytecodeMap_ + nativeToBytecodeTableOffset_;
JS_ASSERT(uintptr_t(tablePtr) % sizeof(uint32_t) == 0);
// Verify that numRegions was encoded correctly.
const JitcodeIonTable *ionTable = reinterpret_cast<const JitcodeIonTable *>(tablePtr);
JS_ASSERT(ionTable->numRegions() == nativeToBytecodeNumRegions_);
// Region offset for first region should be at the start of the payload region.
// Since the offsets are backward from the start of the table, the first entry
// backoffset should be equal to the forward table offset from the start of the
// allocated data.
JS_ASSERT(ionTable->regionOffset(0) == nativeToBytecodeTableOffset_);
// Verify each region.
for (uint32_t i = 0; i < ionTable->numRegions(); i++) {
// Back-offset must point into the payload region preceding the table, not before it.
JS_ASSERT(ionTable->regionOffset(i) <= nativeToBytecodeTableOffset_);
// Back-offset must point to a later area in the payload region than previous
// back-offset. This means that back-offsets decrease monotonically.
JS_ASSERT_IF(i > 0, ionTable->regionOffset(i) < ionTable->regionOffset(i - 1));
JitcodeRegionEntry entry = ionTable->regionEntry(i);
// Ensure native code offset for region falls within jitcode.
JS_ASSERT(entry.nativeOffset() <= code->instructionsSize());
// Read out script/pc stack and verify.
JitcodeRegionEntry::ScriptPcIterator scriptPcIter = entry.scriptPcIterator();
while (scriptPcIter.hasMore()) {
uint32_t scriptIdx = 0, pcOffset = 0;
scriptPcIter.readNext(&scriptIdx, &pcOffset);
// Ensure scriptIdx refers to a valid script in the list.
JS_ASSERT(scriptIdx < nativeToBytecodeScriptListLength_);
JSScript *script = nativeToBytecodeScriptList_[scriptIdx];
// Ensure pcOffset falls within the script.
JS_ASSERT(pcOffset < script->length());
}
// Obtain the original nativeOffset and pcOffset and script.
uint32_t curNativeOffset = entry.nativeOffset();
JSScript *script = nullptr;
uint32_t curPcOffset = 0;
{
uint32_t scriptIdx = 0;
scriptPcIter.reset();
scriptPcIter.readNext(&scriptIdx, &curPcOffset);
script = nativeToBytecodeScriptList_[scriptIdx];
}
// Read out nativeDeltas and pcDeltas and verify.
JitcodeRegionEntry::DeltaIterator deltaIter = entry.deltaIterator();
while (deltaIter.hasMore()) {
uint32_t nativeDelta = 0;
int32_t pcDelta = 0;
deltaIter.readNext(&nativeDelta, &pcDelta);
curNativeOffset += nativeDelta;
curPcOffset = uint32_t(int32_t(curPcOffset) + pcDelta);
// Ensure that nativeOffset still falls within jitcode after delta.
JS_ASSERT(curNativeOffset <= code->instructionsSize());
// Ensure that pcOffset still falls within bytecode after delta.
JS_ASSERT(curPcOffset < script->length());
}
}
#endif // DEBUG
}
bool
CodeGeneratorShared::markSafepoint(LInstruction *ins)
{
@ -766,18 +1095,18 @@ class OutOfLineTruncateSlow : public OutOfLineCodeBase<CodeGeneratorShared>
};
OutOfLineCode *
CodeGeneratorShared::oolTruncateDouble(FloatRegister src, Register dest)
CodeGeneratorShared::oolTruncateDouble(FloatRegister src, Register dest, MInstruction *mir)
{
OutOfLineTruncateSlow *ool = new(alloc()) OutOfLineTruncateSlow(src, dest);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, mir))
return nullptr;
return ool;
}
bool
CodeGeneratorShared::emitTruncateDouble(FloatRegister src, Register dest)
CodeGeneratorShared::emitTruncateDouble(FloatRegister src, Register dest, MInstruction *mir)
{
OutOfLineCode *ool = oolTruncateDouble(src, dest);
OutOfLineCode *ool = oolTruncateDouble(src, dest, mir);
if (!ool)
return false;
@ -787,10 +1116,10 @@ CodeGeneratorShared::emitTruncateDouble(FloatRegister src, Register dest)
}
bool
CodeGeneratorShared::emitTruncateFloat32(FloatRegister src, Register dest)
CodeGeneratorShared::emitTruncateFloat32(FloatRegister src, Register dest, MInstruction *mir)
{
OutOfLineTruncateSlow *ool = new(alloc()) OutOfLineTruncateSlow(src, dest, true);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, mir))
return false;
masm.branchTruncateFloat32(src, dest, ool->entry());

View File

@ -97,11 +97,32 @@ class CodeGeneratorShared : public LInstructionVisitor
js::Vector<CodeOffsetLabel, 0, SystemAllocPolicy> patchableTLScripts_;
#endif
public:
struct NativeToBytecode {
CodeOffsetLabel nativeOffset;
InlineScriptTree *tree;
jsbytecode *pc;
};
protected:
js::Vector<NativeToBytecode, 0, SystemAllocPolicy> nativeToBytecodeList_;
uint8_t *nativeToBytecodeMap_;
uint32_t nativeToBytecodeMapSize_;
uint32_t nativeToBytecodeTableOffset_;
uint32_t nativeToBytecodeNumRegions_;
JSScript **nativeToBytecodeScriptList_;
uint32_t nativeToBytecodeScriptListLength_;
// When profiling is enabled, this is the instrumentation manager which
// maintains state of what script is currently being generated (for inline
// scripts) and when instrumentation needs to be emitted or skipped.
IonInstrumentation sps_;
bool isNativeToBytecodeMapEnabled() {
return gen->isNativeToBytecodeMapEnabled();
}
protected:
// The offset of the first instruction of the OSR entry block from the
// beginning of the code buffer.
@ -223,6 +244,10 @@ class CodeGeneratorShared : public LInstructionVisitor
void verifyOsiPointRegs(LSafepoint *safepoint);
#endif
bool addNativeToBytecodeEntry(const BytecodeSite &site);
void dumpNativeToBytecodeEntries();
void dumpNativeToBytecodeEntry(uint32_t idx);
public:
MIRGenerator &mirGen() const {
return *gen;
@ -289,6 +314,11 @@ class CodeGeneratorShared : public LInstructionVisitor
// safepoint offsets.
void encodeSafepoints();
// Fixup offsets of native-to-bytecode map.
bool createNativeToBytecodeScriptList(JSContext *cx);
bool generateCompactNativeToBytecodeMap(JSContext *cx, JitCode *code);
void verifyCompactNativeToBytecodeMap(JitCode *code);
// Mark the safepoint on |ins| as corresponding to the current assembler location.
// The location should be just after a call.
bool markSafepoint(LInstruction *ins);
@ -307,9 +337,9 @@ class CodeGeneratorShared : public LInstructionVisitor
// an invalidation marker.
void ensureOsiSpace();
OutOfLineCode *oolTruncateDouble(FloatRegister src, Register dest);
bool emitTruncateDouble(FloatRegister src, Register dest);
bool emitTruncateFloat32(FloatRegister src, Register dest);
OutOfLineCode *oolTruncateDouble(FloatRegister src, Register dest, MInstruction *mir);
bool emitTruncateDouble(FloatRegister src, Register dest, MInstruction *mir);
bool emitTruncateFloat32(FloatRegister src, Register dest, MInstruction *mir);
void emitPreBarrier(Register base, const LAllocation *index, MIRType type);
void emitPreBarrier(Address address, MIRType type);
@ -438,7 +468,8 @@ class CodeGeneratorShared : public LInstructionVisitor
ReciprocalMulConstants computeDivisionConstants(int d);
protected:
bool addOutOfLineCode(OutOfLineCode *code);
bool addOutOfLineCode(OutOfLineCode *code, const MInstruction *mir);
bool addOutOfLineCode(OutOfLineCode *code, const BytecodeSite &site);
bool hasOutOfLineCode() { return !outOfLineCode_.empty(); }
bool generateOutOfLineCode();
@ -498,14 +529,12 @@ class OutOfLineCode : public TempObject
Label entry_;
Label rejoin_;
uint32_t framePushed_;
jsbytecode *pc_;
JSScript *script_;
BytecodeSite site_;
public:
OutOfLineCode()
: framePushed_(0),
pc_(nullptr),
script_(nullptr)
site_()
{ }
virtual bool generate(CodeGeneratorShared *codegen) = 0;
@ -525,15 +554,17 @@ class OutOfLineCode : public TempObject
uint32_t framePushed() const {
return framePushed_;
}
void setSource(JSScript *script, jsbytecode *pc) {
script_ = script;
pc_ = pc;
void setBytecodeSite(const BytecodeSite &site) {
site_ = site;
}
jsbytecode *pc() {
return pc_;
const BytecodeSite &bytecodeSite() const {
return site_;
}
JSScript *script() {
return script_;
jsbytecode *pc() const {
return site_.pc();
}
JSScript *script() const {
return site_.script();
}
};
@ -729,8 +760,11 @@ inline OutOfLineCode *
CodeGeneratorShared::oolCallVM(const VMFunction &fun, LInstruction *lir, const ArgSeq &args,
const StoreOutputTo &out)
{
JS_ASSERT(lir->mirRaw());
JS_ASSERT(lir->mirRaw()->isInstruction());
OutOfLineCode *ool = new(alloc()) OutOfLineCallVM<ArgSeq, StoreOutputTo>(lir, fun, args, out);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, lir->mirRaw()->toInstruction()))
return nullptr;
return ool;
}

View File

@ -420,8 +420,12 @@ CodeGeneratorX86Shared::bailout(const T &binder, LSnapshot *snapshot)
// We could not use a jump table, either because all bailout IDs were
// reserved, or a jump table is not optimal for this frame size or
// platform. Whatever, we will generate a lazy bailout.
//
// All bailout code is associated with the bytecodeSite of the block we are
// bailing out from.
InlineScriptTree *tree = snapshot->mir()->block()->trackedTree();
OutOfLineBailout *ool = new(alloc()) OutOfLineBailout(snapshot);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, BytecodeSite(tree, tree->script()->code())))
return false;
binder(masm, ool->entry());
@ -623,7 +627,7 @@ CodeGeneratorX86Shared::visitAddI(LAddI *ins)
if (ins->snapshot()) {
if (ins->recoversInput()) {
OutOfLineUndoALUOperation *ool = new(alloc()) OutOfLineUndoALUOperation(ins);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, ins->mir()))
return false;
masm.j(Assembler::Overflow, ool->entry());
} else {
@ -645,7 +649,7 @@ CodeGeneratorX86Shared::visitSubI(LSubI *ins)
if (ins->snapshot()) {
if (ins->recoversInput()) {
OutOfLineUndoALUOperation *ool = new(alloc()) OutOfLineUndoALUOperation(ins);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, ins->mir()))
return false;
masm.j(Assembler::Overflow, ool->entry());
} else {
@ -763,7 +767,7 @@ CodeGeneratorX86Shared::visitMulI(LMulI *ins)
if (mul->canBeNegativeZero()) {
// Jump to an OOL path if the result is 0.
MulNegativeZeroCheck *ool = new(alloc()) MulNegativeZeroCheck(ins);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, mul))
return false;
masm.testl(ToRegister(lhs), ToRegister(lhs));
@ -843,7 +847,7 @@ CodeGeneratorX86Shared::visitUDivOrMod(LUDivOrMod *ins)
}
if (ool) {
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, ins->mir()))
return false;
masm.bind(ool->rejoin());
}
@ -1090,7 +1094,7 @@ CodeGeneratorX86Shared::visitDivI(LDivI *ins)
masm.bind(&done);
if (ool) {
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, mir))
return false;
masm.bind(ool->rejoin());
}
@ -1276,13 +1280,13 @@ CodeGeneratorX86Shared::visitModI(LModI *ins)
masm.bind(&done);
if (overflow) {
if (!addOutOfLineCode(overflow))
if (!addOutOfLineCode(overflow, ins->mir()))
return false;
masm.bind(overflow->done());
}
if (ool) {
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, ins->mir()))
return false;
masm.bind(ool->rejoin());
}
@ -1488,7 +1492,7 @@ CodeGeneratorX86Shared::emitTableSwitchDispatch(MTableSwitch *mir, Register inde
// generate the case entries (we don't yet know their offsets in the
// instruction stream).
OutOfLineTableSwitch *ool = new(alloc()) OutOfLineTableSwitch(mir);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, mir))
return false;
// Compute the position where a pointer to the right case stands.
@ -2053,6 +2057,57 @@ CodeGeneratorX86Shared::visitNegF(LNegF *ins)
return true;
}
// Materialize an Int32x4 constant into the instruction's output SIMD register.
bool
CodeGeneratorX86Shared::visitInt32x4(LInt32x4 *ins)
{
    FloatRegister output = ToFloatRegister(ins->getDef(0));
    masm.loadConstantInt32x4(ins->getValue(), output);
    return true;
}
// Materialize a Float32x4 constant into the instruction's output SIMD register.
bool
CodeGeneratorX86Shared::visitFloat32x4(LFloat32x4 *ins)
{
    FloatRegister output = ToFloatRegister(ins->getDef(0));
    masm.loadConstantFloat32x4(ins->getValue(), output);
    return true;
}
// Assemble a 4-lane SIMD value from four scalar operands: spill each lane to
// a freshly reserved stack area, then reload all 16 bytes at once into the
// output register with an aligned SIMD load.
// NOTE(review): the aligned loads assume the reserved stack area is 16-byte
// aligned — presumably guaranteed by Ion's stack discipline; confirm.
bool
CodeGeneratorX86Shared::visitSimdValueX4(LSimdValueX4 *ins)
{
FloatRegister output = ToFloatRegister(ins->output());
MSimdValueX4 *mir = ins->mir();
JS_ASSERT(IsSimdType(mir->type()));
// Lane size must match between int and float paths for the shared layout.
JS_STATIC_ASSERT(sizeof(float) == sizeof(int32_t));
masm.reserveStack(Simd128DataSize);
// TODO see bug 1051860 for possible optimizations.
switch (mir->type()) {
case MIRType_Int32x4: {
for (size_t i = 0; i < 4; ++i) {
Register r = ToRegister(ins->getOperand(i));
masm.store32(r, Address(StackPointer, i * sizeof(int32_t)));
}
masm.loadAlignedInt32x4(Address(StackPointer, 0), output);
break;
}
case MIRType_Float32x4: {
for (size_t i = 0; i < 4; ++i) {
FloatRegister r = ToFloatRegister(ins->getOperand(i));
masm.storeFloat32(r, Address(StackPointer, i * sizeof(float)));
}
masm.loadAlignedFloat32x4(Address(StackPointer, 0), output);
break;
}
default: MOZ_ASSUME_UNREACHABLE("Unknown SIMD kind");
}
masm.freeStack(Simd128DataSize);
return true;
}
bool
CodeGeneratorX86Shared::visitSimdExtractElementI(LSimdExtractElementI *ins)
{
@ -2092,6 +2147,56 @@ CodeGeneratorX86Shared::visitSimdExtractElementF(LSimdExtractElementF *ins)
return true;
}
// Emit a packed int32x4 binary arithmetic op. The lhs register doubles as the
// destination (asserted), matching x86 two-operand instruction form.
bool
CodeGeneratorX86Shared::visitSimdBinaryArithIx4(LSimdBinaryArithIx4 *ins)
{
    FloatRegister lhsDest = ToFloatRegister(ins->lhs());
    Operand rhsOp = ToOperand(ins->rhs());
    JS_ASSERT(ToFloatRegister(ins->output()) == lhsDest);

    MSimdBinaryArith::Operation op = ins->operation();
    if (op == MSimdBinaryArith::Add) {
        masm.packedAddInt32(rhsOp, lhsDest);
        return true;
    }
    if (op == MSimdBinaryArith::Sub) {
        masm.packedSubInt32(rhsOp, lhsDest);
        return true;
    }
    // Mul requires SSE4.1 (PMULLD) and x86 has no packed i32 division, so
    // neither is expected here.
    MOZ_ASSUME_UNREACHABLE("unexpected SIMD op");
}
// Emit a packed float32x4 binary arithmetic op. The lhs register doubles as
// the destination (asserted), matching x86 two-operand instruction form.
bool
CodeGeneratorX86Shared::visitSimdBinaryArithFx4(LSimdBinaryArithFx4 *ins)
{
    FloatRegister lhsDest = ToFloatRegister(ins->lhs());
    Operand rhsOp = ToOperand(ins->rhs());
    JS_ASSERT(ToFloatRegister(ins->output()) == lhsDest);

    MSimdBinaryArith::Operation op = ins->operation();
    if (op == MSimdBinaryArith::Add) {
        masm.packedAddFloat32(rhsOp, lhsDest);
        return true;
    }
    if (op == MSimdBinaryArith::Sub) {
        masm.packedSubFloat32(rhsOp, lhsDest);
        return true;
    }
    if (op == MSimdBinaryArith::Mul) {
        masm.packedMulFloat32(rhsOp, lhsDest);
        return true;
    }
    if (op == MSimdBinaryArith::Div) {
        masm.packedDivFloat32(rhsOp, lhsDest);
        return true;
    }
    MOZ_ASSUME_UNREACHABLE("unexpected SIMD op");
}
bool
CodeGeneratorX86Shared::visitForkJoinGetSlice(LForkJoinGetSlice *ins)
{

View File

@ -205,8 +205,13 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
bool visitNegF(LNegF *lir);
// SIMD operators
bool visitSimdValueX4(LSimdValueX4 *lir);
bool visitInt32x4(LInt32x4 *ins);
bool visitFloat32x4(LFloat32x4 *ins);
bool visitSimdExtractElementI(LSimdExtractElementI *lir);
bool visitSimdExtractElementF(LSimdExtractElementF *lir);
bool visitSimdBinaryArithIx4(LSimdBinaryArithIx4 *lir);
bool visitSimdBinaryArithFx4(LSimdBinaryArithFx4 *lir);
// Out of line visitors.
bool visitOutOfLineBailout(OutOfLineBailout *ool);

View File

@ -482,6 +482,12 @@ class MacroAssemblerX86Shared : public Assembler
void storeUnalignedInt32x4(FloatRegister src, const Address &dest) {
movdqu(src, Operand(dest));
}
// Semantic wrappers: packed int32 add/subtract (PADDD/PSUBD), dest op= src.
void packedAddInt32(const Operand &src, FloatRegister dest) {
paddd(src, dest);
}
void packedSubInt32(const Operand &src, FloatRegister dest) {
psubd(src, dest);
}
void loadAlignedFloat32x4(const Address &src, FloatRegister dest) {
movaps(Operand(src), dest);
@ -498,6 +504,18 @@ class MacroAssemblerX86Shared : public Assembler
void storeUnalignedFloat32x4(FloatRegister src, const Address &dest) {
movups(src, Operand(dest));
}
// Semantic wrappers: packed float32 add/sub/mul/div (ADDPS/SUBPS/MULPS/DIVPS),
// dest op= src.
void packedAddFloat32(const Operand &src, FloatRegister dest) {
addps(src, dest);
}
void packedSubFloat32(const Operand &src, FloatRegister dest) {
subps(src, dest);
}
void packedMulFloat32(const Operand &src, FloatRegister dest) {
mulps(src, dest);
}
void packedDivFloat32(const Operand &src, FloatRegister dest) {
divps(src, dest);
}
static uint32_t ComputeShuffleMask(SimdLane x, SimdLane y = LaneX,
SimdLane z = LaneX, SimdLane w = LaneX)
@ -665,6 +683,30 @@ class MacroAssemblerX86Shared : public Assembler
return false;
}
// Try to materialize an int32x4 constant without a memory load: all-zero via
// pxor(dest, dest) and all-ones via pcmpeqw(dest, dest) (a register compared
// equal with itself sets every bit). Returns false if a constant-pool load is
// needed instead.
bool maybeInlineInt32x4(const SimdConstant &v, const FloatRegister &dest) {
static const SimdConstant zero = SimdConstant::CreateX4(0, 0, 0, 0);
static const SimdConstant minusOne = SimdConstant::CreateX4(-1, -1, -1, -1);
if (v == zero) {
pxor(dest, dest);
return true;
}
if (v == minusOne) {
pcmpeqw(dest, dest);
return true;
}
return false;
}
// Try to materialize a float32x4 constant without a memory load: only the
// all-(+0.0) vector qualifies, via xorps(dest, dest). Returns false if a
// constant-pool load is needed instead.
bool maybeInlineFloat32x4(const SimdConstant &v, const FloatRegister &dest) {
static const SimdConstant zero = SimdConstant::CreateX4(0.f, 0.f, 0.f, 0.f);
if (v == zero) {
// This won't get inlined if the SimdConstant v contains -0 in any
// lane, as operator== here does a memcmp.
xorps(dest, dest);
return true;
}
return false;
}
void convertBoolToInt32(Register source, Register dest) {
// Note that C++ bool is only 1 byte, so zero extend it to clear the
// higher-order bits.

View File

@ -262,7 +262,7 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
if (!mir->skipBoundsCheck()) {
bool isFloat32Load = vt == Scalar::Float32;
ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, ins->mir()))
return false;
CodeOffsetLabel cmp = masm.cmplWithPatch(ToRegister(ptr), Imm32(0));
@ -416,7 +416,7 @@ CodeGeneratorX64::visitTruncateDToInt32(LTruncateDToInt32 *ins)
// On x64, branchTruncateDouble uses cvttsd2sq. Unlike the x86
// implementation, this should handle most doubles and we can just
// call a stub if it fails.
return emitTruncateDouble(input, output);
return emitTruncateDouble(input, output, ins->mir());
}
bool
@ -428,5 +428,5 @@ CodeGeneratorX64::visitTruncateFToInt32(LTruncateFToInt32 *ins)
// On x64, branchTruncateFloat32 uses cvttss2sq. Unlike the x86
// implementation, this should handle most floats and we can just
// call a stub if it fails.
return emitTruncateFloat32(input, output);
return emitTruncateFloat32(input, output, ins->mir());
}

View File

@ -79,6 +79,66 @@ MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
masm.setNextJump(j, prev);
}
// Return the constant-pool slot for SIMD constant |v|, deduplicating through
// simdMap_ and appending to simds_ on first use. Returns nullptr on OOM
// (enoughMemory_ is cleared so the compilation fails later).
MacroAssemblerX64::SimdData *
MacroAssemblerX64::getSimdData(const SimdConstant &v)
{
// Lazily initialize the dedup map.
if (!simdMap_.initialized()) {
enoughMemory_ &= simdMap_.init();
if (!enoughMemory_)
return nullptr;
}
size_t index;
if (SimdMap::AddPtr p = simdMap_.lookupForAdd(v)) {
index = p->value();
} else {
// First occurrence: append the constant and remember its index.
index = simds_.length();
enoughMemory_ &= simds_.append(SimdData(v));
enoughMemory_ &= simdMap_.add(p, v, index);
if (!enoughMemory_)
return nullptr;
}
return &simds_[index];
}
// Load an int32x4 constant into |dest|. Cheap constants are synthesized
// inline; otherwise a RIP-relative MOVDQA is emitted and chained onto the
// constant's use list so finish() can patch it to the pooled data. Silently
// returns on OOM (getSimdData already flagged enoughMemory_).
void
MacroAssemblerX64::loadConstantInt32x4(const SimdConstant &v, FloatRegister dest)
{
JS_ASSERT(v.type() == SimdConstant::Int32x4);
if (maybeInlineInt32x4(v, dest))
return;
SimdData *val = getSimdData(v);
if (!val)
return;
JS_ASSERT(!val->uses.bound());
JS_ASSERT(val->type() == SimdConstant::Int32x4);
// Thread this use onto the constant's pending-patch list.
JmpSrc j = masm.movdqa_ripr(dest.code());
JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
masm.setNextJump(j, prev);
}
// Load a float32x4 constant into |dest|. Cheap constants are synthesized
// inline; otherwise a RIP-relative MOVAPS is emitted and chained onto the
// constant's use list so finish() can patch it to the pooled data. Silently
// returns on OOM (getSimdData already flagged enoughMemory_).
void
MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant&v, FloatRegister dest)
{
JS_ASSERT(v.type() == SimdConstant::Float32x4);
if (maybeInlineFloat32x4(v, dest))
return;
SimdData *val = getSimdData(v);
if (!val)
return;
JS_ASSERT(!val->uses.bound());
JS_ASSERT(val->type() == SimdConstant::Float32x4);
// Thread this use onto the constant's pending-patch list.
JmpSrc j = masm.movaps_ripr(dest.code());
JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
masm.setNextJump(j, prev);
}
void
MacroAssemblerX64::finish()
{
@ -98,6 +158,19 @@ MacroAssemblerX64::finish()
masm.floatConstant(flt.value);
}
// SIMD memory values must be suitably aligned.
if (!simds_.empty())
masm.align(SimdStackAlignment);
for (size_t i = 0; i < simds_.length(); i++) {
SimdData &v = simds_[i];
bind(&v.uses);
switch(v.type()) {
case SimdConstant::Int32x4: masm.int32x4Constant(v.value.asInt32x4()); break;
case SimdConstant::Float32x4: masm.float32x4Constant(v.value.asFloat32x4()); break;
default: MOZ_ASSUME_UNREACHABLE("unexpected SimdConstant type");
}
}
MacroAssemblerX86Shared::finish();
}

View File

@ -66,6 +66,17 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
typedef HashMap<float, size_t, DefaultHasher<float>, SystemAllocPolicy> FloatMap;
FloatMap floatMap_;
struct SimdData {
SimdConstant value;
NonAssertingLabel uses;
SimdData(const SimdConstant &v) : value(v) {}
SimdConstant::Type type() { return value.type(); }
};
Vector<SimdData, 0, SystemAllocPolicy> simds_;
typedef HashMap<SimdConstant, size_t, SimdConstant, SystemAllocPolicy> SimdMap;
SimdMap simdMap_;
void setupABICall(uint32_t arg);
protected:
@ -1199,6 +1210,11 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
void loadConstantDouble(double d, FloatRegister dest);
void loadConstantFloat32(float f, FloatRegister dest);
private:
SimdData *getSimdData(const SimdConstant &v);
public:
void loadConstantInt32x4(const SimdConstant &v, FloatRegister dest);
void loadConstantFloat32x4(const SimdConstant &v, FloatRegister dest);
void branchTruncateDouble(FloatRegister src, Register dest, Label *fail) {
cvttsd2sq(src, dest);

View File

@ -310,7 +310,7 @@ CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic
bool isFloat32Load = (vt == Scalar::Float32);
if (!mir->fallible()) {
ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, ins->mir()))
return false;
}
@ -356,7 +356,7 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
bool isFloat32Load = vt == Scalar::Float32;
OutOfLineLoadTypedArrayOutOfBounds *ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), isFloat32Load);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, mir))
return false;
CodeOffsetLabel cmp = masm.cmplWithPatch(ptrReg, Imm32(0));
@ -641,7 +641,7 @@ CodeGeneratorX86::visitTruncateDToInt32(LTruncateDToInt32 *ins)
Register output = ToRegister(ins->output());
OutOfLineTruncate *ool = new(alloc()) OutOfLineTruncate(ins);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, ins->mir()))
return false;
masm.branchTruncateDouble(input, output, ool->entry());
@ -656,7 +656,7 @@ CodeGeneratorX86::visitTruncateFToInt32(LTruncateFToInt32 *ins)
Register output = ToRegister(ins->output());
OutOfLineTruncateFloat32 *ool = new(alloc()) OutOfLineTruncateFloat32(ins);
if (!addOutOfLineCode(ool))
if (!addOutOfLineCode(ool, ins->mir()))
return false;
masm.branchTruncateFloat32(input, output, ool->entry());

View File

@ -110,6 +110,58 @@ MacroAssemblerX86::addConstantFloat32(float f, FloatRegister dest)
flt->uses.setPrev(masm.size());
}
MacroAssemblerX86::SimdData *
MacroAssemblerX86::getSimdData(const SimdConstant &v)
{
if (!simdMap_.initialized()) {
enoughMemory_ &= simdMap_.init();
if (!enoughMemory_)
return nullptr;
}
size_t index;
SimdMap::AddPtr p = simdMap_.lookupForAdd(v);
if (p) {
index = p->value();
} else {
index = simds_.length();
enoughMemory_ &= simds_.append(SimdData(v));
enoughMemory_ &= simdMap_.add(p, v, index);
if (!enoughMemory_)
return nullptr;
}
SimdData &simd = simds_[index];
JS_ASSERT(!simd.uses.bound());
return &simd;
}
void
MacroAssemblerX86::loadConstantInt32x4(const SimdConstant &v, FloatRegister dest)
{
JS_ASSERT(v.type() == SimdConstant::Int32x4);
if (maybeInlineInt32x4(v, dest))
return;
SimdData *i4 = getSimdData(v);
if (!i4)
return;
JS_ASSERT(i4->type() == SimdConstant::Int32x4);
masm.movdqa_mr(reinterpret_cast<const void *>(i4->uses.prev()), dest.code());
i4->uses.setPrev(masm.size());
}
void
MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant &v, FloatRegister dest)
{
JS_ASSERT(v.type() == SimdConstant::Float32x4);
if (maybeInlineFloat32x4(v, dest))
return;
SimdData *f4 = getSimdData(v);
if (!f4)
return;
JS_ASSERT(f4->type() == SimdConstant::Float32x4);
masm.movaps_mr(reinterpret_cast<const void *>(f4->uses.prev()), dest.code());
f4->uses.setPrev(masm.size());
}
void
MacroAssemblerX86::finish()
{
@ -132,6 +184,22 @@ MacroAssemblerX86::finish()
if (!enoughMemory_)
return;
}
// SIMD memory values must be suitably aligned.
if (!simds_.empty())
masm.align(SimdStackAlignment);
for (size_t i = 0; i < simds_.length(); i++) {
CodeLabel cl(simds_[i].uses);
SimdData &v = simds_[i];
switch (v.type()) {
case SimdConstant::Int32x4: writeInt32x4Constant(v.value, cl.src()); break;
case SimdConstant::Float32x4: writeFloat32x4Constant(v.value, cl.src()); break;
default: MOZ_ASSUME_UNREACHABLE("unexpected SimdConstant type");
}
enoughMemory_ &= addCodeLabel(cl);
if (!enoughMemory_)
return;
}
}
void

View File

@ -38,14 +38,24 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
Float(float value) : value(value) {}
};
Vector<Float, 0, SystemAllocPolicy> floats_;
struct SimdData {
SimdConstant value;
AbsoluteLabel uses;
SimdData(const SimdConstant &v) : value(v) {}
SimdConstant::Type type() { return value.type(); }
};
Vector<SimdData, 0, SystemAllocPolicy> simds_;
typedef HashMap<double, size_t, DefaultHasher<double>, SystemAllocPolicy> DoubleMap;
DoubleMap doubleMap_;
typedef HashMap<float, size_t, DefaultHasher<float>, SystemAllocPolicy> FloatMap;
FloatMap floatMap_;
typedef HashMap<SimdConstant, size_t, SimdConstant, SystemAllocPolicy> SimdMap;
SimdMap simdMap_;
Double *getDouble(double d);
Float *getFloat(float f);
SimdData *getSimdData(const SimdConstant &v);
protected:
MoveResolver moveResolver_;
@ -929,6 +939,8 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
void addConstantDouble(double d, FloatRegister dest);
void loadConstantFloat32(float f, FloatRegister dest);
void addConstantFloat32(float f, FloatRegister dest);
void loadConstantInt32x4(const SimdConstant &v, FloatRegister dest);
void loadConstantFloat32x4(const SimdConstant &v, FloatRegister dest);
void branchTruncateDouble(FloatRegister src, Register dest, Label *fail) {
cvttsd2si(src, dest);

View File

@ -1879,7 +1879,9 @@ JS_GC(JSRuntime *rt)
JS_PUBLIC_API(void)
JS_MaybeGC(JSContext *cx)
{
MaybeGC(cx);
GCRuntime &gc = cx->runtime()->gc;
if (!gc.maybeGC(cx->zone()))
gc.maybePeriodicFullGC();
}
JS_PUBLIC_API(void)

View File

@ -1039,13 +1039,74 @@ ArrayJoinKernel(JSContext *cx, SeparatorOp sepOp, HandleObject obj, uint32_t len
}
template <bool Locale>
static bool
ArrayJoin(JSContext *cx, CallArgs &args)
JSString *
js::ArrayJoin(JSContext *cx, HandleObject obj, HandleLinearString sepstr, uint32_t length)
{
// This method is shared by Array.prototype.join and
// Array.prototype.toLocaleString. The steps in ES5 are nearly the same, so
// the annotations in this function apply to both toLocaleString and join.
// Steps 1 to 6, should be done by the caller.
JS::Anchor<JSString*> anchor(sepstr);
// Step 6 is implicit in the loops below.
// An optimized version of a special case of steps 7-11: when length==1 and
// the 0th element is a string, ToString() of that element is a no-op and
// so it can be immediately returned as the result.
if (length == 1 && !Locale && obj->is<ArrayObject>() &&
obj->getDenseInitializedLength() == 1)
{
const Value &elem0 = obj->getDenseElement(0);
if (elem0.isString()) {
return elem0.toString();
}
}
StringBuffer sb(cx);
if (sepstr->hasTwoByteChars() && !sb.ensureTwoByteChars())
return nullptr;
// The separator will be added |length - 1| times, reserve space for that
// so that we don't have to unnecessarily grow the buffer.
size_t seplen = sepstr->length();
if (length > 0 && !sb.reserve(seplen * (length - 1)))
return nullptr;
// Various optimized versions of steps 7-10.
if (seplen == 0) {
EmptySeparatorOp op;
if (!ArrayJoinKernel<Locale>(cx, op, obj, length, sb))
return nullptr;
} else if (seplen == 1) {
jschar c = sepstr->latin1OrTwoByteChar(0);
if (c <= JSString::MAX_LATIN1_CHAR) {
CharSeparatorOp<Latin1Char> op(c);
if (!ArrayJoinKernel<Locale>(cx, op, obj, length, sb))
return nullptr;
} else {
CharSeparatorOp<jschar> op(c);
if (!ArrayJoinKernel<Locale>(cx, op, obj, length, sb))
return nullptr;
}
} else {
StringSeparatorOp op(sepstr);
if (!ArrayJoinKernel<Locale>(cx, op, obj, length, sb))
return nullptr;
}
// Step 11
JSString *str = sb.finishString();
if (!str)
return nullptr;
return str;
}
template <bool Locale>
bool
ArrayJoin(JSContext *cx, CallArgs &args)
{
// Step 1
RootedObject obj(cx, ToObject(cx, args.thisv()));
if (!obj)
@ -1078,60 +1139,12 @@ ArrayJoin(JSContext *cx, CallArgs &args)
sepstr = cx->names().comma;
}
JS::Anchor<JSString*> anchor(sepstr);
// Step 6 is implicit in the loops below.
// An optimized version of a special case of steps 7-11: when length==1 and
// the 0th element is a string, ToString() of that element is a no-op and
// so it can be immediately returned as the result.
if (length == 1 && !Locale && obj->is<ArrayObject>() &&
obj->getDenseInitializedLength() == 1)
{
const Value &elem0 = obj->getDenseElement(0);
if (elem0.isString()) {
args.rval().setString(elem0.toString());
return true;
}
}
StringBuffer sb(cx);
if (sepstr->hasTwoByteChars() && !sb.ensureTwoByteChars())
// Step 6 to 11
JSString *res = js::ArrayJoin<Locale>(cx, obj, sepstr, length);
if (!res)
return false;
// The separator will be added |length - 1| times, reserve space for that
// so that we don't have to unnecessarily grow the buffer.
size_t seplen = sepstr->length();
if (length > 0 && !sb.reserve(seplen * (length - 1)))
return false;
// Various optimized versions of steps 7-10.
if (seplen == 0) {
EmptySeparatorOp op;
if (!ArrayJoinKernel<Locale>(cx, op, obj, length, sb))
return false;
} else if (seplen == 1) {
jschar c = sepstr->latin1OrTwoByteChar(0);
if (c <= JSString::MAX_LATIN1_CHAR) {
CharSeparatorOp<Latin1Char> op(c);
if (!ArrayJoinKernel<Locale>(cx, op, obj, length, sb))
return false;
} else {
CharSeparatorOp<jschar> op(c);
if (!ArrayJoinKernel<Locale>(cx, op, obj, length, sb))
return false;
}
} else {
StringSeparatorOp op(sepstr);
if (!ArrayJoinKernel<Locale>(cx, op, obj, length, sb))
return false;
}
// Step 11
JSString *str = sb.finishString();
if (!str)
return false;
args.rval().setString(str);
args.rval().setString(res);
return true;
}
@ -1183,8 +1196,8 @@ array_toLocaleString(JSContext *cx, unsigned argc, Value *vp)
}
/* ES5 15.4.4.5 */
static bool
array_join(JSContext *cx, unsigned argc, Value *vp)
bool
js::array_join(JSContext *cx, unsigned argc, Value *vp)
{
JS_CHECK_RECURSION(cx, return false);
@ -2955,7 +2968,7 @@ static const JSFunctionSpec array_methods[] = {
JS_FN(js_toLocaleString_str,array_toLocaleString,0,0),
/* Perl-ish methods. */
JS_FN("join", array_join, 1,JSFUN_GENERIC_NATIVE),
JS_FN("join", js::array_join, 1,JSFUN_GENERIC_NATIVE),
JS_FN("reverse", array_reverse, 0,JSFUN_GENERIC_NATIVE),
JS_FN("sort", array_sort, 1,JSFUN_GENERIC_NATIVE),
JS_FN("push", array_push, 1,JSFUN_GENERIC_NATIVE),

View File

@ -140,10 +140,20 @@ array_splice_impl(JSContext *cx, unsigned argc, js::Value *vp, bool pop);
extern bool
array_concat(JSContext *cx, unsigned argc, js::Value *vp);
template <bool Locale>
JSString *
ArrayJoin(JSContext *cx, HandleObject obj, HandleLinearString sepstr, uint32_t length);
extern bool
array_concat_dense(JSContext *cx, Handle<ArrayObject*> arr1, Handle<ArrayObject*> arr2,
Handle<ArrayObject*> result);
bool
array_join(JSContext *cx, unsigned argc, js::Value *vp);
extern JSString *
array_join_impl(JSContext *cx, HandleValue array, HandleString sep);
extern void
ArrayShiftMoveElements(JSObject *obj);

View File

@ -1112,12 +1112,13 @@ GCRuntime::GCRuntime(JSRuntime *rt) :
chunkAllocationSinceLastGC(false),
nextFullGCTime(0),
lastGCTime(0),
jitReleaseTime(0),
mode(JSGC_MODE_INCREMENTAL),
decommitThreshold(32 * 1024 * 1024),
cleanUpEverything(false),
grayBitsValid(false),
isNeeded(0),
majorGCNumber(0),
jitReleaseNumber(0),
number(0),
startNumber(0),
isFull(false),
@ -1248,8 +1249,11 @@ GCRuntime::initZeal()
#endif
/* Lifetime for type sets attached to scripts containing observed types. */
static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
/*
* Lifetime in number of major GCs for type sets attached to scripts containing
* observed types.
*/
static const uint64_t JIT_SCRIPT_RELEASE_TYPES_PERIOD = 20;
bool
GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
@ -1276,9 +1280,7 @@ GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
tunables.setParameter(JSGC_MAX_BYTES, maxbytes);
setMaxMallocBytes(maxbytes);
#ifndef JS_MORE_DETERMINISTIC
jitReleaseTime = PRMJ_Now() + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
#endif
jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
#ifdef JSGC_GENERATIONAL
if (!nursery.init(maxNurseryBytes))
@ -2415,13 +2417,7 @@ GCRuntime::triggerZoneGC(Zone *zone, JS::gcreason::Reason reason)
return true;
}
void
js::MaybeGC(JSContext *cx)
{
cx->runtime()->gc.maybeGC(cx->zone());
}
void
bool
GCRuntime::maybeGC(Zone *zone)
{
JS_ASSERT(CurrentThreadCanAccessRuntime(rt));
@ -2430,13 +2426,13 @@ GCRuntime::maybeGC(Zone *zone)
if (zealMode == ZealAllocValue || zealMode == ZealPokeValue) {
JS::PrepareForFullGC(rt);
GC(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
return;
return true;
}
#endif
if (isNeeded) {
GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
return;
return true;
}
double factor = schedulingState.inHighFrequencyGCMode() ? 0.85 : 0.9;
@ -2447,15 +2443,25 @@ GCRuntime::maybeGC(Zone *zone)
{
PrepareZoneForGC(zone);
GCSlice(rt, GC_NORMAL, JS::gcreason::MAYBEGC);
return;
return true;
}
#ifndef JS_MORE_DETERMINISTIC
return false;
}
void
GCRuntime::maybePeriodicFullGC()
{
/*
* Trigger a periodic full GC.
*
* This is a source of non-determinism, but is not called from the shell.
*
* Access to the counters and, on 32 bit, setting gcNextFullGCTime below
* is not atomic and a race condition could trigger or suppress the GC. We
* tolerate this.
*/
#ifndef JS_MORE_DETERMINISTIC
int64_t now = PRMJ_Now();
if (nextFullGCTime && nextFullGCTime <= now) {
if (chunkAllocationSinceLastGC ||
@ -2881,7 +2887,7 @@ GCHelperState::onBackgroundThread()
}
bool
GCRuntime::releaseObservedTypes()
GCRuntime::shouldReleaseObservedTypes()
{
bool releaseTypes = false;
@ -2890,13 +2896,12 @@ GCRuntime::releaseObservedTypes()
releaseTypes = true;
#endif
#ifndef JS_MORE_DETERMINISTIC
int64_t now = PRMJ_Now();
if (now >= jitReleaseTime)
/* We may miss the exact target GC due to resets. */
if (majorGCNumber >= jitReleaseNumber)
releaseTypes = true;
if (releaseTypes)
jitReleaseTime = now + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
#endif
jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
return releaseTypes;
}
@ -4157,10 +4162,9 @@ GCRuntime::beginSweepingZoneGroup()
zone->discardJitCode(&fop);
}
bool releaseTypes = releaseObservedTypes();
for (GCCompartmentGroupIter c(rt); !c.done(); c.next()) {
gcstats::AutoSCC scc(stats, zoneGroupIndex);
c->sweep(&fop, releaseTypes && !c->zone()->isPreservingCode());
c->sweep(&fop, releaseObservedTypes && !c->zone()->isPreservingCode());
}
for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
@ -4174,7 +4178,7 @@ GCRuntime::beginSweepingZoneGroup()
// code and new script information in the zone, the only things
// whose correctness depends on the type constraints.
bool oom = false;
zone->sweep(&fop, releaseTypes && !zone->isPreservingCode(), &oom);
zone->sweep(&fop, releaseObservedTypes && !zone->isPreservingCode(), &oom);
if (oom) {
zone->setPreservingCode(false);
@ -4264,6 +4268,8 @@ GCRuntime::beginSweepPhase(bool lastGC)
sweepOnBackgroundThread = !lastGC && !TraceEnabled() && CanUseExtraThreads();
releaseObservedTypes = shouldReleaseObservedTypes();
#ifdef DEBUG
for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
JS_ASSERT(!c->gcIncomingGrayPointers);
@ -4986,6 +4992,8 @@ GCRuntime::gcCycle(bool incremental, int64_t budget, JSGCInvocationKind gckind,
interFrameGC = true;
number++;
if (incrementalState == NO_INCREMENTAL)
majorGCNumber++;
// It's ok if threads other than the main thread have suppressGC set, as
// they are operating on zones which will not be collected from here.

View File

@ -971,9 +971,6 @@ TriggerGC(JSRuntime *rt, JS::gcreason::Reason reason);
extern bool
TriggerZoneGC(Zone *zone, JS::gcreason::Reason reason);
extern void
MaybeGC(JSContext *cx);
extern void
ReleaseAllJITCode(FreeOp *op);

View File

@ -691,7 +691,7 @@ AllocateObjectForCacheHit(JSContext *cx, AllocKind kind, InitialHeap heap)
JSObject *obj = AllocateObject<NoGC>(cx, kind, 0, heap);
if (!obj && allowGC) {
MaybeGC(cx);
cx->runtime()->gc.maybeGC(cx->zone());
return nullptr;
}

View File

@ -167,6 +167,7 @@ UNIFIED_SOURCES += [
'jit/IonMacroAssembler.cpp',
'jit/IonOptimizationLevels.cpp',
'jit/IonSpewer.cpp',
'jit/JitcodeMap.cpp',
'jit/JitOptions.cpp',
'jit/JSONSpewer.cpp',
'jit/LICM.cpp',
@ -378,10 +379,6 @@ if CONFIG['ENABLE_ION']:
SOURCES += [
'assembler/assembler/MacroAssemblerX86Common.cpp',
]
elif CONFIG['JS_CODEGEN_ARM']:
SOURCES += [
'assembler/assembler/MacroAssemblerARM.cpp',
]
if CONFIG['JS_HAS_CTYPES']:
SOURCES += [

View File

@ -22,6 +22,7 @@
#include "js/Vector.h"
#include "vm/ArgumentsObject.h"
#include "vm/DebuggerMemory.h"
#include "vm/SPSProfiler.h"
#include "vm/WrapperObject.h"
#include "jsgcinlines.h"
@ -2300,6 +2301,7 @@ Debugger::construct(JSContext *cx, unsigned argc, Value *vp)
bool
Debugger::addDebuggeeGlobal(JSContext *cx, Handle<GlobalObject*> global)
{
AutoSuppressProfilerSampling suppressProfilerSampling(cx);
AutoDebugModeInvalidation invalidate(global->compartment());
return addDebuggeeGlobal(cx, global, invalidate);
}

View File

@ -178,6 +178,7 @@ JSRuntime::JSRuntime(JSRuntime *parentRuntime)
debugMode(false),
spsProfiler(thisFromCtor()),
profilingScripts(false),
suppressProfilerSampling(false),
hadOutOfMemory(false),
haveCreatedContext(false),
data(nullptr),
@ -235,12 +236,12 @@ JSRuntime::JSRuntime(JSRuntime *parentRuntime)
static bool
JitSupportsFloatingPoint()
{
if (!JSC::MacroAssembler::supportsFloatingPoint())
return false;
#if WTF_ARM_ARCH_VERSION == 6
#if defined(JS_CODEGEN_ARM)
if (!js::jit::HasVFP())
return false;
#else
if (!JSC::MacroAssembler::supportsFloatingPoint())
return false;
#endif
return true;

Some files were not shown because too many files have changed in this diff Show More