Bug 1579749 - Decouple checkpoints and snapshots, r=jlast.

Differential Revision: https://phabricator.services.mozilla.com/D45141

--HG--
extra : moz-landing-system : lando

parent 73adec6cbf
commit 80421ff6e7
@@ -125,9 +125,6 @@ function ChildProcess(id, recording) {
// in the process of being scanned by this child.
this.scannedCheckpoints = new Set();

// Checkpoints in savedCheckpoints which haven't been sent to the child yet.
this.needSaveCheckpoints = [];

// Whether this child has diverged from the recording and cannot run forward.
this.divergedFromRecording = false;

@@ -242,18 +239,28 @@ ChildProcess.prototype = {
addSavedCheckpoint(checkpoint) {
dumpv(`AddSavedCheckpoint #${this.id} ${checkpoint}`);
this.savedCheckpoints.add(checkpoint);
if (checkpoint != FirstCheckpointId) {
this.needSaveCheckpoints.push(checkpoint);
}
},

// Get any checkpoints to inform the child that it needs to save.
flushNeedSaveCheckpoints() {
const rv = this.needSaveCheckpoints;
this.needSaveCheckpoints = [];
// Get the checkpoints which the child must save in the range [start, end].
savedCheckpointsInRange(start, end) {
const rv = [];
for (let i = start; i <= end; i++) {
if (this.savedCheckpoints.has(i)) {
rv.push(i);
}
}
return rv;
},

// Get the checkpoints which the child must save when running to endpoint.
getCheckpointsToSave(endpoint) {
assert(endpoint >= this.pausePoint().checkpoint);
return this.savedCheckpointsInRange(
this.pausePoint().checkpoint + 1,
endpoint
);
},

// Get the last saved checkpoint equal to or prior to checkpoint.
lastSavedCheckpoint(checkpoint) {
while (!this.savedCheckpoints.has(checkpoint)) {

@@ -582,7 +589,7 @@ function maybeReachPoint(child, endpoint) {
contents: {
kind: "runToPoint",
endpoint,
needSaveCheckpoints: child.flushNeedSaveCheckpoints(),
saveCheckpoints: child.getCheckpointsToSave(endpoint.checkpoint),
},
onFinished() {},
destination: endpoint,

@@ -596,9 +603,14 @@ function maybeReachPoint(child, endpoint) {

// Send the child to its most recent saved checkpoint at or before target.
function restoreCheckpointPriorTo(target) {
target = child.lastSavedCheckpoint(target);
// We must skip past any snapshots that are after target.
const savedCheckpoints = child.savedCheckpointsInRange(
target + 1,
child.pausePoint().checkpoint
);
const numSnapshots = savedCheckpoints.length;
child.sendManifest({
contents: { kind: "restoreCheckpoint", target },
contents: { kind: "restoreSnapshot", numSnapshots },
onFinished({ restoredCheckpoint }) {
assert(restoredCheckpoint);
child.divergedFromRecording = false;

@@ -725,7 +737,7 @@ async function scanRecording(checkpoint) {
return {
kind: "scanRecording",
endpoint,
needSaveCheckpoints: child.flushNeedSaveCheckpoints(),
saveCheckpoints: child.getCheckpointsToSave(endpoint),
};
},
onFinished(child, { duration }) {

@@ -1326,7 +1338,7 @@ let gNearbyPoints = [];
const NumNearbyBreakpointHits = 2;

// How many frame steps are nearby points, on either side of the pause point.
const NumNearbySteps = 4;
const NumNearbySteps = 12;

function nextKnownBreakpointHit(point, forward) {
let checkpoint = getSavedCheckpoint(point.checkpoint);
@@ -885,7 +885,7 @@ function ClearPausedState() {
///////////////////////////////////////////////////////////////////////////////

// The manifest that is currently being processed.
let gManifest;
let gManifest = { kind: "primordial" };

// When processing certain manifests this tracks the execution time when the
// manifest started executing.

@@ -902,28 +902,23 @@ let gPauseOnDebuggerStatement = false;
const gManifestStartHandlers = {
resume({ breakpoints, pauseOnDebuggerStatement }) {
RecordReplayControl.resumeExecution();
gManifestStartTime = RecordReplayControl.currentExecutionTime();
breakpoints.forEach(ensurePositionHandler);

gPauseOnDebuggerStatement = pauseOnDebuggerStatement;
dbg.onDebuggerStatement = debuggerStatementHit;
},

restoreCheckpoint({ target }) {
RecordReplayControl.restoreCheckpoint(target);
restoreSnapshot({ numSnapshots }) {
RecordReplayControl.restoreSnapshot(numSnapshots);
throwError("Unreachable!");
},

runToPoint({ needSaveCheckpoints }) {
for (const checkpoint of needSaveCheckpoints) {
RecordReplayControl.saveCheckpoint(checkpoint);
}
runToPoint() {
RecordReplayControl.resumeExecution();
},

scanRecording(manifest) {
gManifestStartTime = RecordReplayControl.currentExecutionTime();
gManifestStartHandlers.runToPoint(manifest);
RecordReplayControl.resumeExecution();
},

findHits({ position, startpoint, endpoint }) {

@@ -1006,6 +1001,7 @@ const gManifestStartHandlers = {
function ManifestStart(manifest) {
try {
gManifest = manifest;
gManifestStartTime = RecordReplayControl.currentExecutionTime();

if (gManifestStartHandlers[manifest.kind]) {
gManifestStartHandlers[manifest.kind](manifest);

@@ -1017,12 +1013,6 @@ function ManifestStart(manifest) {
}
}

// eslint-disable-next-line no-unused-vars
function BeforeCheckpoint() {
clearPositionHandlers();
stopScanningAllScripts();
}

const FirstCheckpointId = 1;

// The most recent encountered checkpoint.

@@ -1066,18 +1056,36 @@ function finishResume(point) {
// Handlers that run after a checkpoint is reached to see if the manifest has
// finished. This does not need to be specified for all manifests.
const gManifestFinishedAfterCheckpointHandlers = {
primordial(_, point) {
// The primordial manifest runs forward to the first checkpoint, saves it,
// and then finishes.
assert(point.checkpoint == FirstCheckpointId);
if (!newSnapshot(point)) {
return;
}
RecordReplayControl.manifestFinished({ point });
},

resume(_, point) {
clearPositionHandlers();
finishResume(point);
},

runToPoint({ endpoint }, point) {
runToPoint({ endpoint, saveCheckpoints }, point) {
assert(endpoint.checkpoint >= point.checkpoint);
if (saveCheckpoints.includes(point.checkpoint) && !newSnapshot(point)) {
return;
}
if (!endpoint.position && point.checkpoint == endpoint.checkpoint) {
RecordReplayControl.manifestFinished({ point });
}
},

scanRecording({ endpoint }, point) {
scanRecording({ endpoint, saveCheckpoints }, point) {
stopScanningAllScripts();
if (saveCheckpoints.includes(point.checkpoint) && !newSnapshot(point)) {
return;
}
if (point.checkpoint == endpoint) {
const duration =
RecordReplayControl.currentExecutionTime() - gManifestStartTime;

@@ -1110,19 +1118,7 @@ const gManifestPrepareAfterCheckpointHandlers = {
};

function processManifestAfterCheckpoint(point, restoredCheckpoint) {
// After rewinding gManifest won't be correct, so we always mark the current
// manifest as finished and rely on the middleman to give us a new one.
if (restoredCheckpoint) {
RecordReplayControl.manifestFinished({ restoredCheckpoint, point });
}

if (!gManifest) {
// The process is considered to have an initial manifest to run forward to
// the first checkpoint.
assert(point.checkpoint == FirstCheckpointId);
RecordReplayControl.manifestFinished({ point });
assert(gManifest);
} else if (gManifestFinishedAfterCheckpointHandlers[gManifest.kind]) {
if (gManifestFinishedAfterCheckpointHandlers[gManifest.kind]) {
gManifestFinishedAfterCheckpointHandlers[gManifest.kind](gManifest, point);
}

@@ -1132,12 +1128,12 @@ function processManifestAfterCheckpoint(point, restoredCheckpoint) {
}

// eslint-disable-next-line no-unused-vars
function AfterCheckpoint(id, restoredCheckpoint) {
function HitCheckpoint(id) {
gLastCheckpoint = id;
const point = currentExecutionPoint();

try {
processManifestAfterCheckpoint(point, restoredCheckpoint);
processManifestAfterCheckpoint(point);
} catch (e) {
printError("AfterCheckpoint", e);
}

@@ -1180,6 +1176,18 @@ function debuggerStatementHit() {
}
}

function newSnapshot(point) {
if (RecordReplayControl.newSnapshot()) {
return true;
}

// After rewinding gManifest won't be correct, so we always mark the current
// manifest as finished and rely on the middleman to give us a new one.
RecordReplayControl.manifestFinished({ restoredCheckpoint: true, point });

return false;
}

///////////////////////////////////////////////////////////////////////////////
// Handler Helpers
///////////////////////////////////////////////////////////////////////////////

@@ -1885,8 +1893,7 @@ function printError(why, e) {
// eslint-disable-next-line no-unused-vars
var EXPORTED_SYMBOLS = [
"ManifestStart",
"BeforeCheckpoint",
"AfterCheckpoint",
"HitCheckpoint",
"NewTimeWarpTarget",
"ScriptResumeFrame",
];
@@ -11,8 +11,7 @@
[scriptable, uuid(8b86b71f-8471-472e-9997-c5f21f9d0598)]
interface rrIReplay : nsISupports {
void ManifestStart(in jsval manifest);
void BeforeCheckpoint();
void AfterCheckpoint(in long checkpoint, in bool restoredCheckpoint);
void HitCheckpoint(in long checkpoint);
long NewTimeWarpTarget();
void ScriptResumeFrame(in long script);
};
@@ -29,17 +29,17 @@ namespace recordreplay {
///////////////////////////////////////////////////////////////////////////////
// Memory Snapshots Overview.
//
// Checkpoints are periodically saved, storing in memory enough information
// Snapshots are periodically saved, storing in memory enough information
// for the process to restore the contents of all tracked memory as it
// rewinds to earlier checkpoints. There are two components to a saved
// checkpoint:
// rewinds to the point the snapshot was made. There are two components to a
// snapshot:
//
// - Stack contents for each thread are completely saved on disk at each saved
// checkpoint. This is handled by ThreadSnapshot.cpp
// - Stack contents for each thread are completely saved for each snapshot.
// This is handled by ThreadSnapshot.cpp
//
// - Heap and static memory contents (tracked memory) are saved in memory as
// the contents of pages modified before either the next saved checkpoint
// or the current execution point (if this is the last saved checkpoint).
// the contents of pages modified before either the next snapshot
// or the current execution point (if this is the last snapshot).
// This is handled here.
//
// Heap memory is only tracked when allocated with TrackedMemoryKind.

@@ -51,25 +51,25 @@ namespace recordreplay {
// without fork (i.e. Windows). The following example shows how snapshots are
// generated:
//
// #1 Save Checkpoint A. The initial snapshot tabulates all allocated tracked
// #1 Save snapshot A. The initial snapshot tabulates all allocated tracked
// memory in the process, and write-protects all of it.
//
// #2 Write pages P0 and P1. Writing to the pages trips the fault handler. The
// handler creates copies of the initial contents of P0 and P1 (P0a and P1a)
// and unprotects the pages.
//
// #3 Save Checkpoint B. P0a and P1a, along with any other pages modified
// between A and B, become associated with checkpoint A. All modified pages
// #3 Save snapshot B. P0a and P1a, along with any other pages modified
// between A and B, become associated with snapshot A. All modified pages
// are reprotected.
//
// #4 Write pages P1 and P2. Again, writing to the pages trips the fault
// handler and copies P1b and P2b are created and the pages are unprotected.
//
// #5 Save Checkpoint C. P1b and P2b become associated with snapshot B, and the
// #5 Save snapshot C. P1b and P2b become associated with snapshot B, and the
// modified pages are reprotected.
//
// If we were to then rewind from C to A, we would read and restore P1b/P2b,
// followed by P0a/P1a. All data associated with checkpoints A and later is
// followed by P0a/P1a. All data associated with snapshots A and later is
// discarded (we can only rewind; we cannot jump forward in time).
///////////////////////////////////////////////////////////////////////////////
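The copy-on-write scheme described in this overview can be illustrated with a minimal standalone sketch. The helpers below (PageSize, Protect, Unprotect, the vectors) are hypothetical stand-ins for illustration only, not the actual MemorySnapshot.cpp data structures:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

constexpr size_t PageSize = 4096;
void Protect(uint8_t*) {}    // a real implementation would mprotect(PROT_READ)
void Unprotect(uint8_t*) {}  // a real implementation would mprotect(PROT_READ | PROT_WRITE)

struct DirtyCopy { uint8_t* base; uint8_t* original; };
static std::vector<DirtyCopy> gActiveDirty;                 // pages dirtied since the last snapshot
static std::vector<std::vector<DirtyCopy>> gSnapshotDiffs;  // one diff per saved snapshot

// Called from the fault handler the first time a protected page is written.
void OnWriteFault(uint8_t* pageBase) {
  uint8_t* original = new uint8_t[PageSize];
  std::memcpy(original, pageBase, PageSize);  // keep the pre-write contents
  gActiveDirty.push_back({pageBase, original});
  Unprotect(pageBase);                        // let the faulting write proceed
}

// Called when a new snapshot is taken: the copies made since the previous
// snapshot become that snapshot's diff, and every dirtied page is reprotected.
void TakeSnapshotDiff() {
  for (const DirtyCopy& copy : gActiveDirty) Protect(copy.base);
  gSnapshotDiffs.push_back(std::move(gActiveDirty));
  gActiveDirty.clear();
}
```

In terms of the example above, the diff pushed at step #3 would hold P0a and P1a, and the diff pushed at step #5 would hold P1b and P2b.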
@@ -77,17 +77,18 @@ namespace recordreplay {
// Snapshot Threads Overview.
//
// After step #3 above, the main thread has created a diff snapshot with the
// copies of the original contents of pages modified between two saved
// checkpoints. These page copies are initially all in memory. It is the
// responsibility of the snapshot threads to do the following:
// copies of the original contents of pages modified between two snapshots.
// These page copies are initially all in memory. It is the responsibility of
// the snapshot threads to do the following:
//
// 1. When rewinding to the last saved checkpoint, snapshot threads are used to
// 1. When rewinding to the last snapshot, snapshot threads are used to
// restore the original contents of pages using their in-memory copies.
//
// There are a fixed number of snapshot threads that are spawned when the
// first checkpoint is saved. Threads are each responsible for distinct sets of
// heap memory pages (see AddDirtyPageToWorklist), avoiding synchronization
// issues between different snapshot threads.
// first snapshot is saved, which is always at the first checkpoint. Threads are
// each responsible for distinct sets of heap memory pages
// (see AddDirtyPageToWorklist), avoiding synchronization issues between
// different snapshot threads.
///////////////////////////////////////////////////////////////////////////////
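One way to read "distinct sets of heap memory pages": each page is owned by exactly one snapshot thread, so the threads never need to synchronize with each other while copying or restoring. A rough sketch of such a partition (the names and the modulo scheme are assumptions for illustration, not the AddDirtyPageToWorklist implementation):

```cpp
#include <cstddef>
#include <cstdint>

constexpr size_t NumSnapshotThreads = 8;
constexpr size_t PageSize = 4096;

// Every page maps to exactly one snapshot thread, so two threads can never
// end up working on the same page's copy.
size_t SnapshotThreadForPage(uint8_t* aPageBase) {
  return (reinterpret_cast<uintptr_t>(aPageBase) / PageSize) % NumSnapshotThreads;
}
```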
///////////////////////////////////////////////////////////////////////////////

@@ -129,12 +130,12 @@ struct AllocatedMemoryRegion {
};
};

// Information about a page which was modified between two saved checkpoints.
// Information about a page which was modified between two snapshots.
struct DirtyPage {
// Base address of the page.
uint8_t* mBase;

// Copy of the page at the first checkpoint. Written by the dirty memory
// Copy of the page at the first snapshot. Written by the dirty memory
// handler via HandleDirtyMemoryFault if this is in the active page set,
// otherwise accessed by snapshot threads.
uint8_t* mOriginal;

@@ -158,19 +159,14 @@ typedef SplayTree<DirtyPage, DirtyPage::AddressSort,
AllocPolicy<MemoryKind::SortedDirtyPageSet>, 4>
SortedDirtyPageSet;

// A set of dirty pages associated with some checkpoint.
// A set of dirty pages associated with some snapshot.
struct DirtyPageSet {
// Checkpoint associated with this set.
size_t mCheckpoint;

// All dirty pages in the set. Pages may be added or destroyed by the main
// thread when all other threads are idle, by the dirty memory handler when
// it is active and this is the active page set, and by the snapshot thread
// which owns this set.
InfallibleVector<DirtyPage, 256, AllocPolicy<MemoryKind::DirtyPageSet>>
mPages;

explicit DirtyPageSet(size_t aCheckpoint) : mCheckpoint(aCheckpoint) {}
};

// Worklist used by each snapshot thread.

@@ -182,7 +178,7 @@ struct SnapshotThreadWorklist {
size_t mThreadId;

// Sets of pages in the thread's worklist. Each set is for a different diff,
// with the oldest checkpoints first.
// with the oldest snapshots first.
InfallibleVector<DirtyPageSet, 256, AllocPolicy<MemoryKind::Generic>> mSets;
};

@@ -296,9 +292,9 @@ struct MemoryInfo {
// Whether new dirty pages or allocated regions are allowed.
bool mMemoryChangesAllowed;

// Untracked memory regions allocated before the first checkpoint. This is
// only accessed on the main thread, and is not a vector because of reentrancy
// issues.
// Untracked memory regions allocated before the first checkpoint/snapshot.
// This is only accessed on the main thread, and is not a vector because of
// reentrancy issues.
static const size_t MaxInitialUntrackedRegions = 512;
AllocatedMemoryRegion mInitialUntrackedRegions[MaxInitialUntrackedRegions];
SpinLock mInitialUntrackedRegionsLock;

@@ -313,7 +309,7 @@ struct MemoryInfo {
mTrackedRegionsByAllocationOrder;
SpinLock mTrackedRegionsLock;

// Pages from |trackedRegions| modified since the last saved checkpoint.
// Pages from |trackedRegions| modified since the last snapshot.
// Accessed by any thread (usually the dirty memory handler) when memory
// changes are allowed, and by the main thread when memory changes are not
// allowed.

@@ -551,7 +547,7 @@ bool HandleDirtyMemoryFault(uint8_t* aAddress) {
AutoSpinLock lock(gMemoryInfo->mActiveDirtyLock);

// Check to see if this is already an active dirty page. Once a page has been
// marked as dirty it will be accessible until the next checkpoint is saved,
// marked as dirty it will be accessible until the next snapshot is taken,
// but it's possible for multiple threads to access the same protected memory
// before we have a chance to unprotect it, in which case we'll end up here
// multiple times for the page.

@@ -601,7 +597,7 @@ void UnrecoverableSnapshotFailure() {
///////////////////////////////////////////////////////////////////////////////

void AddInitialUntrackedMemoryRegion(uint8_t* aBase, size_t aSize) {
MOZ_RELEASE_ASSERT(!HasSavedAnyCheckpoint());
MOZ_RELEASE_ASSERT(!NumSnapshots());

if (gInitializationFailureMessage) {
return;

@@ -631,7 +627,7 @@ void AddInitialUntrackedMemoryRegion(uint8_t* aBase, size_t aSize) {
}

static void RemoveInitialUntrackedRegion(uint8_t* aBase, size_t aSize) {
MOZ_RELEASE_ASSERT(!HasSavedAnyCheckpoint());
MOZ_RELEASE_ASSERT(!NumSnapshots());
AutoSpinLock lock(gMemoryInfo->mInitialUntrackedRegionsLock);

for (AllocatedMemoryRegion& region : gMemoryInfo->mInitialUntrackedRegions) {

@@ -858,7 +854,7 @@ static void ProcessAllInitialMemoryRegions() {
static FreeRegionSet gFreeRegions(MemoryKind::Tracked);

// The size of gMemoryInfo->mTrackedRegionsByAllocationOrder we expect to see
// at the point of the last saved checkpoint.
// at the point of the last snapshot.
static size_t gNumTrackedRegions;

static void UpdateNumTrackedRegionsForSnapshot() {

@@ -867,7 +863,7 @@ static void UpdateNumTrackedRegionsForSnapshot() {
}

void FixupFreeRegionsAfterRewind() {
// All memory that has been allocated since the associated checkpoint was
// All memory that has been allocated since the associated snapshot was
// reached is now free, and may be reused for new allocations.
size_t newTrackedRegions =
gMemoryInfo->mTrackedRegionsByAllocationOrder.length();

@@ -998,10 +994,10 @@ void RegisterAllocatedMemory(void* aBaseAddress, size_t aSize,
uint8_t* aAddress = reinterpret_cast<uint8_t*>(aBaseAddress);

if (aKind != MemoryKind::Tracked) {
if (!HasSavedAnyCheckpoint()) {
if (!NumSnapshots()) {
AddInitialUntrackedMemoryRegion(aAddress, aSize);
}
} else if (HasSavedAnyCheckpoint()) {
} else if (NumSnapshots()) {
EnsureMemoryChangesAllowed();
DirectWriteProtectMemory(aAddress, aSize, true);
AddTrackedRegion(aAddress, aSize, true);

@@ -1012,7 +1008,7 @@ void CheckFixedMemory(void* aAddress, size_t aSize) {
MOZ_RELEASE_ASSERT(aAddress == PageBase(aAddress));
MOZ_RELEASE_ASSERT(aSize == RoundupSizeToPageBoundary(aSize));

if (!HasSavedAnyCheckpoint()) {
if (!NumSnapshots()) {
return;
}

@@ -1043,7 +1039,7 @@ void RestoreWritableFixedMemory(void* aAddress, size_t aSize) {
MOZ_RELEASE_ASSERT(aAddress == PageBase(aAddress));
MOZ_RELEASE_ASSERT(aSize == RoundupSizeToPageBoundary(aSize));

if (!HasSavedAnyCheckpoint()) {
if (!NumSnapshots()) {
return;
}

@@ -1064,7 +1060,7 @@ void* AllocateMemoryTryAddress(void* aAddress, size_t aSize, MemoryKind aKind) {
gMemoryInfo->mMemoryBalance[(size_t)aKind] += aSize;
}

if (HasSavedAnyCheckpoint()) {
if (NumSnapshots()) {
if (void* res = FreeRegionSet::Get(aKind).Extract(aAddress, aSize)) {
return res;
}

@@ -1096,8 +1092,9 @@ void DeallocateMemory(void* aAddress, size_t aSize, MemoryKind aKind) {
gMemoryInfo->mMemoryBalance[(size_t)aKind] -= aSize;
}

// Memory is returned to the system before saving the first checkpoint.
if (!HasSavedAnyCheckpoint()) {
// Memory is returned to the system before reaching the first checkpoint and
// saving the first snapshot.
if (!NumSnapshots()) {
if (IsReplaying() && aKind != MemoryKind::Tracked) {
RemoveInitialUntrackedRegion((uint8_t*)aAddress, aSize);
}

@@ -1128,10 +1125,7 @@ void DeallocateMemory(void* aAddress, size_t aSize, MemoryKind aKind) {
// this thread which were modified since the last recorded diff snapshot.
static void SnapshotThreadRestoreLastDiffSnapshot(
SnapshotThreadWorklist* aWorklist) {
size_t checkpoint = GetLastSavedCheckpoint();

DirtyPageSet& set = aWorklist->mSets.back();
MOZ_RELEASE_ASSERT(set.mCheckpoint == checkpoint);

// Copy the original contents of all pages.
for (size_t index = 0; index < set.mPages.length(); index++) {

@@ -1207,7 +1201,6 @@ static void AddDirtyPageToWorklist(uint8_t* aAddress, uint8_t* aOriginal,
&gMemoryInfo->mSnapshotWorklists[pageIndex];
MOZ_RELEASE_ASSERT(!worklist->mSets.empty());
DirtyPageSet& set = worklist->mSets.back();
MOZ_RELEASE_ASSERT(set.mCheckpoint == GetLastSavedCheckpoint());
set.mPages.emplaceBack(aAddress, aOriginal, aExecutable);
}
}

@@ -1267,7 +1260,7 @@ void TakeDiffMemorySnapshot() {
// Add a DirtyPageSet to each snapshot thread's worklist for this snapshot.
for (size_t i = 0; i < NumSnapshotThreads; i++) {
SnapshotThreadWorklist* worklist = &gMemoryInfo->mSnapshotWorklists[i];
worklist->mSets.emplaceBack(GetLastSavedCheckpoint());
worklist->mSets.emplaceBack();
}

// Distribute remaining active dirty pages to the snapshot thread worklists.

@@ -1285,12 +1278,12 @@ void TakeDiffMemorySnapshot() {
gMemoryInfo->mSnapshotThreadsShouldIdle.ActivateEnd();
}

void RestoreMemoryToLastSavedCheckpoint() {
void RestoreMemoryToLastSnapshot() {
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(!gMemoryInfo->mMemoryChangesAllowed);

// Restore all dirty regions that have been modified since the last
// checkpoint was saved/restored.
// snapshot was saved/restored.
for (SortedDirtyPageSet::Iter iter = gMemoryInfo->mActiveDirty.begin();
!iter.done(); ++iter) {
MemoryMove(iter.ref().mBase, iter.ref().mOriginal, PageSize);

@@ -1301,7 +1294,7 @@ void RestoreMemoryToLastSavedCheckpoint() {
gMemoryInfo->mActiveDirty.clear();
}

void RestoreMemoryToLastSavedDiffCheckpoint() {
void RestoreMemoryToLastDiffSnapshot() {
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(!gMemoryInfo->mMemoryChangesAllowed);
MOZ_RELEASE_ASSERT(gMemoryInfo->mActiveDirty.empty());
@@ -15,21 +15,20 @@ namespace recordreplay {

// Memory Snapshots Overview.
//
// As described in ProcessRewind.h, some subset of the checkpoints which are
// reached during execution are saved, so that their state can be restored
// later. Memory snapshots are used to save and restore the contents of all
// heap memory: everything except thread stacks (see ThreadSnapshot.h for
// saving and restoring these) and untracked memory (which is not saved or
// restored, see ProcessRecordReplay.h).
// As described in ProcessRewind.h, periodically snapshots are saved so that
// their state can be restored later. Memory snapshots are used to save and
// restore the contents of all heap memory: everything except thread stacks
// (see ThreadSnapshot.h for saving and restoring these) and untracked memory
// (which is not saved or restored, see ProcessRecordReplay.h).
//
// Each memory snapshot is a diff of the heap memory contents compared to the
// next one. See MemorySnapshot.cpp for how diffs are represented and computed.
//
// Rewinding must restore the exact contents of heap memory that existed when
// the target checkpoint was reached. Because of this, memory that is allocated
// at a point when a checkpoint is saved will never actually be returned to the
// system. We instead keep a set of free blocks that are unused at the current
// point of execution and are available to satisfy new allocations.
// the target snapshot was reached. Because of this, memory that is allocated
// after a point when a snapshot has been saved will never actually be returned
// to the system. We instead keep a set of free blocks that are unused at the
// current point of execution and are available to satisfy new allocations.
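The free-block behaviour described above can be sketched in a few lines: once a snapshot exists, "freed" tracked memory is only remembered as free, never unmapped, because a later rewind may need those bytes to still be mapped. The container and names below are hypothetical; the real code uses FreeRegionSet:

```cpp
#include <cstddef>
#include <map>

static std::map<void*, size_t> gFreeBlocks;  // address -> size of reusable blocks

void FreeTracked(void* aAddress, size_t aSize) {
  gFreeBlocks[aAddress] = aSize;  // keep the mapping alive; just mark it reusable
}

void* AllocateTracked(size_t aSize) {
  for (auto it = gFreeBlocks.begin(); it != gFreeBlocks.end(); ++it) {
    if (it->second >= aSize) {  // satisfy the allocation from a block freed earlier
      void* result = it->first;
      gFreeBlocks.erase(it);
      return result;
    }
  }
  return nullptr;  // the real code would fall back to mapping fresh memory here
}
```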
// Make sure that a block of memory in a fixed allocation is already allocated.
void CheckFixedMemory(void* aAddress, size_t aSize);

@@ -47,12 +46,12 @@ void* AllocateMemoryTryAddress(void* aAddress, size_t aSize, MemoryKind aKind);
void RegisterAllocatedMemory(void* aBaseAddress, size_t aSize,
MemoryKind aKind);

// Exclude a region of memory from snapshots, before the first checkpoint has
// been reached.
// Exclude a region of memory from snapshots, before the first snapshot has
// been taken.
void AddInitialUntrackedMemoryRegion(uint8_t* aBase, size_t aSize);

// Return whether a range of memory is in a tracked region. This excludes
// memory that was allocated after the last checkpoint and is not write
// memory that was allocated after the last snapshot and is not write
// protected.
bool MemoryRangeIsTracked(void* aAddress, size_t aSize);

@@ -62,22 +61,17 @@ void InitializeMemorySnapshots();
// Take the first heap memory snapshot.
void TakeFirstMemorySnapshot();

// Take a differential heap memory snapshot compared to the last one,
// associated with the last saved checkpoint.
// Take a differential heap memory snapshot compared to the last one.
void TakeDiffMemorySnapshot();

// Restore all heap memory to its state when the most recent checkpoint was
// saved. This requires no checkpoints to have been saved after this one.
void RestoreMemoryToLastSavedCheckpoint();
// Restore all heap memory to its state when the most recent snapshot was
// taken.
void RestoreMemoryToLastSnapshot();

// Restore all heap memory to its state at a checkpoint where a complete diff
// was saved vs. the following saved checkpoint. This requires that no
// tracked heap memory has been changed since the last saved checkpoint.
void RestoreMemoryToLastSavedDiffCheckpoint();

// Erase all information from the last diff snapshot taken, so that tracked
// heap memory changes are with respect to the previous checkpoint.
void EraseLastSavedDiffMemorySnapshot();
// Restore all heap memory to its state at a snapshot where a complete diff
// was saved vs. the following snapshot. This requires that no tracked heap
// memory has been changed since the last snapshot.
void RestoreMemoryToLastDiffSnapshot();

// Set whether to allow changes to tracked heap memory at this point. If such
// changes occur when they are not allowed then the process will crash.
@@ -379,8 +379,8 @@ static PreambleResult MiddlemanPreamble_sendmsg(CallArguments* aArguments) {
}

static PreambleResult Preamble_mprotect(CallArguments* aArguments) {
// Ignore any mprotect calls that occur after saving a checkpoint.
if (!HasSavedAnyCheckpoint()) {
// Ignore any mprotect calls that occur after taking a snapshot.
if (!NumSnapshots()) {
return PreambleResult::PassThrough;
}
aArguments->Rval<ssize_t>() = 0;

@@ -408,7 +408,7 @@ static PreambleResult Preamble_mmap(CallArguments* aArguments) {
// Get an anonymous mapping for the result.
if (flags & MAP_FIXED) {
// For fixed allocations, make sure this memory region is mapped and zero.
if (!HasSavedAnyCheckpoint()) {
if (!NumSnapshots()) {
// Make sure this memory region is writable.
CallFunction<int>(gOriginal_mprotect, address, size,
PROT_READ | PROT_WRITE | PROT_EXEC);

@@ -421,10 +421,10 @@ static PreambleResult Preamble_mmap(CallArguments* aArguments) {
}
} else {
// We have to call mmap itself, which can change memory protection flags
// for memory that is already allocated. If we haven't saved a checkpoint
// then this is no problem, but after saving a checkpoint we have to make
// for memory that is already allocated. If we haven't taken a snapshot
// then this is no problem, but after taking a snapshot we have to make
// sure that protection flags are what we expect them to be.
int newProt = HasSavedAnyCheckpoint() ? (PROT_READ | PROT_EXEC) : prot;
int newProt = NumSnapshots() ? (PROT_READ | PROT_EXEC) : prot;
memory = CallFunction<void*>(gOriginal_mmap, address, size, newProt, flags,
fd, offset);

@@ -608,7 +608,7 @@ static ssize_t WaitForCvar(pthread_mutex_t* aMutex, pthread_cond_t* aCond,
if (!lock) {
if (IsReplaying() && !AreThreadEventsPassedThrough()) {
Thread* thread = Thread::Current();
if (thread->MaybeWaitForCheckpointSave(
if (thread->MaybeWaitForSnapshot(
[=]() { pthread_mutex_unlock(aMutex); })) {
// We unlocked the mutex while the thread idled, so don't wait on the
// condvar: the state the thread is waiting on may have changed and it

@@ -903,7 +903,7 @@ static PreambleResult Preamble_mach_vm_map(CallArguments* aArguments) {
} else if (AreThreadEventsPassedThrough()) {
// We should only reach this at startup, when initializing the graphics
// shared memory block.
MOZ_RELEASE_ASSERT(!HasSavedAnyCheckpoint());
MOZ_RELEASE_ASSERT(!NumSnapshots());
return PreambleResult::PassThrough;
}

@@ -916,9 +916,9 @@ static PreambleResult Preamble_mach_vm_map(CallArguments* aArguments) {
}

static PreambleResult Preamble_mach_vm_protect(CallArguments* aArguments) {
// Ignore any mach_vm_protect calls that occur after saving a checkpoint, as
// Ignore any mach_vm_protect calls that occur after taking a snapshot, as
// for mprotect.
if (!HasSavedAnyCheckpoint()) {
if (!NumSnapshots()) {
return PreambleResult::PassThrough;
}
aArguments->Rval<size_t>() = KERN_SUCCESS;
@@ -20,21 +20,15 @@
namespace mozilla {
namespace recordreplay {

// The most recent checkpoint which was encountered.
static size_t gLastCheckpoint = InvalidCheckpointId;

// Information about the current rewinding state. The contents of this structure
// are in untracked memory.
struct RewindInfo {
// The most recent checkpoint which was encountered.
size_t mLastCheckpoint;

// Checkpoints which have been saved. This includes only entries from
// mShouldSaveCheckpoints, plus all temporary checkpoints.
InfallibleVector<SavedCheckpoint, 1024, AllocPolicy<MemoryKind::Generic>>
mSavedCheckpoints;

// Unsorted list of checkpoints which the middleman has instructed us to
// save. All those equal to or prior to mLastCheckpoint will have been saved.
InfallibleVector<size_t, 1024, AllocPolicy<MemoryKind::Generic>>
mShouldSaveCheckpoints;
// Thread stacks for snapshots which have been saved.
InfallibleVector<AllSavedThreadStacks, 1024, AllocPolicy<MemoryKind::Generic>>
mSnapshots;
};

static RewindInfo* gRewindInfo;

@@ -51,22 +45,14 @@ void InitializeRewindState() {
void* memory = AllocateMemory(sizeof(RewindInfo), MemoryKind::Generic);
gRewindInfo = new (memory) RewindInfo();

// The first checkpoint is implicitly saved while replaying: we won't be able
// to get a manifest from the middleman telling us what to save until after
// this checkpoint has been reached.
if (IsReplaying()) {
gRewindInfo->mShouldSaveCheckpoints.append(FirstCheckpointId);
}

gMainThreadCallbackMonitor = new Monitor();
}

void RestoreCheckpointAndResume(size_t aCheckpoint) {
void RestoreSnapshotAndResume(size_t aNumSnapshots) {
MOZ_RELEASE_ASSERT(IsReplaying());
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
MOZ_RELEASE_ASSERT(aCheckpoint == gRewindInfo->mLastCheckpoint ||
aCheckpoint < gRewindInfo->mLastCheckpoint);
MOZ_RELEASE_ASSERT(aNumSnapshots < gRewindInfo->mSnapshots.length());

// Make sure we don't lose pending main thread callbacks due to rewinding.
{

@@ -79,95 +65,78 @@ void RestoreCheckpointAndResume(size_t aCheckpoint) {
double start = CurrentTime();

{
// Rewind heap memory to the target checkpoint, which must have been saved.
// Rewind heap memory to the target snapshot.
AutoDisallowMemoryChanges disallow;
size_t newCheckpoint = gRewindInfo->mSavedCheckpoints.back().mCheckpoint;
RestoreMemoryToLastSavedCheckpoint();
while (aCheckpoint < newCheckpoint) {
gRewindInfo->mSavedCheckpoints.back().ReleaseContents();
gRewindInfo->mSavedCheckpoints.popBack();
RestoreMemoryToLastSavedDiffCheckpoint();
newCheckpoint = gRewindInfo->mSavedCheckpoints.back().mCheckpoint;
RestoreMemoryToLastSnapshot();
for (size_t i = 0; i < aNumSnapshots; i++) {
gRewindInfo->mSnapshots.back().ReleaseContents();
gRewindInfo->mSnapshots.popBack();
RestoreMemoryToLastDiffSnapshot();
}
MOZ_RELEASE_ASSERT(newCheckpoint == aCheckpoint);
}

FixupFreeRegionsAfterRewind();

double end = CurrentTime();
PrintSpew("Restore #%d -> #%d %.2fs\n", (int)gRewindInfo->mLastCheckpoint,
(int)aCheckpoint, (end - start) / 1000000.0);
PrintSpew("Restore %.2fs\n", (end - start) / 1000000.0);

// Finally, let threads restore themselves to their stacks at the checkpoint
// Finally, let threads restore themselves to their stacks at the snapshot
// we are rewinding to.
RestoreAllThreads(gRewindInfo->mSavedCheckpoints.back());
RestoreAllThreads(gRewindInfo->mSnapshots.back());
Unreachable();
}

void SetSaveCheckpoint(size_t aCheckpoint, bool aSave) {
MOZ_RELEASE_ASSERT(aCheckpoint > gRewindInfo->mLastCheckpoint);
VectorAddOrRemoveEntry(gRewindInfo->mShouldSaveCheckpoints, aCheckpoint,
aSave);
bool NewSnapshot() {
if (IsRecording()) {
return true;
}

Thread::WaitForIdleThreads();

PrintSpew("Saving snapshot...\n");

double start = CurrentTime();

// Record either the first or a subsequent diff memory snapshot.
if (gRewindInfo->mSnapshots.empty()) {
TakeFirstMemorySnapshot();
} else {
TakeDiffMemorySnapshot();
}
gRewindInfo->mSnapshots.emplaceBack();

double end = CurrentTime();

bool reached = true;

// Save all thread stacks for the snapshot. If we rewind here from a
// later point of execution then this will return false.
if (SaveAllThreads(gRewindInfo->mSnapshots.back())) {
PrintSpew("Saved snapshot %.2fs\n", (end - start) / 1000000.0);
} else {
PrintSpew("Restored snapshot\n");

reached = false;

// After restoring, make sure all threads have updated their stacks
// before letting any of them resume execution. Threads might have
// pointers into each others' stacks.
WaitForIdleThreadsToRestoreTheirStacks();
}

Thread::ResumeIdleThreads();

return reached;
}

bool NewCheckpoint() {
void NewCheckpoint() {
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
MOZ_RELEASE_ASSERT(!HasDivergedFromRecording());

js::BeforeCheckpoint();
gLastCheckpoint++;

// Get the ID of the new checkpoint.
size_t checkpoint = gRewindInfo->mLastCheckpoint + 1;

// Save all checkpoints the middleman tells us to, and temporary checkpoints
// (which the middleman never knows about).
bool save = VectorContains(gRewindInfo->mShouldSaveCheckpoints, checkpoint);
bool reachedCheckpoint = true;

if (save) {
Thread::WaitForIdleThreads();

PrintSpew("Starting checkpoint...\n");

double start = CurrentTime();

// Record either the first or a subsequent diff memory snapshot.
if (gRewindInfo->mSavedCheckpoints.empty()) {
TakeFirstMemorySnapshot();
} else {
TakeDiffMemorySnapshot();
}
gRewindInfo->mSavedCheckpoints.emplaceBack(checkpoint);

double end = CurrentTime();

// Save all thread stacks for the checkpoint. If we rewind here from a
// later point of execution then this will return false.
if (SaveAllThreads(gRewindInfo->mSavedCheckpoints.back())) {
PrintSpew("Saved checkpoint #%d %.2fs\n", (int)checkpoint,
(end - start) / 1000000.0);
} else {
PrintSpew("Restored checkpoint #%d\n", (int)checkpoint);

reachedCheckpoint = false;

// After restoring, make sure all threads have updated their stacks
// before letting any of them resume execution. Threads might have
// pointers into each others' stacks.
WaitForIdleThreadsToRestoreTheirStacks();
}

Thread::ResumeIdleThreads();
} else {
PrintSpew("Skipping checkpoint #%d\n", (int)checkpoint);
}

gRewindInfo->mLastCheckpoint = checkpoint;

js::AfterCheckpoint(checkpoint, !reachedCheckpoint);

return reachedCheckpoint;
js::HitCheckpoint(gLastCheckpoint);
}

static bool gUnhandledDivergeAllowed;

@@ -211,7 +180,7 @@ void DisallowUnhandledDivergeFromRecording() {

void EnsureNotDivergedFromRecording() {
// If we have diverged from the recording and encounter an operation we can't
// handle, rewind to the last checkpoint.
// handle, rewind to the last snapshot.
AssertEventsAreNotPassedThrough();
if (HasDivergedFromRecording()) {
MOZ_RELEASE_ASSERT(gUnhandledDivergeAllowed);

@@ -222,36 +191,18 @@ void EnsureNotDivergedFromRecording() {
MOZ_CRASH("Recording divergence while repainting");
}

PrintSpew("Unhandled recording divergence, restoring checkpoint...\n");
RestoreCheckpointAndResume(
gRewindInfo->mSavedCheckpoints.back().mCheckpoint);
PrintSpew("Unhandled recording divergence, restoring snapshot...\n");
RestoreSnapshotAndResume(0);
Unreachable();
}
}

bool HasSavedAnyCheckpoint() {
return gRewindInfo && !gRewindInfo->mSavedCheckpoints.empty();
}

bool HasSavedCheckpoint(size_t aCheckpoint) {
if (!gRewindInfo) {
return false;
}
for (const SavedCheckpoint& saved : gRewindInfo->mSavedCheckpoints) {
if (saved.mCheckpoint == aCheckpoint) {
return true;
}
}
return false;
size_t NumSnapshots() {
return gRewindInfo ? gRewindInfo->mSnapshots.length() : 0;
}

size_t GetLastCheckpoint() {
return gRewindInfo ? gRewindInfo->mLastCheckpoint : InvalidCheckpointId;
}

size_t GetLastSavedCheckpoint() {
MOZ_RELEASE_ASSERT(HasSavedAnyCheckpoint());
return gRewindInfo->mSavedCheckpoints.back().mCheckpoint;
return gLastCheckpoint;
}

static bool gMainThreadShouldPause = false;

@@ -289,12 +240,12 @@ void PauseMainThreadAndServiceCallbacks() {
}
}

// As for RestoreCheckpointAndResume, we shouldn't resume the main thread
// As for RestoreSnapshotAndResume, we shouldn't resume the main thread
// while it still has callbacks to execute.
MOZ_RELEASE_ASSERT(gMainThreadCallbacks.empty());

// If we diverge from the recording the only way we can get back to resuming
// normal execution is to rewind to a checkpoint prior to the divergence.
// normal execution is to rewind to a snapshot prior to the divergence.
MOZ_RELEASE_ASSERT(!HasDivergedFromRecording());

gMainThreadIsPaused = false;
@@ -29,15 +29,13 @@ namespace recordreplay {
// Checkpoints form a basis for identifying a particular point in execution,
// and in allowing replaying processes to rewind themselves.
//
// A subset of checkpoints are saved: the contents of each thread's stack is
// copied, along with enough information to restore the contents of heap memory
// at the checkpoint.
// In a replaying process, snapshots can be taken that retain enough information
// to restore the contents of heap memory and thread stacks at the point the
// snapshot was taken. Snapshots are usually taken when certain checkpoints are
// reached, but they can be taken at other points as well.
//
// Saved checkpoints are in part represented as diffs vs the following
// saved checkpoint. This requires some different handling for the most recent
// saved checkpoint (whose diff has not been computed) and earlier saved
// checkpoints. See MemorySnapshot.h and Thread.h for more on how saved
// checkpoints are represented.
// See MemorySnapshot.h and ThreadSnapshot.h for information on how snapshots
// are represented.
///////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////

@@ -46,18 +44,13 @@ namespace recordreplay {
// 1. While performing the replay, execution proceeds until the main thread
// hits either a breakpoint or a checkpoint.
//
// 2. The main thread then calls a hook (JS::replay::hooks.hitBreakpointReplay
// or gAfterCheckpointHook), which may decide to pause the main thread and
// give it a callback to invoke using PauseMainThreadAndInvokeCallback.
// 2. Control then enters the logic in devtools/server/actors/replay/replay.js,
// which may decide to pause the main thread.
//
// 3. Now that the main thread is paused, the replay message loop thread
// (see ChildIPC.h) can give it additional callbacks to invoke using
// PauseMainThreadAndInvokeCallback.
//
// 4. These callbacks can inspect the paused state, diverge from the recording
// 3. The replay logic can inspect the process state, diverge from the recording
// by calling DivergeFromRecording, and eventually can unpause the main
// thread and allow execution to resume by calling ResumeExecution
// (if DivergeFromRecording was not called) or RestoreCheckpointAndResume.
// (if DivergeFromRecording was not called) or RestoreSnapshotAndResume.
///////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////

@@ -73,7 +66,7 @@ namespace recordreplay {
// passed through, but other events that require interacting with the system
// will trigger an unhandled divergence from the recording via
// EnsureNotDivergedFromRecording, causing the process to rewind to the most
// recent saved checkpoint. The debugger will recognize this rewind and play
// recent snapshot. The debugger will recognize this rewind and play
// back in a way that restores the state when DivergeFromRecording() was
// called, but without performing the later operation that triggered the
// rewind.

@@ -86,11 +79,8 @@ static const size_t FirstCheckpointId = 1;
// Initialize state needed for rewinding.
void InitializeRewindState();

// Set whether this process should save a particular checkpoint.
void SetSaveCheckpoint(size_t aCheckpoint, bool aSave);

// Invoke a callback on the main thread, and pause it until ResumeExecution or
// RestoreCheckpointAndResume are called. When the main thread is not paused,
// RestoreSnapshotAndResume are called. When the main thread is not paused,
// this must be called on the main thread itself. When the main thread is
// already paused, this may be called from any thread.
void PauseMainThreadAndInvokeCallback(const std::function<void()>& aCallback);

@@ -103,49 +93,45 @@ bool MainThreadShouldPause();
// longer needs to pause.
void PauseMainThreadAndServiceCallbacks();

// Return whether any checkpoints have been saved.
bool HasSavedAnyCheckpoint();

// Return whether a specific checkpoint has been saved.
bool HasSavedCheckpoint(size_t aCheckpoint);
// Return how many snapshots have been taken.
size_t NumSnapshots();

// Get the ID of the most recently encountered checkpoint.
size_t GetLastCheckpoint();

// Get the ID of the most recent saved checkpoint.
size_t GetLastSavedCheckpoint();

// When paused at a breakpoint or at a checkpoint, restore a checkpoint that
// was saved earlier and resume execution.
void RestoreCheckpointAndResume(size_t aCheckpoint);
// When paused at a breakpoint or at a checkpoint, restore a snapshot that
// was saved earlier. aNumSnapshots is the number of snapshots to skip over
// when restoring.
void RestoreSnapshotAndResume(size_t aNumSnapshots);
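To make the aNumSnapshots parameter concrete, it counts snapshots to discard beyond the most recent one before restoring. A hypothetical illustration of the indexing (not the actual implementation):

```cpp
#include <cstddef>

// With snapshots [S0, S1, S2, S3] taken in that order (S3 most recent),
//   RestoreSnapshotAndResume(0) rewinds to S3;
//   RestoreSnapshotAndResume(2) discards S3 and S2 and rewinds to S1.
size_t TargetSnapshotIndex(size_t aNumTaken, size_t aNumSnapshots) {
  // The most recent snapshot has index aNumTaken - 1; skip aNumSnapshots back.
  return aNumTaken - 1 - aNumSnapshots;
}
```

This matches how the middleman's restoreCheckpointPriorTo manifest computes numSnapshots: the count of saved checkpoints strictly after the target.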
// When paused at a breakpoint or at a checkpoint, unpause and proceed with
// execution.
void ResumeExecution();

// Allow execution after this point to diverge from the recording. Execution
// will remain diverged until an earlier checkpoint is restored.
// will remain diverged until an earlier snapshot is restored.
//
// If an unhandled divergence occurs (see the 'Recording Divergence' comment
// in ProcessRewind.h) then the process rewinds to the most recent saved
// checkpoint.
// in ProcessRewind.h) then the process rewinds to the most recent snapshot.
void DivergeFromRecording();

// After a call to DivergeFromRecording(), this may be called to prevent future
// unhandled divergence from causing earlier checkpoints to be restored
// unhandled divergence from causing earlier snapshots to be restored
// (the process will immediately crash instead). This state lasts until a new
// call to DivergeFromRecording, or to an explicit restore of an earlier
// checkpoint.
// snapshot.
void DisallowUnhandledDivergeFromRecording();

// Make sure that execution has not diverged from the recording after a call to
// DivergeFromRecording, by rewinding to the last saved checkpoint if so.
// DivergeFromRecording, by rewinding to the last snapshot if so.
void EnsureNotDivergedFromRecording();

// Note a checkpoint at the current execution position. This checkpoint will be
// saved if it was instructed to do so via a manifest. This method returns true
// if the checkpoint was just saved, and false if it was just restored.
bool NewCheckpoint();
// Note a checkpoint at the current execution position.
void NewCheckpoint();

// Create a new snapshot that can be restored later. This method returns true
// if the snapshot was just taken, and false if it was just restored.
bool NewSnapshot();

} // namespace recordreplay
} // namespace mozilla
@@ -430,7 +430,7 @@ void Thread::NotifyUnrecordedWait(
}
}

bool Thread::MaybeWaitForCheckpointSave(
bool Thread::MaybeWaitForSnapshot(
const std::function<void()>& aReleaseCallback) {
MOZ_RELEASE_ASSERT(!PassThroughEvents());
if (IsMainThread()) {
@@ -49,7 +49,7 @@ namespace recordreplay {
// thread attempts to take a recorded lock and blocks in Lock::Wait.
// For other threads (any thread which has diverged from the recording,
// or JS helper threads even when no recording divergence has occurred),
// NotifyUnrecordedWait and MaybeWaitForCheckpointSave are used to enter
// NotifyUnrecordedWait and MaybeWaitForSnapshot are used to enter
// this state when the thread performs a blocking operation.
//
// 4. Once all recorded threads are idle, the main thread is able to record

@@ -285,15 +285,13 @@ class Thread {
// main thread is already waiting for other threads to become idle.
//
// The callback should poke the thread so that it is no longer blocked on the
// resource. The thread must call MaybeWaitForCheckpointSave before blocking
// again.
// resource. The thread must call MaybeWaitForSnapshot before blocking again.
//
// MaybeWaitForCheckpointSave takes a callback to release any resources
// before the thread begins idling. The return value is whether this callback
// was invoked.
// MaybeWaitForSnapshot takes a callback to release any resources before the
// thread begins idling. The return value is whether this callback was
// invoked.
void NotifyUnrecordedWait(const std::function<void()>& aNotifyCallback);
bool MaybeWaitForCheckpointSave(
const std::function<void()>& aReleaseCallback);
bool MaybeWaitForSnapshot(const std::function<void()>& aReleaseCallback);

// Wait for all other threads to enter the idle state necessary for saving
// or restoring a checkpoint. This may only be called on the main thread.
@@ -19,7 +19,7 @@ namespace recordreplay {
#define THREAD_STACK_TOP_SIZE 2048

// Information about a thread's state, for use in saving or restoring
// checkpoints. The contents of this structure are in preserved memory.
// snapshots. The contents of this structure are in preserved memory.
struct ThreadState {
// Whether this thread should update its state when no longer idle. This is
// only used for non-main threads.

@@ -51,7 +51,7 @@ struct ThreadState {

// For each non-main thread, whether that thread should update its stack and
// state when it is no longer idle. This also stores restore info for the
// main thread, which immediately updates its state when restoring checkpoints.
// main thread, which immediately updates its state when restoring snapshots.
static ThreadState* gThreadState;

void InitializeThreadSnapshots(size_t aNumThreads) {

@@ -248,7 +248,7 @@ bool ShouldRestoreThreadStack(size_t aId) {
return gThreadState[aId].mShouldRestore;
}

bool SaveAllThreads(SavedCheckpoint& aSaved) {
bool SaveAllThreads(AllSavedThreadStacks& aSaved) {
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());

AutoPassThroughThreadEvents pt; // setjmp may perform system calls.

@@ -266,7 +266,7 @@ bool SaveAllThreads(SavedCheckpoint& aSaved) {
return true;
}

void RestoreAllThreads(const SavedCheckpoint& aSaved) {
void RestoreAllThreads(const AllSavedThreadStacks& aSaved) {
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());

// These will be matched by the Auto* classes in SaveAllThreads().
@@ -17,10 +17,10 @@ namespace recordreplay {
// Thread Snapshots Overview.
//
// The functions below are used when a thread saves or restores its stack and
// register state at a checkpoint. The steps taken in saving and restoring a
// thread snapshot are as follows:
// register state. The steps taken in saving and restoring a thread snapshot are
// as follows:
//
// 1. Before idling (non-main threads) or before reaching a checkpoint (main
// 1. Before idling (non-main threads) or before creating a snapshot (main
// thread), the thread calls SaveThreadState. This saves the register state
// for the thread as well as a portion of the top of the stack, and after
// saving the state it returns true.
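The save/restore flow described here resembles setjmp/longjmp: SaveThreadState returns once when the state is saved, and control later re-emerges at that same point when the snapshot is restored. A minimal standalone analogy (not the Gecko implementation, just the control-flow shape):

```cpp
#include <csetjmp>
#include <cstdio>

static std::jmp_buf gSaved;

int main() {
  if (setjmp(gSaved) == 0) {
    // First return: the register state was just saved (SaveThreadState -> true).
    std::puts("state saved, running forward");
    std::longjmp(gSaved, 1);  // analogous to restoring the thread snapshot
  } else {
    // Second "return": execution re-emerged here by restoring the saved state.
    std::puts("state restored");
  }
  return 0;
}
```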
@ -72,12 +72,9 @@ struct SavedThreadStack {
|
||||
}
|
||||
};
|
||||
|
||||
struct SavedCheckpoint {
|
||||
size_t mCheckpoint;
|
||||
struct AllSavedThreadStacks {
|
||||
SavedThreadStack mStacks[MaxRecordedThreadId];
|
||||
|
||||
explicit SavedCheckpoint(size_t aCheckpoint) : mCheckpoint(aCheckpoint) {}
|
||||
|
||||
void ReleaseContents() {
|
||||
for (SavedThreadStack& stack : mStacks) {
|
||||
stack.ReleaseContents();
|
||||
@ -89,13 +86,13 @@ struct SavedCheckpoint {
// own stack and the stacks of all other threads. The return value is true if
// the stacks were just saved, or false if they were just restored due to a
// rewind from a later point of execution.
bool SaveAllThreads(SavedCheckpoint& aSavedCheckpoint);
bool SaveAllThreads(AllSavedThreadStacks& aSaved);

// Restore the saved stacks for a checkpoint and rewind state to that point.
// Restore a set of saved stacks and rewind state to that point.
// This function does not return.
void RestoreAllThreads(const SavedCheckpoint& aSavedCheckpoint);
void RestoreAllThreads(const AllSavedThreadStacks& aSaved);

// After rewinding to an earlier checkpoint, the main thread will call this to
// After rewinding to an earlier point, the main thread will call this to
// ensure that each thread has woken up and restored its own stack contents.
// The main thread does not itself write to the stacks of other threads.
void WaitForIdleThreadsToRestoreTheirStacks();
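Not part of the patch: a hypothetical caller showing how the boolean result of SaveAllThreads() distinguishes its two return paths, and where WaitForIdleThreadsToRestoreTheirStacks() fits. The function name is invented; only the three declarations above come from the patch.

void SnapshotPointSketch(AllSavedThreadStacks& aStacks) {
  MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
  if (SaveAllThreads(aStacks)) {
    // Forward path: every thread's stack was just captured; keep executing.
    return;
  }
  // SaveAllThreads() returned false: RestoreAllThreads(aStacks) was called
  // from some later point and execution has rewound to here. Wait until the
  // other threads have rebuilt their own stacks before touching shared state.
  WaitForIdleThreadsToRestoreTheirStacks();
}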
@ -259,15 +259,11 @@ typedef EmptyMessage<MessageType::BeginFatalError> BeginFatalErrorMessage;
static const gfx::SurfaceFormat gSurfaceFormat = gfx::SurfaceFormat::R8G8B8X8;

struct PaintMessage : public Message {
  // Checkpoint whose state is being painted.
  uint32_t mCheckpointId;

  uint32_t mWidth;
  uint32_t mHeight;

  PaintMessage(uint32_t aCheckpointId, uint32_t aWidth, uint32_t aHeight)
  PaintMessage(uint32_t aWidth, uint32_t aHeight)
      : Message(MessageType::Paint, sizeof(*this)),
        mCheckpointId(aCheckpointId),
        mWidth(aWidth),
        mHeight(aHeight) {}
};

@ -493,8 +493,7 @@ static void PaintFromMainThread() {

  if (IsMainChild() && gDrawTargetBuffer) {
    memcpy(gGraphicsShmem, gDrawTargetBuffer, gDrawTargetBufferSize);
    gChannel->SendMessage(
        PaintMessage(GetLastCheckpoint(), gPaintWidth, gPaintHeight));
    gChannel->SendMessage(PaintMessage(gPaintWidth, gPaintHeight));
  }
}
@ -433,7 +433,7 @@ void ManifestStart(const CharBuffer& aContents) {
  DisallowUnhandledDivergeFromRecording();
}

void BeforeCheckpoint() {
void HitCheckpoint(size_t aCheckpoint) {
  if (!IsInitialized()) {
    SetupDevtoolsSandbox();
  }
@ -442,21 +442,11 @@ void BeforeCheckpoint() {
  AutoSafeJSContext cx;
  JSAutoRealm ar(cx, xpc::PrivilegedJunkScope());

  if (NS_FAILED(gReplay->BeforeCheckpoint())) {
  if (NS_FAILED(gReplay->HitCheckpoint(aCheckpoint))) {
    MOZ_CRASH("BeforeCheckpoint");
  }
}

void AfterCheckpoint(size_t aCheckpoint, bool aRestoredCheckpoint) {
  AutoDisallowThreadEvents disallow;
  AutoSafeJSContext cx;
  JSAutoRealm ar(cx, xpc::PrivilegedJunkScope());

  if (NS_FAILED(gReplay->AfterCheckpoint(aCheckpoint, aRestoredCheckpoint))) {
    MOZ_CRASH("AfterCheckpoint");
  }
}

static ProgressCounter gProgressCounter;

extern "C" {
@ -655,6 +645,13 @@ static bool RecordReplay_AreThreadEventsDisallowed(JSContext* aCx,
  return true;
}

static bool RecordReplay_NewSnapshot(JSContext* aCx, unsigned aArgc,
                                     Value* aVp) {
  CallArgs args = CallArgsFromVp(aArgc, aVp);
  args.rval().setBoolean(NewSnapshot());
  return true;
}

static bool RecordReplay_DivergeFromRecording(JSContext* aCx, unsigned aArgc,
                                              Value* aVp) {
  CallArgs args = CallArgsFromVp(aArgc, aVp);
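Not part of the patch: a hedged guess at the shape of NewSnapshot(), whose boolean result the binding above forwards to JS. The snapshot container and function body are assumptions; only SaveAllThreads and AllSavedThreadStacks come from the patch.

#include <vector>

static std::vector<AllSavedThreadStacks> gSnapshotStackSketch;  // hypothetical

bool NewSnapshotSketch() {
  gSnapshotStackSketch.emplace_back();
  // true  -> a snapshot was just created while running forward;
  // false -> execution has just rewound back to this snapshot.
  return SaveAllThreads(gSnapshotStackSketch.back());
}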
@ -741,8 +738,8 @@ static bool RecordReplay_ResumeExecution(JSContext* aCx, unsigned aArgc,
  return true;
}

static bool RecordReplay_RestoreCheckpoint(JSContext* aCx, unsigned aArgc,
                                           Value* aVp) {
static bool RecordReplay_RestoreSnapshot(JSContext* aCx, unsigned aArgc,
                                         Value* aVp) {
  CallArgs args = CallArgsFromVp(aArgc, aVp);

  if (!args.get(0).isNumber()) {
@ -750,13 +747,13 @@ static bool RecordReplay_RestoreCheckpoint(JSContext* aCx, unsigned aArgc,
    return false;
  }

  size_t checkpoint = args.get(0).toNumber();
  if (!HasSavedCheckpoint(checkpoint)) {
    JS_ReportErrorASCII(aCx, "Only saved checkpoints can be restored");
  size_t numSnapshots = args.get(0).toNumber();
  if (numSnapshots >= NumSnapshots()) {
    JS_ReportErrorASCII(aCx, "Haven't saved enough checkpoints");
    return false;
  }

  RestoreCheckpointAndResume(checkpoint);
  RestoreSnapshotAndResume(numSnapshots);

  JS_ReportErrorASCII(aCx, "Unreachable!");
  return false;
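Also not part of the patch: one plausible reading of the count-based argument, continuing the hypothetical gSnapshotStackSketch above. Under this assumption, restoreSnapshot(numSnapshots) discards the numSnapshots most recently saved snapshots and rewinds to the one beneath them; the real bookkeeping lives in the rewind infrastructure.

void RestoreSnapshotAndResumeSketch(size_t aNumSnapshots) {
  MOZ_RELEASE_ASSERT(aNumSnapshots < gSnapshotStackSketch.size());
  for (size_t i = 0; i < aNumSnapshots; i++) {
    gSnapshotStackSketch.back().ReleaseContents();  // drop snapshots we skip over
    gSnapshotStackSketch.pop_back();
  }
  RestoreAllThreads(gSnapshotStackSketch.back());   // rewinds; does not return
}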
@ -815,27 +812,6 @@ static bool RecordReplay_SetMainChild(JSContext* aCx, unsigned aArgc,
  return true;
}

static bool RecordReplay_SaveCheckpoint(JSContext* aCx, unsigned aArgc,
                                        Value* aVp) {
  CallArgs args = CallArgsFromVp(aArgc, aVp);

  if (!args.get(0).isNumber()) {
    JS_ReportErrorASCII(aCx, "Bad checkpoint ID");
    return false;
  }

  size_t checkpoint = args.get(0).toNumber();
  if (checkpoint <= GetLastCheckpoint()) {
    JS_ReportErrorASCII(aCx, "Can't save checkpoint in the past");
    return false;
  }

  SetSaveCheckpoint(checkpoint, true);

  args.rval().setUndefined();
  return true;
}

static bool RecordReplay_GetContent(JSContext* aCx, unsigned aArgc,
                                    Value* aVp) {
  CallArgs args = CallArgsFromVp(aArgc, aVp);
@ -1354,6 +1330,7 @@ static const JSFunctionSpec gRecordReplayMethods[] = {
    JS_FN("childId", RecordReplay_ChildId, 0, 0),
    JS_FN("areThreadEventsDisallowed", RecordReplay_AreThreadEventsDisallowed,
          0, 0),
    JS_FN("newSnapshot", RecordReplay_NewSnapshot, 0, 0),
    JS_FN("divergeFromRecording", RecordReplay_DivergeFromRecording, 0, 0),
    JS_FN("progressCounter", RecordReplay_ProgressCounter, 0, 0),
    JS_FN("advanceProgressCounter", RecordReplay_AdvanceProgressCounter, 0, 0),
@ -1361,11 +1338,10 @@ static const JSFunctionSpec gRecordReplayMethods[] = {
          RecordReplay_ShouldUpdateProgressCounter, 1, 0),
    JS_FN("manifestFinished", RecordReplay_ManifestFinished, 1, 0),
    JS_FN("resumeExecution", RecordReplay_ResumeExecution, 0, 0),
    JS_FN("restoreCheckpoint", RecordReplay_RestoreCheckpoint, 1, 0),
    JS_FN("restoreSnapshot", RecordReplay_RestoreSnapshot, 1, 0),
    JS_FN("currentExecutionTime", RecordReplay_CurrentExecutionTime, 0, 0),
    JS_FN("flushRecording", RecordReplay_FlushRecording, 0, 0),
    JS_FN("setMainChild", RecordReplay_SetMainChild, 0, 0),
    JS_FN("saveCheckpoint", RecordReplay_SaveCheckpoint, 1, 0),
    JS_FN("getContent", RecordReplay_GetContent, 1, 0),
    JS_FN("repaint", RecordReplay_Repaint, 0, 0),
    JS_FN("memoryUsage", RecordReplay_MemoryUsage, 0, 0),
@ -68,14 +68,8 @@ void AfterSaveRecording();
// The following hooks are used in the recording/replaying process to
// call methods defined by the JS sandbox.

// Called when running forward, immediately before hitting a normal or
// temporary checkpoint.
void BeforeCheckpoint();

// Called immediately after hitting a normal or temporary checkpoint, either
// when running forward or immediately after rewinding. aRestoredCheckpoint is
// true if we just rewound.
void AfterCheckpoint(size_t aCheckpoint, bool aRestoredCheckpoint);
// Called when running forward and a normal checkpoint was reached.
void HitCheckpoint(size_t aCheckpoint);

// Called when a child crashes, returning whether the crash was recovered from.
bool RecoverFromCrash(parent::ChildProcessInfo* aChild);