Merge m-c to inbound. a=merge

Ryan VanderMeulen 2016-08-31 09:54:24 -04:00
commit fa68902503
119 changed files with 3644 additions and 1457 deletions

View File

@ -1612,13 +1612,20 @@
// Make sure the browser is destroyed so it unregisters from observer notifications
aBrowser.destroy();
// Make sure to restore the original droppedLinkHandler.
// Make sure to restore the original droppedLinkHandler and
// relatedBrowser.
let droppedLinkHandler = aBrowser.droppedLinkHandler;
let relatedBrowser = aBrowser.relatedBrowser;
// Change the "remote" attribute.
let parent = aBrowser.parentNode;
parent.removeChild(aBrowser);
aBrowser.setAttribute("remote", aShouldBeRemote ? "true" : "false");
// NB: This works with the hack in the browser constructor that
// turns this normal property into a field.
aBrowser.relatedBrowser = relatedBrowser;
parent.appendChild(aBrowser);
aBrowser.userTypedValue = oldUserTypedValue;

View File

@ -778,7 +778,7 @@ def check_have_64_bit(have_64_bit, compiler_have_64_bit):
@depends(c_compiler)
def default_debug_flags(compiler_info):
# Debug info is ON by default.
if compiler_info.type == 'msvc':
if compiler_info.type in ('msvc', 'clang-cl'):
return '-Zi'
return '-g'

View File

@ -353,6 +353,13 @@ const TEST_DATA = [
input: "color: blue \\9 no\\_need",
expected: [{name: "color", value: "blue \\9 no_need", priority: "", offsets: [0, 23]}]
},
// Regression test for bug 1297890 - don't paste tokens.
{
parseComments: true,
input: "stroke-dasharray: 1/*ThisIsAComment*/2;",
expected: [{name: "stroke-dasharray", value: "1 2", priority: "", offsets: [0, 39]}]
},
];
function run_test() {

View File

@ -78,7 +78,7 @@ function getMessageComponent(message) {
case MESSAGE_TYPE.LOG:
return componentMap.get("PageError");
default:
componentMap.get("DefaultRenderer");
return componentMap.get("DefaultRenderer");
}
}

View File

@ -45,6 +45,15 @@ function ConsoleApiCall(props) {
const icon = MessageIcon({level});
const repeat = MessageRepeat({repeat: message.repeat});
const shouldRenderFrame = frame && frame.source !== "debugger eval code";
const location = dom.span({ className: "message-location devtools-monospace" },
shouldRenderFrame ? FrameView({
frame,
onClick: onViewSourceInDebugger,
showEmptyPathAsHost: true,
sourceMapService
}) : null
);
let collapse = "";
let attachment = "";
@ -83,7 +92,6 @@ function ConsoleApiCall(props) {
classes.push("open");
}
const shouldRenderFrame = frame && frame.source !== "debugger eval code";
return dom.div({
className: classes.join(" ")
},
@ -92,23 +100,14 @@ function ConsoleApiCall(props) {
icon,
collapse,
dom.span({className: "message-body-wrapper"},
dom.span({},
dom.span({className: "message-flex-body"},
dom.span({className: "message-body devtools-monospace"},
messageBody
),
repeat,
dom.span({ className: "message-location devtools-monospace" },
shouldRenderFrame ? FrameView({
frame,
onClick: onViewSourceInDebugger,
showEmptyPathAsHost: true,
sourceMapService
}) : null
)
dom.span({className: "message-flex-body"},
dom.span({className: "message-body devtools-monospace"},
messageBody
),
attachment
)
repeat,
location
),
attachment
)
);
}

View File

@ -47,8 +47,12 @@ function ConsoleCommand(props) {
// @TODO add timestamp
// @TODO add indent if necessary
icon,
dom.span({className: "message-body-wrapper message-body devtools-monospace"},
dom.span({}, message.messageText)
dom.span({ className: "message-body-wrapper" },
dom.span({ className: "message-flex-body" },
dom.span({ className: "message-body devtools-monospace" },
message.messageText
)
)
)
);
}

View File

@ -42,10 +42,11 @@ function EvaluationResult(props) {
// @TODO add timestamp
// @TODO add indent if needed with console.group
icon,
dom.span(
{className: "message-body-wrapper message-body devtools-monospace"},
dom.span({},
GripMessageBody({grip: message.parameters})
dom.span({ className: "message-body-wrapper" },
dom.span({ className: "message-flex-body" },
dom.span({ className: "message-body devtools-monospace" },
GripMessageBody({grip: message.parameters})
)
)
)
);

View File

@ -12,6 +12,7 @@ const {
DOM: dom,
PropTypes
} = require("devtools/client/shared/vendor/react");
const FrameView = createFactory(require("devtools/client/shared/components/frame"));
const MessageRepeat = createFactory(require("devtools/client/webconsole/new-console-output/components/message-repeat").MessageRepeat);
const MessageIcon = createFactory(require("devtools/client/webconsole/new-console-output/components/message-icon").MessageIcon);
@ -22,11 +23,20 @@ PageError.propTypes = {
};
function PageError(props) {
const { message } = props;
const {source, level} = message;
const { message, sourceMapService, onViewSourceInDebugger } = props;
const { source, level, frame } = message;
const repeat = MessageRepeat({repeat: message.repeat});
const icon = MessageIcon({level});
const shouldRenderFrame = frame && frame.source !== "debugger eval code";
const location = dom.span({ className: "message-location devtools-monospace" },
shouldRenderFrame ? FrameView({
frame,
onClick: onViewSourceInDebugger,
showEmptyPathAsHost: true,
sourceMapService
}) : null
);
const classes = ["message"];
@ -42,13 +52,14 @@ function PageError(props) {
className: classes.join(" ")
},
icon,
dom.span(
{className: "message-body-wrapper message-body devtools-monospace"},
dom.span({},
message.messageText
dom.span({ className: "message-body-wrapper" },
dom.span({ className: "message-flex-body" },
dom.span({ className: "message-body devtools-monospace" },
message.messageText
),
repeat
)
),
repeat
)
);
}

View File

@ -25,6 +25,6 @@ describe("EvaluationResult component:", () => {
});
function getMessageBody(rendered) {
const queryPath = "div.message.cm-s-mozilla span.message-body-wrapper.message-body.devtools-monospace";
const queryPath = "div.message span.message-body-wrapper span.message-body";
return rendered.querySelector(queryPath);
}

View File

@ -23,6 +23,6 @@ describe("PageError component:", () => {
});
function getMessageBody(rendered) {
const queryPath = "div.message span.message-body-wrapper.message-body.devtools-monospace";
const queryPath = "div.message span.message-body-wrapper span.message-body";
return rendered.querySelector(queryPath);
}

View File

@ -116,11 +116,18 @@ function transformPacket(packet) {
level = MESSAGE_LEVEL.INFO;
}
const frame = {
source: pageError.sourceName,
line: pageError.lineNumber,
column: pageError.columnNumber
};
return new ConsoleMessage({
source: MESSAGE_SOURCE.JAVASCRIPT,
type: MESSAGE_TYPE.LOG,
level,
messageText: pageError.errorMessage,
frame,
});
}

View File

@ -367,6 +367,8 @@ function parseDeclarationsInternal(isCssPropertyKnown, inputString,
// Insert the new declarations just before the final element.
let lastDecl = declarations.pop();
declarations = [...declarations, ...newDecls, lastDecl];
} else {
current += " ";
}
} else {
current += inputString.substring(token.startOffset, token.endOffset);
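The branch added above explains the regression test earlier in this commit (bug 1297890): when the parser drops an ignored comment, it now emits a space so the tokens on either side cannot be pasted together ("1/*c*/2" must read back as "1 2", not "12"). A minimal standalone sketch of the same invariant, written in Java purely for illustration (the class name and the regex shortcut are assumptions, not the devtools tokenizer):

// Illustrative sketch: the devtools parser walks CSS tokens rather than
// using a regex, but the invariant is the same -- an ignored comment must
// leave whitespace behind so neighbouring tokens do not fuse.
public final class CommentStripper {
  // Replace each /* ... */ comment with a single space (non-greedy match).
  public static String stripComments(String css) {
    return css.replaceAll("/\\*.*?\\*/", " ");
  }

  public static void main(String[] args) {
    // Prints "stroke-dasharray: 1 2;" -- "1" and "2" stay separate tokens.
    System.out.println(stripComments("stroke-dasharray: 1/*ThisIsAComment*/2;"));
  }
}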

View File

@ -123,6 +123,7 @@ function dumpSHistory(theWindow)
var popup = window.open('file_bug590573_1.html');
var gTestContinuation = null;
var loads = 0;
function pageLoad()
{
@ -131,94 +132,99 @@ function pageLoad()
dumpSHistory(window);
if (loads == 1) {
is(popup.scrollY, 0, "test 1");
popup.scroll(0, 100);
popup.history.pushState('', '', '?pushed');
is(popup.scrollY, 100, "test 2");
popup.scroll(0, 200); // set state-2's position to 200
popup.history.back();
is(popup.scrollY, 100, "test 3");
popup.scroll(0, 150); // set original page's position to 150
popup.history.forward();
is(popup.scrollY, 200, "test 4");
popup.history.back();
is(popup.scrollY, 150, "test 5");
popup.history.forward();
is(popup.scrollY, 200, "test 6");
// At this point, the history looks like:
// PATH POSITION
// file_bug590573_1.html 150 <-- oldest
// file_bug590573_1.html?pushed 200 <-- newest, current
// Now test that the scroll position is persisted when we have real
// navigations involved. First, we need to spin the event loop so that the
// navigation doesn't replace our current history entry.
setTimeout(pageLoad, 0);
if (!gTestContinuation) {
gTestContinuation = testBody();
}
else if (loads == 2) {
page2LoadCallbackEnabled = true;
popup.location = 'file_bug590573_2.html';
}
else if (loads == 3) {
ok(popup.location.href.match('file_bug590573_2.html$'),
"Location was " + popup.location +
" but should end with file_bug590573_2.html");
is(popup.scrollY, 0, "test 7");
popup.scroll(0, 300);
// We need to spin the event loop again before we go back, otherwise the
// scroll positions don't get updated properly.
setTimeout(pageLoad, 0);
}
else if (loads == 4) {
page1PageShowCallbackEnabled = true;
popup.history.back();
}
else if (loads == 5) {
// Spin the event loop again so that we get the right scroll positions.
setTimeout(pageLoad, 0);
}
else if (loads == 6) {
is(popup.location.search, "?pushed");
ok(popup.document.getElementById('div1'), 'page should have div1.');
is(popup.scrollY, 200, "test 8");
popup.history.back();
is(popup.scrollY, 150, "test 9");
popup.history.forward();
is(popup.scrollY, 200, "test 10");
// Spin one last time...
setTimeout(pageLoad, 0);
}
else if (loads == 7) {
page2PageShowCallbackEnabled = true;
popup.history.forward();
}
else if (loads == 8) {
// Bug 821821, on Android tegras we get 299 instead of 300 sometimes
if (popup.scrollY >= 299 && popup.scrollY <= 300) {
is(1, 1, "test 11");
} else {
is(1, 0, "test 11, got " + popup.scrollY + " for popup.scrollY instead of 299|300");
}
popup.close();
var ret = gTestContinuation.next();
if (ret.done) {
SimpleTest.finish();
}
else {
ok(false, "Got extra load!");
}
function* testBody()
{
is(popup.scrollY, 0, "test 1");
popup.scroll(0, 100);
popup.history.pushState('', '', '?pushed');
is(popup.scrollY, 100, "test 2");
popup.scroll(0, 200); // set state-2's position to 200
popup.history.back();
is(popup.scrollY, 100, "test 3");
popup.scroll(0, 150); // set original page's position to 150
popup.history.forward();
is(popup.scrollY, 200, "test 4");
popup.history.back();
is(popup.scrollY, 150, "test 5");
popup.history.forward();
is(popup.scrollY, 200, "test 6");
// At this point, the history looks like:
// PATH POSITION
// file_bug590573_1.html 150 <-- oldest
// file_bug590573_1.html?pushed 200 <-- newest, current
// Now test that the scroll position is persisted when we have real
// navigations involved. First, we need to spin the event loop so that the
// navigation doesn't replace our current history entry.
setTimeout(pageLoad, 0);
yield;
page2LoadCallbackEnabled = true;
popup.location = 'file_bug590573_2.html';
yield;
ok(popup.location.href.match('file_bug590573_2.html$'),
"Location was " + popup.location +
" but should end with file_bug590573_2.html");
is(popup.scrollY, 0, "test 7");
popup.scroll(0, 300);
// We need to spin the event loop again before we go back, otherwise the
// scroll positions don't get updated properly.
setTimeout(pageLoad, 0);
yield;
page1PageShowCallbackEnabled = true;
popup.history.back();
yield;
// Spin the event loop again so that we get the right scroll positions.
setTimeout(pageLoad, 0);
yield;
is(popup.location.search, "?pushed");
ok(popup.document.getElementById('div1'), 'page should have div1.');
is(popup.scrollY, 200, "test 8");
popup.history.back();
is(popup.scrollY, 150, "test 9");
popup.history.forward();
is(popup.scrollY, 200, "test 10");
// Spin one last time...
setTimeout(pageLoad, 0);
yield;
page2PageShowCallbackEnabled = true;
popup.history.forward();
yield;
// Bug 821821, on Android tegras we get 299 instead of 300 sometimes
if (popup.scrollY >= 299 && popup.scrollY <= 300) {
is(1, 1, "test 11");
} else {
is(1, 0, "test 11, got " + popup.scrollY + " for popup.scrollY instead of 299|300");
}
popup.close();
}
</script>

View File

@ -526,8 +526,17 @@ EventStateManager::PreHandleEvent(nsPresContext* aPresContext,
NS_WARN_IF_FALSE(!aTargetFrame ||
!aTargetFrame->GetContent() ||
aTargetFrame->GetContent() == aTargetContent ||
aTargetFrame->GetContent()->GetFlattenedTreeParent() == aTargetContent,
aTargetFrame->GetContent()->GetFlattenedTreeParent() == aTargetContent ||
aTargetFrame->IsGeneratedContentFrame(),
"aTargetFrame should be related with aTargetContent");
#if DEBUG
if (aTargetFrame && aTargetFrame->IsGeneratedContentFrame()) {
nsCOMPtr<nsIContent> targetContent;
aTargetFrame->GetContentForEvent(aEvent, getter_AddRefs(targetContent));
MOZ_ASSERT(aTargetContent == targetContent,
"Unexpected target for generated content frame!");
}
#endif
mCurrentTarget = aTargetFrame;
mCurrentTargetContent = nullptr;

View File

@ -1705,16 +1705,17 @@ ContentParent::AllocateLayerTreeId(ContentParent* aContent,
GPUProcessManager* gpu = GPUProcessManager::Get();
*aId = gpu->AllocateLayerTreeId();
if (!aContent || !aTopLevel) {
return false;
}
gpu->MapLayerTreeId(*aId, aContent->OtherPid());
if (!gfxPlatform::AsyncPanZoomEnabled()) {
return true;
}
if (!aContent || !aTopLevel) {
return false;
}
return aContent->SendNotifyLayerAllocated(aTabId, *aId);
}

View File

@ -176,10 +176,6 @@ static const char* GetOmxLibraryName()
if (version >= 17) {
return "libomxpluginkk.so";
}
else if (version < 14) {
// Below Honeycomb not supported
return nullptr;
}
// Ice Cream Sandwich and Jellybean
return "libomxplugin.so";

View File

@ -182,9 +182,14 @@ VideoSink::Start(int64_t aStartTime, const MediaInfo& aInfo)
[self] () {
self->mVideoSinkEndRequest.Complete();
self->TryUpdateRenderedVideoFrames();
// It is possible the video queue size is 0 and we have no frames to
// render. However, we need to call MaybeResolveEndPromise() to ensure
// mEndPromiseHolder is resolved.
self->MaybeResolveEndPromise();
}, [self] () {
self->mVideoSinkEndRequest.Complete();
self->TryUpdateRenderedVideoFrames();
self->MaybeResolveEndPromise();
}));
}
@ -418,12 +423,7 @@ VideoSink::UpdateRenderedVideoFrames()
mVideoFrameEndTime = std::max(mVideoFrameEndTime,
currentFrame ? currentFrame->GetEndTime() : lastDisplayedFrameEndTime);
// All frames are rendered; let's resolve the promise.
if (VideoQueue().IsFinished() &&
VideoQueue().GetSize() <= 1 &&
!mVideoSinkEndRequest.Exists()) {
mEndPromiseHolder.ResolveIfExists(true, __func__);
}
MaybeResolveEndPromise();
RenderVideoFrames(mVideoQueueSendToCompositorSize, clockTime, nowTime);
@ -448,6 +448,18 @@ VideoSink::UpdateRenderedVideoFrames()
});
}
void
VideoSink::MaybeResolveEndPromise()
{
AssertOwnerThread();
// All frames are rendered; let's resolve the promise.
if (VideoQueue().IsFinished() &&
VideoQueue().GetSize() <= 1 &&
!mVideoSinkEndRequest.Exists()) {
mEndPromiseHolder.ResolveIfExists(true, __func__);
}
}
void
VideoSink::DumpDebugInfo()
{

View File

@ -99,6 +99,8 @@ private:
void UpdateRenderedVideoFrames();
void UpdateRenderedVideoFramesByTimer();
void MaybeResolveEndPromise();
void AssertOwnerThread() const
{
MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());

View File

@ -371,12 +371,18 @@ AudioBuffer::StealJSArrayDataIntoSharedChannels(JSContext* aJSContext)
RefPtr<ThreadSharedFloatArrayBufferList> result =
new ThreadSharedFloatArrayBufferList(mJSChannels.Length());
for (uint32_t i = 0; i < mJSChannels.Length(); ++i) {
if (mJSChannels[i]) {
JS::ExposeObjectToActiveJS(mJSChannels[i]);
}
JS::Rooted<JSObject*> arrayBufferView(aJSContext, mJSChannels[i]);
bool isSharedMemory;
JS::Rooted<JSObject*> arrayBuffer(aJSContext,
JS_GetArrayBufferViewBuffer(aJSContext,
arrayBufferView,
&isSharedMemory));
if (arrayBuffer) {
JS::ExposeObjectToActiveJS(arrayBuffer);
}
// The channel data arrays should all have originated in
// RestoreJSChannelData, where they are created unshared.
MOZ_ASSERT(!isSharedMemory);

View File

@ -212,11 +212,6 @@ nsPluginInstanceOwner::GetImageContainer()
RefPtr<ImageContainer> container;
#if MOZ_WIDGET_ANDROID
// Right now we only draw with Gecko layers on Honeycomb and higher. See Paint()
// for what we do on other versions.
if (AndroidBridge::Bridge()->GetAPIVersion() < 11)
return nullptr;
LayoutDeviceRect r = GetPluginRect();
// NotifySize() causes Flash to do a bunch of stuff like ask for surfaces to render

View File

@ -29,17 +29,17 @@ window.addEventListener("popstate", function(aEvent) {
gState = aEvent.state;
}, false);
function waitForLoad(aCallback) {
function waitForLoad() {
function listener() {
gFrame.removeEventListener("load", listener, false);
SimpleTest.executeSoon(aCallback);
SimpleTest.executeSoon(continue_test);
}
gFrame.addEventListener("load", listener, false);
}
function loadContent(aURL, aCallback) {
waitForLoad(aCallback);
function loadContent(aURL) {
waitForLoad();
gFrame.src = aURL;
}
@ -61,42 +61,51 @@ function run_test() {
gFrame = document.getElementById("iframe");
test_basic_inner_navigation();
continue_test();
}
function end_test() {
testWin.done();
function* test_body()
{
yield* test_basic_inner_navigation();
yield* test_state_navigation();
}
function test_basic_inner_navigation() {
var gTestContinuation = null;
function continue_test() {
if (!gTestContinuation) {
gTestContinuation = test_body();
}
var ret = gTestContinuation.next();
if (ret.done) {
testWin.done();
}
}
function* test_basic_inner_navigation() {
// Navigate the inner frame a few times
loadContent(URL1, function() {
is(getURL(), URL1, "URL should be correct");
is(getContent(), "Test1", "Page should be correct");
yield loadContent(URL1);
is(getURL(), URL1, "URL should be correct");
is(getContent(), "Test1", "Page should be correct");
loadContent(URL2, function() {
is(getURL(), URL2, "URL should be correct");
is(getContent(), "Test2", "Page should be correct");
yield loadContent(URL2);
// Test that history is working
waitForLoad(function() {
is(getURL(), URL1, "URL should be correct");
is(getContent(), "Test1", "Page should be correct");
is(getURL(), URL2, "URL should be correct");
is(getContent(), "Test2", "Page should be correct");
waitForLoad(function() {
is(getURL(), URL2, "URL should be correct");
is(getContent(), "Test2", "Page should be correct");
// Test that history is working
window.history.back();
yield waitForLoad();
is(getURL(), URL1, "URL should be correct");
is(getContent(), "Test1", "Page should be correct");
test_state_navigation();
});
window.history.forward();
});
window.history.back();
});
});
window.history.forward();
yield waitForLoad();
is(getURL(), URL2, "URL should be correct");
is(getContent(), "Test2", "Page should be correct");
}
function test_state_navigation() {
function* test_state_navigation() {
window.history.pushState("STATE1", window.location);
is(getURL(), URL2, "URL should be correct");
@ -126,25 +135,20 @@ function test_state_navigation() {
is(getURL(), URL2, "URL should be correct");
is(getContent(), "Test2", "Page should be correct");
waitForLoad(function() {
is(getURL(), URL1, "URL should be correct");
is(getContent(), "Test1", "Page should be correct");
waitForLoad(function() {
is(gState, "START", "State should be correct");
is(getURL(), START, "URL should be correct");
is(getContent(), "Start", "Page should be correct");
end_test();
});
window.history.back();
is(gState, "START", "State should be correct");
});
window.history.back();
is(gState, "START", "State should be correct");
yield waitForLoad();
is(getURL(), URL1, "URL should be correct");
is(getContent(), "Test1", "Page should be correct");
window.history.back();
is(gState, "START", "State should be correct after going back twice");
yield waitForLoad();
is(gState, "START", "State should be correct");
is(getURL(), START, "URL should be correct");
is(getContent(), "Start", "Page should be correct");
}
</script>
</pre>

View File

@ -46,12 +46,6 @@ public:
}
};
static bool
IsSTSupported()
{
return AndroidBridge::Bridge()->GetAPIVersion() >= 14; /* ICS */
}
already_AddRefed<AndroidSurfaceTexture>
AndroidSurfaceTexture::Create()
{
@ -61,10 +55,6 @@ AndroidSurfaceTexture::Create()
already_AddRefed<AndroidSurfaceTexture>
AndroidSurfaceTexture::Create(GLContext* aContext, GLuint aTexture)
{
if (!IsSTSupported()) {
return nullptr;
}
RefPtr<AndroidSurfaceTexture> st = new AndroidSurfaceTexture();
if (!st->Init(aContext, aTexture)) {
printf_stderr("Failed to initialize AndroidSurfaceTexture");

View File

@ -579,7 +579,12 @@ bool Channel::ChannelImpl::ProcessOutgoingMessages() {
int[FileDescriptorSet::MAX_DESCRIPTORS_PER_MESSAGE]));
char buf[tmp];
if (partial_write_iter_.isNothing() &&
if (partial_write_iter_.isNothing()) {
Pickle::BufferList::IterImpl iter(msg->Buffers());
partial_write_iter_.emplace(iter);
}
if (partial_write_iter_.value().Data() == msg->Buffers().Start() &&
!msg->file_descriptor_set()->empty()) {
// This is the first chunk of a message which has descriptors to send
struct cmsghdr *cmsg;
@ -611,11 +616,6 @@ bool Channel::ChannelImpl::ProcessOutgoingMessages() {
size_t iov_count = 0;
size_t amt_to_write = 0;
if (partial_write_iter_.isNothing()) {
Pickle::BufferList::IterImpl iter(msg->Buffers());
partial_write_iter_.emplace(iter);
}
// How much of this message have we written so far?
Pickle::BufferList::IterImpl iter = partial_write_iter_.value();

View File

@ -15,7 +15,7 @@
namespace mozilla {
namespace mscom {
class DispatchForwarder : public IDispatch
class DispatchForwarder final : public IDispatch
{
public:
static HRESULT Create(IInterceptor* aInterceptor,

View File

@ -43,11 +43,14 @@ using mozilla::TimeStamp;
namespace {
class ShutdownEvent : public nsIObserver
class ShutdownEvent final : public nsIObserver
{
public:
NS_DECL_ISUPPORTS
NS_DECL_NSIOBSERVER
private:
~ShutdownEvent() {}
};
NS_IMPL_ISUPPORTS(ShutdownEvent, nsIObserver)

View File

@ -20,8 +20,8 @@ namespace mscom {
struct ArrayData;
class MainThreadHandoff : public IInterceptorSink
, public ICallFrameWalker
class MainThreadHandoff final : public IInterceptorSink
, public ICallFrameWalker
{
public:
static HRESULT Create(IInterceptorSink** aOutput);

View File

@ -83,7 +83,7 @@ nsColumnSetFrame::PaintColumnRule(nsRenderingContext* aCtx,
return;
nscolor ruleColor =
GetVisitedDependentColor(eCSSProperty__moz_column_rule_color);
GetVisitedDependentColor(eCSSProperty_column_rule_color);
// In order to re-use a large amount of code, we treat the column rule as a border.
// We create a new border style object and fill in all the details of the column rule as

View File

@ -1388,10 +1388,8 @@ nsPluginFrame::GetLayerState(nsDisplayListBuilder* aBuilder,
#ifdef MOZ_WIDGET_ANDROID
// We always want a layer on Honeycomb and later
if (AndroidBridge::Bridge()->GetAPIVersion() >= 11)
return LAYER_ACTIVE;
#endif
return LAYER_ACTIVE;
#else
if (mInstanceOwner->NeedsScrollImageLayer()) {
return LAYER_ACTIVE;
}
@ -1401,6 +1399,7 @@ nsPluginFrame::GetLayerState(nsDisplayListBuilder* aBuilder,
}
return LAYER_ACTIVE_FORCE;
#endif
}
class PluginFrameDidCompositeObserver final : public ClientLayerManager::

View File

@ -742,7 +742,7 @@ Declaration::GetValue(nsCSSPropertyID aProperty, nsAString& aValue,
case eCSSProperty_border_inline_end:
case eCSSProperty_border_block_start:
case eCSSProperty_border_block_end:
case eCSSProperty__moz_column_rule:
case eCSSProperty_column_rule:
case eCSSProperty_outline: {
const nsCSSPropertyID* subprops =
nsCSSProps::SubpropertyEntryFor(aProperty);
@ -1148,7 +1148,7 @@ Declaration::GetValue(nsCSSPropertyID aProperty, nsAString& aValue,
AppendValueToString(eCSSProperty_marker_end, aValue, aSerialization);
break;
}
case eCSSProperty__moz_columns: {
case eCSSProperty_columns: {
// Two values, column-count and column-width, separated by a space.
const nsCSSPropertyID* subprops =
nsCSSProps::SubpropertyEntryFor(aProperty);

View File

@ -3665,7 +3665,7 @@ StyleAnimationValue::ExtractComputedValue(nsCSSPropertyID aProperty,
BORDER_WIDTH_CASE(eCSSProperty_border_top_width, top)
#undef BORDER_WIDTH_CASE
case eCSSProperty__moz_column_rule_width:
case eCSSProperty_column_rule_width:
aComputedValue.SetCoordValue(
static_cast<const nsStyleColumn*>(styleStruct)->
GetComputedColumnRuleWidth());
@ -3698,7 +3698,7 @@ StyleAnimationValue::ExtractComputedValue(nsCSSPropertyID aProperty,
break;
}
case eCSSProperty__moz_column_rule_color: {
case eCSSProperty_column_rule_color: {
const nsStyleColumn *styleColumn =
static_cast<const nsStyleColumn*>(styleStruct);
nscolor color;
@ -3711,7 +3711,7 @@ StyleAnimationValue::ExtractComputedValue(nsCSSPropertyID aProperty,
break;
}
case eCSSProperty__moz_column_count: {
case eCSSProperty_column_count: {
const nsStyleColumn *styleColumn =
static_cast<const nsStyleColumn*>(styleStruct);
if (styleColumn->mColumnCount == NS_STYLE_COLUMN_COUNT_AUTO) {

View File

@ -7458,9 +7458,9 @@ static const nsCSSPropertyID kBorderBlockEndIDs[] = {
eCSSProperty_border_block_end_color
};
static const nsCSSPropertyID kColumnRuleIDs[] = {
eCSSProperty__moz_column_rule_width,
eCSSProperty__moz_column_rule_style,
eCSSProperty__moz_column_rule_color
eCSSProperty_column_rule_width,
eCSSProperty_column_rule_style,
eCSSProperty_column_rule_color
};
bool
@ -11640,9 +11640,9 @@ CSSParserImpl::ParsePropertyByFunction(nsCSSPropertyID aPropID)
case eCSSProperty_clip:
return ParseRect(eCSSProperty_clip);
case eCSSProperty__moz_columns:
case eCSSProperty_columns:
return ParseColumns();
case eCSSProperty__moz_column_rule:
case eCSSProperty_column_rule:
return ParseBorderSide(kColumnRuleIDs, false);
case eCSSProperty_content:
return ParseContent();
@ -13802,8 +13802,8 @@ CSSParserImpl::ParseColumns()
// find.
static const nsCSSPropertyID columnIDs[] = {
eCSSPropertyExtra_x_auto_value,
eCSSProperty__moz_column_count,
eCSSProperty__moz_column_width
eCSSProperty_column_count,
eCSSProperty_column_width
};
const int32_t numProps = MOZ_ARRAY_LENGTH(columnIDs);

View File

@ -1483,7 +1483,7 @@ CSS_PROP_SVG(
eStyleAnimType_Discrete)
CSS_PROP_COLUMN(
-moz-column-count,
_moz_column_count,
column_count,
CSS_PROP_DOMPROP_PREFIXED(ColumnCount),
CSS_PROPERTY_PARSE_VALUE |
// Need to reject 0 in addition to negatives. If we accept 0, we
@ -1496,7 +1496,7 @@ CSS_PROP_COLUMN(
eStyleAnimType_Custom)
CSS_PROP_COLUMN(
-moz-column-fill,
_moz_column_fill,
column_fill,
CSS_PROP_DOMPROP_PREFIXED(ColumnFill),
CSS_PROPERTY_PARSE_VALUE,
"",
@ -1506,7 +1506,7 @@ CSS_PROP_COLUMN(
eStyleAnimType_None)
CSS_PROP_COLUMN(
-moz-column-gap,
_moz_column_gap,
column_gap,
CSS_PROP_DOMPROP_PREFIXED(ColumnGap),
CSS_PROPERTY_PARSE_VALUE |
CSS_PROPERTY_VALUE_NONNEGATIVE,
@ -1517,13 +1517,13 @@ CSS_PROP_COLUMN(
eStyleAnimType_Coord)
CSS_PROP_SHORTHAND(
-moz-column-rule,
_moz_column_rule,
column_rule,
CSS_PROP_DOMPROP_PREFIXED(ColumnRule),
CSS_PROPERTY_PARSE_FUNCTION,
"")
CSS_PROP_COLUMN(
-moz-column-rule-color,
_moz_column_rule_color,
column_rule_color,
CSS_PROP_DOMPROP_PREFIXED(ColumnRuleColor),
CSS_PROPERTY_PARSE_VALUE |
CSS_PROPERTY_IGNORED_WHEN_COLORS_DISABLED,
@ -1534,7 +1534,7 @@ CSS_PROP_COLUMN(
eStyleAnimType_Custom)
CSS_PROP_COLUMN(
-moz-column-rule-style,
_moz_column_rule_style,
column_rule_style,
CSS_PROP_DOMPROP_PREFIXED(ColumnRuleStyle),
CSS_PROPERTY_PARSE_VALUE,
"",
@ -1544,7 +1544,7 @@ CSS_PROP_COLUMN(
eStyleAnimType_None)
CSS_PROP_COLUMN(
-moz-column-rule-width,
_moz_column_rule_width,
column_rule_width,
CSS_PROP_DOMPROP_PREFIXED(ColumnRuleWidth),
CSS_PROPERTY_PARSE_VALUE |
CSS_PROPERTY_VALUE_NONNEGATIVE,
@ -1555,7 +1555,7 @@ CSS_PROP_COLUMN(
eStyleAnimType_Custom)
CSS_PROP_COLUMN(
-moz-column-width,
_moz_column_width,
column_width,
CSS_PROP_DOMPROP_PREFIXED(ColumnWidth),
CSS_PROPERTY_PARSE_VALUE |
CSS_PROPERTY_VALUE_NONNEGATIVE,
@ -1566,7 +1566,7 @@ CSS_PROP_COLUMN(
eStyleAnimType_Coord)
CSS_PROP_SHORTHAND(
-moz-columns,
_moz_columns,
columns,
CSS_PROP_DOMPROP_PREFIXED(Columns),
CSS_PROPERTY_PARSE_FUNCTION,
"")

View File

@ -2842,17 +2842,17 @@ static const nsCSSPropertyID gOutlineSubpropTable[] = {
};
static const nsCSSPropertyID gColumnsSubpropTable[] = {
eCSSProperty__moz_column_count,
eCSSProperty__moz_column_width,
eCSSProperty_column_count,
eCSSProperty_column_width,
eCSSProperty_UNKNOWN
};
static const nsCSSPropertyID gColumnRuleSubpropTable[] = {
// nsCSSDeclaration.cpp outputs the subproperties in this order.
// It also depends on the color being third.
eCSSProperty__moz_column_rule_width,
eCSSProperty__moz_column_rule_style,
eCSSProperty__moz_column_rule_color,
eCSSProperty_column_rule_width,
eCSSProperty_column_rule_style,
eCSSProperty_column_rule_color,
eCSSProperty_UNKNOWN
};

View File

@ -275,14 +275,14 @@ COMPUTED_STYLE_PROP(box_flex, BoxFlex)
COMPUTED_STYLE_PROP(box_ordinal_group, BoxOrdinalGroup)
COMPUTED_STYLE_PROP(box_orient, BoxOrient)
COMPUTED_STYLE_PROP(box_pack, BoxPack)
COMPUTED_STYLE_PROP(_moz_column_count, ColumnCount)
COMPUTED_STYLE_PROP(_moz_column_fill, ColumnFill)
COMPUTED_STYLE_PROP(_moz_column_gap, ColumnGap)
//// COMPUTED_STYLE_PROP(_moz_column_rule, ColumnRule)
COMPUTED_STYLE_PROP(_moz_column_rule_color, ColumnRuleColor)
COMPUTED_STYLE_PROP(_moz_column_rule_style, ColumnRuleStyle)
COMPUTED_STYLE_PROP(_moz_column_rule_width, ColumnRuleWidth)
COMPUTED_STYLE_PROP(_moz_column_width, ColumnWidth)
COMPUTED_STYLE_PROP(column_count, ColumnCount)
COMPUTED_STYLE_PROP(column_fill, ColumnFill)
COMPUTED_STYLE_PROP(column_gap, ColumnGap)
//// COMPUTED_STYLE_PROP(column_rule, ColumnRule)
COMPUTED_STYLE_PROP(column_rule_color, ColumnRuleColor)
COMPUTED_STYLE_PROP(column_rule_style, ColumnRuleStyle)
COMPUTED_STYLE_PROP(column_rule_width, ColumnRuleWidth)
COMPUTED_STYLE_PROP(column_width, ColumnWidth)
COMPUTED_STYLE_PROP(float_edge, FloatEdge)
COMPUTED_STYLE_PROP(force_broken_image_icon, ForceBrokenImageIcon)
COMPUTED_STYLE_PROP(image_region, ImageRegion)

View File

@ -1517,7 +1517,7 @@ nsStyleContext::GetVisitedDependentColor(nsCSSPropertyID aProperty)
aProperty == eCSSProperty_border_bottom_color ||
aProperty == eCSSProperty_border_left_color ||
aProperty == eCSSProperty_outline_color ||
aProperty == eCSSProperty__moz_column_rule_color ||
aProperty == eCSSProperty_column_rule_color ||
aProperty == eCSSProperty_text_decoration_color ||
aProperty == eCSSProperty_text_emphasis_color ||
aProperty == eCSSProperty__webkit_text_fill_color ||

View File

@ -228,6 +228,7 @@ class BufferList : private AllocPolicy
// Special convenience method that returns Iter().Data().
char* Start() { return mSegments[0].mData; }
const char* Start() const { return mSegments[0].mData; }
IterImpl Iter() const { return IterImpl(*this); }

View File

@ -893,6 +893,7 @@ sync_java_files = [TOPSRCDIR + '/mobile/android/services/src/main/java/org/mozil
'sync/GlobalSession.java',
'sync/HTTPFailureException.java',
'sync/InfoCollections.java',
'sync/InfoConfiguration.java',
'sync/InfoCounts.java',
'sync/JSONRecordFetcher.java',
'sync/KeyBundleProvider.java',
@ -1014,6 +1015,13 @@ sync_java_files = [TOPSRCDIR + '/mobile/android/services/src/main/java/org/mozil
'sync/repositories/StoreFailedException.java',
'sync/repositories/StoreTracker.java',
'sync/repositories/StoreTrackingRepositorySession.java',
'sync/repositories/uploaders/BatchingUploader.java',
'sync/repositories/uploaders/BatchMeta.java',
'sync/repositories/uploaders/BufferSizeTracker.java',
'sync/repositories/uploaders/MayUploadProvider.java',
'sync/repositories/uploaders/Payload.java',
'sync/repositories/uploaders/PayloadUploadDelegate.java',
'sync/repositories/uploaders/RecordUploadRunnable.java',
'sync/Server11PreviousPostFailedException.java',
'sync/Server11RecordPostFailedException.java',
'sync/setup/activities/ActivityUtils.java',
@ -1030,6 +1038,7 @@ sync_java_files = [TOPSRCDIR + '/mobile/android/services/src/main/java/org/mozil
'sync/stage/EnsureCrypto5KeysStage.java',
'sync/stage/FennecTabsServerSyncStage.java',
'sync/stage/FetchInfoCollectionsStage.java',
'sync/stage/FetchInfoConfigurationStage.java',
'sync/stage/FetchMetaGlobalStage.java',
'sync/stage/FormHistoryServerSyncStage.java',
'sync/stage/GlobalSyncStage.java',

View File

@ -683,6 +683,8 @@ public class BrowserApp extends GeckoApp
mFindInPageBar = (FindInPageBar) findViewById(R.id.find_in_page);
mMediaCastingBar = (MediaCastingBar) findViewById(R.id.media_casting);
mDoorhangerOverlay = findViewById(R.id.doorhanger_overlay);
EventDispatcher.getInstance().registerGeckoThreadListener((GeckoEventListener)this,
"Gecko:DelayedStartup",
"Menu:Open",
@ -988,9 +990,6 @@ public class BrowserApp extends GeckoApp
@Override
public void onAttachedToWindow() {
mDoorhangerOverlay = findViewById(R.id.doorhanger_overlay);
mDoorhangerOverlay.setVisibility(View.VISIBLE);
// We can't show the first run experience until Gecko has finished initialization (bug 1077583).
checkFirstrun(this, new SafeIntent(getIntent()));
}

View File

@ -28,7 +28,7 @@ public final class RemoteManager implements IBinder.DeathRecipient {
private static RemoteManager sRemoteManager = null;
public synchronized static RemoteManager getInstance() {
if (sRemoteManager == null){
if (sRemoteManager == null) {
sRemoteManager = new RemoteManager();
}

View File

@ -93,7 +93,6 @@
android:layout_width="match_parent"
android:layout_height="match_parent"
android:background="@color/dark_transparent_overlay"
android:visibility="gone"
android:alpha="0"
android:layerType="hardware"/>

View File

@ -1,12 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/. -->
<resources>
<!-- These items are v11+ resources but are referenced in code shipped with
API 9 builds. Since v11+ resources don't ship on API 9 builds, in order
for the resource ID to be found (and thus compilation to succeed), we
provide dummy values below. -->
<item type="layout" name="tab_strip">@null</item>
</resources>

View File

@ -218,7 +218,7 @@ public final class NotificationHelper implements GeckoEventListener {
Log.i(LOGTAG, "buildNotificationPendingIntent, error parsing", ex);
}
final Intent notificationIntent = buildNotificationIntent(message, builder);
PendingIntent res = PendingIntent.getActivity(mContext, 0, notificationIntent, PendingIntent.FLAG_UPDATE_CURRENT);
PendingIntent res = PendingIntent.getBroadcast(mContext, 0, notificationIntent, PendingIntent.FLAG_UPDATE_CURRENT);
return res;
}

View File

@ -32,6 +32,7 @@ import org.mozilla.gecko.sync.stage.CompletedStage;
import org.mozilla.gecko.sync.stage.EnsureCrypto5KeysStage;
import org.mozilla.gecko.sync.stage.FennecTabsServerSyncStage;
import org.mozilla.gecko.sync.stage.FetchInfoCollectionsStage;
import org.mozilla.gecko.sync.stage.FetchInfoConfigurationStage;
import org.mozilla.gecko.sync.stage.FetchMetaGlobalStage;
import org.mozilla.gecko.sync.stage.FormHistoryServerSyncStage;
import org.mozilla.gecko.sync.stage.GlobalSyncStage;
@ -177,6 +178,8 @@ public class GlobalSession implements HttpResponseObserver {
stages.put(Stage.checkPreconditions, new CheckPreconditionsStage());
stages.put(Stage.fetchInfoCollections, new FetchInfoCollectionsStage());
stages.put(Stage.fetchMetaGlobal, new FetchMetaGlobalStage());
stages.put(Stage.fetchInfoConfiguration, new FetchInfoConfigurationStage(
config.infoConfigurationURL(), getAuthHeaderProvider()));
stages.put(Stage.ensureKeysStage, new EnsureCrypto5KeysStage());
stages.put(Stage.syncClientsEngine, new SyncClientsEngineStage());

View File

@ -0,0 +1,93 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.gecko.sync;
import android.util.Log;
import org.mozilla.gecko.background.common.log.Logger;
/**
* Wraps and provides access to configuration data returned from info/configuration.
* Docs: https://docs.services.mozilla.com/storage/apis-1.5.html#general-info
*
* - <b>max_request_bytes</b>: the maximum size in bytes of the overall
* HTTP request body that will be accepted by the server.
*
* - <b>max_post_records</b>: the maximum number of records that can be
* uploaded to a collection in a single POST request.
*
* - <b>max_post_bytes</b>: the maximum combined size in bytes of the
* record payloads that can be uploaded to a collection in a single
* POST request.
*
* - <b>max_total_records</b>: the maximum number of records that can be
* uploaded to a collection as part of a batched upload.
*
* - <b>max_total_bytes</b>: the maximum combined size in bytes of the
* record payloads that can be uploaded to a collection as part of
* a batched upload.
*/
public class InfoConfiguration {
private static final String LOG_TAG = "InfoConfiguration";
public static final String MAX_REQUEST_BYTES = "max_request_bytes";
public static final String MAX_POST_RECORDS = "max_post_records";
public static final String MAX_POST_BYTES = "max_post_bytes";
public static final String MAX_TOTAL_RECORDS = "max_total_records";
public static final String MAX_TOTAL_BYTES = "max_total_bytes";
private static final long DEFAULT_MAX_REQUEST_BYTES = 1048576;
private static final long DEFAULT_MAX_POST_RECORDS = 100;
private static final long DEFAULT_MAX_POST_BYTES = 1048576;
private static final long DEFAULT_MAX_TOTAL_RECORDS = 10000;
private static final long DEFAULT_MAX_TOTAL_BYTES = 104857600;
// While int's upper range is (2^31-1), which in bytes is equivalent to 2.147 GB, let's be optimistic
// about the future and use long here, so that this code works if the server decides its clients are
// all on fiber and have congress-library sized bookmark collections.
// Record counts are long for the sake of simplicity.
public final long maxRequestBytes;
public final long maxPostRecords;
public final long maxPostBytes;
public final long maxTotalRecords;
public final long maxTotalBytes;
public InfoConfiguration() {
Logger.debug(LOG_TAG, "info/configuration is unavailable, using defaults");
maxRequestBytes = DEFAULT_MAX_REQUEST_BYTES;
maxPostRecords = DEFAULT_MAX_POST_RECORDS;
maxPostBytes = DEFAULT_MAX_POST_BYTES;
maxTotalRecords = DEFAULT_MAX_TOTAL_RECORDS;
maxTotalBytes = DEFAULT_MAX_TOTAL_BYTES;
}
public InfoConfiguration(final ExtendedJSONObject record) {
Logger.debug(LOG_TAG, "info/configuration is " + record.toJSONString());
maxRequestBytes = getValueFromRecord(record, MAX_REQUEST_BYTES, DEFAULT_MAX_REQUEST_BYTES);
maxPostRecords = getValueFromRecord(record, MAX_POST_RECORDS, DEFAULT_MAX_POST_RECORDS);
maxPostBytes = getValueFromRecord(record, MAX_POST_BYTES, DEFAULT_MAX_POST_BYTES);
maxTotalRecords = getValueFromRecord(record, MAX_TOTAL_RECORDS, DEFAULT_MAX_TOTAL_RECORDS);
maxTotalBytes = getValueFromRecord(record, MAX_TOTAL_BYTES, DEFAULT_MAX_TOTAL_BYTES);
}
private static Long getValueFromRecord(ExtendedJSONObject record, String key, long defaultValue) {
if (!record.containsKey(key)) {
return defaultValue;
}
try {
Long val = record.getLong(key);
if (val == null) {
return defaultValue;
}
return val;
} catch (NumberFormatException e) {
Log.w(LOG_TAG, "Could not parse key " + key + " from record: " + record, e);
return defaultValue;
}
}
}
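The constructors above fall back to the built-in server defaults whenever info/configuration is unavailable, or a key is missing or malformed. A self-contained sketch of that defaulting behaviour, with a plain Map standing in for ExtendedJSONObject (an assumption made only for illustration):

import java.util.HashMap;
import java.util.Map;

public final class InfoConfigurationSketch {
  // Mirrors getValueFromRecord(): use the server-provided value when it is
  // present and numeric, otherwise fall back to the built-in default.
  static long getOrDefault(Map<String, Object> record, String key, long fallback) {
    Object value = record.get(key);
    if (value instanceof Number) {
      return ((Number) value).longValue();
    }
    return fallback;
  }

  public static void main(String[] args) {
    Map<String, Object> record = new HashMap<>();
    record.put("max_post_records", 50L); // server lowers one limit
    // "max_post_bytes" is absent, so the 1 MiB default applies.
    System.out.println(getOrDefault(record, "max_post_records", 100));   // 50
    System.out.println(getOrDefault(record, "max_post_bytes", 1048576)); // 1048576
  }
}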

View File

@ -30,6 +30,8 @@ public class SyncConfiguration {
public URI clusterURL;
public KeyBundle syncKeyBundle;
public InfoConfiguration infoConfiguration;
public CollectionKeys collectionKeys;
public InfoCollections infoCollections;
public MetaGlobal metaGlobal;
@ -366,6 +368,10 @@ public class SyncConfiguration {
return infoBaseURL() + "collections";
}
public String infoConfigurationURL() {
return infoBaseURL() + "configuration";
}
public String infoCollectionCountsURL() {
return infoBaseURL() + "collection_counts";
}

View File

@ -9,7 +9,8 @@ import org.mozilla.gecko.sync.net.SyncStorageResponse;
/**
* A fairly generic delegate to handle fetches of single JSON object blobs, as
* provided by <code>info/collections</code> and <code>info/collection_counts</code>.
* provided by <code>info/configuration</code>, <code>info/collections</code>
* and <code>info/collection_counts</code>.
*/
public interface JSONRecordFetchDelegate {
public void handleSuccess(ExtendedJSONObject body);

View File

@ -9,6 +9,14 @@ import org.mozilla.gecko.sync.Utils;
import ch.boye.httpclientandroidlib.HttpResponse;
public class SyncResponse extends MozResponse {
public static final String X_WEAVE_BACKOFF = "x-weave-backoff";
public static final String X_BACKOFF = "x-backoff";
public static final String X_LAST_MODIFIED = "x-last-modified";
public static final String X_WEAVE_TIMESTAMP = "x-weave-timestamp";
public static final String X_WEAVE_RECORDS = "x-weave-records";
public static final String X_WEAVE_QUOTA_REMAINING = "x-weave-quota-remaining";
public static final String X_WEAVE_ALERT = "x-weave-alert";
public SyncResponse(HttpResponse res) {
super(res);
}
@ -18,7 +26,7 @@ public class SyncResponse extends MozResponse {
* present.
*/
public int weaveBackoffInSeconds() throws NumberFormatException {
return this.getIntegerHeader("x-weave-backoff");
return this.getIntegerHeader(X_WEAVE_BACKOFF);
}
/**
@ -26,7 +34,7 @@ public class SyncResponse extends MozResponse {
* present.
*/
public int xBackoffInSeconds() throws NumberFormatException {
return this.getIntegerHeader("x-backoff");
return this.getIntegerHeader(X_BACKOFF);
}
/**
@ -80,8 +88,12 @@ public class SyncResponse extends MozResponse {
}
}
public long normalizedWeaveTimestamp() {
return normalizedTimestampForHeader(X_WEAVE_TIMESTAMP);
}
/**
* The timestamp returned from a Sync server is a decimal number of seconds,
* Timestamps returned from a Sync server are decimal numbers of seconds,
* e.g., 1323393518.04.
*
* We want milliseconds since epoch.
@ -89,26 +101,27 @@ public class SyncResponse extends MozResponse {
* @return milliseconds since the epoch, as a long, or -1 if the header
* was missing or invalid.
*/
public long normalizedWeaveTimestamp() {
String h = "x-weave-timestamp";
if (!this.hasHeader(h)) {
public long normalizedTimestampForHeader(String header) {
if (!this.hasHeader(header)) {
return -1;
}
return Utils.decimalSecondsToMilliseconds(this.response.getFirstHeader(h).getValue());
return Utils.decimalSecondsToMilliseconds(
this.response.getFirstHeader(header).getValue()
);
}
public int weaveRecords() throws NumberFormatException {
return this.getIntegerHeader("x-weave-records");
return this.getIntegerHeader(X_WEAVE_RECORDS);
}
public int weaveQuotaRemaining() throws NumberFormatException {
return this.getIntegerHeader("x-weave-quota-remaining");
return this.getIntegerHeader(X_WEAVE_QUOTA_REMAINING);
}
public String weaveAlert() {
if (this.hasHeader("x-weave-alert")) {
return this.response.getFirstHeader("x-weave-alert").getValue();
if (this.hasHeader(X_WEAVE_ALERT)) {
return this.response.getFirstHeader(X_WEAVE_ALERT).getValue();
}
return null;
}
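The refactor above generalizes the old normalizedWeaveTimestamp() so that any decimal-seconds header (X-Weave-Timestamp, X-Last-Modified) can be normalized through one path. Assuming Utils.decimalSecondsToMilliseconds simply shifts the decimal point, a worked sketch of the conversion:

import java.math.BigDecimal;

public final class TimestampSketch {
  // Assumed semantics of Utils.decimalSecondsToMilliseconds: shift the
  // decimal point three places, i.e. seconds -> milliseconds.
  static long decimalSecondsToMilliseconds(String headerValue) {
    return new BigDecimal(headerValue).movePointRight(3).longValue();
  }

  public static void main(String[] args) {
    // The example value from the Javadoc above.
    System.out.println(decimalSecondsToMilliseconds("1323393518.04")); // 1323393518040
  }
}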

View File

@ -120,7 +120,9 @@ public class SyncStorageRequest implements Resource {
SyncStorageRequestDelegate d = this.request.delegate;
SyncStorageResponse res = new SyncStorageResponse(response);
// It is the responsibility of the delegate handlers to completely consume the response.
if (res.wasSuccessful()) {
// In context of a Sync storage response, success is either a 200 OK or 202 Accepted.
// 202 is returned during uploads of data in a batching mode, indicating that more is expected.
if (res.getStatusCode() == 200 || res.getStatusCode() == 202) {
d.handleRequestSuccess(res);
} else {
Logger.warn(LOG_TAG, "HTTP request failed.");
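The comment above is the crux of this hunk: the generic wasSuccessful() check is replaced by an explicit whitelist, because a 202 Accepted during a batched upload is a success that keeps the batch open. A minimal sketch of that classification (the enum and method names are illustrative, not part of this code):

public final class BatchStatusSketch {
  enum Outcome { SUCCESS, FAILURE }

  // 200 OK completes a request normally; 202 Accepted acknowledges one POST
  // of a still-open upload batch. Anything else is routed to failure handling.
  static Outcome classify(int statusCode) {
    return (statusCode == 200 || statusCode == 202) ? Outcome.SUCCESS : Outcome.FAILURE;
  }

  public static void main(String[] args) {
    System.out.println(classify(200)); // SUCCESS
    System.out.println(classify(202)); // SUCCESS -- batch still open
    System.out.println(classify(412)); // FAILURE -- precondition failed
  }
}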

View File

@ -67,6 +67,19 @@ public class SyncStorageResponse extends SyncResponse {
return SyncStorageResponse.getServerErrorMessage(this.body().trim());
}
/**
* This header gives the last-modified time of the target resource as seen during processing of
* the request, and will be included in all success responses (200, 201, 204).
* When given in response to a write request, this will be equal to the server's current time and
* to the new last-modified time of any BSOs created or changed by the request.
*/
public String getLastModified() {
if (!response.containsHeader(X_LAST_MODIFIED)) {
return null;
}
return response.getFirstHeader(X_LAST_MODIFIED).getValue();
}
// TODO: Content-Type and Content-Length validation.
}
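A typical use of getLastModified(), sketched under the assumption that the caller echoes the value back as X-If-Unmodified-Since, so a concurrent write makes the next request fail (e.g. with 412) instead of clobbering data:

import java.util.HashMap;
import java.util.Map;

public final class IfUnmodifiedSketch {
  public static void main(String[] args) {
    String lastModified = "1473345600.12"; // a value getLastModified() might return

    // Echo it into the next write request; the server can then reject the
    // write if the collection changed since we last saw it.
    Map<String, String> nextRequestHeaders = new HashMap<>();
    if (lastModified != null) {
      nextRequestHeaders.put("X-If-Unmodified-Since", lastModified);
    }
    System.out.println(nextRequestHeaders);
  }
}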

View File

@ -7,6 +7,7 @@ package org.mozilla.gecko.sync.repositories;
import java.net.URISyntaxException;
import org.mozilla.gecko.sync.InfoCollections;
import org.mozilla.gecko.sync.InfoConfiguration;
import org.mozilla.gecko.sync.net.AuthHeaderProvider;
/**
@ -20,8 +21,8 @@ public class ConstrainedServer11Repository extends Server11Repository {
private String sort = null;
private long limit = -1;
public ConstrainedServer11Repository(String collection, String storageURL, AuthHeaderProvider authHeaderProvider, InfoCollections infoCollections, long limit, String sort) throws URISyntaxException {
super(collection, storageURL, authHeaderProvider, infoCollections);
public ConstrainedServer11Repository(String collection, String storageURL, AuthHeaderProvider authHeaderProvider, InfoCollections infoCollections, InfoConfiguration infoConfiguration, long limit, String sort) throws URISyntaxException {
super(collection, storageURL, authHeaderProvider, infoCollections, infoConfiguration);
this.limit = limit;
this.sort = sort;
}

View File

@ -9,11 +9,14 @@ import java.net.URISyntaxException;
import java.util.ArrayList;
import org.mozilla.gecko.sync.InfoCollections;
import org.mozilla.gecko.sync.InfoConfiguration;
import org.mozilla.gecko.sync.Utils;
import org.mozilla.gecko.sync.net.AuthHeaderProvider;
import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
import android.content.Context;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
/**
* A Server11Repository implements fetching and storing against the Sync 1.1 API.
@ -27,6 +30,8 @@ public class Server11Repository extends Repository {
protected final AuthHeaderProvider authHeaderProvider;
protected final InfoCollections infoCollections;
private final InfoConfiguration infoConfiguration;
/**
* Construct a new repository that fetches and stores against the Sync 1.1. API.
*
@ -36,7 +41,7 @@ public class Server11Repository extends Repository {
* @param infoCollections instance; must not be null.
* @throws URISyntaxException
*/
public Server11Repository(String collection, String storageURL, AuthHeaderProvider authHeaderProvider, InfoCollections infoCollections) throws URISyntaxException {
public Server11Repository(@NonNull String collection, @NonNull String storageURL, AuthHeaderProvider authHeaderProvider, @NonNull InfoCollections infoCollections, @NonNull InfoConfiguration infoConfiguration) throws URISyntaxException {
if (collection == null) {
throw new IllegalArgumentException("collection must not be null");
}
@ -50,6 +55,7 @@ public class Server11Repository extends Repository {
this.collectionURI = new URI(storageURL + (storageURL.endsWith("/") ? collection : "/" + collection));
this.authHeaderProvider = authHeaderProvider;
this.infoCollections = infoCollections;
this.infoConfiguration = infoConfiguration;
}
@Override
@ -119,4 +125,13 @@ public class Server11Repository extends Repository {
public boolean updateNeeded(long lastSyncTimestamp) {
return infoCollections.updateNeeded(collection, lastSyncTimestamp);
}
@Nullable
public Long getCollectionLastModified() {
return infoCollections.getTimestamp(collection);
}
public InfoConfiguration getInfoConfiguration() {
return infoConfiguration;
}
}

View File

@ -4,94 +4,32 @@
package org.mozilla.gecko.sync.repositories;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import org.json.simple.JSONArray;
import org.mozilla.gecko.background.common.log.Logger;
import org.mozilla.gecko.sync.CryptoRecord;
import org.mozilla.gecko.sync.DelayedWorkTracker;
import org.mozilla.gecko.sync.ExtendedJSONObject;
import org.mozilla.gecko.sync.HTTPFailureException;
import org.mozilla.gecko.sync.Server11PreviousPostFailedException;
import org.mozilla.gecko.sync.Server11RecordPostFailedException;
import org.mozilla.gecko.sync.UnexpectedJSONException;
import org.mozilla.gecko.sync.crypto.KeyBundle;
import org.mozilla.gecko.sync.net.AuthHeaderProvider;
import org.mozilla.gecko.sync.net.SyncResponse;
import org.mozilla.gecko.sync.net.SyncStorageCollectionRequest;
import org.mozilla.gecko.sync.net.SyncStorageRequest;
import org.mozilla.gecko.sync.net.SyncStorageRequestDelegate;
import org.mozilla.gecko.sync.net.SyncStorageResponse;
import org.mozilla.gecko.sync.net.WBOCollectionRequestDelegate;
import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionBeginDelegate;
import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionFetchRecordsDelegate;
import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionGuidsSinceDelegate;
import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionWipeDelegate;
import org.mozilla.gecko.sync.repositories.domain.Record;
import ch.boye.httpclientandroidlib.entity.ContentProducer;
import ch.boye.httpclientandroidlib.entity.EntityTemplate;
import org.mozilla.gecko.sync.repositories.uploaders.BatchingUploader;
public class Server11RepositorySession extends RepositorySession {
private static byte[] recordsStart;
private static byte[] recordSeparator;
private static byte[] recordsEnd;
static {
try {
recordsStart = "[\n".getBytes("UTF-8");
recordSeparator = ",\n".getBytes("UTF-8");
recordsEnd = "\n]\n".getBytes("UTF-8");
} catch (UnsupportedEncodingException e) {
// These won't fail.
}
}
public static final String LOG_TAG = "Server11Session";
private static final int UPLOAD_BYTE_THRESHOLD = 1024 * 1024; // 1MB.
private static final int UPLOAD_ITEM_THRESHOLD = 50;
private static final int PER_RECORD_OVERHEAD = 2; // Comma, newline.
// {}, newlines, but we get to skip one record overhead.
private static final int PER_BATCH_OVERHEAD = 5 - PER_RECORD_OVERHEAD;
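These constants encode the wire format built further down by ByteArraysContentProducer: records serialized as "[\n" record ",\n" record "\n]\n". A worked check that the accounting (start at PER_BATCH_OVERHEAD, add PER_RECORD_OVERHEAD plus the record length per record) reproduces the serialized length exactly:

import java.nio.charset.StandardCharsets;
import java.util.List;

public final class OverheadSketch {
  public static void main(String[] args) {
    List<String> records = List.of("{\"id\":\"a\"}", "{\"id\":\"b\"}");

    int byteCount = 5 - 2; // PER_BATCH_OVERHEAD
    for (String r : records) {
      byteCount += 2 + r.getBytes(StandardCharsets.UTF_8).length; // PER_RECORD_OVERHEAD + payload
    }

    String wire = "[\n" + String.join(",\n", records) + "\n]\n";
    System.out.println(byteCount);                                     // 27
    System.out.println(wire.getBytes(StandardCharsets.UTF_8).length); // 27
  }
}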
/**
* Return the X-Weave-Timestamp header from <code>response</code>, or the
* current time if it is missing.
* <p>
* <b>Warning:</b> this can cause the timestamp of <code>response</code> to
* cross domains (from server clock to local clock), which could cause records
* to be skipped on account of clock drift. This should never happen, because
* <i>every</i> server response should have a well-formed X-Weave-Timestamp
* header.
*
* @param response
* The <code>SyncStorageResponse</code> to interrogate.
* @return Normalized timestamp in milliseconds.
*/
public static long getNormalizedTimestamp(SyncStorageResponse response) {
long normalizedTimestamp = -1;
try {
normalizedTimestamp = response.normalizedWeaveTimestamp();
} catch (NumberFormatException e) {
Logger.warn(LOG_TAG, "Malformed X-Weave-Timestamp header received.", e);
}
if (-1 == normalizedTimestamp) {
Logger.warn(LOG_TAG, "Computing stand-in timestamp from local clock. Clock drift could cause records to be skipped.");
normalizedTimestamp = System.currentTimeMillis();
}
return normalizedTimestamp;
}
/**
* Used to track outstanding requests, so that we can abort them as needed.
*/
@ -150,7 +88,8 @@ public class Server11RepositorySession extends RepositorySession {
Logger.debug(LOG_TAG, "Fetch done.");
removeRequestFromPending();
final long normalizedTimestamp = getNormalizedTimestamp(response);
// This will change overall and will use X_LAST_MODIFIED in Bug 730142.
final long normalizedTimestamp = response.normalizedTimestampForHeader(SyncResponse.X_WEAVE_TIMESTAMP);
Logger.debug(LOG_TAG, "Fetch completed. Timestamp is " + normalizedTimestamp);
// When we're done processing other events, finish.
@ -205,27 +144,26 @@ public class Server11RepositorySession extends RepositorySession {
}
}
Server11Repository serverRepository;
AtomicLong uploadTimestamp = new AtomicLong(0);
private void bumpUploadTimestamp(long ts) {
while (true) {
long existing = uploadTimestamp.get();
if (existing > ts) {
return;
}
if (uploadTimestamp.compareAndSet(existing, ts)) {
return;
}
}
}
private BatchingUploader uploader;
public Server11RepositorySession(Repository repository) {
super(repository);
serverRepository = (Server11Repository) repository;
}
public Server11Repository getServerRepository() {
return serverRepository;
}
@Override
public void setStoreDelegate(RepositorySessionStoreDelegate delegate) {
this.delegate = delegate;
// Now that we have the delegate, we can initialize our uploader.
this.uploader = new BatchingUploader(this, storeWorkQueue, delegate);
}
private String flattenIDs(String[] guids) {
// Consider using Utils.toDelimitedString if and when the signature changes
// to Collection<String> guids.
@ -314,300 +252,30 @@ public class Server11RepositorySession extends RepositorySession {
// TODO: implement wipe.
}
protected Object recordsBufferMonitor = new Object();
/**
* Data of outbound records.
* <p>
* We buffer the data (rather than the <code>Record</code>) so that we can
* flush the buffer based on outgoing transmission size.
* <p>
* Access should be synchronized on <code>recordsBufferMonitor</code>.
*/
protected ArrayList<byte[]> recordsBuffer = new ArrayList<byte[]>();
/**
* GUIDs of outbound records.
* <p>
* Used to fail entire outgoing uploads.
* <p>
* Access should be synchronized on <code>recordsBufferMonitor</code>.
*/
protected ArrayList<String> recordGuidsBuffer = new ArrayList<String>();
protected int byteCount = PER_BATCH_OVERHEAD;
@Override
public void store(Record record) throws NoStoreDelegateException {
if (delegate == null) {
throw new NoStoreDelegateException();
}
this.enqueue(record);
}
/**
* Batch incoming records until some reasonable threshold (e.g., 50),
* some size limit is hit (probably way less than 3MB!), or storeDone
* is received.
* @param record
*/
protected void enqueue(Record record) {
// JSONify and store the bytes, rather than the record.
byte[] json = record.toJSONBytes();
int delta = json.length;
synchronized (recordsBufferMonitor) {
if ((delta + byteCount > UPLOAD_BYTE_THRESHOLD) ||
(recordsBuffer.size() >= UPLOAD_ITEM_THRESHOLD)) {
// POST the existing contents, then enqueue.
flush();
}
recordsBuffer.add(json);
recordGuidsBuffer.add(record.guid);
byteCount += PER_RECORD_OVERHEAD + delta;
// If delegate was set, this shouldn't happen.
if (uploader == null) {
throw new IllegalStateException("Uploader hasn't been initialized");
}
}
// Asynchronously upload records.
// Must be locked!
protected void flush() {
if (recordsBuffer.size() > 0) {
final ArrayList<byte[]> outgoing = recordsBuffer;
final ArrayList<String> outgoingGuids = recordGuidsBuffer;
RepositorySessionStoreDelegate uploadDelegate = this.delegate;
storeWorkQueue.execute(new RecordUploadRunnable(uploadDelegate, outgoing, outgoingGuids, byteCount));
recordsBuffer = new ArrayList<byte[]>();
recordGuidsBuffer = new ArrayList<String>();
byteCount = PER_BATCH_OVERHEAD;
}
uploader.process(record);
}
@Override
public void storeDone() {
Logger.debug(LOG_TAG, "storeDone().");
synchronized (recordsBufferMonitor) {
flush();
// Do this in a Runnable so that the timestamp is grabbed after any upload.
final Runnable r = new Runnable() {
@Override
public void run() {
synchronized (recordsBufferMonitor) {
final long end = uploadTimestamp.get();
Logger.debug(LOG_TAG, "Calling storeDone with " + end);
storeDone(end);
}
}
};
storeWorkQueue.execute(r);
}
}
/**
* <code>true</code> if a record upload has failed this session.
* <p>
* This is only set in begin and possibly by <code>RecordUploadRunnable</code>.
* Since those are executed serially, we can use an unsynchronized
* volatile boolean here.
*/
protected volatile boolean recordUploadFailed;
@Override
public void begin(RepositorySessionBeginDelegate delegate) throws InvalidSessionTransitionException {
recordUploadFailed = false;
super.begin(delegate);
}
/**
* Make an HTTP request, and convert HTTP request delegate callbacks into
* store callbacks within the context of this RepositorySession.
*
* @author rnewman
*
*/
protected class RecordUploadRunnable implements Runnable, SyncStorageRequestDelegate {
public final String LOG_TAG = "RecordUploadRunnable";
private final ArrayList<byte[]> outgoing;
private ArrayList<String> outgoingGuids;
private final long byteCount;
public RecordUploadRunnable(RepositorySessionStoreDelegate storeDelegate,
ArrayList<byte[]> outgoing,
ArrayList<String> outgoingGuids,
long byteCount) {
Logger.debug(LOG_TAG, "Preparing record upload for " +
outgoing.size() + " records (" +
byteCount + " bytes).");
this.outgoing = outgoing;
this.outgoingGuids = outgoingGuids;
this.byteCount = byteCount;
// If delegate was set, this shouldn't happen.
if (uploader == null) {
throw new IllegalStateException("Uploader hasn't been initialized");
}
@Override
public AuthHeaderProvider getAuthHeaderProvider() {
return serverRepository.getAuthHeaderProvider();
}
@Override
public String ifUnmodifiedSince() {
return null;
}
@Override
public void handleRequestSuccess(SyncStorageResponse response) {
Logger.trace(LOG_TAG, "POST of " + outgoing.size() + " records done.");
ExtendedJSONObject body;
try {
body = response.jsonObjectBody(); // jsonObjectBody() throws or returns non-null.
} catch (Exception e) {
Logger.error(LOG_TAG, "Got exception parsing POST success body.", e);
this.handleRequestError(e);
return;
}
// Be defensive when logging timestamp.
if (body.containsKey("modified")) {
Long modified = body.getTimestamp("modified");
if (modified != null) {
Logger.trace(LOG_TAG, "POST request success. Modified timestamp: " + modified);
} else {
Logger.warn(LOG_TAG, "POST success body contains malformed 'modified': " + body.toJSONString());
}
} else {
Logger.warn(LOG_TAG, "POST success body does not contain key 'modified': " + body.toJSONString());
}
try {
JSONArray success = body.getArray("success");
if ((success != null) &&
(success.size() > 0)) {
Logger.trace(LOG_TAG, "Successful records: " + success.toString());
for (Object o : success) {
try {
delegate.onRecordStoreSucceeded((String) o);
} catch (ClassCastException e) {
Logger.error(LOG_TAG, "Got exception parsing POST success guid.", e);
// Not much to be done.
}
}
long normalizedTimestamp = getNormalizedTimestamp(response);
Logger.trace(LOG_TAG, "Passing back upload X-Weave-Timestamp: " + normalizedTimestamp);
bumpUploadTimestamp(normalizedTimestamp);
}
success = null; // Want to GC this ASAP.
ExtendedJSONObject failed = body.getObject("failed");
if ((failed != null) &&
(failed.object.size() > 0)) {
Logger.debug(LOG_TAG, "Failed records: " + failed.object.toString());
Exception ex = new Server11RecordPostFailedException();
for (String guid : failed.keySet()) {
delegate.onRecordStoreFailed(ex, guid);
}
}
failed = null; // Want to GC this ASAP.
} catch (UnexpectedJSONException e) {
Logger.error(LOG_TAG, "Got exception processing success/failed in POST success body.", e);
// TODO
return;
}
Logger.debug(LOG_TAG, "POST of " + outgoing.size() + " records handled.");
}
@Override
public void handleRequestFailure(SyncStorageResponse response) {
// TODO: call session.interpretHTTPFailure.
this.handleRequestError(new HTTPFailureException(response));
}
@Override
public void handleRequestError(final Exception ex) {
Logger.warn(LOG_TAG, "Got request error.", ex);
recordUploadFailed = true;
ArrayList<String> failedOutgoingGuids = outgoingGuids;
outgoingGuids = null; // Want to GC this ASAP.
for (String guid : failedOutgoingGuids) {
delegate.onRecordStoreFailed(ex, guid);
}
return;
}
public class ByteArraysContentProducer implements ContentProducer {
ArrayList<byte[]> outgoing;
public ByteArraysContentProducer(ArrayList<byte[]> arrays) {
outgoing = arrays;
}
@Override
public void writeTo(OutputStream outstream) throws IOException {
int count = outgoing.size();
outstream.write(recordsStart);
outstream.write(outgoing.get(0));
for (int i = 1; i < count; ++i) {
outstream.write(recordSeparator);
outstream.write(outgoing.get(i));
}
outstream.write(recordsEnd);
}
}
public class ByteArraysEntity extends EntityTemplate {
private final long count;
public ByteArraysEntity(ArrayList<byte[]> arrays, long totalBytes) {
super(new ByteArraysContentProducer(arrays));
this.count = totalBytes;
this.setContentType("application/json");
// charset is set in BaseResource.
}
@Override
public long getContentLength() {
return count;
}
@Override
public boolean isRepeatable() {
return true;
}
}
public ByteArraysEntity getBodyEntity() {
ByteArraysEntity body = new ByteArraysEntity(outgoing, byteCount);
return body;
}
@Override
public void run() {
if (recordUploadFailed) {
Logger.info(LOG_TAG, "Previous record upload failed. Failing all records and not retrying.");
Exception ex = new Server11PreviousPostFailedException();
for (String guid : outgoingGuids) {
delegate.onRecordStoreFailed(ex, guid);
}
return;
}
if (outgoing == null ||
outgoing.size() == 0) {
Logger.debug(LOG_TAG, "No items: RecordUploadRunnable returning immediately.");
return;
}
URI u = serverRepository.collectionURI();
SyncStorageRequest request = new SyncStorageRequest(u);
request.delegate = this;
// We don't want the task queue to proceed until this request completes.
// Fortunately, BaseResource is currently synchronous.
// If that ever changes, you'll need to block here.
ByteArraysEntity body = getBodyEntity();
request.post(body);
}
    // In the new batching flow, storeDone() signals completion via
    // uploader.noMoreRecordsToUpload() instead of flushing directly.
  }

View File

@ -0,0 +1,165 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.gecko.sync.repositories.uploaders;
import android.support.annotation.CheckResult;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import org.mozilla.gecko.background.common.log.Logger;
import java.util.ArrayList;
import java.util.List;
import org.mozilla.gecko.sync.repositories.uploaders.BatchingUploader.TokenModifiedException;
import org.mozilla.gecko.sync.repositories.uploaders.BatchingUploader.LastModifiedChangedUnexpectedly;
import org.mozilla.gecko.sync.repositories.uploaders.BatchingUploader.LastModifiedDidNotChange;
/**
* Keeps track of token, Last-Modified value and GUIDs of succeeded records.
*/
/* @ThreadSafe */
public class BatchMeta extends BufferSizeTracker {
private static final String LOG_TAG = "BatchMeta";
// Will be set once first payload upload succeeds. We don't expect this to change until we
// commit the batch, at which point it must change.
/* @GuardedBy("this") */ private Long lastModified;
// Will be set once first payload upload succeeds. We don't expect this to ever change until
// a commit succeeds, at which point this gets set to null.
/* @GuardedBy("this") */ private String token;
/* @GuardedBy("accessLock") */ private boolean isUnlimited = false;
// Accessed by synchronously running threads.
/* @GuardedBy("accessLock") */ private final List<String> successRecordGuids = new ArrayList<>();
/* @GuardedBy("accessLock") */ private boolean needsCommit = false;
protected final Long collectionLastModified;
public BatchMeta(@NonNull Object payloadLock, long maxBytes, long maxRecords, @Nullable Long collectionLastModified) {
super(payloadLock, maxBytes, maxRecords);
this.collectionLastModified = collectionLastModified;
}
protected void setIsUnlimited(boolean isUnlimited) {
synchronized (accessLock) {
this.isUnlimited = isUnlimited;
}
}
@Override
protected boolean canFit(long recordDeltaByteCount) {
synchronized (accessLock) {
return isUnlimited || super.canFit(recordDeltaByteCount);
}
}
@Override
@CheckResult
protected boolean addAndEstimateIfFull(long recordDeltaByteCount) {
synchronized (accessLock) {
needsCommit = true;
boolean isFull = super.addAndEstimateIfFull(recordDeltaByteCount);
return !isUnlimited && isFull;
}
}
protected boolean needToCommit() {
synchronized (accessLock) {
return needsCommit;
}
}
protected synchronized String getToken() {
return token;
}
protected synchronized void setToken(final String newToken, boolean isCommit) throws TokenModifiedException {
// Set token once in a batching mode.
// In a non-batching mode, this.token and newToken will be null, and this is a no-op.
if (token == null) {
token = newToken;
return;
}
// Sanity checks.
if (isCommit) {
// We expect token to be null when a commit payload succeeds.
if (newToken != null) {
throw new TokenModifiedException();
} else {
token = null;
}
return;
}
// We expect new token to always equal current token for non-commit payloads.
if (!token.equals(newToken)) {
throw new TokenModifiedException();
}
}
protected synchronized Long getLastModified() {
if (lastModified == null) {
return collectionLastModified;
}
return lastModified;
}
protected synchronized void setLastModified(final Long newLastModified, final boolean expectedToChange) throws LastModifiedChangedUnexpectedly, LastModifiedDidNotChange {
if (lastModified == null) {
lastModified = newLastModified;
return;
}
if (!expectedToChange && !lastModified.equals(newLastModified)) {
Logger.debug(LOG_TAG, "Last-Modified timestamp changed when we didn't expect it");
throw new LastModifiedChangedUnexpectedly();
} else if (expectedToChange && lastModified.equals(newLastModified)) {
Logger.debug(LOG_TAG, "Last-Modified timestamp did not change when we expected it to");
throw new LastModifiedDidNotChange();
} else {
lastModified = newLastModified;
}
}
protected ArrayList<String> getSuccessRecordGuids() {
synchronized (accessLock) {
return new ArrayList<>(this.successRecordGuids);
}
}
protected void recordSucceeded(final String recordGuid) {
// Sanity check.
if (recordGuid == null) {
throw new IllegalStateException();
}
synchronized (accessLock) {
successRecordGuids.add(recordGuid);
}
}
@Override
protected boolean canFitRecordByteDelta(long byteDelta, long recordCount, long byteCount) {
return isUnlimited || super.canFitRecordByteDelta(byteDelta, recordCount, byteCount);
}
@Override
protected void reset() {
synchronized (accessLock) {
super.reset();
token = null;
lastModified = null;
successRecordGuids.clear();
needsCommit = false;
}
}
}
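
To make the token rules above concrete, here is a minimal usage sketch. This is hypothetical driver code, not part of the patch, and it assumes in-package access since setToken() is protected:

BatchMeta meta = new BatchMeta(new Object(), 1024, 5, null);
try {
  meta.setToken("MTIzNA", false); // First payload response: token recorded.
  meta.setToken("MTIzNA", false); // Subsequent payloads: same token, a no-op.
  meta.setToken(null, true);      // Commit response carries no token: cleared.
} catch (BatchingUploader.TokenModifiedException e) {
  // Thrown if the server hands back a different token mid-batch,
  // or a non-null token on commit.
}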

View File

@ -0,0 +1,344 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.gecko.sync.repositories.uploaders;
import android.net.Uri;
import android.support.annotation.VisibleForTesting;
import org.mozilla.gecko.background.common.log.Logger;
import org.mozilla.gecko.sync.InfoConfiguration;
import org.mozilla.gecko.sync.Server11RecordPostFailedException;
import org.mozilla.gecko.sync.net.SyncResponse;
import org.mozilla.gecko.sync.net.SyncStorageResponse;
import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
import org.mozilla.gecko.sync.repositories.domain.Record;
import java.util.ArrayList;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicLong;
/**
* Uploader which implements batching introduced in Sync 1.5.
*
 * Batch vs. payload terminology:
 * - A batch comprises a series of payloads, which are all committed at the same time.
 * -- It is identified via a "batch token", which is returned after the first payload for the batch has been uploaded.
 * - A payload is a collection of records which are uploaded together. It is associated with a batch.
 * -- The last payload, identified via commit=true, commits the batch.
*
* Limits for how many records can fit into a payload and into a batch are defined in the passed-in
* InfoConfiguration object.
*
* If we can't fit everything we'd like to upload into one batch (according to max-total-* limits),
* then we commit that batch, and start a new one. There are no explicit limits on total number of
* batches we might use, although at some point we'll start to run into storage limit errors from the API.
*
 * Once we go past using one batch, this uploader is no longer "atomic". Partial state is exposed
* to other clients after our first batch is committed and before our last batch is committed.
* However, our per-batch limits are high, X-I-U-S mechanics help protect downloading clients
* (as long as they implement X-I-U-S) with 412 error codes in case of interleaving upload and download,
* and most mobile clients will not be uploading large-enough amounts of data (especially structured
* data, such as bookmarks).
*
* Last-Modified header returned with the first batch payload POST success is maintained for a batch,
* to guard against concurrent-modification errors (different uploader commits before we're done).
*
* Non-batching mode notes:
* We also support Sync servers which don't enable batching for uploads. In this case, we respect
* payload limits for individual uploads, and every upload is considered a commit. Batching limits
* do not apply, and batch token is irrelevant.
* We do keep track of Last-Modified and send along X-I-U-S with our uploads, to protect against
* concurrent modifications by other clients.
*/
public class BatchingUploader {
private static final String LOG_TAG = "BatchingUploader";
private final Uri collectionUri;
private volatile boolean recordUploadFailed = false;
private final BatchMeta batchMeta;
private final Payload payload;
// Accessed by synchronously running threads, OK to not synchronize and just make it volatile.
private volatile Boolean inBatchingMode;
// Used to ensure we have thread-safe access to the following:
// - byte and record counts in both Payload and BatchMeta objects
// - buffers in the Payload object
private final Object payloadLock = new Object();
protected Executor workQueue;
protected final RepositorySessionStoreDelegate sessionStoreDelegate;
protected final Server11RepositorySession repositorySession;
protected AtomicLong uploadTimestamp = new AtomicLong(0);
protected static final int PER_RECORD_OVERHEAD_BYTE_COUNT = RecordUploadRunnable.RECORD_SEPARATOR.length;
protected static final int PER_PAYLOAD_OVERHEAD_BYTE_COUNT = RecordUploadRunnable.RECORDS_END.length;
// Sanity check. RECORD_SEPARATOR and RECORDS_START are assumed to be of the same length.
static {
if (RecordUploadRunnable.RECORD_SEPARATOR.length != RecordUploadRunnable.RECORDS_START.length) {
throw new IllegalStateException("Separator and start tokens must be of the same length");
}
}
public BatchingUploader(final Server11RepositorySession repositorySession, final Executor workQueue, final RepositorySessionStoreDelegate sessionStoreDelegate) {
this.repositorySession = repositorySession;
this.workQueue = workQueue;
this.sessionStoreDelegate = sessionStoreDelegate;
this.collectionUri = Uri.parse(repositorySession.getServerRepository().collectionURI().toString());
InfoConfiguration config = repositorySession.getServerRepository().getInfoConfiguration();
this.batchMeta = new BatchMeta(
payloadLock, config.maxTotalBytes, config.maxTotalRecords,
repositorySession.getServerRepository().getCollectionLastModified()
);
this.payload = new Payload(payloadLock, config.maxPostBytes, config.maxPostRecords);
}
public void process(final Record record) {
final String guid = record.guid;
final byte[] recordBytes = record.toJSONBytes();
final long recordDeltaByteCount = recordBytes.length + PER_RECORD_OVERHEAD_BYTE_COUNT;
Logger.debug(LOG_TAG, "Processing a record with guid: " + guid);
// We can't upload individual records which exceed our payload byte limit.
if ((recordDeltaByteCount + PER_PAYLOAD_OVERHEAD_BYTE_COUNT) > payload.maxBytes) {
sessionStoreDelegate.onRecordStoreFailed(new RecordTooLargeToUpload(), guid);
return;
}
synchronized (payloadLock) {
final boolean canFitRecordIntoBatch = batchMeta.canFit(recordDeltaByteCount);
final boolean canFitRecordIntoPayload = payload.canFit(recordDeltaByteCount);
// Record fits!
if (canFitRecordIntoBatch && canFitRecordIntoPayload) {
Logger.debug(LOG_TAG, "Record fits into the current batch and payload");
addAndFlushIfNecessary(recordDeltaByteCount, recordBytes, guid);
// Payload won't fit the record.
} else if (canFitRecordIntoBatch) {
Logger.debug(LOG_TAG, "Current payload won't fit incoming record, uploading payload.");
flush(false, false);
Logger.debug(LOG_TAG, "Recording the incoming record into a new payload");
// Keep track of the overflow record.
addAndFlushIfNecessary(recordDeltaByteCount, recordBytes, guid);
// Batch won't fit the record.
} else {
Logger.debug(LOG_TAG, "Current batch won't fit incoming record, committing batch.");
flush(true, false);
Logger.debug(LOG_TAG, "Recording the incoming record into a new batch");
batchMeta.reset();
// Keep track of the overflow record.
addAndFlushIfNecessary(recordDeltaByteCount, recordBytes, guid);
}
}
}
// Convenience function used from the process method; caller must hold a payloadLock.
private void addAndFlushIfNecessary(long byteCount, byte[] recordBytes, String guid) {
boolean isPayloadFull = payload.addAndEstimateIfFull(byteCount, recordBytes, guid);
boolean isBatchFull = batchMeta.addAndEstimateIfFull(byteCount);
// Preemptively commit the batch or upload a payload if either is estimated to be full.
if (isBatchFull) {
flush(true, false);
batchMeta.reset();
} else if (isPayloadFull) {
flush(false, false);
}
}
public void noMoreRecordsToUpload() {
Logger.debug(LOG_TAG, "Received 'no more records to upload' signal.");
// Run this after the last payload succeeds, so that we know for sure if we're in a batching
// mode and need to commit with a potentially empty payload.
workQueue.execute(new Runnable() {
@Override
public void run() {
commitIfNecessaryAfterLastPayload();
}
});
}
@VisibleForTesting
protected void commitIfNecessaryAfterLastPayload() {
// Must be called after last payload upload finishes.
    synchronized (payloadLock) {
// If we have any pending records in the Payload, flush them!
if (!payload.isEmpty()) {
flush(true, true);
// If we have an empty payload but need to commit the batch in the batching mode, flush!
} else if (batchMeta.needToCommit() && Boolean.TRUE.equals(inBatchingMode)) {
flush(true, true);
// Otherwise, we're done.
} else {
finished(uploadTimestamp);
}
}
}
/**
* We've been told by our upload delegate that a payload succeeded.
* Depending on the type of payload and batch mode status, inform our delegate of progress.
*
* @param response success response to our commit post
* @param isCommit was this a commit upload?
 * @param isLastPayload was this the very last payload we'll upload?
*/
public void payloadSucceeded(final SyncStorageResponse response, final boolean isCommit, final boolean isLastPayload) {
// Sanity check.
if (inBatchingMode == null) {
throw new IllegalStateException("Can't process payload success until we know if we're in a batching mode");
}
// We consider records to have been committed if we're not in a batching mode or this was a commit.
// If records have been committed, notify our store delegate.
if (!inBatchingMode || isCommit) {
for (String guid : batchMeta.getSuccessRecordGuids()) {
sessionStoreDelegate.onRecordStoreSucceeded(guid);
}
}
// If this was our very last commit, we're done storing records.
// Get Last-Modified timestamp from the response, and pass it upstream.
if (isLastPayload) {
finished(response.normalizedTimestampForHeader(SyncResponse.X_LAST_MODIFIED));
}
}
public void lastPayloadFailed() {
finished(uploadTimestamp);
}
private void finished(long lastModifiedTimestamp) {
bumpTimestampTo(uploadTimestamp, lastModifiedTimestamp);
finished(uploadTimestamp);
}
private void finished(AtomicLong lastModifiedTimestamp) {
repositorySession.storeDone(lastModifiedTimestamp.get());
}
public BatchMeta getCurrentBatch() {
return batchMeta;
}
public void setInBatchingMode(boolean inBatchingMode) {
this.inBatchingMode = inBatchingMode;
// If we know for sure that we're not in a batching mode,
// consider our batch to be of unlimited size.
this.batchMeta.setIsUnlimited(!inBatchingMode);
}
public Boolean getInBatchingMode() {
return inBatchingMode;
}
public void setLastModified(final Long lastModified, final boolean isCommit) throws BatchingUploaderException {
// Sanity check.
if (inBatchingMode == null) {
throw new IllegalStateException("Can't process Last-Modified before we know we're in a batching mode.");
}
// In non-batching mode, every time we receive a Last-Modified timestamp, we expect it to change
// since records are "committed" (become visible to other clients) on every payload.
// In batching mode, we only expect Last-Modified to change when we commit a batch.
batchMeta.setLastModified(lastModified, isCommit || !inBatchingMode);
}
public void recordSucceeded(final String recordGuid) {
Logger.debug(LOG_TAG, "Record store succeeded: " + recordGuid);
batchMeta.recordSucceeded(recordGuid);
}
public void recordFailed(final String recordGuid) {
recordFailed(new Server11RecordPostFailedException(), recordGuid);
}
public void recordFailed(final Exception e, final String recordGuid) {
Logger.debug(LOG_TAG, "Record store failed for guid " + recordGuid + " with exception: " + e.toString());
recordUploadFailed = true;
sessionStoreDelegate.onRecordStoreFailed(e, recordGuid);
}
public Server11RepositorySession getRepositorySession() {
return repositorySession;
}
private static void bumpTimestampTo(final AtomicLong current, long newValue) {
while (true) {
long existing = current.get();
if (existing > newValue) {
return;
}
if (current.compareAndSet(existing, newValue)) {
return;
}
}
}
private void flush(final boolean isCommit, final boolean isLastPayload) {
final ArrayList<byte[]> outgoing;
final ArrayList<String> outgoingGuids;
final long byteCount;
    // Even though the payload object itself is thread-safe, we want to ensure we get these values
    // together as a "unit". Another approach would be to create a wrapper object for them, but this works.
synchronized (payloadLock) {
outgoing = payload.getRecordsBuffer();
outgoingGuids = payload.getRecordGuidsBuffer();
byteCount = payload.getByteCount();
}
workQueue.execute(new RecordUploadRunnable(
new BatchingAtomicUploaderMayUploadProvider(),
collectionUri,
batchMeta,
new PayloadUploadDelegate(this, outgoingGuids, isCommit, isLastPayload),
outgoing,
byteCount,
isCommit
));
payload.reset();
}
private class BatchingAtomicUploaderMayUploadProvider implements MayUploadProvider {
public boolean mayUpload() {
return !recordUploadFailed;
}
}
public static class BatchingUploaderException extends Exception {
private static final long serialVersionUID = 1L;
}
public static class RecordTooLargeToUpload extends BatchingUploaderException {
private static final long serialVersionUID = 1L;
}
public static class LastModifiedDidNotChange extends BatchingUploaderException {
private static final long serialVersionUID = 1L;
}
public static class LastModifiedChangedUnexpectedly extends BatchingUploaderException {
private static final long serialVersionUID = 1L;
}
public static class TokenModifiedException extends BatchingUploaderException {
private static final long serialVersionUID = 1L;
};
}
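
A rough sketch of how a repository session is expected to drive this uploader; session, workQueue, storeDelegate and outboundRecords are placeholder names, not identifiers from this patch:

BatchingUploader uploader = new BatchingUploader(session, workQueue, storeDelegate);
for (Record record : outboundRecords) {
  uploader.process(record); // Buffers the record; flushes payloads and commits batches as limits fill up.
}
uploader.noMoreRecordsToUpload(); // Flushes any remainder, committing if necessary.
// Results flow back through PayloadUploadDelegate, which calls recordSucceeded(),
// recordFailed(), payloadSucceeded() and lastPayloadFailed() on this uploader.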

View File

@ -0,0 +1,103 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.gecko.sync.repositories.uploaders;
import android.support.annotation.CallSuper;
import android.support.annotation.CheckResult;
/**
* Implements functionality shared by BatchMeta and Payload objects, namely:
* - keeping track of byte and record counts
* - incrementing those counts when records are added
* - checking if a record can fit
*/
/* @ThreadSafe */
public abstract class BufferSizeTracker {
protected final Object accessLock;
/* @GuardedBy("accessLock") */ private long byteCount = BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT;
/* @GuardedBy("accessLock") */ private long recordCount = 0;
/* @GuardedBy("accessLock") */ protected Long smallestRecordByteCount;
protected final long maxBytes;
protected final long maxRecords;
public BufferSizeTracker(Object accessLock, long maxBytes, long maxRecords) {
this.accessLock = accessLock;
this.maxBytes = maxBytes;
this.maxRecords = maxRecords;
}
@CallSuper
protected boolean canFit(long recordDeltaByteCount) {
synchronized (accessLock) {
return canFitRecordByteDelta(recordDeltaByteCount, recordCount, byteCount);
}
}
protected boolean isEmpty() {
synchronized (accessLock) {
return recordCount == 0;
}
}
/**
 * Adds a record and returns a boolean indicating whether the batch is estimated to be full afterwards.
*/
@CheckResult
protected boolean addAndEstimateIfFull(long recordDeltaByteCount) {
synchronized (accessLock) {
      // Sanity check. Calling this method when the buffer won't fit the record is an error.
if (!canFitRecordByteDelta(recordDeltaByteCount, recordCount, byteCount)) {
throw new IllegalStateException("Buffer size exceeded");
}
byteCount += recordDeltaByteCount;
recordCount += 1;
if (smallestRecordByteCount == null || smallestRecordByteCount > recordDeltaByteCount) {
smallestRecordByteCount = recordDeltaByteCount;
}
// See if we're full or nearly full after adding a record.
// We're halving smallestRecordByteCount because we're erring
// on the side of "can hopefully fit". We're trying to upload as soon as we know we
// should, but we also need to be mindful of minimizing total number of uploads we make.
return !canFitRecordByteDelta(smallestRecordByteCount / 2, recordCount, byteCount);
}
}
protected long getByteCount() {
synchronized (accessLock) {
// Ensure we account for payload overhead twice when the batch is empty.
// Payload overhead is either RECORDS_START ("[") or RECORDS_END ("]"),
      // and for an empty payload we need to account for both ("[]").
if (recordCount == 0) {
return byteCount + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT;
}
return byteCount;
}
}
protected long getRecordCount() {
synchronized (accessLock) {
return recordCount;
}
}
@CallSuper
protected void reset() {
synchronized (accessLock) {
byteCount = BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT;
recordCount = 0;
}
}
@CallSuper
protected boolean canFitRecordByteDelta(long byteDelta, long recordCount, long byteCount) {
return recordCount < maxRecords
&& (byteCount + byteDelta) <= maxBytes;
}
}

View File

@ -0,0 +1,9 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.gecko.sync.repositories.uploaders;
public interface MayUploadProvider {
boolean mayUpload();
}

View File

@ -0,0 +1,66 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.gecko.sync.repositories.uploaders;
import android.support.annotation.CheckResult;
import java.util.ArrayList;
/**
* Owns per-payload record byte and recordGuid buffers.
*/
/* @ThreadSafe */
public class Payload extends BufferSizeTracker {
// Data of outbound records.
/* @GuardedBy("accessLock") */ private final ArrayList<byte[]> recordsBuffer = new ArrayList<>();
// GUIDs of outbound records. Used to fail entire payloads.
/* @GuardedBy("accessLock") */ private final ArrayList<String> recordGuidsBuffer = new ArrayList<>();
public Payload(Object payloadLock, long maxBytes, long maxRecords) {
super(payloadLock, maxBytes, maxRecords);
}
@Override
protected boolean addAndEstimateIfFull(long recordDelta) {
throw new UnsupportedOperationException();
}
@CheckResult
protected boolean addAndEstimateIfFull(long recordDelta, byte[] recordBytes, String guid) {
synchronized (accessLock) {
recordsBuffer.add(recordBytes);
recordGuidsBuffer.add(guid);
return super.addAndEstimateIfFull(recordDelta);
}
}
@Override
protected void reset() {
synchronized (accessLock) {
super.reset();
recordsBuffer.clear();
recordGuidsBuffer.clear();
}
}
protected ArrayList<byte[]> getRecordsBuffer() {
synchronized (accessLock) {
return new ArrayList<>(recordsBuffer);
}
}
protected ArrayList<String> getRecordGuidsBuffer() {
synchronized (accessLock) {
return new ArrayList<>(recordGuidsBuffer);
}
}
protected boolean isEmpty() {
synchronized (accessLock) {
return recordsBuffer.isEmpty();
}
}
}
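
A worked example of the byte accounting implemented by BufferSizeTracker above, using the overhead constants from BatchingUploader. This is a sketch with imports omitted, and it assumes in-package access to the protected methods:

Payload p = new Payload(new Object(), 1024, 5);
// Empty: byteCount starts at PER_PAYLOAD_OVERHEAD_BYTE_COUNT ("]"), and
// getByteCount() adds the same amount again for "[", reporting 2 bytes: "[]".
byte[] record = "0123456789".getBytes(StandardCharsets.UTF_8); // 10 bytes
// Callers include one separator byte per record, as BatchingUploader.process() does:
long delta = record.length + BatchingUploader.PER_RECORD_OVERHEAD_BYTE_COUNT; // 11
boolean full = p.addAndEstimateIfFull(delta, record, "guid1");
// getByteCount() now reports 12: "[" (charged via the first record's separator slot,
// which the static sanity check guarantees has the same length), the 10 record bytes,
// and the closing "]".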

View File

@ -0,0 +1,185 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.gecko.sync.repositories.uploaders;
import org.json.simple.JSONArray;
import org.mozilla.gecko.background.common.log.Logger;
import org.mozilla.gecko.sync.ExtendedJSONObject;
import org.mozilla.gecko.sync.HTTPFailureException;
import org.mozilla.gecko.sync.NonArrayJSONException;
import org.mozilla.gecko.sync.NonObjectJSONException;
import org.mozilla.gecko.sync.Utils;
import org.mozilla.gecko.sync.net.AuthHeaderProvider;
import org.mozilla.gecko.sync.net.SyncResponse;
import org.mozilla.gecko.sync.net.SyncStorageRequestDelegate;
import org.mozilla.gecko.sync.net.SyncStorageResponse;
import java.util.ArrayList;
public class PayloadUploadDelegate implements SyncStorageRequestDelegate {
private static final String LOG_TAG = "PayloadUploadDelegate";
private static final String KEY_BATCH = "batch";
private final BatchingUploader uploader;
private ArrayList<String> postedRecordGuids;
private final boolean isCommit;
private final boolean isLastPayload;
public PayloadUploadDelegate(BatchingUploader uploader, ArrayList<String> postedRecordGuids, boolean isCommit, boolean isLastPayload) {
this.uploader = uploader;
this.postedRecordGuids = postedRecordGuids;
this.isCommit = isCommit;
this.isLastPayload = isLastPayload;
}
@Override
public AuthHeaderProvider getAuthHeaderProvider() {
return uploader.getRepositorySession().getServerRepository().getAuthHeaderProvider();
}
@Override
public String ifUnmodifiedSince() {
final Long lastModified = uploader.getCurrentBatch().getLastModified();
if (lastModified == null) {
return null;
}
return Utils.millisecondsToDecimalSecondsString(lastModified);
}
@Override
public void handleRequestSuccess(final SyncStorageResponse response) {
// First, do some sanity checking.
if (response.getStatusCode() != 200 && response.getStatusCode() != 202) {
handleRequestError(
new IllegalStateException("handleRequestSuccess received a non-200/202 response: " + response.getStatusCode())
);
return;
}
// We always expect to see a Last-Modified header. It's returned with every success response.
if (!response.httpResponse().containsHeader(SyncResponse.X_LAST_MODIFIED)) {
handleRequestError(
new IllegalStateException("Response did not have a Last-Modified header")
);
return;
}
// We expect to be able to parse the response as a JSON object.
final ExtendedJSONObject body;
try {
body = response.jsonObjectBody(); // jsonObjectBody() throws or returns non-null.
} catch (Exception e) {
Logger.error(LOG_TAG, "Got exception parsing POST success body.", e);
this.handleRequestError(e);
return;
}
    // If we got a 200, it could be either a non-batching result or a batch commit.
    // - If we're in a batching mode, we expect this to be a commit.
    // If we got a 202, we expect there to be a token present in the response.
if (response.getStatusCode() == 200 && uploader.getCurrentBatch().getToken() != null) {
if (uploader.getInBatchingMode() && !isCommit) {
handleRequestError(
new IllegalStateException("Got 200 OK in batching mode, but this was not a commit payload")
);
return;
}
} else if (response.getStatusCode() == 202) {
if (!body.containsKey(KEY_BATCH)) {
handleRequestError(
new IllegalStateException("Batch response did not have a batch ID")
);
return;
}
}
    // With sanity checks out of the way, we can now safely say if we're in a batching mode or not.
// We only do this once per session.
if (uploader.getInBatchingMode() == null) {
uploader.setInBatchingMode(body.containsKey(KEY_BATCH));
}
// Tell current batch about the token we've received.
// Throws if token changed after being set once, or if we got a non-null token after a commit.
try {
uploader.getCurrentBatch().setToken(body.getString(KEY_BATCH), isCommit);
} catch (BatchingUploader.BatchingUploaderException e) {
handleRequestError(e);
return;
}
// Will throw if Last-Modified changed when it shouldn't have.
try {
uploader.setLastModified(
response.normalizedTimestampForHeader(SyncResponse.X_LAST_MODIFIED),
isCommit);
} catch (BatchingUploader.BatchingUploaderException e) {
handleRequestError(e);
return;
}
// All looks good up to this point, let's process success and failed arrays.
JSONArray success;
try {
success = body.getArray("success");
} catch (NonArrayJSONException e) {
handleRequestError(e);
return;
}
if (success != null && !success.isEmpty()) {
Logger.trace(LOG_TAG, "Successful records: " + success.toString());
for (Object o : success) {
try {
uploader.recordSucceeded((String) o);
} catch (ClassCastException e) {
Logger.error(LOG_TAG, "Got exception parsing POST success guid.", e);
// Not much to be done.
}
}
}
// GC
success = null;
ExtendedJSONObject failed;
try {
failed = body.getObject("failed");
} catch (NonObjectJSONException e) {
handleRequestError(e);
return;
}
if (failed != null && !failed.object.isEmpty()) {
Logger.debug(LOG_TAG, "Failed records: " + failed.object.toString());
for (String guid : failed.keySet()) {
uploader.recordFailed(guid);
}
}
// GC
failed = null;
// And we're done! Let uploader finish up.
uploader.payloadSucceeded(response, isCommit, isLastPayload);
}
@Override
public void handleRequestFailure(final SyncStorageResponse response) {
this.handleRequestError(new HTTPFailureException(response));
}
@Override
public void handleRequestError(Exception e) {
for (String guid : postedRecordGuids) {
uploader.recordFailed(e, guid);
}
// GC
postedRecordGuids = null;
if (isLastPayload) {
uploader.lastPayloadFailed();
}
}
}
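
For reference, a sketch of the two success shapes this delegate distinguishes; header and body values here are made up:

// 202 Accepted: first payload of a batch; note the "batch" token.
//   X-Last-Modified: 1472679909.23
//   {"batch": "MTIzNA", "success": ["guid1", "guid2"], "failed": {}}
// 200 OK: a non-batching result or a batch commit; no token expected back.
//   X-Last-Modified: 1472679911.87
//   {"success": ["guid3"], "failed": {"guid4": "invalid payload"}}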

View File

@ -0,0 +1,177 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.gecko.sync.repositories.uploaders;
import android.net.Uri;
import android.support.annotation.VisibleForTesting;
import org.mozilla.gecko.background.common.log.Logger;
import org.mozilla.gecko.sync.Server11PreviousPostFailedException;
import org.mozilla.gecko.sync.net.SyncStorageRequest;
import org.mozilla.gecko.sync.net.SyncStorageRequestDelegate;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import ch.boye.httpclientandroidlib.entity.ContentProducer;
import ch.boye.httpclientandroidlib.entity.EntityTemplate;
/**
* Responsible for creating and posting a <code>SyncStorageRequest</code> request object.
*/
public class RecordUploadRunnable implements Runnable {
public final String LOG_TAG = "RecordUploadRunnable";
public final static byte[] RECORDS_START = "[".getBytes(StandardCharsets.UTF_8);
public final static byte[] RECORD_SEPARATOR = ",".getBytes(StandardCharsets.UTF_8);
public final static byte[] RECORDS_END = "]".getBytes(StandardCharsets.UTF_8);
private static final String QUERY_PARAM_BATCH = "batch";
private static final String QUERY_PARAM_TRUE = "true";
private static final String QUERY_PARAM_BATCH_COMMIT = "commit";
private final MayUploadProvider mayUploadProvider;
private final SyncStorageRequestDelegate uploadDelegate;
private final ArrayList<byte[]> outgoing;
private final long byteCount;
// Used to construct POST URI during run().
@VisibleForTesting
public final boolean isCommit;
private final Uri collectionUri;
private final BatchMeta batchMeta;
public RecordUploadRunnable(MayUploadProvider mayUploadProvider,
Uri collectionUri,
BatchMeta batchMeta,
SyncStorageRequestDelegate uploadDelegate,
ArrayList<byte[]> outgoing,
long byteCount,
boolean isCommit) {
this.mayUploadProvider = mayUploadProvider;
this.uploadDelegate = uploadDelegate;
this.outgoing = outgoing;
this.byteCount = byteCount;
this.batchMeta = batchMeta;
this.collectionUri = collectionUri;
this.isCommit = isCommit;
}
public static class ByteArraysContentProducer implements ContentProducer {
ArrayList<byte[]> outgoing;
public ByteArraysContentProducer(ArrayList<byte[]> arrays) {
outgoing = arrays;
}
@Override
public void writeTo(OutputStream outstream) throws IOException {
int count = outgoing.size();
outstream.write(RECORDS_START);
if (count > 0) {
outstream.write(outgoing.get(0));
for (int i = 1; i < count; ++i) {
outstream.write(RECORD_SEPARATOR);
outstream.write(outgoing.get(i));
}
}
outstream.write(RECORDS_END);
}
public static long outgoingBytesCount(ArrayList<byte[]> outgoing) {
final long numberOfRecords = outgoing.size();
// Account for start and end tokens.
long count = RECORDS_START.length + RECORDS_END.length;
// Account for all the records.
for (int i = 0; i < numberOfRecords; i++) {
count += outgoing.get(i).length;
}
// Account for a separator between the records.
// There's one less separator than there are records.
if (numberOfRecords > 1) {
count += RECORD_SEPARATOR.length * (numberOfRecords - 1);
}
return count;
}
}
public static class ByteArraysEntity extends EntityTemplate {
private final long count;
public ByteArraysEntity(ArrayList<byte[]> arrays, long totalBytes) {
super(new ByteArraysContentProducer(arrays));
this.count = totalBytes;
this.setContentType("application/json");
// charset is set in BaseResource.
// Sanity check our byte counts.
long realByteCount = ByteArraysContentProducer.outgoingBytesCount(arrays);
if (realByteCount != totalBytes) {
throw new IllegalStateException("Mismatched byte counts. Received " + totalBytes + " while real byte count is " + realByteCount);
}
}
@Override
public long getContentLength() {
return count;
}
@Override
public boolean isRepeatable() {
return true;
}
}
@Override
public void run() {
if (!mayUploadProvider.mayUpload()) {
Logger.info(LOG_TAG, "Told not to proceed by the uploader. Cancelling upload, failing records.");
uploadDelegate.handleRequestError(new Server11PreviousPostFailedException());
return;
}
Logger.trace(LOG_TAG, "Running upload task. Outgoing records: " + outgoing.size());
// We don't want the task queue to proceed until this request completes.
// Fortunately, BaseResource is currently synchronous.
// If that ever changes, you'll need to block here.
final URI postURI = buildPostURI(isCommit, batchMeta, collectionUri);
final SyncStorageRequest request = new SyncStorageRequest(postURI);
request.delegate = uploadDelegate;
ByteArraysEntity body = new ByteArraysEntity(outgoing, byteCount);
request.post(body);
}
@VisibleForTesting
public static URI buildPostURI(boolean isCommit, BatchMeta batchMeta, Uri collectionUri) {
final Uri.Builder uriBuilder = collectionUri.buildUpon();
final String batchToken = batchMeta.getToken();
if (batchToken != null) {
uriBuilder.appendQueryParameter(QUERY_PARAM_BATCH, batchToken);
} else {
uriBuilder.appendQueryParameter(QUERY_PARAM_BATCH, QUERY_PARAM_TRUE);
}
if (isCommit) {
uriBuilder.appendQueryParameter(QUERY_PARAM_BATCH_COMMIT, QUERY_PARAM_TRUE);
}
try {
return new URI(uriBuilder.build().toString());
} catch (URISyntaxException e) {
throw new IllegalStateException("Failed to construct a collection URI", e);
}
}
}
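
The query strings produced by buildPostURI() can be summarized with a short sketch; batchMetaWithoutToken and batchMetaWithToken are hypothetical BatchMeta instances:

Uri collection = Uri.parse("https://server/storage/bookmarks");
// First POST of a batch (no token yet):    ...?batch=true
URI first = RecordUploadRunnable.buildPostURI(false, batchMetaWithoutToken, collection);
// Subsequent POSTs (token "MTIzNA" set):   ...?batch=MTIzNA
URI middle = RecordUploadRunnable.buildPostURI(false, batchMetaWithToken, collection);
// Commit POST appends commit=true:         ...?batch=MTIzNA&commit=true
URI commit = RecordUploadRunnable.buildPostURI(true, batchMetaWithToken, collection);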

View File

@ -50,6 +50,7 @@ public class AndroidBrowserBookmarksServerSyncStage extends ServerSyncStage {
session.config.storageURL(),
session.getAuthHeaderProvider(),
session.config.infoCollections,
session.config.infoConfiguration,
BOOKMARKS_REQUEST_LIMIT,
BOOKMARKS_SORT,
countsFetcher);

View File

@ -50,6 +50,7 @@ public class AndroidBrowserHistoryServerSyncStage extends ServerSyncStage {
session.config.storageURL(),
session.getAuthHeaderProvider(),
session.config.infoCollections,
session.config.infoConfiguration,
HISTORY_REQUEST_LIMIT,
HISTORY_SORT);
}

View File

@ -0,0 +1,59 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.gecko.sync.stage;
import org.mozilla.gecko.sync.ExtendedJSONObject;
import org.mozilla.gecko.sync.InfoConfiguration;
import org.mozilla.gecko.sync.JSONRecordFetcher;
import org.mozilla.gecko.sync.delegates.JSONRecordFetchDelegate;
import org.mozilla.gecko.sync.net.AuthHeaderProvider;
import org.mozilla.gecko.sync.net.SyncStorageResponse;
/**
* Fetches configuration data from info/configurations endpoint.
*/
public class FetchInfoConfigurationStage extends AbstractNonRepositorySyncStage {
private final String configurationURL;
private final AuthHeaderProvider authHeaderProvider;
public FetchInfoConfigurationStage(final String configurationURL, final AuthHeaderProvider authHeaderProvider) {
super();
this.configurationURL = configurationURL;
this.authHeaderProvider = authHeaderProvider;
}
public class StageInfoConfigurationDelegate implements JSONRecordFetchDelegate {
@Override
public void handleSuccess(final ExtendedJSONObject result) {
session.config.infoConfiguration = new InfoConfiguration(result);
session.advance();
}
@Override
public void handleFailure(final SyncStorageResponse response) {
// Handle all non-404 failures upstream.
if (response.getStatusCode() != 404) {
session.handleHTTPError(response, "Failure fetching info/configuration");
return;
}
      // The endpoint might not be available (404) if the server is running an older version.
// We will use default config values in this case.
session.config.infoConfiguration = new InfoConfiguration();
session.advance();
}
@Override
public void handleError(final Exception e) {
session.abort(e, "Failure fetching info/configuration");
}
}
@Override
public void execute() {
final StageInfoConfigurationDelegate delegate = new StageInfoConfigurationDelegate();
final JSONRecordFetcher fetcher = new JSONRecordFetcher(configurationURL, authHeaderProvider);
fetcher.fetch(delegate);
}
}
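
A sketch of an info/configurations response body; the key names follow the Sync 1.5 configuration endpoint, and the values are purely illustrative:

// {"max_request_bytes": 1048576,
//  "max_post_records": 100,
//  "max_post_bytes": 1048576,
//  "max_total_records": 10000,
//  "max_total_bytes": 104857600}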

View File

@ -45,6 +45,7 @@ public class FormHistoryServerSyncStage extends ServerSyncStage {
session.config.storageURL(),
session.getAuthHeaderProvider(),
session.config.infoCollections,
session.config.infoConfiguration,
FORM_HISTORY_REQUEST_LIMIT,
FORM_HISTORY_SORT);
}

View File

@ -18,6 +18,7 @@ public interface GlobalSyncStage {
idle, // Start state.
checkPreconditions, // Preparation of the basics. TODO: clear status
fetchInfoCollections, // Take a look at timestamps.
fetchInfoConfiguration, // Fetch server upload limits
fetchMetaGlobal,
ensureKeysStage,
/*

View File

@ -8,6 +8,7 @@ import java.net.URISyntaxException;
import org.mozilla.gecko.background.common.log.Logger;
import org.mozilla.gecko.sync.InfoCollections;
import org.mozilla.gecko.sync.InfoConfiguration;
import org.mozilla.gecko.sync.InfoCounts;
import org.mozilla.gecko.sync.JSONRecordFetcher;
import org.mozilla.gecko.sync.net.AuthHeaderProvider;
@ -37,11 +38,12 @@ public class SafeConstrainedServer11Repository extends ConstrainedServer11Reposi
String storageURL,
AuthHeaderProvider authHeaderProvider,
InfoCollections infoCollections,
InfoConfiguration infoConfiguration,
long limit,
String sort,
JSONRecordFetcher countFetcher)
throws URISyntaxException {
super(collection, storageURL, authHeaderProvider, infoCollections, limit, sort);
super(collection, storageURL, authHeaderProvider, infoCollections, infoConfiguration, limit, sort);
if (countFetcher == null) {
throw new IllegalArgumentException("countFetcher must not be null");
}

View File

@ -145,7 +145,8 @@ public abstract class ServerSyncStage extends AbstractSessionManagingSyncStage i
return new Server11Repository(collection,
session.config.storageURL(),
session.getAuthHeaderProvider(),
session.config.infoCollections);
session.config.infoCollections,
session.config.infoConfiguration);
}
/**

View File

@ -8,6 +8,7 @@ import org.junit.Test;
import org.junit.runner.RunWith;
import org.mozilla.gecko.background.testhelpers.TestRunner;
import org.mozilla.gecko.sync.InfoCollections;
import org.mozilla.gecko.sync.InfoConfiguration;
import org.mozilla.gecko.sync.repositories.Server11Repository;
import java.net.URI;
@ -20,6 +21,7 @@ public class TestServer11Repository {
private static final String COLLECTION_URL = "http://foo.com/1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage";
protected final InfoCollections infoCollections = new InfoCollections();
protected final InfoConfiguration infoConfiguration = new InfoConfiguration();
public static void assertQueryEquals(String expected, URI u) {
Assert.assertEquals(expected, u.getRawQuery());
@ -28,7 +30,7 @@ public class TestServer11Repository {
@SuppressWarnings("static-method")
@Test
public void testCollectionURIFull() throws URISyntaxException {
Server11Repository r = new Server11Repository(COLLECTION, COLLECTION_URL, null, infoCollections);
Server11Repository r = new Server11Repository(COLLECTION, COLLECTION_URL, null, infoCollections, infoConfiguration);
assertQueryEquals("full=1&newer=5000.000", r.collectionURI(true, 5000000L, -1, null, null));
assertQueryEquals("newer=1230.000", r.collectionURI(false, 1230000L, -1, null, null));
assertQueryEquals("newer=5000.000&limit=10", r.collectionURI(false, 5000000L, 10, null, null));
@ -38,8 +40,8 @@ public class TestServer11Repository {
@Test
public void testCollectionURI() throws URISyntaxException {
Server11Repository noTrailingSlash = new Server11Repository(COLLECTION, COLLECTION_URL, null, infoCollections);
Server11Repository trailingSlash = new Server11Repository(COLLECTION, COLLECTION_URL + "/", null, infoCollections);
Server11Repository noTrailingSlash = new Server11Repository(COLLECTION, COLLECTION_URL, null, infoCollections, infoConfiguration);
Server11Repository trailingSlash = new Server11Repository(COLLECTION, COLLECTION_URL + "/", null, infoCollections, infoConfiguration);
Assert.assertEquals("http://foo.com/1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage/bookmarks", noTrailingSlash.collectionURI().toASCIIString());
Assert.assertEquals("http://foo.com/1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/storage/bookmarks", trailingSlash.collectionURI().toASCIIString());
}

View File

@ -3,17 +3,16 @@
package org.mozilla.android.sync.test;
import ch.boye.httpclientandroidlib.HttpEntity;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mozilla.android.sync.test.SynchronizerHelpers.TrackingWBORepository;
import org.mozilla.android.sync.test.helpers.BaseTestStorageRequestDelegate;
import org.mozilla.android.sync.test.helpers.HTTPServerTestHelper;
import org.mozilla.android.sync.test.helpers.MockServer;
import org.mozilla.gecko.background.testhelpers.MockRecord;
import org.mozilla.gecko.background.testhelpers.TestRunner;
import org.mozilla.gecko.background.testhelpers.WaitHelper;
import org.mozilla.gecko.sync.InfoCollections;
import org.mozilla.gecko.sync.InfoConfiguration;
import org.mozilla.gecko.sync.JSONRecordFetcher;
import org.mozilla.gecko.sync.Utils;
import org.mozilla.gecko.sync.crypto.KeyBundle;
@ -21,18 +20,14 @@ import org.mozilla.gecko.sync.middleware.Crypto5MiddlewareRepository;
import org.mozilla.gecko.sync.net.AuthHeaderProvider;
import org.mozilla.gecko.sync.net.BaseResource;
import org.mozilla.gecko.sync.net.BasicAuthHeaderProvider;
import org.mozilla.gecko.sync.net.SyncStorageRecordRequest;
import org.mozilla.gecko.sync.net.SyncStorageResponse;
import org.mozilla.gecko.sync.repositories.FetchFailedException;
import org.mozilla.gecko.sync.repositories.Repository;
import org.mozilla.gecko.sync.repositories.RepositorySession;
import org.mozilla.gecko.sync.repositories.Server11Repository;
import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
import org.mozilla.gecko.sync.repositories.StoreFailedException;
import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionCreationDelegate;
import org.mozilla.gecko.sync.repositories.domain.BookmarkRecord;
import org.mozilla.gecko.sync.repositories.domain.BookmarkRecordFactory;
import org.mozilla.gecko.sync.repositories.domain.Record;
import org.mozilla.gecko.sync.stage.SafeConstrainedServer11Repository;
import org.mozilla.gecko.sync.synchronizer.ServerLocalSynchronizer;
import org.mozilla.gecko.sync.synchronizer.Synchronizer;
@ -41,8 +36,6 @@ import org.simpleframework.http.Request;
import org.simpleframework.http.Response;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.junit.Assert.assertEquals;
@ -70,7 +63,6 @@ public class TestServer11RepositorySession {
private static final int TEST_PORT = HTTPServerTestHelper.getTestPort();
private static final String TEST_SERVER = "http://localhost:" + TEST_PORT + "/";
static final String LOCAL_BASE_URL = TEST_SERVER + "1.1/n6ec3u5bee3tixzp2asys7bs6fve4jfw/";
static final String LOCAL_REQUEST_URL = LOCAL_BASE_URL + "storage/bookmarks";
static final String LOCAL_INFO_BASE_URL = LOCAL_BASE_URL + "info/";
static final String LOCAL_COUNTS_URL = LOCAL_INFO_BASE_URL + "collection_counts";
@ -81,6 +73,7 @@ public class TestServer11RepositorySession {
public final AuthHeaderProvider authHeaderProvider = new BasicAuthHeaderProvider(TEST_USERNAME, TEST_PASSWORD);
protected final InfoCollections infoCollections = new InfoCollections();
protected final InfoConfiguration infoConfiguration = new InfoConfiguration();
// Few-second timeout so that our longer operations don't time out and cause spurious error-handling results.
private static final int SHORT_TIMEOUT = 10000;
@ -91,25 +84,6 @@ public class TestServer11RepositorySession {
private HTTPServerTestHelper data = new HTTPServerTestHelper();
public class MockServer11RepositorySession extends Server11RepositorySession {
public MockServer11RepositorySession(Repository repository) {
super(repository);
}
public RecordUploadRunnable getRecordUploadRunnable() {
// TODO: implement upload delegate in the class, too!
return new RecordUploadRunnable(null, recordsBuffer, recordGuidsBuffer, byteCount);
}
public void enqueueRecord(Record r) {
super.enqueue(r);
}
public HttpEntity getEntity() {
return this.getRecordUploadRunnable().getBodyEntity();
}
}
public class TestSyncStorageRequestDelegate extends
BaseTestStorageRequestDelegate {
public TestSyncStorageRequestDelegate(String username, String password) {
@ -125,24 +99,6 @@ public class TestServer11RepositorySession {
}
}
@Test
public void test() throws URISyntaxException {
BaseResource.rewriteLocalhost = false;
data.startHTTPServer(new POSTMockServer());
MockServer11RepositorySession session = new MockServer11RepositorySession(
null);
session.enqueueRecord(new MockRecord(Utils.generateGuid(), null, 0, false));
session.enqueueRecord(new MockRecord(Utils.generateGuid(), null, 0, false));
URI uri = new URI(LOCAL_REQUEST_URL);
SyncStorageRecordRequest r = new SyncStorageRecordRequest(uri);
TestSyncStorageRequestDelegate delegate = new TestSyncStorageRequestDelegate(TEST_USERNAME, TEST_PASSWORD);
r.delegate = delegate;
r.post(session.getEntity());
}
@SuppressWarnings("static-method")
protected TrackingWBORepository getLocal(int numRecords) {
final TrackingWBORepository local = new TrackingWBORepository();
@ -157,7 +113,7 @@ public class TestServer11RepositorySession {
final String COLLECTION = "test";
final TrackingWBORepository local = getLocal(100);
final Server11Repository remote = new Server11Repository(COLLECTION, getCollectionURL(COLLECTION), authHeaderProvider, infoCollections);
final Server11Repository remote = new Server11Repository(COLLECTION, getCollectionURL(COLLECTION), authHeaderProvider, infoCollections, infoConfiguration);
KeyBundle collectionKey = new KeyBundle(TEST_USERNAME, SYNC_KEY);
Crypto5MiddlewareRepository cryptoRepo = new Crypto5MiddlewareRepository(remote, collectionKey);
cryptoRepo.recordFactory = new BookmarkRecordFactory();
@ -234,6 +190,7 @@ public class TestServer11RepositorySession {
getCollectionURL(collection),
getAuthHeaderProvider(),
infoCollections,
infoConfiguration,
5000, "sortindex", countsFetcher);
data.startHTTPServer(server);

View File

@ -6,10 +6,20 @@ package org.mozilla.gecko.background.testhelpers;
import org.mozilla.gecko.sync.ExtendedJSONObject;
import org.mozilla.gecko.sync.repositories.domain.Record;
import java.util.Random;

public class MockRecord extends Record {
private final int payloadByteCount;
public MockRecord(String guid, String collection, long lastModified, boolean deleted) {
super(guid, collection, lastModified, deleted);
// Payload used to be "foo", so let's not stray too far.
// Perhaps some tests "depend" on that payload size.
payloadByteCount = 3;
}
public MockRecord(String guid, String collection, long lastModified, boolean deleted, int payloadByteCount) {
super(guid, collection, lastModified, deleted);
this.payloadByteCount = payloadByteCount;
}
@Override
@ -29,6 +39,13 @@ public class MockRecord extends Record {
@Override
public String toJSONString() {
return "{\"id\":\"" + guid + "\", \"payload\": \"foo\"}";
// Build up a randomish payload string based on the length we were asked for.
final Random random = new Random();
final char[] payloadChars = new char[payloadByteCount];
for (int i = 0; i < payloadByteCount; i++) {
payloadChars[i] = (char) (random.nextInt(26) + 'a');
}
final String payloadString = new String(payloadChars);
return "{\"id\":\"" + guid + "\", \"payload\": \"" + payloadString+ "\"}";
}
}

View File

@ -11,6 +11,7 @@ import org.mozilla.android.sync.test.helpers.MockServer;
import org.mozilla.gecko.background.testhelpers.TestRunner;
import org.mozilla.gecko.background.testhelpers.WaitHelper;
import org.mozilla.gecko.sync.InfoCollections;
import org.mozilla.gecko.sync.InfoConfiguration;
import org.mozilla.gecko.sync.JSONRecordFetcher;
import org.mozilla.gecko.sync.net.AuthHeaderProvider;
import org.mozilla.gecko.sync.repositories.RepositorySession;
@ -35,6 +36,7 @@ public class TestSafeConstrainedServer11Repository {
}
protected final InfoCollections infoCollections = new InfoCollections();
protected final InfoConfiguration infoConfiguration = new InfoConfiguration();
private class CountsMockServer extends MockServer {
public final AtomicInteger count = new AtomicInteger(0);
@ -85,7 +87,7 @@ public class TestSafeConstrainedServer11Repository {
final int TEST_LIMIT = 1000;
final SafeConstrainedServer11Repository repo = new SafeConstrainedServer11Repository(
collection, getCollectionURL(collection), null, infoCollections,
collection, getCollectionURL(collection), null, infoCollections, infoConfiguration,
TEST_LIMIT, sort, countFetcher);
final AtomicBoolean shouldSkipLots = new AtomicBoolean(false);

View File

@ -0,0 +1,282 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
package org.mozilla.gecko.sync.repositories.uploaders;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mozilla.gecko.background.testhelpers.TestRunner;
import static org.junit.Assert.*;
@RunWith(TestRunner.class)
public class BatchMetaTest {
private BatchMeta batchMeta;
private long byteLimit = 1024;
private long recordLimit = 5;
private Object lock = new Object();
private Long collectionLastModified = 123L;
@Before
public void setUp() throws Exception {
batchMeta = new BatchMeta(lock, byteLimit, recordLimit, collectionLastModified);
}
@Test
public void testConstructor() {
assertEquals(batchMeta.collectionLastModified, collectionLastModified);
BatchMeta otherBatchMeta = new BatchMeta(lock, byteLimit, recordLimit, null);
assertNull(otherBatchMeta.collectionLastModified);
}
@Test
public void testGetLastModified() {
// Defaults to collection L-M
assertEquals(batchMeta.getLastModified(), Long.valueOf(123L));
try {
batchMeta.setLastModified(333L, true);
} catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
} catch (BatchingUploader.LastModifiedDidNotChange e) {}
assertEquals(batchMeta.getLastModified(), Long.valueOf(333L));
}
@Test
public void testSetLastModified() {
assertEquals(batchMeta.getLastModified(), collectionLastModified);
try {
batchMeta.setLastModified(123L, true);
assertEquals(batchMeta.getLastModified(), Long.valueOf(123L));
} catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
fail("Should not check for modifications on first L-M set");
} catch (BatchingUploader.LastModifiedDidNotChange e) {
fail("Should not check for modifications on first L-M set");
}
// Now the same, but passing in 'false' for "expecting to change".
batchMeta.reset();
assertEquals(batchMeta.getLastModified(), collectionLastModified);
try {
batchMeta.setLastModified(123L, false);
assertEquals(batchMeta.getLastModified(), Long.valueOf(123L));
} catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
fail("Should not check for modifications on first L-M set");
} catch (BatchingUploader.LastModifiedDidNotChange e) {
fail("Should not check for modifications on first L-M set");
}
// Test that we can't modify L-M when we're not expecting to
try {
batchMeta.setLastModified(333L, false);
} catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
assertTrue("Must throw when L-M changes unexpectedly", true);
} catch (BatchingUploader.LastModifiedDidNotChange e) {
fail("Not expecting did-not-change throw");
}
assertEquals(batchMeta.getLastModified(), Long.valueOf(123L));
// Test that we can modify L-M when we're expecting to
try {
batchMeta.setLastModified(333L, true);
} catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
fail("Not expecting changed-unexpectedly throw");
} catch (BatchingUploader.LastModifiedDidNotChange e) {
fail("Not expecting did-not-change throw");
}
assertEquals(batchMeta.getLastModified(), Long.valueOf(333L));
// Test that we catch L-M modifications that expect to change but actually don't
try {
batchMeta.setLastModified(333L, true);
} catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
fail("Not expecting changed-unexpectedly throw");
} catch (BatchingUploader.LastModifiedDidNotChange e) {
assertTrue("Expected-to-change-but-did-not-change didn't throw", true);
}
assertEquals(batchMeta.getLastModified(), Long.valueOf(333L));
}
@Test
public void testSetToken() {
assertNull(batchMeta.getToken());
try {
batchMeta.setToken("MTIzNA", false);
} catch (BatchingUploader.TokenModifiedException e) {
fail("Should be able to set token for the first time");
}
assertEquals("MTIzNA", batchMeta.getToken());
try {
batchMeta.setToken("XYCvNA", false);
} catch (BatchingUploader.TokenModifiedException e) {
assertTrue("Should not be able to modify a token", true);
}
assertEquals("MTIzNA", batchMeta.getToken());
try {
batchMeta.setToken("XYCvNA", true);
} catch (BatchingUploader.TokenModifiedException e) {
assertTrue("Should catch non-null tokens during onCommit sets", true);
}
assertEquals("MTIzNA", batchMeta.getToken());
try {
batchMeta.setToken(null, true);
} catch (BatchingUploader.TokenModifiedException e) {
fail("Should be able to set token to null during onCommit set");
}
assertNull(batchMeta.getToken());
}
@Test
public void testRecordSucceeded() {
assertTrue(batchMeta.getSuccessRecordGuids().isEmpty());
batchMeta.recordSucceeded("guid1");
assertTrue(batchMeta.getSuccessRecordGuids().size() == 1);
assertTrue(batchMeta.getSuccessRecordGuids().contains("guid1"));
try {
batchMeta.recordSucceeded(null);
fail();
} catch (IllegalStateException e) {
assertTrue("Should not be able to 'succeed' a null guid", true);
}
}
@Test
public void testByteLimits() {
assertTrue(batchMeta.canFit(0));
// Should just fit
assertTrue(batchMeta.canFit(byteLimit - BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
// Can't fit a record due to payload overhead.
assertFalse(batchMeta.canFit(byteLimit));
assertFalse(batchMeta.canFit(byteLimit + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
assertFalse(batchMeta.canFit(byteLimit * 1000));
long recordDelta = byteLimit / 2;
assertFalse(batchMeta.addAndEstimateIfFull(recordDelta));
// Record delta shouldn't fit due to payload overhead.
assertFalse(batchMeta.canFit(recordDelta));
}
@Test
public void testCountLimits() {
// Our record limit is 5, let's add 4.
assertFalse(batchMeta.addAndEstimateIfFull(1));
assertFalse(batchMeta.addAndEstimateIfFull(1));
assertFalse(batchMeta.addAndEstimateIfFull(1));
assertFalse(batchMeta.addAndEstimateIfFull(1));
// 5th record still fits in
assertTrue(batchMeta.canFit(1));
// Add the 5th record
assertTrue(batchMeta.addAndEstimateIfFull(1));
// 6th record won't fit
assertFalse(batchMeta.canFit(1));
}
@Test
public void testNeedCommit() {
assertFalse(batchMeta.needToCommit());
assertFalse(batchMeta.addAndEstimateIfFull(1));
assertTrue(batchMeta.needToCommit());
assertFalse(batchMeta.addAndEstimateIfFull(1));
assertFalse(batchMeta.addAndEstimateIfFull(1));
assertFalse(batchMeta.addAndEstimateIfFull(1));
assertTrue(batchMeta.needToCommit());
batchMeta.reset();
assertFalse(batchMeta.needToCommit());
}
@Test
public void testAdd() {
// Ensure we account for payload overhead twice when the batch is empty.
// Payload overhead is either RECORDS_START or RECORDS_END, and for an empty payload
// we need both.
assertTrue(batchMeta.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
assertTrue(batchMeta.getRecordCount() == 0);
assertFalse(batchMeta.addAndEstimateIfFull(1));
assertTrue(batchMeta.getByteCount() == (1 + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
assertTrue(batchMeta.getRecordCount() == 1);
assertFalse(batchMeta.addAndEstimateIfFull(1));
assertFalse(batchMeta.addAndEstimateIfFull(1));
assertFalse(batchMeta.addAndEstimateIfFull(1));
assertTrue(batchMeta.getByteCount() == (4 + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
assertTrue(batchMeta.getRecordCount() == 4);
assertTrue(batchMeta.addAndEstimateIfFull(1));
try {
assertTrue(batchMeta.addAndEstimateIfFull(1));
fail("BatchMeta should not let us insert records that won't fit");
} catch (IllegalStateException e) {
assertTrue(true);
}
}
@Test
public void testReset() {
assertTrue(batchMeta.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
assertTrue(batchMeta.getRecordCount() == 0);
assertTrue(batchMeta.getSuccessRecordGuids().isEmpty());
// Shouldn't throw even if already empty
batchMeta.reset();
assertTrue(batchMeta.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
assertTrue(batchMeta.getRecordCount() == 0);
assertTrue(batchMeta.getSuccessRecordGuids().isEmpty());
assertFalse(batchMeta.addAndEstimateIfFull(1));
batchMeta.recordSucceeded("guid1");
try {
batchMeta.setToken("MTIzNA", false);
} catch (BatchingUploader.TokenModifiedException e) {}
try {
batchMeta.setLastModified(333L, true);
} catch (BatchingUploader.LastModifiedChangedUnexpectedly e) {
} catch (BatchingUploader.LastModifiedDidNotChange e) {}
assertEquals(Long.valueOf(333L), batchMeta.getLastModified());
assertEquals("MTIzNA", batchMeta.getToken());
assertTrue(batchMeta.getSuccessRecordGuids().size() == 1);
batchMeta.reset();
// Counts must be reset
assertTrue(batchMeta.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
assertTrue(batchMeta.getRecordCount() == 0);
assertTrue(batchMeta.getSuccessRecordGuids().isEmpty());
// Collection L-M shouldn't change
assertEquals(batchMeta.collectionLastModified, collectionLastModified);
// Token must be reset
assertNull(batchMeta.getToken());
// L-M must be reverted to collection L-M
assertEquals(batchMeta.getLastModified(), collectionLastModified);
}
}

View File

@ -0,0 +1,441 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
package org.mozilla.gecko.sync.repositories.uploaders;
import android.support.annotation.NonNull;
import static org.junit.Assert.*;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mozilla.gecko.background.testhelpers.MockRecord;
import org.mozilla.gecko.background.testhelpers.TestRunner;
import org.mozilla.gecko.sync.ExtendedJSONObject;
import org.mozilla.gecko.sync.InfoCollections;
import org.mozilla.gecko.sync.InfoConfiguration;
import org.mozilla.gecko.sync.Utils;
import org.mozilla.gecko.sync.repositories.Server11Repository;
import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
import java.net.URISyntaxException;
import java.util.Random;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
@RunWith(TestRunner.class)
public class BatchingUploaderTest {
class MockExecutorService implements Executor {
public int totalPayloads = 0;
public int commitPayloads = 0;
@Override
public void execute(@NonNull Runnable command) {
++totalPayloads;
if (((RecordUploadRunnable) command).isCommit) {
++commitPayloads;
}
}
}
class MockStoreDelegate implements RepositorySessionStoreDelegate {
public int storeFailed = 0;
public int storeSucceeded = 0;
public int storeCompleted = 0;
@Override
public void onRecordStoreFailed(Exception ex, String recordGuid) {
++storeFailed;
}
@Override
public void onRecordStoreSucceeded(String guid) {
++storeSucceeded;
}
@Override
public void onStoreCompleted(long storeEnd) {
++storeCompleted;
}
@Override
public RepositorySessionStoreDelegate deferredStoreDelegate(ExecutorService executor) {
return null;
}
}
private Executor workQueue;
private RepositorySessionStoreDelegate storeDelegate;
@Before
public void setUp() throws Exception {
workQueue = new MockExecutorService();
storeDelegate = new MockStoreDelegate();
}
@Test
public void testProcessEvenPayloadBatch() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
// 1st
uploader.process(record);
assertEquals(0, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// 2nd -> payload full
uploader.process(record);
assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// 3rd
uploader.process(record);
assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// 4th -> batch & payload full
uploader.process(record);
assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 5th
uploader.process(record);
assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 6th -> payload full
uploader.process(record);
assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 7th
uploader.process(record);
assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 8th -> batch & payload full
uploader.process(record);
assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
// 9th
uploader.process(record);
assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
// 10th -> payload full
uploader.process(record);
assertEquals(5, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
// 11th
uploader.process(record);
assertEquals(5, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
// 12th -> batch & payload full
uploader.process(record);
assertEquals(6, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(3, ((MockExecutorService) workQueue).commitPayloads);
// 13th
uploader.process(record);
assertEquals(6, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(3, ((MockExecutorService) workQueue).commitPayloads);
}
@Test
public void testProcessUnevenPayloadBatch() {
BatchingUploader uploader = makeConstrainedUploader(2, 5);
MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
// 1st
uploader.process(record);
assertEquals(0, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// 2nd -> payload full
uploader.process(record);
assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// 3rd
uploader.process(record);
assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// 4th -> payload full
uploader.process(record);
assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// 5th -> batch full
uploader.process(record);
assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 6th -> starts new batch
uploader.process(record);
assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 7th -> payload full
uploader.process(record);
assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 8th
uploader.process(record);
assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 9th -> payload full
uploader.process(record);
assertEquals(5, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 10th -> batch full
uploader.process(record);
assertEquals(6, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
// 11th -> starts new batch
uploader.process(record);
assertEquals(6, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(2, ((MockExecutorService) workQueue).commitPayloads);
}
@Test
public void testNonBatchingOptimization() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
// 1st
uploader.process(record);
assertEquals(0, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// 2nd
uploader.process(record);
assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// 3rd
uploader.process(record);
assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// 4th
uploader.process(record);
assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 5th
uploader.process(record);
assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// And now we tell uploader that batching isn't supported.
// It shouldn't bother with batches from now on, just payloads.
uploader.setInBatchingMode(false);
// 6th
uploader.process(record);
assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 7th
uploader.process(record);
assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 8th
uploader.process(record);
assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 9th
uploader.process(record);
assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
// 10th
uploader.process(record);
assertEquals(5, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
}
@Test
public void testPreemptiveUploadByteCounts() {
// While processing a record, if we know for sure that another one won't fit,
// we upload the payload.
BatchingUploader uploader = makeConstrainedUploader(3, 6);
// Payload byte max: 1024; batch byte max: 4096
MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false, 400);
uploader.process(record);
assertEquals(0, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// After 2nd record, byte count is at 800+overhead. Our payload max is 1024, so it's unlikely
// we can fit another record at this pace. Expect payload to be uploaded.
uploader.process(record);
assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// After this record, we'll have less than 124 bytes of room left in the payload. Expect upload.
record = new MockRecord(Utils.generateGuid(), null, 0, false, 970);
uploader.process(record);
assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
uploader.process(record);
assertEquals(3, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
// At this point our byte count for the batch is at 3600+overhead;
// since we have just 496 bytes left in the batch, it's unlikely we'll fit another record.
// Expect a batch commit
uploader.process(record);
assertEquals(4, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
}
@Test
public void testRandomPayloadSizesBatching() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
final Random random = new Random();
for (int i = 0; i < 15000; i++) {
uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, random.nextInt(15000)));
}
}
@Test
public void testRandomPayloadSizesNonBatching() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
final Random random = new Random();
uploader.setInBatchingMode(false);
for (int i = 0; i < 15000; i++) {
uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, random.nextInt(15000)));
}
}
@Test
public void testRandomPayloadSizesNonBatchingDelayed() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
final Random random = new Random();
// Delay telling uploader that batching isn't supported.
// Randomize how many records we wait for.
final int delay = random.nextInt(20);
for (int i = 0; i < 15000; i++) {
if (delay == i) {
uploader.setInBatchingMode(false);
}
uploader.process(new MockRecord(Utils.generateGuid(), null, 0, false, random.nextInt(15000)));
}
}
@Test
public void testNoMoreRecordsAfterPayloadPost() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
// Process two records (payload limit is also two, batch is four),
// and ensure that 'no more records' commits.
MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
uploader.process(record);
uploader.process(record);
uploader.setInBatchingMode(true);
uploader.commitIfNecessaryAfterLastPayload();
// One will be a payload post, the other one is batch commit (empty payload)
assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
}
@Test
public void testNoMoreRecordsAfterPayloadPostWithOneRecordLeft() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
// Process two records (payload limit is also two, batch is four),
// and ensure that 'no more records' commits.
MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
uploader.process(record);
uploader.process(record);
uploader.process(record);
uploader.commitIfNecessaryAfterLastPayload();
// One will be a payload post, the other one is batch commit (one record payload)
assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
}
@Test
public void testNoMoreRecordsNoOp() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
uploader.commitIfNecessaryAfterLastPayload();
assertEquals(0, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
}
@Test
public void testNoMoreRecordsNoOpAfterCommit() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
uploader.process(record);
uploader.process(record);
uploader.process(record);
uploader.process(record);
assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
uploader.commitIfNecessaryAfterLastPayload();
assertEquals(2, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
}
@Test
public void testNoMoreRecordsEvenNonBatching() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
// Process two records (payload limit is also two, batch is four),
// set non-batching mode, and ensure that 'no more records' doesn't commit.
MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
uploader.process(record);
uploader.process(record);
uploader.setInBatchingMode(false);
uploader.commitIfNecessaryAfterLastPayload();
// Expect a single payload post and no batch commit, since batching mode is off
assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(0, ((MockExecutorService) workQueue).commitPayloads);
}
@Test
public void testNoMoreRecordsIncompletePayload() {
BatchingUploader uploader = makeConstrainedUploader(2, 4);
// We have one record (payload limit is 2), and "no-more-records" signal should commit it.
MockRecord record = new MockRecord(Utils.generateGuid(), null, 0, false);
uploader.process(record);
uploader.commitIfNecessaryAfterLastPayload();
assertEquals(1, ((MockExecutorService) workQueue).totalPayloads);
assertEquals(1, ((MockExecutorService) workQueue).commitPayloads);
}
private BatchingUploader makeConstrainedUploader(long maxPostRecords, long maxTotalRecords) {
Server11RepositorySession server11RepositorySession = new Server11RepositorySession(
makeCountConstrainedRepository(maxPostRecords, maxTotalRecords)
);
server11RepositorySession.setStoreDelegate(storeDelegate);
return new BatchingUploader(server11RepositorySession, workQueue, storeDelegate);
}
private Server11Repository makeCountConstrainedRepository(long maxPostRecords, long maxTotalRecords) {
return makeConstrainedRepository(1024, 1024, maxPostRecords, 4096, maxTotalRecords);
}
private Server11Repository makeConstrainedRepository(long maxRequestBytes, long maxPostBytes, long maxPostRecords, long maxTotalBytes, long maxTotalRecords) {
ExtendedJSONObject infoConfigurationJSON = new ExtendedJSONObject();
infoConfigurationJSON.put(InfoConfiguration.MAX_TOTAL_BYTES, maxTotalBytes);
infoConfigurationJSON.put(InfoConfiguration.MAX_TOTAL_RECORDS, maxTotalRecords);
infoConfigurationJSON.put(InfoConfiguration.MAX_POST_RECORDS, maxPostRecords);
infoConfigurationJSON.put(InfoConfiguration.MAX_POST_BYTES, maxPostBytes);
infoConfigurationJSON.put(InfoConfiguration.MAX_REQUEST_BYTES, maxRequestBytes);
InfoConfiguration infoConfiguration = new InfoConfiguration(infoConfigurationJSON);
try {
return new Server11Repository(
"dummyCollection",
"http://dummy.url/",
null,
new InfoCollections(),
infoConfiguration
);
} catch (URISyntaxException e) {
// Won't throw, and this won't happen.
return null;
}
}
}

View File

@ -0,0 +1,137 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
package org.mozilla.gecko.sync.repositories.uploaders;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mozilla.gecko.background.testhelpers.TestRunner;
import static org.junit.Assert.*;
@RunWith(TestRunner.class)
public class PayloadTest {
private Payload payload;
private long byteLimit = 1024;
private long recordLimit = 5;
private Object lock = new Object();
@Before
public void setUp() throws Exception {
payload = new Payload(lock, byteLimit, recordLimit);
}
@Test
public void testByteLimits() {
assertTrue(payload.canFit(0));
// Should just fit
assertTrue(payload.canFit(byteLimit - BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
// Can't fit a record due to payload overhead.
assertFalse(payload.canFit(byteLimit));
assertFalse(payload.canFit(byteLimit + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
assertFalse(payload.canFit(byteLimit * 1000));
long recordDelta = byteLimit / 2;
assertFalse(payload.addAndEstimateIfFull(recordDelta, new byte[0], null));
// Record delta shouldn't fit due to payload overhead.
assertFalse(payload.canFit(recordDelta));
}
@Test
public void testCountLimits() {
byte[] bytes = new byte[0];
// Our record limit is 5, let's add 4.
assertFalse(payload.addAndEstimateIfFull(1, bytes, null));
assertFalse(payload.addAndEstimateIfFull(1, bytes, null));
assertFalse(payload.addAndEstimateIfFull(1, bytes, null));
assertFalse(payload.addAndEstimateIfFull(1, bytes, null));
// 5th record still fits in
assertTrue(payload.canFit(1));
// Add the 5th record
assertTrue(payload.addAndEstimateIfFull(1, bytes, null));
// 6th record won't fit
assertFalse(payload.canFit(1));
}
@Test
public void testAdd() {
assertTrue(payload.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
assertTrue(payload.getRecordCount() == 0);
assertTrue(payload.isEmpty());
assertTrue(payload.getRecordsBuffer().isEmpty());
assertTrue(payload.getRecordGuidsBuffer().isEmpty());
try {
payload.addAndEstimateIfFull(1024);
fail("Simple add is not supported");
} catch (UnsupportedOperationException e) {
assertTrue(true);
}
byte[] recordBytes1 = new byte[100];
assertFalse(payload.addAndEstimateIfFull(1, recordBytes1, "guid1"));
assertTrue(payload.getRecordsBuffer().size() == 1);
assertTrue(payload.getRecordGuidsBuffer().size() == 1);
assertTrue(payload.getRecordGuidsBuffer().contains("guid1"));
assertTrue(payload.getRecordsBuffer().contains(recordBytes1));
assertTrue(payload.getByteCount() == (1 + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
assertTrue(payload.getRecordCount() == 1);
assertFalse(payload.isEmpty());
assertFalse(payload.addAndEstimateIfFull(1, recordBytes1, "guid2"));
assertFalse(payload.addAndEstimateIfFull(1, recordBytes1, "guid3"));
assertFalse(payload.addAndEstimateIfFull(1, recordBytes1, "guid4"));
assertTrue(payload.getByteCount() == (4 + BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT));
assertTrue(payload.getRecordCount() == 4);
assertTrue(payload.addAndEstimateIfFull(1, recordBytes1, "guid5"));
try {
assertTrue(payload.addAndEstimateIfFull(1, recordBytes1, "guid6"));
fail("Payload should not let us insert records that won't fit");
} catch (IllegalStateException e) {
assertTrue(true);
}
}
@Test
public void testReset() {
assertTrue(payload.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
assertTrue(payload.getRecordCount() == 0);
assertTrue(payload.getRecordsBuffer().isEmpty());
assertTrue(payload.getRecordGuidsBuffer().isEmpty());
assertTrue(payload.isEmpty());
// Shouldn't throw even if already empty
payload.reset();
assertTrue(payload.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
assertTrue(payload.getRecordCount() == 0);
assertTrue(payload.getRecordsBuffer().isEmpty());
assertTrue(payload.getRecordGuidsBuffer().isEmpty());
assertTrue(payload.isEmpty());
byte[] recordBytes1 = new byte[100];
assertFalse(payload.addAndEstimateIfFull(1, recordBytes1, "guid1"));
assertFalse(payload.isEmpty());
payload.reset();
assertTrue(payload.getByteCount() == 2 * BatchingUploader.PER_PAYLOAD_OVERHEAD_BYTE_COUNT);
assertTrue(payload.getRecordCount() == 0);
assertTrue(payload.getRecordsBuffer().isEmpty());
assertTrue(payload.getRecordGuidsBuffer().isEmpty());
assertTrue(payload.isEmpty());
}
}

View File

@ -0,0 +1,404 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
package org.mozilla.gecko.sync.repositories.uploaders;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mozilla.gecko.background.testhelpers.TestRunner;
import org.mozilla.gecko.sync.HTTPFailureException;
import org.mozilla.gecko.sync.InfoCollections;
import org.mozilla.gecko.sync.InfoConfiguration;
import org.mozilla.gecko.sync.NonObjectJSONException;
import org.mozilla.gecko.sync.net.SyncResponse;
import org.mozilla.gecko.sync.net.SyncStorageResponse;
import org.mozilla.gecko.sync.repositories.Server11Repository;
import org.mozilla.gecko.sync.repositories.Server11RepositorySession;
import org.mozilla.gecko.sync.repositories.delegates.RepositorySessionStoreDelegate;
import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.concurrent.Executor;
import ch.boye.httpclientandroidlib.HttpResponse;
import ch.boye.httpclientandroidlib.ProtocolVersion;
import ch.boye.httpclientandroidlib.entity.BasicHttpEntity;
import ch.boye.httpclientandroidlib.message.BasicHttpResponse;
import ch.boye.httpclientandroidlib.message.BasicStatusLine;
import static org.junit.Assert.*;
@RunWith(TestRunner.class)
public class PayloadUploadDelegateTest {
private BatchingUploader batchingUploader;
class MockUploader extends BatchingUploader {
public final ArrayList<String> successRecords = new ArrayList<>();
public final HashMap<String, Exception> failedRecords = new HashMap<>();
public boolean didLastPayloadFail = false;
public ArrayList<SyncStorageResponse> successResponses = new ArrayList<>();
public int commitPayloadsSucceeded = 0;
public int lastPayloadsSucceeded = 0;
public MockUploader(final Server11RepositorySession repositorySession, final Executor workQueue, final RepositorySessionStoreDelegate sessionStoreDelegate) {
super(repositorySession, workQueue, sessionStoreDelegate);
}
@Override
public void payloadSucceeded(final SyncStorageResponse response, final boolean isCommit, final boolean isLastPayload) {
successResponses.add(response);
if (isCommit) {
++commitPayloadsSucceeded;
}
if (isLastPayload) {
++lastPayloadsSucceeded;
}
}
@Override
public void recordSucceeded(final String recordGuid) {
successRecords.add(recordGuid);
}
@Override
public void recordFailed(final String recordGuid) {
recordFailed(new Exception(), recordGuid);
}
@Override
public void recordFailed(final Exception e, final String recordGuid) {
failedRecords.put(recordGuid, e);
}
@Override
public void lastPayloadFailed() {
didLastPayloadFail = true;
}
}
@Before
public void setUp() throws Exception {
Server11Repository server11Repository = new Server11Repository(
"dummyCollection",
"http://dummy.url/",
null,
new InfoCollections(),
new InfoConfiguration()
);
batchingUploader = new MockUploader(
new Server11RepositorySession(server11Repository),
null,
null
);
}
@Test
public void testHandleRequestSuccessNonSuccess() {
ArrayList<String> postedGuids = new ArrayList<>(2);
postedGuids.add("testGuid1");
postedGuids.add("testGuid2");
PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, false);
// Test that non-2* responses aren't processed
payloadUploadDelegate.handleRequestSuccess(makeSyncStorageResponse(404, null, null));
assertEquals(2, ((MockUploader) batchingUploader).failedRecords.size());
assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
assertEquals(IllegalStateException.class,
((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
assertEquals(IllegalStateException.class,
((MockUploader) batchingUploader).failedRecords.get("testGuid2").getClass());
}
@Test
public void testHandleRequestSuccessNoHeaders() {
ArrayList<String> postedGuids = new ArrayList<>(2);
postedGuids.add("testGuid1");
postedGuids.add("testGuid2");
PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, false);
// Test that responses without X-Last-Modified header aren't processed
payloadUploadDelegate.handleRequestSuccess(makeSyncStorageResponse(200, null, null));
assertEquals(2, ((MockUploader) batchingUploader).failedRecords.size());
assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
assertEquals(IllegalStateException.class,
((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
assertEquals(IllegalStateException.class,
((MockUploader) batchingUploader).failedRecords.get("testGuid2").getClass());
}
@Test
public void testHandleRequestSuccessBadBody() {
ArrayList<String> postedGuids = new ArrayList<>(2);
postedGuids.add("testGuid1");
postedGuids.add("testGuid2");
PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, true);
// Test that we catch json processing errors
payloadUploadDelegate.handleRequestSuccess(makeSyncStorageResponse(200, "non json body", "123"));
assertEquals(2, ((MockUploader) batchingUploader).failedRecords.size());
assertTrue(((MockUploader) batchingUploader).didLastPayloadFail);
assertEquals(NonObjectJSONException.class,
((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
assertEquals(NonObjectJSONException.class,
((MockUploader) batchingUploader).failedRecords.get("testGuid2").getClass());
}
@Test
public void testHandleRequestSuccess202NoToken() {
ArrayList<String> postedGuids = new ArrayList<>(1);
postedGuids.add("testGuid1");
PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, true);
// Test that we catch absent tokens in 202 responses
payloadUploadDelegate.handleRequestSuccess(makeSyncStorageResponse(202, "{\"success\": []}", "123"));
assertEquals(1, ((MockUploader) batchingUploader).failedRecords.size());
assertEquals(IllegalStateException.class,
((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
}
@Test
public void testHandleRequestSuccessBad200() {
ArrayList<String> postedGuids = new ArrayList<>(1);
postedGuids.add("testGuid1");
PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, false);
// Test that, if we're in batching mode and have seen a token, a 200 must be a response to a commit
try {
batchingUploader.getCurrentBatch().setToken("MTIzNA", true);
} catch (BatchingUploader.BatchingUploaderException e) {}
batchingUploader.setInBatchingMode(true);
// not a commit, so should fail
payloadUploadDelegate.handleRequestSuccess(makeSyncStorageResponse(200, "{\"success\": []}", "123"));
assertEquals(1, ((MockUploader) batchingUploader).failedRecords.size());
assertEquals(IllegalStateException.class,
((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
}
@Test
public void testHandleRequestSuccessNonBatchingFailedLM() {
ArrayList<String> postedGuids = new ArrayList<>(1);
postedGuids.add("guid1");
postedGuids.add("guid2");
postedGuids.add("guid3");
PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, false);
payloadUploadDelegate.handleRequestSuccess(
makeSyncStorageResponse(200, "{\"success\": [\"guid1\", \"guid2\", \"guid3\"]}", "123"));
assertEquals(0, ((MockUploader) batchingUploader).failedRecords.size());
assertEquals(3, ((MockUploader) batchingUploader).successRecords.size());
assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
assertEquals(1, ((MockUploader) batchingUploader).successResponses.size());
assertEquals(0, ((MockUploader) batchingUploader).commitPayloadsSucceeded);
assertEquals(0, ((MockUploader) batchingUploader).lastPayloadsSucceeded);
// These should fail, because we're returning an unchanged L-M in non-batching mode
postedGuids.add("guid4");
postedGuids.add("guid6");
payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, false);
payloadUploadDelegate.handleRequestSuccess(
makeSyncStorageResponse(200, "{\"success\": [\"guid4\", 5, \"guid6\"]}", "123"));
assertEquals(5, ((MockUploader) batchingUploader).failedRecords.size());
assertEquals(3, ((MockUploader) batchingUploader).successRecords.size());
assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
assertEquals(1, ((MockUploader) batchingUploader).successResponses.size());
assertEquals(0, ((MockUploader) batchingUploader).commitPayloadsSucceeded);
assertEquals(0, ((MockUploader) batchingUploader).lastPayloadsSucceeded);
assertEquals(BatchingUploader.LastModifiedDidNotChange.class,
((MockUploader) batchingUploader).failedRecords.get("guid4").getClass());
}
@Test
public void testHandleRequestSuccessNonBatching() {
ArrayList<String> postedGuids = new ArrayList<>();
postedGuids.add("guid1");
postedGuids.add("guid2");
postedGuids.add("guid3");
PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, false);
payloadUploadDelegate.handleRequestSuccess(
makeSyncStorageResponse(200, "{\"success\": [\"guid1\", \"guid2\", \"guid3\"], \"failed\": {}}", "123"));
postedGuids = new ArrayList<>();
postedGuids.add("guid4");
postedGuids.add("guid5");
payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, false);
payloadUploadDelegate.handleRequestSuccess(
makeSyncStorageResponse(200, "{\"success\": [\"guid4\", \"guid5\"], \"failed\": {}}", "333"));
postedGuids = new ArrayList<>();
postedGuids.add("guid6");
payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, true);
payloadUploadDelegate.handleRequestSuccess(
makeSyncStorageResponse(200, "{\"success\": [\"guid6\"], \"failed\": {}}", "444"));
assertEquals(0, ((MockUploader) batchingUploader).failedRecords.size());
assertEquals(6, ((MockUploader) batchingUploader).successRecords.size());
assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
assertEquals(3, ((MockUploader) batchingUploader).successResponses.size());
assertEquals(0, ((MockUploader) batchingUploader).commitPayloadsSucceeded);
assertEquals(1, ((MockUploader) batchingUploader).lastPayloadsSucceeded);
assertFalse(batchingUploader.getInBatchingMode());
postedGuids = new ArrayList<>();
postedGuids.add("guid7");
postedGuids.add("guid8");
payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, true);
payloadUploadDelegate.handleRequestSuccess(
makeSyncStorageResponse(200, "{\"success\": [\"guid8\"], \"failed\": {\"guid7\": \"reason\"}}", "555"));
assertEquals(1, ((MockUploader) batchingUploader).failedRecords.size());
assertTrue(((MockUploader) batchingUploader).failedRecords.containsKey("guid7"));
assertEquals(7, ((MockUploader) batchingUploader).successRecords.size());
assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
assertEquals(4, ((MockUploader) batchingUploader).successResponses.size());
assertEquals(0, ((MockUploader) batchingUploader).commitPayloadsSucceeded);
assertEquals(2, ((MockUploader) batchingUploader).lastPayloadsSucceeded);
assertFalse(batchingUploader.getInBatchingMode());
}
@Test
public void testHandleRequestSuccessBatching() {
ArrayList<String> postedGuids = new ArrayList<>();
postedGuids.add("guid1");
postedGuids.add("guid2");
postedGuids.add("guid3");
PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, false);
payloadUploadDelegate.handleRequestSuccess(
makeSyncStorageResponse(202, "{\"batch\": \"MTIzNA\", \"success\": [\"guid1\", \"guid2\", \"guid3\"], \"failed\": {}}", "123"));
assertTrue(batchingUploader.getInBatchingMode());
assertEquals("MTIzNA", batchingUploader.getCurrentBatch().getToken());
postedGuids = new ArrayList<>();
postedGuids.add("guid4");
postedGuids.add("guid5");
postedGuids.add("guid6");
payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, false, false);
payloadUploadDelegate.handleRequestSuccess(
makeSyncStorageResponse(202, "{\"batch\": \"MTIzNA\", \"success\": [\"guid4\", \"guid5\", \"guid6\"], \"failed\": {}}", "123"));
assertTrue(batchingUploader.getInBatchingMode());
assertEquals("MTIzNA", batchingUploader.getCurrentBatch().getToken());
postedGuids = new ArrayList<>();
postedGuids.add("guid7");
payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, true, false);
payloadUploadDelegate.handleRequestSuccess(
makeSyncStorageResponse(200, "{\"success\": [\"guid6\"], \"failed\": {}}", "222"));
// Even though everything indicates we're not in batching mode, we were, so test that
// we don't reset the flag.
assertTrue(batchingUploader.getInBatchingMode());
assertNull(batchingUploader.getCurrentBatch().getToken());
postedGuids = new ArrayList<>();
postedGuids.add("guid8");
payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, postedGuids, true, true);
payloadUploadDelegate.handleRequestSuccess(
makeSyncStorageResponse(200, "{\"success\": [\"guid7\"], \"failed\": {}}", "333"));
assertEquals(0, ((MockUploader) batchingUploader).failedRecords.size());
assertEquals(8, ((MockUploader) batchingUploader).successRecords.size());
assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
assertEquals(4, ((MockUploader) batchingUploader).successResponses.size());
assertEquals(2, ((MockUploader) batchingUploader).commitPayloadsSucceeded);
assertEquals(1, ((MockUploader) batchingUploader).lastPayloadsSucceeded);
assertTrue(batchingUploader.getInBatchingMode());
}
@Test
public void testHandleRequestError() {
ArrayList<String> postedGuids = new ArrayList<>(3);
postedGuids.add("testGuid1");
postedGuids.add("testGuid2");
postedGuids.add("testGuid3");
PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(batchingUploader, postedGuids, false, false);
IllegalStateException e = new IllegalStateException();
payloadUploadDelegate.handleRequestError(e);
assertEquals(3, ((MockUploader) batchingUploader).failedRecords.size());
assertEquals(e, ((MockUploader) batchingUploader).failedRecords.get("testGuid1"));
assertEquals(e, ((MockUploader) batchingUploader).failedRecords.get("testGuid2"));
assertEquals(e, ((MockUploader) batchingUploader).failedRecords.get("testGuid3"));
assertFalse(((MockUploader) batchingUploader).didLastPayloadFail);
payloadUploadDelegate = new PayloadUploadDelegate(batchingUploader, postedGuids, false, true);
payloadUploadDelegate.handleRequestError(e);
assertEquals(3, ((MockUploader) batchingUploader).failedRecords.size());
assertTrue(((MockUploader) batchingUploader).didLastPayloadFail);
}
@Test
public void testHandleRequestFailure() {
ArrayList<String> postedGuids = new ArrayList<>(3);
postedGuids.add("testGuid1");
postedGuids.add("testGuid2");
postedGuids.add("testGuid3");
PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(batchingUploader, postedGuids, false, false);
final HttpResponse response = new BasicHttpResponse(
new BasicStatusLine(new ProtocolVersion("HTTP", 1, 1), 503, "Illegal method/protocol"));
payloadUploadDelegate.handleRequestFailure(new SyncStorageResponse(response));
assertEquals(3, ((MockUploader) batchingUploader).failedRecords.size());
assertEquals(HTTPFailureException.class,
((MockUploader) batchingUploader).failedRecords.get("testGuid1").getClass());
assertEquals(HTTPFailureException.class,
((MockUploader) batchingUploader).failedRecords.get("testGuid2").getClass());
assertEquals(HTTPFailureException.class,
((MockUploader) batchingUploader).failedRecords.get("testGuid3").getClass());
payloadUploadDelegate = new PayloadUploadDelegate(batchingUploader, postedGuids, false, true);
payloadUploadDelegate.handleRequestFailure(new SyncStorageResponse(response));
assertEquals(3, ((MockUploader) batchingUploader).failedRecords.size());
assertTrue(((MockUploader) batchingUploader).didLastPayloadFail);
}
@Test
public void testIfUnmodifiedSince() {
PayloadUploadDelegate payloadUploadDelegate = new PayloadUploadDelegate(
batchingUploader, new ArrayList<String>(), false, false);
assertNull(payloadUploadDelegate.ifUnmodifiedSince());
try {
batchingUploader.getCurrentBatch().setLastModified(1471645412480L, true);
} catch (BatchingUploader.BatchingUploaderException e) {}
assertEquals("1471645412.480", payloadUploadDelegate.ifUnmodifiedSince());
}
private SyncStorageResponse makeSyncStorageResponse(int code, String body, String lastModified) {
BasicHttpResponse response = new BasicHttpResponse(
new BasicStatusLine(new ProtocolVersion("HTTP", 1, 1), code, null));
if (body != null) {
BasicHttpEntity entity = new BasicHttpEntity();
entity.setContent(new ByteArrayInputStream(body.getBytes()));
response.setEntity(entity);
}
if (lastModified != null) {
response.addHeader(SyncResponse.X_LAST_MODIFIED, lastModified);
}
return new SyncStorageResponse(response);
}
}

View File

@ -0,0 +1,38 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
package org.mozilla.gecko.sync.repositories.uploaders;
import android.net.Uri;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mozilla.gecko.background.testhelpers.TestRunner;
import java.net.URI;
import static org.junit.Assert.*;
@RunWith(TestRunner.class)
public class RecordUploadRunnableTest {
@Test
public void testBuildPostURI() throws Exception {
BatchMeta batchMeta = new BatchMeta(new Object(), 1, 1, null);
URI postURI = RecordUploadRunnable.buildPostURI(
false, batchMeta, Uri.parse("http://example.com/"));
assertEquals("http://example.com/?batch=true", postURI.toString());
postURI = RecordUploadRunnable.buildPostURI(
true, batchMeta, Uri.parse("http://example.com/"));
assertEquals("http://example.com/?batch=true&commit=true", postURI.toString());
batchMeta.setToken("MTIzNA", false);
postURI = RecordUploadRunnable.buildPostURI(
false, batchMeta, Uri.parse("http://example.com/"));
assertEquals("http://example.com/?batch=MTIzNA", postURI.toString());
postURI = RecordUploadRunnable.buildPostURI(
true, batchMeta, Uri.parse("http://example.com/"));
assertEquals("http://example.com/?batch=MTIzNA&commit=true", postURI.toString());
}
}

View File

@ -48,11 +48,6 @@
#include "SerializedLoadContext.h"
#include "mozilla/net/NeckoChild.h"
#if defined(ANDROID) && !defined(MOZ_WIDGET_GONK)
#include "nsIPropertyBag2.h"
static const int32_t ANDROID_23_VERSION = 10;
#endif
using namespace mozilla;
namespace mozilla {
@ -590,22 +585,6 @@ Predictor::Init()
nsresult rv = NS_OK;
#if defined(ANDROID) && !defined(MOZ_WIDGET_GONK)
// This is an ugly hack to disable the predictor on android < 2.3, as it
// doesn't play nicely with those android versions, at least on our infra.
// Causes timeouts in reftests. See bug 881804 comment 86.
nsCOMPtr<nsIPropertyBag2> infoService =
do_GetService("@mozilla.org/system-info;1");
if (infoService) {
int32_t androidVersion = -1;
rv = infoService->GetPropertyAsInt32(NS_LITERAL_STRING("version"),
&androidVersion);
if (NS_SUCCEEDED(rv) && (androidVersion < ANDROID_23_VERSION)) {
return NS_ERROR_NOT_AVAILABLE;
}
}
#endif
rv = InstallObserver();
NS_ENSURE_SUCCESS(rv, rv);

View File

@ -6,7 +6,7 @@ from __future__ import print_function, unicode_literals
import os
import sys
from argparse import ArgumentParser
from argparse import ArgumentParser, REMAINDER
SEARCH_PATHS = []
@ -52,6 +52,10 @@ class MozlintParser(ArgumentParser):
'help': "Lint files touched by changes in the working directory "
"(i.e haven't been committed yet). Works with mercurial or git.",
}],
[['extra_args'],
{'nargs': REMAINDER,
'help': "Extra arguments that will be forwarded to the underlying linter.",
}],
]
def __init__(self, **kwargs):
@ -60,6 +64,13 @@ class MozlintParser(ArgumentParser):
for cli, args in self.arguments:
self.add_argument(*cli, **args)
def parse_known_args(self, *args, **kwargs):
# This is here so the eslint mach command doesn't lose 'extra_args'
# when using mach's dispatch functionality.
args, extra = ArgumentParser.parse_known_args(self, *args, **kwargs)
args.extra_args = extra
return args, extra
def find_linters(linters=None):
lints = []

View File

@ -541,6 +541,7 @@ public:
case __NR_unlink:
CASES_FOR_fchown:
case __NR_fchmod:
case __NR_flock:
#endif
return Allow();

View File

@ -272,8 +272,8 @@ builds:
# Miscellaneous tasks.
tasks:
eslint-gecko:
task: tasks/tests/eslint-gecko.yml
mozlint-eslint:
task: tasks/tests/mozlint-eslint.yml
root: true
when:
file_patterns:
@ -282,6 +282,7 @@ tasks:
- '**/*.jsm'
- '**/*.jsx'
- '**/*.html'
- '**/*.xhtml'
- '**/*.xml'
# Run when eslint policies change.
- '**/.eslintignore'
@ -289,9 +290,10 @@ tasks:
# The plugin implementing custom checks.
- 'tools/lint/eslint/eslint-plugin-mozilla/**'
# Other misc lint related files.
- 'python/mozlint/**'
- 'tools/lint/**'
- 'testing/docker/lint/**'
flake8-gecko:
mozlint-flake8:
task: tasks/tests/mozlint-flake8.yml
root: true
when:

View File

@ -10,7 +10,7 @@ docker-image: lint
task:
metadata:
name: '[TC] - ESLint'
description: 'ESLint test'
description: 'JavaScript ESLint linter'
payload:
image:
@ -32,16 +32,15 @@ task:
rm eslint.tar.gz &&
ln -s ../eslint-plugin-mozilla node_modules &&
cd ../../.. &&
tools/lint/eslint/node_modules/.bin/eslint --quiet --plugin html --ext [.js,.jsm,.jsx,.xml,.html,.xhtml] -f tools/lint/eslint-formatter .
./mach lint -l eslint -f treeherder --quiet
extra:
locations:
build: null
tests: null
build: null
tests: null
treeherder:
machine:
platform: lint
symbol: ES
machine:
platform: lint
symbol: ES
treeherderEnv:
- production
- staging
- production
- staging

View File

@ -3,7 +3,7 @@ $inherits:
from: 'tasks/lint.yml'
variables:
build_product: 'lint'
build_name: 'flake8-gecko'
build_name: 'mozlint-flake8'
build_type: 'opt'
docker-image: lint

View File

@ -2,7 +2,7 @@ $inherits:
from: 'tasks/build.yml'
task:
workerType: gecko-1-b-win2012
workerType: 'gecko-{{level}}-b-win2012'
payload:
artifacts:
-

View File

@ -2,6 +2,7 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import pprint
from datetime import datetime
@ -114,10 +115,12 @@ class UpdateTestCase(FirefoxTestCase):
try:
self.browser.tabbar.close_all_tabs([self.browser.tabbar.selected_tab])
# Add content of the update log file for detailed failures when applying an update
self.updates[self.current_update_index]['update_log'] = self.read_update_log()
# Print results for now until we have treeherder integration
output = pprint.pformat(self.updates)
self.logger.info('Update test results: \n{}'.format(output))
finally:
super(UpdateTestCase, self).tearDown()
@ -355,11 +358,25 @@ class UpdateTestCase(FirefoxTestCase):
# Restart Firefox to apply the update
self.restart()
def read_update_log(self):
"""Read the content of the update log file for the last update attempt."""
path = os.path.join(os.path.dirname(self.software_update.staging_directory),
'last-update.log')
try:
with open(path, 'rb') as f:
return f.read().splitlines()
except IOError as exc:
self.logger.warning(str(exc))
return None
def remove_downloaded_update(self):
"""Remove an already downloaded update from the update staging directory."""
self.logger.info('Clean-up update staging directory: {}'.format(
self.software_update.staging_directory))
mozfile.remove(self.software_update.staging_directory)
"""Remove an already downloaded update from the update staging directory.
Hereby not only remove the update subdir but everything below 'updates'.
"""
path = os.path.dirname(self.software_update.staging_directory)
self.logger.info('Clean-up update staging directory: {}'.format(path))
mozfile.remove(path)
def restore_config_files(self):
# Reset channel-prefs.js file if modified

View File

@ -232,7 +232,7 @@ user_pref("browser.webapps.checkForUpdates", 0);
// Enable debug logging in the tcp presentation server.
user_pref("dom.presentation.tcp_server.debug", true);
// Enable debug logging in the presentation core service.
pref("logging.Presentation", "debug");
user_pref("logging.Presentation", "debug");
// Don't connect to Yahoo! for RSS feed tests.
// en-US only uses .types.0.uri, but set all of them just to be sure.

View File

@ -144,8 +144,8 @@ Narrator.prototype = {
this._win.speechSynthesis.cancel();
let tw = this._treeWalker;
let paragraph = tw.currentNode;
if (!paragraph) {
tw.currentNode = tw.root;
if (paragraph == tw.root) {
this._sendTestEvent("paragraphsdone", {});
return Promise.resolve();
}
@ -193,7 +193,7 @@ Narrator.prototype = {
// User pressed stop.
resolve();
} else {
tw.nextNode();
tw.currentNode = tw.nextNode() || tw.root;
this._speakInner().then(resolve, reject);
}
});

View File

@ -118,9 +118,15 @@ add_task(function* testNarrate() {
ok(!NarrateTestUtils.isVisible(popup), "popup is dismissed while speaking");
NarrateTestUtils.isStartedState(content, ok);
promiseEvent = ContentTaskUtils.waitForEvent(content, "paragraphend");
$(NarrateTestUtils.STOP).click();
yield promiseEvent;
// Go forward all the way to the end of the article. We should eventually
// stop.
do {
promiseEvent = Promise.race([
ContentTaskUtils.waitForEvent(content, "paragraphstart"),
ContentTaskUtils.waitForEvent(content, "paragraphsdone")]);
$(NarrateTestUtils.FORWARD).click();
} while ((yield promiseEvent).type == "paragraphstart");
yield ContentTaskUtils.waitForCondition(
() => !$(NarrateTestUtils.STOP), "transitioned to stopped state");
NarrateTestUtils.isStoppedState(content, ok);

View File

@ -61,9 +61,12 @@
locale/@AB_CD@/global/keys.properties (%chrome/global/keys.properties)
locale/@AB_CD@/global/languageNames.properties (%chrome/global/languageNames.properties)
locale/@AB_CD@/global/mozilla.dtd (%chrome/global/mozilla.dtd)
#ifndef MOZ_FENNEC
locale/@AB_CD@/global/narrate.properties (%chrome/global/narrate.properties)
#endif
locale/@AB_CD@/global/notification.dtd (%chrome/global/notification.dtd)
locale/@AB_CD@/global/preferences.dtd (%chrome/global/preferences.dtd)
#ifndef MOZ_FENNEC
locale/@AB_CD@/global/printdialog.dtd (%chrome/global/printdialog.dtd)
locale/@AB_CD@/global/printjoboptions.dtd (%chrome/global/printjoboptions.dtd)
locale/@AB_CD@/global/printPageSetup.dtd (%chrome/global/printPageSetup.dtd)
@ -71,6 +74,7 @@
locale/@AB_CD@/global/printPreviewProgress.dtd (%chrome/global/printPreviewProgress.dtd)
locale/@AB_CD@/global/printdialog.properties (%chrome/global/printdialog.properties)
locale/@AB_CD@/global/printProgress.dtd (%chrome/global/printProgress.dtd)
#endif
locale/@AB_CD@/global/regionNames.properties (%chrome/global/regionNames.properties)
locale/@AB_CD@/global/resetProfile.dtd (%chrome/global/resetProfile.dtd)
locale/@AB_CD@/global/resetProfile.properties (%chrome/global/resetProfile.properties)

View File

@ -595,17 +595,21 @@ FinderHighlighter.prototype = {
* offset is accounted for.
* Geometry.jsm takes care of the DOMRect calculations.
*
* @param {nsIDOMWindow} window
* @param {nsIDOMWindow} window Window to read the boundary rect from
* @param {Boolean} [includeScroll] Whether to take the scroll offset into
* account, which is useful for comparing DOMRects.
* Optional, defaults to `true`
* @return {Rect}
*/
_getRootBounds(window) {
_getRootBounds(window, includeScroll = true) {
let dwu = this._getDWU(window);
let cssPageRect = Rect.fromRect(dwu.getRootBounds());
let scrollX = {};
let scrollY = {};
dwu.getScrollXY(false, scrollX, scrollY);
cssPageRect.translate(scrollX.value, scrollY.value);
if (includeScroll) {
dwu.getScrollXY(false, scrollX, scrollY);
cssPageRect.translate(scrollX.value, scrollY.value);
}
// If we're in a frame, update the position of the rect (top/ left).
let currWin = window;
@ -617,9 +621,10 @@ FinderHighlighter.prototype = {
dwu = this._getDWU(currWin);
let parentRect = Rect.fromRect(dwu.getBoundsWithoutFlushing(el));
// Always take the scroll position into account.
dwu.getScrollXY(false, scrollX, scrollY);
parentRect.translate(scrollX.value, scrollY.value);
if (includeScroll) {
dwu.getScrollXY(false, scrollX, scrollY);
parentRect.translate(scrollX.value, scrollY.value);
}
cssPageRect.translate(parentRect.left, parentRect.top);
}
@ -784,17 +789,18 @@ FinderHighlighter.prototype = {
} else
bounds = this._getRootBounds(window);
let topBounds = this._getRootBounds(window.top, false);
let rects = new Set();
// A range may consist of multiple rectangles; we can also do these kinds of
// precise cut-outs. range.getBoundingClientRect() returns the fully
// encompassing rectangle, which is too much for our purpose here.
for (let dims of range.getClientRects()) {
rects.add({
height: dims.bottom - dims.top,
width: dims.right - dims.left,
y: dims.top + bounds.top,
x: dims.left + bounds.left
});
for (let rect of range.getClientRects()) {
rect = Rect.fromRect(rect);
rect.x += bounds.x;
rect.y += bounds.y;
// If the rect is not even visible from the top document, we can ignore it.
if (rect.intersects(topBounds))
rects.add(rect);
}
dict = dict || this.getForWindow(window.top);
@ -894,6 +900,8 @@ FinderHighlighter.prototype = {
// Make sure to at least show the dimmed background.
this._repaintHighlightAllMask(window, false);
this._scheduleRepaintOfMask(window);
} else {
this._scheduleRepaintOfMask(window, { scrollOnly: true });
}
return;
}
@ -1049,22 +1057,20 @@ FinderHighlighter.prototype = {
dict.modalRepaintScheduler = window.setTimeout(() => {
dict.modalRepaintScheduler = null;
if (dict.unconditionalRepaintRequested) {
let { width: previousWidth, height: previousHeight } = dict.lastWindowDimensions;
let { width, height } = dict.lastWindowDimensions = this._getWindowDimensions(window);
let pageContentChanged = (Math.abs(previousWidth - width) > kContentChangeThresholdPx ||
Math.abs(previousHeight - height) > kContentChangeThresholdPx);
// When the page has changed significantly enough in size, we'll restart
// the iterator with the same parameters as before to find us new ranges.
if (pageContentChanged)
this.iterator.restart(this.finder);
if (dict.unconditionalRepaintRequested ||
(dict.modalHighlightRectsMap.size && pageContentChanged)) {
dict.unconditionalRepaintRequested = false;
this._repaintHighlightAllMask(window);
return;
}
let { width, height } = this._getWindowDimensions(window);
if (!dict.modalHighlightRectsMap.size ||
(Math.abs(dict.lastWindowDimensions.width - width) < kContentChangeThresholdPx &&
Math.abs(dict.lastWindowDimensions.height - height) < kContentChangeThresholdPx)) {
return;
}
this.iterator.restart(this.finder);
dict.lastWindowDimensions = { width, height };
this._repaintHighlightAllMask(window);
}, kModalHighlightRepaintFreqMs);
},
@ -1079,7 +1085,8 @@ FinderHighlighter.prototype = {
_maybeInstallStyleSheet(window) {
window = window.top;
let dict = this.getForWindow(window);
if (dict.installedSheet)
let document = window.document;
if (dict.installedSheet == document)
return;
let dwu = this._getDWU(window);
@@ -1087,7 +1094,7 @@ FinderHighlighter.prototype = {
try {
dwu.loadSheetUsingURIString(uri, dwu.AGENT_SHEET);
} catch (e) {}
dict.installedSheet = true;
dict.installedSheet = document;
},
/**

View File

@@ -26,6 +26,7 @@ like mach, mozreview and taskcluster.
usage
create
linters/eslint
linters/flake8
Indices and tables

View File

@@ -0,0 +1,173 @@
=====================
Mozilla ESLint Plugin
=====================
balanced-listeners
------------------
Checks that for every occurrence of 'addEventListener' or 'on' there is an
occurrence of 'removeEventListener' or 'off' with the same event name.
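
For example (``node`` and ``onClick`` are illustrative names)::

   // Flagged: a "click" listener is added but never removed.
   node.addEventListener("click", onClick);

   // Balanced: a listener with the same event name is removed again.
   node.addEventListener("click", onClick);
   node.removeEventListener("click", onClick);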
components-imports
------------------
Checks the filename of imported files; e.g. ``Cu.import("some/path/Blah.jsm")``
adds ``Blah`` to the global scope.
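
For instance, after the import below (the path is illustrative) ``Blah``
can be used without a "not defined" warning::

   Cu.import("resource://gre/modules/Blah.jsm");
   Blah.doSomething(); // Blah is treated as a defined global.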
import-browserjs-globals
------------------------
When included, the main browser UI scripts will be loaded and any globals
they declare will be defined for the current file. This is mostly useful for
browser-chrome mochitests that call browser functions.
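
A sketch of what this enables, assuming a browser-chrome mochitest
(``gBrowser`` is one of the browser UI globals)::

   // gBrowser is declared in the main browser scripts; the plugin
   // defines it for the current file so ESLint does not flag it.
   is(gBrowser.tabs.length, 1, "expected a single tab");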
import-globals-from
-------------------
Parses a file for globals defined in various unique Mozilla ways.
When a "import-globals-from <path>" comment is found in a file, then all globals
from the file at <path> will be imported in the current scope. This will also
operate recursively.
This is useful for scripts that are loaded as a <script> tag in a window and
rely on each other's globals.
If <path> is a relative path, then it must be relative to the file being
checked by the rule.
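
A minimal sketch, assuming a sibling file ``helpers.js`` that declares the
globals this script relies on::

   /* import-globals-from helpers.js */
   // Everything declared in helpers.js is now known to ESLint here.
   helperDefinedElsewhere();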
import-headjs-globals
---------------------
Import globals from head.js and from any files that were imported by
head.js (as far as we can correctly resolve the path).
The following file import patterns are supported:
- ``Services.scriptloader.loadSubScript(path)``
- ``loader.loadSubScript(path)``
- ``loadSubScript(path)``
- ``loadHelperScript(path)``
- ``import-globals-from path``
If the path does not exist because it is generated, e.g.
``testdir + "/somefile.js"``, we do our best to resolve it.
The following patterns are supported:
- ``Cu.import("resource://devtools/client/shared/widgets/ViewHelpers.jsm");``
- ``loader.lazyImporter(this, "name1");``
- ``loader.lazyRequireGetter(this, "name2"``
- ``loader.lazyServiceGetter(this, "name3"``
- ``XPCOMUtils.defineLazyModuleGetter(this, "setNamedTimeout", ...)``
- ``loader.lazyGetter(this, "toolboxStrings"``
- ``XPCOMUtils.defineLazyGetter(this, "clipboardHelper"``
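
For example, a head.js might pull in a helper like this (the file names are
illustrative)::

   // head.js
   Services.scriptloader.loadSubScript(testdir + "/helper_tools.js", this);
   // Globals declared in helper_tools.js are now defined for the tests
   // that share this head.js.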
mark-test-function-used
-----------------------
Simply marks ``test`` (the test method) as used. This avoids ESLint reporting
that the function is never called.
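
For example::

   // The test harness calls test() for us, so it is never called in
   // this file; the rule keeps ESLint from flagging it as unused.
   function test() {
     ok(true, "test ran");
   }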
no-aArgs
--------
Checks that function argument names don't start with lowercase 'a' followed by
a capital letter. This is to prevent the use of Hungarian notation whereby the
first letter is a prefix that indicates the type or intended use of a variable.
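
For example (a hypothetical function)::

   // Flagged: "aTitle" uses the Hungarian-style "a" prefix.
   function setTitle(aTitle) {}

   // Accepted.
   function setTitle(title) {}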
no-cpows-in-tests
-----------------
This rule checks whether the file is a browser mochitest and, if so, looks
for possible CPOW usage by matching the following strings:
- "gBrowser.contentWindow"
- "gBrowser.contentDocument"
- "gBrowser.selectedBrowser.contentWindow"
- "browser.contentDocument"
- "window.content"
- "content"
- "content."
Note: These are string matches so we will miss situations where the parent
object is assigned to another variable e.g.::
   var b = gBrowser;
   b.content // Would not be detected as a CPOW.
no-single-arg-cu-import
-----------------------
Rejects calls to "Cu.import" that do not supply a second argument (meaning they
add the exported properties into global scope).
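
For example::

   Cu.import("resource://gre/modules/Services.jsm");       // Flagged.
   Cu.import("resource://gre/modules/Services.jsm", this); // Accepted.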
reject-importGlobalProperties
-----------------------------
Rejects calls to ``Cu.importGlobalProperties``. Use of this function is
undesirable in some parts of the tree.
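
For example (the property name is illustrative)::

   Cu.importGlobalProperties(["fetch"]); // Flagged where this rule is enabled.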
reject-some-requires
--------------------
This rule takes one option, a regular expression. Invocations of ``require``
with a string literal argument are matched against this regexp, and the
``require`` use is flagged if it matches.
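
A sketch, assuming the rule is configured with the regular expression
``^devtools/server/``::

   // Flagged: the string literal matches the configured regexp.
   const actor = require("devtools/server/actors/actor");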
this-top-level-scope
--------------------
Treats top-level assignments like ``this.mumble = value`` as declaring a global.
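
For example::

   this.mumble = 1; // Treated as declaring a global named "mumble".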
var-only-at-top-level
---------------------
Marks all ``var`` declarations that are not at the top level as invalid.
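
For example::

   var topLevel = 1;   // Accepted: declared at the top level.
   function f() {
     var nested = 2;   // Flagged: not at the top level.
   }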
Example
=======
+-------+-----------------------+
| Possible values for all rules |
+-------+-----------------------+
| Value | Meaning               |
+-------+-----------------------+
| 0     | Deactivated           |
+-------+-----------------------+
| 1     | Warning               |
+-------+-----------------------+
| 2     | Error                 |
+-------+-----------------------+
Example configuration::
"rules": {
"mozilla/balanced-listeners": 2,
"mozilla/components-imports": 1,
"mozilla/import-globals-from": 1,
"mozilla/import-headjs-globals": 1,
"mozilla/mark-test-function-used": 1,
"mozilla/var-only-at-top-level": 1,
"mozilla/no-cpows-in-tests": 1,
}

Some files were not shown because too many files have changed in this diff.