Bug 1122061 - Move TelemetrySession tests out of test_telemetryPing.js. r=gfritzsche
This commit is contained in:
parent 32776a14b3
commit 93f8ea689f
@@ -312,5 +312,5 @@ add_task(function* test_times() {
 });

 add_task(function* test_shutdown() {
-  yield TelemetrySession.shutdown();
+  yield TelemetrySession.shutdown(false);
 });
@@ -211,7 +211,7 @@ this.TelemetryPing = Object.freeze({
   },

   /**
-   * Only used for testing. Saves a ping to disk with a specific file name and path.
+   * Only used for testing. Saves a ping to disk and returns the ping id once done.
    *
    * @param {String} aType The type of the ping.
    * @param {Object} aPayload The actual data payload for the ping.
@@ -224,9 +224,11 @@ this.TelemetryPing = Object.freeze({
    *                  environment data.
    * @param {Boolean} [aOptions.overwrite=false] true overwrites a ping with the same name,
    *                  if found.
-   * @param {String} aOptions.filePath The path to save the ping to.
+   * @param {String} [aOptions.filePath] The path to save the ping to. Will save to default
+   *                 ping location if not provided.
    *
-   * @returns {Promise} A promise that resolves when the ping is saved to disk.
+   * @returns {Promise<Integer>} A promise that resolves with the ping id when the ping is
+   *                  saved to disk.
    */
   testSavePingToFile: function(aType, aPayload, aOptions = {}) {
     let options = aOptions;
@@ -450,7 +452,7 @@ let Impl = {
   },

   /**
-   * Save a ping to disk with a specific file name.
+   * Save a ping to disk and return the ping id when done.
    *
    * @param {String} aType The type of the ping.
    * @param {Object} aPayload The actual data payload for the ping.
@@ -462,18 +464,25 @@ let Impl = {
    * @param {Boolean} aOptions.addEnvironment true if the ping should contain the
    *                  environment data.
    * @param {Boolean} aOptions.overwrite true overwrites a ping with the same name, if found.
-   * @param {String} aOptions.filePath The path to save the ping to.
+   * @param {String} [aOptions.filePath] The path to save the ping to. Will save to default
+   *                 ping location if not provided.
    *
-   * @returns {Promise} A promise that resolves when the ping is saved to disk.
+   * @returns {Promise} A promise that resolves with the ping id when the ping is saved to
+   *                    disk.
    */
   testSavePingToFile: function testSavePingToFile(aType, aPayload, aOptions) {
     this._log.trace("testSavePingToFile - Type " + aType + ", Server " + this._server +
                     ", aOptions " + JSON.stringify(aOptions));

     return this.assemblePing(aType, aPayload, aOptions)
-      .then(pingData => TelemetryFile.savePingToFile(pingData, aOptions.filePath,
-                                                     aOptions.overwrite),
-            error => this._log.error("testSavePingToFile - Rejection", error));
+      .then(pingData => {
+        if (aOptions.filePath) {
+          return TelemetryFile.savePingToFile(pingData, aOptions.filePath, aOptions.overwrite)
+                              .then(() => { return pingData.id; });
+        } else {
+          return TelemetryFile.savePing(pingData, aOptions.overwrite)
+                              .then(() => { return pingData.id; });
+        }
+      }, error => this._log.error("testSavePing - Rejection", error));
   },

   finishPingRequest: function finishPingRequest(success, startTime, ping, isPersisted) {
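The new contract is easiest to see from the caller's side. A minimal sketch (a hypothetical test snippet, not part of this commit): the returned promise now resolves with the ping id, which a test can then use to locate the saved ping.

add_task(function* test_idIsReturned() {
  // With no filePath option the ping is saved to the default pending-pings
  // location, and the resolved value is the id of the saved ping.
  let pingId = yield TelemetryPing.testSavePingToFile("test-ping", {}, { overwrite: true });
  do_check_neq(pingId, null);
});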
@@ -217,9 +217,11 @@ this.TelemetrySession = Object.freeze({
   },
   /**
    * Used only for testing purposes.
+   * @param {Boolean} [aForceSavePending=true] If true, always saves the ping whether Telemetry
+   *                  can send pings or not, which is used for testing.
    */
-  shutdown: function() {
-    return Impl.shutdown(true);
+  shutdown: function(aForceSavePending = true) {
+    return Impl.shutdown(aForceSavePending);
   },
   /**
    * Used only for testing purposes.
@@ -1163,5 +1165,6 @@ let Impl = {
     if (Telemetry.canSend || testing) {
       return this.savePendingPings();
     }
+    return Promise.resolve();
   },
 };
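With the added fallback, shutdown always hands back a promise, so callers can chain on it unconditionally whether or not pings can be sent. A one-line sketch (hypothetical caller, not in this commit):

yield TelemetrySession.shutdown(false); // resolves immediately when nothing needs saving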
@@ -15,34 +15,20 @@ const Cr = Components.results;

 Cu.import("resource://testing-common/httpd.js", this);
 Cu.import("resource://gre/modules/Services.jsm");
-Cu.import("resource://gre/modules/LightweightThemeManager.jsm", this);
 Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
 Cu.import("resource://gre/modules/TelemetryPing.jsm", this);
-Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
 Cu.import("resource://gre/modules/TelemetryFile.jsm", this);
 Cu.import("resource://gre/modules/Task.jsm", this);
 Cu.import("resource://gre/modules/Promise.jsm", this);
 Cu.import("resource://gre/modules/Preferences.jsm");
 Cu.import("resource://gre/modules/osfile.jsm", this);

-const IGNORE_HISTOGRAM = "test::ignore_me";
-const IGNORE_HISTOGRAM_TO_CLONE = "MEMORY_HEAP_ALLOCATED";
-const IGNORE_CLONED_HISTOGRAM = "test::ignore_me_also";
-const ADDON_NAME = "Telemetry test addon";
-const ADDON_HISTOGRAM = "addon-histogram";
-// Add some unicode characters here to ensure that sending them works correctly.
-const FLASH_VERSION = "\u201c1.1.1.1\u201d";
-const SHUTDOWN_TIME = 10000;
-const FAILED_PROFILE_LOCK_ATTEMPTS = 2;
+const PING_FORMAT_VERSION = 2;
+const TEST_PING_TYPE = "test-ping-type";
+const TEST_PING_RETENTION = 180;

-// Constants from prio.h for nsIFileOutputStream.init
-const PR_WRONLY = 0x2;
-const PR_CREATE_FILE = 0x8;
-const PR_TRUNCATE = 0x20;
-const RW_OWNER = 0600;
-
-const NUMBER_OF_THREADS_TO_LAUNCH = 30;
-let gNumberOfThreadsLaunched = 0;
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_NAME = "XPCShell";

 const PREF_BRANCH = "toolkit.telemetry.";
 const PREF_ENABLED = PREF_BRANCH + "enabled";
@@ -50,8 +36,6 @@ const PREF_FHR_UPLOAD_ENABLED = "datareporting.healthreport.uploadEnabled";
 const PREF_FHR_SERVICE_ENABLED = "datareporting.healthreport.service.enabled";

 const HAS_DATAREPORTINGSERVICE = "@mozilla.org/datareporting/service;1" in Cc;
-const SESSION_RECORDER_EXPECTED = HAS_DATAREPORTINGSERVICE &&
-                                  Preferences.get(PREF_FHR_SERVICE_ENABLED, true);

 const Telemetry = Cc["@mozilla.org/base/telemetry;1"].getService(Ci.nsITelemetry);

@@ -65,15 +49,19 @@ XPCOMUtils.defineLazyGetter(this, "gDatareportingService",
                               .getService(Ci.nsISupports)
                               .wrappedJSObject);

-function sendPing () {
-  TelemetrySession.gatherStartup();
+function sendPing(aSendClientId, aSendEnvironment) {
   if (gServerStarted) {
     TelemetryPing.setServer("http://localhost:" + gHttpServer.identity.primaryPort);
-    return TelemetrySession.testPing();
   } else {
     TelemetryPing.setServer("http://doesnotexist");
-    return TelemetrySession.testPing();
   }
+
+  let options = {
+    addClientId: aSendClientId,
+    addEnvironment: aSendEnvironment,
+    retentionDays: TEST_PING_RETENTION,
+  };
+  return TelemetryPing.send(TEST_PING_TYPE, {}, options);
 }

 function wrapWithExceptionHandler(f) {
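For reference, a sketch of how the reworked helper is meant to be driven (the flag combinations mirror the addClientId/addEnvironment options above; the specific calls are illustrative):

yield sendPing(false, false); // plain ping
yield sendPing(true, false);  // ping carrying the client id
yield sendPing(false, true);  // ping carrying the environment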
@@ -94,40 +82,6 @@ function registerPingHandler(handler) {
                                     wrapWithExceptionHandler(handler));
 }

-function setupTestData() {
-  Telemetry.newHistogram(IGNORE_HISTOGRAM, "never", Telemetry.HISTOGRAM_BOOLEAN);
-  Telemetry.histogramFrom(IGNORE_CLONED_HISTOGRAM, IGNORE_HISTOGRAM_TO_CLONE);
-  Services.startup.interrupted = true;
-  Telemetry.registerAddonHistogram(ADDON_NAME, ADDON_HISTOGRAM,
-                                   Telemetry.HISTOGRAM_LINEAR,
-                                   1, 5, 6);
-  let h1 = Telemetry.getAddonHistogram(ADDON_NAME, ADDON_HISTOGRAM);
-  h1.add(1);
-  let h2 = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
-  h2.add();
-
-  let k1 = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT");
-  k1.add("a");
-  k1.add("a");
-  k1.add("b");
-}
-
-function getSavedHistogramsFile(basename) {
-  let tmpDir = Services.dirsvc.get("ProfD", Ci.nsIFile);
-  let histogramsFile = tmpDir.clone();
-  histogramsFile.append(basename);
-  if (histogramsFile.exists()) {
-    histogramsFile.remove(true);
-  }
-  do_register_cleanup(function () {
-    try {
-      histogramsFile.remove(true);
-    } catch (e) {
-    }
-  });
-  return histogramsFile;
-}
-
 function decodeRequestPayload(request) {
   let s = request.bodyInputStream;
   let payload = null;
@@ -164,242 +118,53 @@ function decodeRequestPayload(request) {
   return payload;
 }

-function checkPayloadInfo(payload, reason) {
-  // get rid of the non-deterministic field
-  const expected_info = {
-    OS: "XPCShell",
-    appVersion: "1",
-    appName: "XPCShell",
-    appBuildID: "2007010101",
-    platformBuildID: "2007010101",
-    flashVersion: FLASH_VERSION
-  };
+function checkPingFormat(aPing, aType, aHasClientId, aHasEnvironment) {
+  const MANDATORY_PING_FIELDS = [
+    "type", "id", "creationDate", "version", "application", "payload"
+  ];
+
+  const APPLICATION_TEST_DATA = {
+    buildId: "2007010101",
+    name: APP_NAME,
+    version: APP_VERSION,
+    vendor: "Mozilla",
+    platformVersion: PLATFORM_VERSION,
+    xpcomAbi: "noarch-spidermonkey",
+  };

-  for (let f in expected_info) {
-    do_check_eq(payload.info[f], expected_info[f]);
-  }
+  // Check that the ping contains all the mandatory fields.
+  for (let f of MANDATORY_PING_FIELDS) {
+    Assert.ok(f in aPing, f + " must be available.");
+  }

-  do_check_eq(payload.info.reason, reason);
-  do_check_true("appUpdateChannel" in payload.info);
-  do_check_true("revision" in payload.info);
-  if (Services.appinfo.isOfficial) {
-    do_check_true(payload.info.revision.startsWith("http"));
-  }
+  Assert.equal(aPing.type, aType, "The ping must have the correct type.");
+  Assert.equal(aPing.version, PING_FORMAT_VERSION, "The ping must have the correct version.");

-  if ("@mozilla.org/datareporting/service;1" in Cc &&
-      Services.prefs.getBoolPref(PREF_FHR_UPLOAD_ENABLED)) {
-    do_check_true("clientID" in payload);
-    do_check_neq(payload.clientID, null);
-    do_check_eq(payload.clientID, gDataReportingClientID);
-  }
-}
+  // Test the application section.
+  for (let f in APPLICATION_TEST_DATA) {
+    Assert.equal(aPing.application[f], APPLICATION_TEST_DATA[f],
+                 f + " must have the correct value.");
+  }
+
+  // We can't check the values for channel and architecture. Just make
+  // sure they are in.
+  Assert.ok("architecture" in aPing.application,
+            "The application section must have an architecture field.");
+  Assert.ok("channel" in aPing.application,
+            "The application section must have a channel field.");
+
+  // Check the clientId and environment fields, as needed.
+  Assert.equal("clientId" in aPing, aHasClientId);
+  Assert.equal("environment" in aPing, aHasEnvironment);
+}

-function checkPayload(request, payload, reason, successfulPings) {
-  // Take off ["","submit","telemetry"].
-  let pathComponents = request.path.split("/").slice(3);
-
-  checkPayloadInfo(payload, reason);
-  do_check_eq(reason, pathComponents[1]);
-  do_check_eq(request.getHeader("content-type"), "application/json; charset=UTF-8");
-  do_check_true(payload.simpleMeasurements.uptime >= 0);
-  do_check_true(payload.simpleMeasurements.startupInterrupted === 1);
-  do_check_eq(payload.simpleMeasurements.shutdownDuration, SHUTDOWN_TIME);
-  do_check_eq(payload.simpleMeasurements.savedPings, 1);
-  do_check_true("maximalNumberOfConcurrentThreads" in payload.simpleMeasurements);
-  do_check_true(payload.simpleMeasurements.maximalNumberOfConcurrentThreads >= gNumberOfThreadsLaunched);
-
-  let activeTicks = payload.simpleMeasurements.activeTicks;
-  do_check_true(SESSION_RECORDER_EXPECTED ? activeTicks >= 0 : activeTicks == -1);
-
-  do_check_eq(payload.simpleMeasurements.failedProfileLockCount,
-              FAILED_PROFILE_LOCK_ATTEMPTS);
-  let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
-  let failedProfileLocksFile = profileDirectory.clone();
-  failedProfileLocksFile.append("Telemetry.FailedProfileLocks.txt");
-  do_check_true(!failedProfileLocksFile.exists());
-
-  let isWindows = ("@mozilla.org/windows-registry-key;1" in Components.classes);
-  if (isWindows) {
-    do_check_true(payload.simpleMeasurements.startupSessionRestoreReadBytes > 0);
-    do_check_true(payload.simpleMeasurements.startupSessionRestoreWriteBytes > 0);
-  }
-
-  const TELEMETRY_PING = "TELEMETRY_PING";
-  const TELEMETRY_SUCCESS = "TELEMETRY_SUCCESS";
-  const TELEMETRY_TEST_FLAG = "TELEMETRY_TEST_FLAG";
-  const TELEMETRY_TEST_COUNT = "TELEMETRY_TEST_COUNT";
-  const TELEMETRY_TEST_KEYED_FLAG = "TELEMETRY_TEST_KEYED_FLAG";
-  const TELEMETRY_TEST_KEYED_COUNT = "TELEMETRY_TEST_KEYED_COUNT";
-  const READ_SAVED_PING_SUCCESS = "READ_SAVED_PING_SUCCESS";
-
-  do_check_true(TELEMETRY_PING in payload.histograms);
-  do_check_true(READ_SAVED_PING_SUCCESS in payload.histograms);
-  do_check_true(TELEMETRY_TEST_FLAG in payload.histograms);
-  do_check_true(TELEMETRY_TEST_COUNT in payload.histograms);
-
-  let rh = Telemetry.registeredHistograms(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, []);
-  for (let name of rh) {
-    if (/SQLITE/.test(name) && name in payload.histograms) {
-      do_check_true(("STARTUP_" + name) in payload.histograms);
-    }
-  }
-  do_check_false(IGNORE_HISTOGRAM in payload.histograms);
-  do_check_false(IGNORE_CLONED_HISTOGRAM in payload.histograms);
-
-  // Flag histograms should automagically spring to life.
-  const expected_flag = {
-    range: [1, 2],
-    bucket_count: 3,
-    histogram_type: 3,
-    values: {0:1, 1:0},
-    sum: 0,
-    sum_squares_lo: 0,
-    sum_squares_hi: 0
-  };
-  let flag = payload.histograms[TELEMETRY_TEST_FLAG];
-  do_check_eq(uneval(flag), uneval(expected_flag));
-
-  // We should have a test count.
-  const expected_count = {
-    range: [1, 2],
-    bucket_count: 3,
-    histogram_type: 4,
-    values: {0:1, 1:0},
-    sum: 1,
-    sum_squares_lo: 1,
-    sum_squares_hi: 0,
-  };
-  let count = payload.histograms[TELEMETRY_TEST_COUNT];
-  do_check_eq(uneval(count), uneval(expected_count));
-
-  // There should be one successful report from the previous telemetry ping.
-  const expected_tc = {
-    range: [1, 2],
-    bucket_count: 3,
-    histogram_type: 2,
-    values: {0:1, 1:successfulPings, 2:0},
-    sum: successfulPings,
-    sum_squares_lo: successfulPings,
-    sum_squares_hi: 0
-  };
-  let tc = payload.histograms[TELEMETRY_SUCCESS];
-  do_check_eq(uneval(tc), uneval(expected_tc));
-
-  let h = payload.histograms[READ_SAVED_PING_SUCCESS];
-  do_check_eq(h.values[0], 1);
-
-  // The ping should include data from memory reporters. We can't check that
-  // this data is correct, because we can't control the values returned by the
-  // memory reporters. But we can at least check that the data is there.
-  //
-  // It's important to check for the presence of reporters with a mix of units,
-  // because TelemetryPing has separate logic for each one. But we can't
-  // currently check UNITS_COUNT_CUMULATIVE or UNITS_PERCENTAGE because
-  // Telemetry doesn't touch a memory reporter with these units that's
-  // available on all platforms.
-
-  do_check_true('MEMORY_JS_GC_HEAP' in payload.histograms); // UNITS_BYTES
-  do_check_true('MEMORY_JS_COMPARTMENTS_SYSTEM' in payload.histograms); // UNITS_COUNT
-
-  // We should have included addon histograms.
-  do_check_true("addonHistograms" in payload);
-  do_check_true(ADDON_NAME in payload.addonHistograms);
-  do_check_true(ADDON_HISTOGRAM in payload.addonHistograms[ADDON_NAME]);
-
-  do_check_true(("mainThread" in payload.slowSQL) &&
-                ("otherThreads" in payload.slowSQL));
-
-  // Check keyed histogram payload.
-
-  do_check_true("keyedHistograms" in payload);
-  let keyedHistograms = payload.keyedHistograms;
-  do_check_true(TELEMETRY_TEST_KEYED_FLAG in keyedHistograms);
-  do_check_true(TELEMETRY_TEST_KEYED_COUNT in keyedHistograms);
-
-  Assert.deepEqual({}, keyedHistograms[TELEMETRY_TEST_KEYED_FLAG]);
-
-  const expected_keyed_count = {
-    "a": {
-      range: [1, 2],
-      bucket_count: 3,
-      histogram_type: 4,
-      values: {0:2, 1:0},
-      sum: 2,
-      sum_squares_lo: 2,
-      sum_squares_hi: 0,
-    },
-    "b": {
-      range: [1, 2],
-      bucket_count: 3,
-      histogram_type: 4,
-      values: {0:1, 1:0},
-      sum: 1,
-      sum_squares_lo: 1,
-      sum_squares_hi: 0,
-    },
-  };
-  Assert.deepEqual(expected_keyed_count, keyedHistograms[TELEMETRY_TEST_KEYED_COUNT]);
-}
-
-// A fake plugin host for testing flash version telemetry
-let PluginHost = {
-  getPluginTags: function(countRef) {
-    let plugins = [{name: "Shockwave Flash", version: FLASH_VERSION}];
-    countRef.value = plugins.length;
-    return plugins;
-  },
-
-  QueryInterface: function(iid) {
-    if (iid.equals(Ci.nsIPluginHost)
-     || iid.equals(Ci.nsISupports))
-      return this;
-
-    throw Components.results.NS_ERROR_NO_INTERFACE;
-  }
-}
-
-let PluginHostFactory = {
-  createInstance: function (outer, iid) {
-    if (outer != null)
-      throw Components.results.NS_ERROR_NO_AGGREGATION;
-    return PluginHost.QueryInterface(iid);
-  }
-};
-
-const PLUGINHOST_CONTRACTID = "@mozilla.org/plugin/host;1";
-const PLUGINHOST_CID = Components.ID("{2329e6ea-1f15-4cbe-9ded-6e98e842de0e}");
-
-function registerFakePluginHost() {
-  let registrar = Components.manager.QueryInterface(Ci.nsIComponentRegistrar);
-  registrar.registerFactory(PLUGINHOST_CID, "Fake Plugin Host",
-                            PLUGINHOST_CONTRACTID, PluginHostFactory);
-}
-
-function writeStringToFile(file, contents) {
-  let ostream = Cc["@mozilla.org/network/safe-file-output-stream;1"]
-                .createInstance(Ci.nsIFileOutputStream);
-  ostream.init(file, PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE,
-               RW_OWNER, ostream.DEFER_OPEN);
-  ostream.write(contents, contents.length);
-  ostream.QueryInterface(Ci.nsISafeOutputStream).finish();
-  ostream.close();
-}
-
-function write_fake_shutdown_file() {
-  let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
-  let file = profileDirectory.clone();
-  file.append("Telemetry.ShutdownTime.txt");
-  let contents = "" + SHUTDOWN_TIME;
-  writeStringToFile(file, contents);
-}
-
-function write_fake_failedprofilelocks_file() {
-  let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
-  let file = profileDirectory.clone();
-  file.append("Telemetry.FailedProfileLocks.txt");
-  let contents = "" + FAILED_PROFILE_LOCK_ATTEMPTS;
-  writeStringToFile(file, contents);
-}
+/**
+ * Start the webserver used in the tests.
+ */
+function startWebserver() {
+  gHttpServer.start(-1);
+  gServerStarted = true;
+  gRequestIterator = Iterator(new Request());
+}

 function run_test() {
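To make the mandatory-field checks concrete, here is a hypothetical ping object (all values invented for illustration) that would satisfy checkPingFormat(examplePing, TEST_PING_TYPE, false, false):

let examplePing = {
  type: TEST_PING_TYPE,
  id: "dummy-ping-id",
  creationDate: "2015-01-19T00:00:00.000Z",
  version: PING_FORMAT_VERSION,
  application: {
    architecture: "x86-64",
    buildId: "2007010101",
    name: APP_NAME,
    version: APP_VERSION,
    vendor: "Mozilla",
    platformVersion: PLATFORM_VERSION,
    xpcomAbi: "noarch-spidermonkey",
    channel: "default",
  },
  payload: {},
};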
@@ -419,67 +184,13 @@ function run_test() {
     gDatareportingService.observe(null, "profile-after-change", null);
   }

-  // Make it look like we've previously failed to lock a profile a couple times.
-  write_fake_failedprofilelocks_file();
-
-  // Make it look like we've shutdown before.
-  write_fake_shutdown_file();
-
-  let currentMaxNumberOfThreads = Telemetry.maximalNumberOfConcurrentThreads;
-  do_check_true(currentMaxNumberOfThreads > 0);
-
-  // Try to augment the maximal number of threads currently launched
-  let threads = [];
-  try {
-    for (let i = 0; i < currentMaxNumberOfThreads + 10; ++i) {
-      threads.push(Services.tm.newThread(0));
-    }
-  } catch (ex) {
-    // If memory is too low, it is possible that not all threads will be launched.
-  }
-  gNumberOfThreadsLaunched = threads.length;
-
-  do_check_true(Telemetry.maximalNumberOfConcurrentThreads >= gNumberOfThreadsLaunched);
-
-  do_register_cleanup(function() {
-    threads.forEach(function(thread) {
-      thread.shutdown();
-    });
-  });
-
-  Telemetry.asyncFetchTelemetryData(wrapWithExceptionHandler(actualTest));
-}
-
-function actualTest() {
-  // try to make LightweightThemeManager do stuff
-  let gInternalManager = Cc["@mozilla.org/addons/integration;1"]
-                         .getService(Ci.nsIObserver)
-                         .QueryInterface(Ci.nsITimerCallback);
-
-  gInternalManager.observe(null, "addons-startup", null);
-
-  // fake plugin host for consistent flash version data
-  registerFakePluginHost();
-
-  run_next_test();
+  Telemetry.asyncFetchTelemetryData(wrapWithExceptionHandler(run_next_test));
 }

 add_task(function* asyncSetup() {
-  yield TelemetrySession.setup();
   yield TelemetryPing.setup();

   if (HAS_DATAREPORTINGSERVICE) {
-    // force getSessionRecorder()==undefined to check the payload's activeTicks
-    gDatareportingService.simulateNoSessionRecorder();
-  }
-
-  // When no DRS or no DRS.getSessionRecorder(), activeTicks should be -1.
-  do_check_eq(TelemetrySession.getPayload().simpleMeasurements.activeTicks, -1);
-
-  if (HAS_DATAREPORTINGSERVICE) {
-    // Restore normal behavior for getSessionRecorder()
-    gDatareportingService.simulateRestoreSessionRecorder();
-
     gDataReportingClientID = yield gDatareportingService.getClientID();

     // We should have cached the client id now. Lets confirm that by
@@ -492,273 +203,68 @@ add_task(function* asyncSetup() {

 // Ensure that not overwriting an existing file fails silently
 add_task(function* test_overwritePing() {
-  let ping = {slug: "foo"}
+  let ping = {id: "foo"}
   yield TelemetryFile.savePing(ping, true);
   yield TelemetryFile.savePing(ping, false);
   yield TelemetryFile.cleanupPingFile(ping);
 });

-// Ensures that expired histograms are not part of the payload.
-add_task(function* test_expiredHistogram() {
-  let histogram_id = "FOOBAR";
-  let dummy = Telemetry.newHistogram(histogram_id, "30", Telemetry.HISTOGRAM_EXPONENTIAL, 1, 2, 3);
-
-  dummy.add(1);
-
-  do_check_eq(TelemetrySession.getPayload()["histograms"][histogram_id], undefined);
-  do_check_eq(TelemetrySession.getPayload()["histograms"]["TELEMETRY_TEST_EXPIRED"], undefined);
-});
-
-// Checks that an invalid histogram file is deleted if TelemetryFile fails to parse it.
-add_task(function* test_runInvalidJSON() {
-  let histogramsFile = getSavedHistogramsFile("invalid-histograms.dat");
-
-  writeStringToFile(histogramsFile, "this.is.invalid.JSON");
-  do_check_true(histogramsFile.exists());
-
-  yield TelemetrySession.testLoadHistograms(histogramsFile);
-  do_check_false(histogramsFile.exists());
-});

 // Sends a ping to a non existing server.
 add_task(function* test_noServerPing() {
-  yield sendPing();
+  yield sendPing(false, false);
 });

 // Checks that a sent ping is correctly received by a dummy http server.
 add_task(function* test_simplePing() {
-  gHttpServer.start(-1);
-  gServerStarted = true;
-  gRequestIterator = Iterator(new Request());
+  startWebserver();

-  yield sendPing();
+  yield sendPing(false, false);
   let request = yield gRequestIterator.next();
-  let payload = decodeRequestPayload(request);
-
-  checkPayloadInfo(payload, "test-ping");
+  let ping = decodeRequestPayload(request);
+  checkPingFormat(ping, TEST_PING_TYPE, false, false);
 });

-// Saves the current session histograms, reloads them, perfoms a ping
-// and checks that the dummy http server received both the previously
-// saved histograms and the new ones.
-add_task(function* test_saveLoadPing() {
-  let histogramsFile = getSavedHistogramsFile("saved-histograms.dat");
+add_task(function* test_pingHasClientId() {
+  // Send a ping with a clientId.
+  yield sendPing(true, false);

-  setupTestData();
-  yield TelemetrySession.testSaveHistograms(histogramsFile);
-  yield TelemetrySession.testLoadHistograms(histogramsFile);
-  yield sendPing();
+  let request = yield gRequestIterator.next();
+  let ping = decodeRequestPayload(request);
+  checkPingFormat(ping, TEST_PING_TYPE, true, false);

-  // Get requests received by dummy server.
-  let request1 = yield gRequestIterator.next();
-  let request2 = yield gRequestIterator.next();
-
-  // We decode both requests to check for the |reason|.
-  let payload1 = decodeRequestPayload(request1);
-  let payload2 = decodeRequestPayload(request2);
-
-  // Check we have the correct two requests. Ordering is not guaranteed.
-  if (payload1.info.reason === "test-ping") {
-    checkPayload(request1, payload1, "test-ping", 1);
-    checkPayload(request2, payload2, "saved-session", 1);
-  } else {
-    checkPayload(request1, payload1, "saved-session", 1);
-    checkPayload(request2, payload2, "test-ping", 1);
+  if (HAS_DATAREPORTINGSERVICE &&
+      Services.prefs.getBoolPref(PREF_FHR_UPLOAD_ENABLED)) {
+    Assert.equal(ping.clientId, gDataReportingClientID,
+                 "The correct clientId must be reported.");
   }
 });

-add_task(function* test_checkSubsession() {
-  const COUNT_ID = "TELEMETRY_TEST_COUNT";
-  const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
-  const count = Telemetry.getHistogramById(COUNT_ID);
-  const keyed = Telemetry.getKeyedHistogramById(KEYED_ID);
-  const registeredIds =
-    new Set(Telemetry.registeredHistograms(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, []));
+add_task(function* test_pingHasEnvironment() {
+  // Send a ping with the environment data.
+  yield sendPing(false, true);
+  let request = yield gRequestIterator.next();
+  let ping = decodeRequestPayload(request);
+  checkPingFormat(ping, TEST_PING_TYPE, false, true);

-  const stableHistograms = new Set([
-    "TELEMETRY_TEST_FLAG",
-    "TELEMETRY_TEST_COUNT",
-    "TELEMETRY_TEST_RELEASE_OPTOUT",
-    "TELEMETRY_TEST_RELEASE_OPTIN",
-    "STARTUP_CRASH_DETECTED",
-  ]);
-
-  const stableKeyedHistograms = new Set([
-    "TELEMETRY_TEST_KEYED_FLAG",
-    "TELEMETRY_TEST_KEYED_COUNT",
-    "TELEMETRY_TEST_KEYED_RELEASE_OPTIN",
-    "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT",
-  ]);
-
-  // Compare the two sets of histograms.
-  // The "subsession" histograms should match the registered
-  // "classic" histograms. However, histograms can change
-  // between us collecting the different payloads, so we only
-  // check for deep equality on known stable histograms.
-  checkHistograms = (classic, subsession) => {
-    for (let id of Object.keys(classic)) {
-      if (!registeredIds.has(id)) {
-        continue;
-      }
-
-      Assert.ok(id in subsession);
-      if (stableHistograms.has(id)) {
-        Assert.deepEqual(classic[id],
-                         subsession[id]);
-      } else {
-        Assert.equal(classic[id].histogram_type,
-                     subsession[id].histogram_type);
-      }
-    }
-  };
-
-  // Same as above, except for keyed histograms.
-  checkKeyedHistograms = (classic, subsession) => {
-    for (let id of Object.keys(classic)) {
-      if (!registeredIds.has(id)) {
-        continue;
-      }
-
-      Assert.ok(id in subsession);
-      if (stableKeyedHistograms.has(id)) {
-        Assert.deepEqual(classic[id],
-                         subsession[id]);
-      }
-    }
-  };
-
-  // Both classic and subsession payload histograms should start the same.
-  // The payloads should be identical for now except for the reason.
-  count.clear();
-  keyed.clear();
-  let classic = TelemetrySession.getPayload();
-  let subsession = TelemetrySession.getPayload("environment-change");
-
-  Assert.equal(classic.info.reason, "gather-payload");
-  Assert.equal(subsession.info.reason, "environment-change");
-  Assert.ok(!(COUNT_ID in classic.histograms));
-  Assert.ok(!(COUNT_ID in subsession.histograms));
-  Assert.ok(KEYED_ID in classic.keyedHistograms);
-  Assert.ok(KEYED_ID in subsession.keyedHistograms);
-  Assert.deepEqual(classic.keyedHistograms[KEYED_ID], {});
-  Assert.deepEqual(subsession.keyedHistograms[KEYED_ID], {});
-
-  checkHistograms(classic.histograms, subsession.histograms);
-  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
-
-  // Adding values should get picked up in both.
-  count.add(1);
-  keyed.add("a", 1);
-  keyed.add("b", 1);
-  classic = TelemetrySession.getPayload();
-  subsession = TelemetrySession.getPayload("environment-change");
-
-  Assert.ok(COUNT_ID in classic.histograms);
-  Assert.ok(COUNT_ID in subsession.histograms);
-  Assert.ok(KEYED_ID in classic.keyedHistograms);
-  Assert.ok(KEYED_ID in subsession.keyedHistograms);
-  Assert.equal(classic.histograms[COUNT_ID].sum, 1);
-  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
-  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
-
-  checkHistograms(classic.histograms, subsession.histograms);
-  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
-
-  // Values should still reset properly.
-  count.clear();
-  keyed.clear();
-  classic = TelemetrySession.getPayload();
-  subsession = TelemetrySession.getPayload("environment-change");
-
-  Assert.ok(!(COUNT_ID in classic.histograms));
-  Assert.ok(!(COUNT_ID in subsession.histograms));
-  Assert.ok(KEYED_ID in classic.keyedHistograms);
-  Assert.ok(KEYED_ID in subsession.keyedHistograms);
-  Assert.deepEqual(classic.keyedHistograms[KEYED_ID], {});
-
-  checkHistograms(classic.histograms, subsession.histograms);
-  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
-
-  // Adding values should get picked up in both.
-  count.add(1);
-  keyed.add("a", 1);
-  keyed.add("b", 1);
-  classic = TelemetrySession.getPayload();
-  subsession = TelemetrySession.getPayload("environment-change");
-
-  Assert.ok(COUNT_ID in classic.histograms);
-  Assert.ok(COUNT_ID in subsession.histograms);
-  Assert.ok(KEYED_ID in classic.keyedHistograms);
-  Assert.ok(KEYED_ID in subsession.keyedHistograms);
-  Assert.equal(classic.histograms[COUNT_ID].sum, 1);
-  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
-  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
-
-  checkHistograms(classic.histograms, subsession.histograms);
-  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
-
-  // We should be able to reset only the subsession histograms.
-  count.clear(true);
-  keyed.clear(true);
-  classic = TelemetrySession.getPayload();
-  subsession = TelemetrySession.getPayload("environment-change");
-
-  Assert.ok(COUNT_ID in classic.histograms);
-  Assert.ok(COUNT_ID in subsession.histograms);
-  Assert.equal(classic.histograms[COUNT_ID].sum, 1);
-  Assert.equal(subsession.histograms[COUNT_ID].sum, 0);
-
-  Assert.ok(KEYED_ID in classic.keyedHistograms);
-  Assert.ok(KEYED_ID in subsession.keyedHistograms);
-  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
-  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
-  Assert.deepEqual(subsession.keyedHistograms[KEYED_ID], {});
-
-  // Adding values should get picked up in both again.
-  count.add(1);
-  keyed.add("a", 1);
-  keyed.add("b", 1);
-  classic = TelemetrySession.getPayload();
-  subsession = TelemetrySession.getPayload("environment-change");
-
-  Assert.ok(COUNT_ID in classic.histograms);
-  Assert.ok(COUNT_ID in subsession.histograms);
-  Assert.equal(classic.histograms[COUNT_ID].sum, 2);
-  Assert.equal(subsession.histograms[COUNT_ID].sum, 1);
-
-  Assert.ok(KEYED_ID in classic.keyedHistograms);
-  Assert.ok(KEYED_ID in subsession.keyedHistograms);
-  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 2);
-  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 2);
-  Assert.equal(subsession.keyedHistograms[KEYED_ID]["a"].sum, 1);
-  Assert.equal(subsession.keyedHistograms[KEYED_ID]["b"].sum, 1);
+  // Test a field in the environment build section.
+  Assert.equal(ping.application.buildId, ping.environment.build.buildId);
 });

-// Checks that an expired histogram file is deleted when loaded.
-add_task(function* test_runOldPingFile() {
-  let histogramsFile = getSavedHistogramsFile("old-histograms.dat");
+add_task(function* test_pingHasEnvironmentAndClientId() {
+  // Send a ping with the environment data and client id.
+  yield sendPing(true, true);
+  let request = yield gRequestIterator.next();
+  let ping = decodeRequestPayload(request);
+  checkPingFormat(ping, TEST_PING_TYPE, true, true);

-  yield TelemetrySession.testSaveHistograms(histogramsFile);
-  do_check_true(histogramsFile.exists());
-  let mtime = histogramsFile.lastModifiedTime;
-  histogramsFile.lastModifiedTime = mtime - (14 * 24 * 60 * 60 * 1000 + 60000); // 14 days, 1m
-
-  yield TelemetrySession.testLoadHistograms(histogramsFile);
-  do_check_false(histogramsFile.exists());
-});
-
-add_task(function* test_savedSessionClientID() {
-  // Assure that we store the ping properly when saving sessions on shutdown.
-  // We make the TelemetrySession shutdown to trigger a session save.
-  const dir = TelemetryFile.pingDirectoryPath;
-  yield OS.File.removeDir(dir, {ignoreAbsent: true});
-  yield OS.File.makeDir(dir);
-  yield TelemetrySession.shutdown();
-
-  yield TelemetryFile.loadSavedPings();
-  Assert.equal(TelemetryFile.pingsLoaded, 1);
-  let ping = TelemetryFile.popPendingPings().next();
-  Assert.equal(ping.value.payload.clientID, gDataReportingClientID);
+  // Test a field in the environment build section.
+  Assert.equal(ping.application.buildId, ping.environment.build.buildId);
+  // Test that we have the correct clientId.
+  if (HAS_DATAREPORTINGSERVICE &&
+      Services.prefs.getBoolPref(PREF_FHR_UPLOAD_ENABLED)) {
+    Assert.equal(ping.clientId, gDataReportingClientID,
+                 "The correct clientId must be reported.");
+  }
 });

 add_task(function* stopServer(){
@@ -17,12 +17,12 @@ const Ci = Components.interfaces;
 const Cr = Components.results;
 const Cu = Components.utils;

 Cu.import("resource://gre/modules/osfile.jsm", this);
 Cu.import("resource://gre/modules/Services.jsm", this);
 Cu.import("resource://testing-common/httpd.js", this);
 Cu.import("resource://gre/modules/Promise.jsm", this);
 Cu.import("resource://gre/modules/TelemetryFile.jsm", this);
+Cu.import("resource://gre/modules/TelemetryPing.jsm", this);
-Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
 Cu.import("resource://gre/modules/Task.jsm", this);
 Cu.import("resource://gre/modules/XPCOMUtils.jsm");
 let {OS: {File, Path, Constants}} = Cu.import("resource://gre/modules/osfile.jsm", {});
@@ -53,70 +53,66 @@ let gCreatedPings = 0;
 let gSeenPings = 0;

 /**
- * Creates some TelemetrySession pings for the current session and
- * saves them to disk. Each ping gets a unique ID slug based on
- * an incrementor.
+ * Creates some Telemetry pings and saves them to disk. Each ping gets a
+ * unique ID based on an incrementor.
  *
- * @param aNum the number of pings to create.
- * @param aAge the age in milliseconds to offset from now. A value
- *             of 10 would make the ping 10ms older than now, for
- *             example.
+ * @param {Array} aPingInfos An array of ping type objects. Each entry must be an
+ *                object containing a "num" field for the number of pings to create and
+ *                an "age" field. The latter representing the age in milliseconds to offset
+ *                from now. A value of 10 would make the ping 10ms older than now, for
+ *                example.
  * @returns Promise
- * @resolve an Array with the created pings.
+ * @resolve an Array with the created ping ids.
  */
-function createSavedPings(aNum, aAge) {
-  return Task.spawn(function*(){
-    let pings = [];
-    let age = Date.now() - aAge;
+let createSavedPings = Task.async(function* (aPingInfos) {
+  let pingIds = [];
+  let now = Date.now();

-    for (let i = 0; i < aNum; ++i) {
-      let payload = TelemetrySession.getPayload();
-      let ping = { slug: "test-ping-" + gCreatedPings, reason: "test", payload: payload };
-
-      yield TelemetryFile.savePing(ping);
-
-      if (aAge) {
+  for (let type in aPingInfos) {
+    let num = aPingInfos[type].num;
+    let age = now - aPingInfos[type].age;
+    for (let i = 0; i < num; ++i) {
+      let pingId = yield TelemetryPing.testSavePingToFile("test-ping", {}, { overwrite: true });
+      if (aPingInfos[type].age) {
         // savePing writes to the file synchronously, so we're good to
         // modify the lastModifiedTime now.
-        let file = getSavePathForPing(ping);
-        yield File.setDates(file, null, age);
+        let filePath = getSavePathForPingId(pingId);
+        yield File.setDates(filePath, null, age);
       }
       gCreatedPings++;
-      pings.push(ping);
+      pingIds.push(pingId);
     }
-    return pings;
-  });
-}
+  }

+  return pingIds;
+});

 /**
- * Deletes locally saved pings in aPings if they
- * exist.
+ * Deletes locally saved pings if they exist.
  *
- * @param aPings an Array of pings to delete.
+ * @param aPingIds an Array of ping ids to delete.
  * @returns Promise
  */
-function clearPings(aPings) {
-  return Task.spawn(function*() {
-    for (let ping of aPings) {
-      let path = getSavePathForPing(ping);
-      yield File.remove(path);
-    }
-  });
-}
+let clearPings = Task.async(function* (aPingIds) {
+  for (let pingId of aPingIds) {
+    let filePath = getSavePathForPingId(pingId);
+    yield File.remove(filePath);
+  }
+});

 /**
- * Returns a handle for the file that aPing should be
+ * Returns a handle for the file that a ping should be
  * stored in locally.
  *
  * @returns path
  */
-function getSavePathForPing(aPing) {
-  return Path.join(Constants.Path.profileDir, PING_SAVE_FOLDER, aPing.slug);
+function getSavePathForPingId(aPingId) {
+  return Path.join(Constants.Path.profileDir, PING_SAVE_FOLDER, aPingId);
 }

 /**
- * Check if the number of TelemetrySession pings received by the
- * HttpServer is not equal to aExpectedNum.
+ * Check if the number of Telemetry pings received by the HttpServer is not equal
+ * to aExpectedNum.
  *
  * @param aExpectedNum the number of pings we expect to receive.
  */
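A usage sketch for the new signature (hypothetical, not part of this commit): one array entry per group of pings, each with an optional age offset.

let pingIds = yield createSavedPings([
  { num: 2 },                     // two pings stamped with the current time
  { num: 1, age: ONE_MINUTE_MS }, // one ping backdated by a minute
]);
yield clearPings(pingIds);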
@@ -125,30 +121,28 @@ function assertReceivedPings(aExpectedNum) {
 }

 /**
- * Throws if any pings in aPings is saved locally.
+ * Throws if any ping with an id in aPingIds is saved locally.
  *
- * @param aPings an Array of pings to check.
+ * @param aPingIds an Array of ping ids to check.
  * @returns Promise
  */
-function assertNotSaved(aPings) {
-  return Task.spawn(function*() {
-    let saved = 0;
-    for (let ping of aPings) {
-      let file = getSavePathForPing(ping);
-      if (yield File.exists()) {
-        saved++;
-      }
-    }
-    if (saved > 0) {
-      do_throw("Found " + saved + " unexpected saved pings.");
-    }
-  });
-}
+let assertNotSaved = Task.async(function* (aPingIds) {
+  let saved = 0;
+  for (let id of aPingIds) {
+    let filePath = getSavePathForPingId(id);
+    if (yield File.exists(filePath)) {
+      saved++;
+    }
+  }
+  if (saved > 0) {
+    do_throw("Found " + saved + " unexpected saved pings.");
+  }
+});

 /**
  * Our handler function for the HttpServer that simply
  * increments the gSeenPings global when it successfully
- * receives and decodes a TelemetrySession payload.
+ * receives and decodes a Telemetry payload.
  *
  * @param aRequest the HTTP request sent from HttpServer.
  */
@@ -174,7 +168,6 @@ function stopHttpServer() {
  * Reset Telemetry state.
  */
 function resetTelemetry() {
-  TelemetrySession.uninstall();
   // Quick and dirty way to clear TelemetryFile's pendingPings
   // collection, and put it back in its initial state.
   let gen = TelemetryFile.popPendingPings();
@@ -189,10 +182,6 @@ function startTelemetry() {
   return TelemetryPing.setup();
 }

-function startTelemetrySession() {
-  return TelemetrySession.setup();
-}
-
 function run_test() {
   gHttpServer.registerPrefixHandler("/submit/telemetry/", pingHandler);
   gHttpServer.start(-1);
@@ -209,13 +198,26 @@ function run_test() {
   run_next_test();
 }

+/**
+ * Setup the tests by making sure the ping storage directory is available, otherwise
+ * |TelemetryPing.testSaveDirectoryToFile| could fail.
+ */
+add_task(function* setupEnvironment() {
+  yield TelemetryPing.setup();
+
+  let directory = TelemetryFile.pingDirectoryPath;
+  yield File.makeDir(directory, { ignoreExisting: true, unixMode: OS.Constants.S_IRWXU });
+
+  yield resetTelemetry();
+});
+
 /**
  * Test that pings that are considered too old are just chucked out
  * immediately and never sent.
  */
 add_task(function* test_expired_pings_are_deleted() {
-  yield startTelemetrySession();
-  let expiredPings = yield createSavedPings(EXPIRED_PINGS, EXPIRED_PING_FILE_AGE);
+  let pingTypes = [{ num: EXPIRED_PINGS, age: EXPIRED_PING_FILE_AGE }];
+  let expiredPings = yield createSavedPings(pingTypes);
   yield startTelemetry();
   assertReceivedPings(0);
   yield assertNotSaved(expiredPings);
@@ -226,8 +228,8 @@ add_task(function* test_expired_pings_are_deleted() {
  * Test that really recent pings are not sent on Telemetry initialization.
  */
 add_task(function* test_recent_pings_not_sent() {
-  yield startTelemetrySession();
-  let recentPings = yield createSavedPings(RECENT_PINGS);
+  let pingTypes = [{ num: RECENT_PINGS }];
+  let recentPings = yield createSavedPings(pingTypes);
   yield startTelemetry();
   assertReceivedPings(0);
   yield resetTelemetry();
@@ -238,17 +240,20 @@ add_task(function* test_recent_pings_not_sent() {
  * Test that only the most recent LRU_PINGS pings are kept at startup.
  */
 add_task(function* test_most_recent_pings_kept() {
-  yield startTelemetrySession();
-  let head = yield createSavedPings(LRU_PINGS);
-  let tail = yield createSavedPings(3, ONE_MINUTE_MS);
-  let pings = head.concat(tail);
+  let pingTypes = [
+    { num: LRU_PINGS },
+    { num: 3, age: ONE_MINUTE_MS },
+  ];
+  let pings = yield createSavedPings(pingTypes);
+  let head = pings.slice(0, LRU_PINGS);
+  let tail = pings.slice(-3);

   yield startTelemetry();
   let gen = TelemetryFile.popPendingPings();

   for (let item of gen) {
-    for (let p of tail) {
-      do_check_neq(p.slug, item.slug);
+    for (let id of tail) {
+      do_check_neq(id, item.id);
     }
   }

@@ -263,10 +268,15 @@ add_task(function* test_most_recent_pings_kept() {
  * should just be deleted.
  */
 add_task(function* test_overdue_pings_trigger_send() {
-  yield startTelemetrySession();
-  let recentPings = yield createSavedPings(RECENT_PINGS);
-  let expiredPings = yield createSavedPings(EXPIRED_PINGS, EXPIRED_PING_FILE_AGE);
-  let overduePings = yield createSavedPings(OVERDUE_PINGS, OVERDUE_PING_FILE_AGE);
+  let pingTypes = [
+    { num: RECENT_PINGS },
+    { num: EXPIRED_PINGS, age: EXPIRED_PING_FILE_AGE },
+    { num: OVERDUE_PINGS, age: OVERDUE_PING_FILE_AGE },
+  ];
+  let pings = yield createSavedPings(pingTypes);
+  let recentPings = pings.slice(0, RECENT_PINGS);
+  let expiredPings = pings.slice(RECENT_PINGS, RECENT_PINGS + EXPIRED_PINGS);
+  let overduePings = pings.slice(-OVERDUE_PINGS);

   yield startTelemetry();
   assertReceivedPings(TOTAL_EXPECTED_PINGS);
toolkit/components/telemetry/tests/unit/test_TelemetrySession.js (new file, 762 lines)
@@ -0,0 +1,762 @@
/* Any copyright is dedicated to the Public Domain.
   http://creativecommons.org/publicdomain/zero/1.0/
*/
/* This testcase triggers two telemetry pings.
 *
 * Telemetry code keeps histograms of past telemetry pings. The first
 * ping populates these histograms. One of those histograms is then
 * checked in the second request.
 */

const Cc = Components.classes;
const Ci = Components.interfaces;
const Cu = Components.utils;
const Cr = Components.results;

Cu.import("resource://testing-common/httpd.js", this);
Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/LightweightThemeManager.jsm", this);
Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
Cu.import("resource://gre/modules/TelemetryPing.jsm", this);
Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
Cu.import("resource://gre/modules/TelemetryFile.jsm", this);
Cu.import("resource://gre/modules/Task.jsm", this);
Cu.import("resource://gre/modules/Promise.jsm", this);
Cu.import("resource://gre/modules/Preferences.jsm");
Cu.import("resource://gre/modules/osfile.jsm", this);

const PING_FORMAT_VERSION = 2;
const PING_TYPE = "main";

const PLATFORM_VERSION = "1.9.2";
const APP_VERSION = "1";
const APP_ID = "xpcshell@tests.mozilla.org";
const APP_NAME = "XPCShell";

const IGNORE_HISTOGRAM = "test::ignore_me";
const IGNORE_HISTOGRAM_TO_CLONE = "MEMORY_HEAP_ALLOCATED";
const IGNORE_CLONED_HISTOGRAM = "test::ignore_me_also";
const ADDON_NAME = "Telemetry test addon";
const ADDON_HISTOGRAM = "addon-histogram";
// Add some unicode characters here to ensure that sending them works correctly.
const SHUTDOWN_TIME = 10000;
const FAILED_PROFILE_LOCK_ATTEMPTS = 2;

// Constants from prio.h for nsIFileOutputStream.init
const PR_WRONLY = 0x2;
const PR_CREATE_FILE = 0x8;
const PR_TRUNCATE = 0x20;
const RW_OWNER = parseInt("0600", 8);

const NUMBER_OF_THREADS_TO_LAUNCH = 30;
let gNumberOfThreadsLaunched = 0;

const PREF_BRANCH = "toolkit.telemetry.";
const PREF_ENABLED = PREF_BRANCH + "enabled";
const PREF_FHR_UPLOAD_ENABLED = "datareporting.healthreport.uploadEnabled";
const PREF_FHR_SERVICE_ENABLED = "datareporting.healthreport.service.enabled";

const HAS_DATAREPORTINGSERVICE = "@mozilla.org/datareporting/service;1" in Cc;
const SESSION_RECORDER_EXPECTED = HAS_DATAREPORTINGSERVICE &&
                                  Preferences.get(PREF_FHR_SERVICE_ENABLED, true);

const Telemetry = Cc["@mozilla.org/base/telemetry;1"].getService(Ci.nsITelemetry);

let gHttpServer = new HttpServer();
let gServerStarted = false;
let gRequestIterator = null;
let gDataReportingClientID = null;

XPCOMUtils.defineLazyGetter(this, "gDatareportingService",
  () => Cc["@mozilla.org/datareporting/service;1"]
          .getService(Ci.nsISupports)
          .wrappedJSObject);

function sendPing() {
  TelemetrySession.gatherStartup();
  if (gServerStarted) {
    TelemetryPing.setServer("http://localhost:" + gHttpServer.identity.primaryPort);
    return TelemetrySession.testPing();
  } else {
    TelemetryPing.setServer("http://doesnotexist");
    return TelemetrySession.testPing();
  }
}
function wrapWithExceptionHandler(f) {
  function wrapper(...args) {
    try {
      f(...args);
    } catch (ex if typeof(ex) == 'object') {
      dump("Caught exception: " + ex.message + "\n");
      dump(ex.stack);
      do_test_finished();
    }
  }
  return wrapper;
}

function registerPingHandler(handler) {
  gHttpServer.registerPrefixHandler("/submit/telemetry/",
                                    wrapWithExceptionHandler(handler));
}

function setupTestData() {
  Telemetry.newHistogram(IGNORE_HISTOGRAM, "never", Telemetry.HISTOGRAM_BOOLEAN);
  Telemetry.histogramFrom(IGNORE_CLONED_HISTOGRAM, IGNORE_HISTOGRAM_TO_CLONE);
  Services.startup.interrupted = true;
  Telemetry.registerAddonHistogram(ADDON_NAME, ADDON_HISTOGRAM,
                                   Telemetry.HISTOGRAM_LINEAR,
                                   1, 5, 6);
  let h1 = Telemetry.getAddonHistogram(ADDON_NAME, ADDON_HISTOGRAM);
  h1.add(1);
  let h2 = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
  h2.add();

  let k1 = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT");
  k1.add("a");
  k1.add("a");
  k1.add("b");
}

function getSavedPingFile(basename) {
  let tmpDir = Services.dirsvc.get("ProfD", Ci.nsIFile);
  let pingFile = tmpDir.clone();
  pingFile.append(basename);
  if (pingFile.exists()) {
    pingFile.remove(true);
  }
  do_register_cleanup(function () {
    try {
      pingFile.remove(true);
    } catch (e) {
    }
  });
  return pingFile;
}
function decodeRequestPayload(request) {
  let s = request.bodyInputStream;
  let payload = null;
  let decoder = Cc["@mozilla.org/dom/json;1"].createInstance(Ci.nsIJSON)

  if (request.getHeader("content-encoding") == "gzip") {
    let observer = {
      buffer: "",
      onStreamComplete: function(loader, context, status, length, result) {
        this.buffer = String.fromCharCode.apply(this, result);
      }
    };

    let scs = Cc["@mozilla.org/streamConverters;1"]
              .getService(Ci.nsIStreamConverterService);
    let listener = Cc["@mozilla.org/network/stream-loader;1"]
                   .createInstance(Ci.nsIStreamLoader);
    listener.init(observer);
    let converter = scs.asyncConvertData("gzip", "uncompressed",
                                         listener, null);
    converter.onStartRequest(null, null);
    converter.onDataAvailable(null, null, s, 0, s.available());
    converter.onStopRequest(null, null, null);
    let unicodeConverter = Cc["@mozilla.org/intl/scriptableunicodeconverter"]
                           .createInstance(Ci.nsIScriptableUnicodeConverter);
    unicodeConverter.charset = "UTF-8";
    let utf8string = unicodeConverter.ConvertToUnicode(observer.buffer);
    utf8string += unicodeConverter.Finish();
    payload = decoder.decode(utf8string);
  } else {
    payload = decoder.decodeFromStream(s, s.available());
  }

  return payload;
}
function checkPingFormat(aPing, aType, aHasClientId, aHasEnvironment) {
  const MANDATORY_PING_FIELDS = [
    "type", "id", "creationDate", "version", "application", "payload"
  ];

  const APPLICATION_TEST_DATA = {
    buildId: "2007010101",
    name: APP_NAME,
    version: APP_VERSION,
    vendor: "Mozilla",
    platformVersion: PLATFORM_VERSION,
    xpcomAbi: "noarch-spidermonkey",
  };

  // Check that the ping contains all the mandatory fields.
  for (let f of MANDATORY_PING_FIELDS) {
    Assert.ok(f in aPing, f + " must be available.");
  }

  Assert.equal(aPing.type, aType, "The ping must have the correct type.");
  Assert.equal(aPing.version, PING_FORMAT_VERSION, "The ping must have the correct version.");

  // Test the application section.
  for (let f in APPLICATION_TEST_DATA) {
    Assert.equal(aPing.application[f], APPLICATION_TEST_DATA[f],
                 f + " must have the correct value.");
  }

  // We can't check the values for channel and architecture. Just make
  // sure they are in.
  Assert.ok("architecture" in aPing.application,
            "The application section must have an architecture field.");
  Assert.ok("channel" in aPing.application,
            "The application section must have a channel field.");

  // Check the clientId and environment fields, as needed.
  Assert.equal("clientId" in aPing, aHasClientId);
  Assert.equal("environment" in aPing, aHasEnvironment);
}
function checkPayload(payload, reason, successfulPings) {
  Assert.ok(payload.simpleMeasurements.uptime >= 0);
  Assert.equal(payload.simpleMeasurements.startupInterrupted, 1);
  Assert.equal(payload.simpleMeasurements.shutdownDuration, SHUTDOWN_TIME);
  Assert.equal(payload.simpleMeasurements.savedPings, 1);
  Assert.ok("maximalNumberOfConcurrentThreads" in payload.simpleMeasurements);
  Assert.ok(payload.simpleMeasurements.maximalNumberOfConcurrentThreads >= gNumberOfThreadsLaunched);

  let activeTicks = payload.simpleMeasurements.activeTicks;
  Assert.ok(SESSION_RECORDER_EXPECTED ? activeTicks >= 0 : activeTicks == -1);

  Assert.equal(payload.simpleMeasurements.failedProfileLockCount,
               FAILED_PROFILE_LOCK_ATTEMPTS);
  let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
  let failedProfileLocksFile = profileDirectory.clone();
  failedProfileLocksFile.append("Telemetry.FailedProfileLocks.txt");
  Assert.ok(!failedProfileLocksFile.exists());

  let isWindows = ("@mozilla.org/windows-registry-key;1" in Components.classes);
  if (isWindows) {
    Assert.ok(payload.simpleMeasurements.startupSessionRestoreReadBytes > 0);
    Assert.ok(payload.simpleMeasurements.startupSessionRestoreWriteBytes > 0);
  }

  const TELEMETRY_PING = "TELEMETRY_PING";
  const TELEMETRY_SUCCESS = "TELEMETRY_SUCCESS";
  const TELEMETRY_TEST_FLAG = "TELEMETRY_TEST_FLAG";
  const TELEMETRY_TEST_COUNT = "TELEMETRY_TEST_COUNT";
  const TELEMETRY_TEST_KEYED_FLAG = "TELEMETRY_TEST_KEYED_FLAG";
  const TELEMETRY_TEST_KEYED_COUNT = "TELEMETRY_TEST_KEYED_COUNT";
  const READ_SAVED_PING_SUCCESS = "READ_SAVED_PING_SUCCESS";

  Assert.ok(TELEMETRY_PING in payload.histograms);
  Assert.ok(READ_SAVED_PING_SUCCESS in payload.histograms);
  Assert.ok(TELEMETRY_TEST_FLAG in payload.histograms);
  Assert.ok(TELEMETRY_TEST_COUNT in payload.histograms);

  let rh = Telemetry.registeredHistograms(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, []);
  for (let name of rh) {
    if (/SQLITE/.test(name) && name in payload.histograms) {
      let histogramName = ("STARTUP_" + name);
      Assert.ok(histogramName in payload.histograms, histogramName + " must be available.");
    }
  }
  Assert.ok(!(IGNORE_HISTOGRAM in payload.histograms));
  Assert.ok(!(IGNORE_CLONED_HISTOGRAM in payload.histograms));

  // Flag histograms should automagically spring to life.
  const expected_flag = {
    range: [1, 2],
    bucket_count: 3,
    histogram_type: 3,
    values: {0:1, 1:0},
    sum: 0,
    sum_squares_lo: 0,
    sum_squares_hi: 0
  };
  let flag = payload.histograms[TELEMETRY_TEST_FLAG];
  Assert.equal(uneval(flag), uneval(expected_flag));

  // We should have a test count.
  const expected_count = {
    range: [1, 2],
    bucket_count: 3,
    histogram_type: 4,
    values: {0:1, 1:0},
    sum: 1,
    sum_squares_lo: 1,
    sum_squares_hi: 0,
  };
  let count = payload.histograms[TELEMETRY_TEST_COUNT];
  Assert.equal(uneval(count), uneval(expected_count));

  // There should be one successful report from the previous telemetry ping.
  const expected_tc = {
    range: [1, 2],
    bucket_count: 3,
    histogram_type: 2,
    values: {0:2, 1:successfulPings, 2:0},
    sum: successfulPings,
    sum_squares_lo: successfulPings,
    sum_squares_hi: 0
  };
  let tc = payload.histograms[TELEMETRY_SUCCESS];
  Assert.equal(uneval(tc), uneval(expected_tc));

  let h = payload.histograms[READ_SAVED_PING_SUCCESS];
  Assert.equal(h.values[0], 1);

  // The ping should include data from memory reporters. We can't check that
  // this data is correct, because we can't control the values returned by the
  // memory reporters. But we can at least check that the data is there.
  //
  // It's important to check for the presence of reporters with a mix of units,
  // because TelemetryPing has separate logic for each one. But we can't
  // currently check UNITS_COUNT_CUMULATIVE or UNITS_PERCENTAGE because
  // Telemetry doesn't touch a memory reporter with these units that's
  // available on all platforms.

  Assert.ok('MEMORY_JS_GC_HEAP' in payload.histograms); // UNITS_BYTES
  Assert.ok('MEMORY_JS_COMPARTMENTS_SYSTEM' in payload.histograms); // UNITS_COUNT

  // We should have included addon histograms.
  Assert.ok("addonHistograms" in payload);
  Assert.ok(ADDON_NAME in payload.addonHistograms);
  Assert.ok(ADDON_HISTOGRAM in payload.addonHistograms[ADDON_NAME]);

  Assert.ok(("mainThread" in payload.slowSQL) &&
            ("otherThreads" in payload.slowSQL));

  // Check keyed histogram payload.

  Assert.ok("keyedHistograms" in payload);
  let keyedHistograms = payload.keyedHistograms;
  Assert.ok(TELEMETRY_TEST_KEYED_FLAG in keyedHistograms);
  Assert.ok(TELEMETRY_TEST_KEYED_COUNT in keyedHistograms);
Assert.deepEqual({}, keyedHistograms[TELEMETRY_TEST_KEYED_FLAG]);
|
||||
|
||||
const expected_keyed_count = {
|
||||
"a": {
|
||||
range: [1, 2],
|
||||
bucket_count: 3,
|
||||
histogram_type: 4,
|
||||
values: {0:2, 1:0},
|
||||
sum: 2,
|
||||
sum_squares_lo: 2,
|
||||
sum_squares_hi: 0,
|
||||
},
|
||||
"b": {
|
||||
range: [1, 2],
|
||||
bucket_count: 3,
|
||||
histogram_type: 4,
|
||||
values: {0:1, 1:0},
|
||||
sum: 1,
|
||||
sum_squares_lo: 1,
|
||||
sum_squares_hi: 0,
|
||||
},
|
||||
};
|
||||
Assert.deepEqual(expected_keyed_count, keyedHistograms[TELEMETRY_TEST_KEYED_COUNT]);
|
||||
}

function writeStringToFile(file, contents) {
  let ostream = Cc["@mozilla.org/network/safe-file-output-stream;1"]
                .createInstance(Ci.nsIFileOutputStream);
  ostream.init(file, PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE,
               RW_OWNER, ostream.DEFER_OPEN);
  ostream.write(contents, contents.length);
  ostream.QueryInterface(Ci.nsISafeOutputStream).finish();
  ostream.close();
}
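
// For symmetry with writeStringToFile(), a minimal sketch of a reader built
// on the standard XPCOM stream interfaces; this helper is hypothetical and
// not used by the tests below.
function readStringFromFile(file) {
  let fstream = Cc["@mozilla.org/network/file-input-stream;1"]
                .createInstance(Ci.nsIFileInputStream);
  fstream.init(file, -1, 0, 0);
  let sstream = Cc["@mozilla.org/scriptableinputstream;1"]
                .createInstance(Ci.nsIScriptableInputStream);
  sstream.init(fstream);
  // Read the whole file in one go; fine for the small marker files used here.
  let contents = sstream.read(sstream.available());
  sstream.close();
  fstream.close();
  return contents;
}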

function write_fake_shutdown_file() {
  let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
  let file = profileDirectory.clone();
  file.append("Telemetry.ShutdownTime.txt");
  let contents = "" + SHUTDOWN_TIME;
  writeStringToFile(file, contents);
}

function write_fake_failedprofilelocks_file() {
  let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
  let file = profileDirectory.clone();
  file.append("Telemetry.FailedProfileLocks.txt");
  let contents = "" + FAILED_PROFILE_LOCK_ATTEMPTS;
  writeStringToFile(file, contents);
}
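
// A quick sanity check of the fake-file helpers, using the hypothetical
// readStringFromFile() sketched above (illustrative, not part of the test):
//
//   write_fake_shutdown_file();
//   let f = Services.dirsvc.get("ProfD", Ci.nsIFile);
//   f.append("Telemetry.ShutdownTime.txt");
//   do_check_eq(readStringFromFile(f), "" + SHUTDOWN_TIME);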

function run_test() {
  do_test_pending();

  // The addon manager needs a profile directory.
  do_get_profile();
  loadAddonManager(APP_ID, APP_NAME, APP_VERSION, PLATFORM_VERSION);

  Services.prefs.setBoolPref(PREF_ENABLED, true);
  Services.prefs.setBoolPref(PREF_FHR_UPLOAD_ENABLED, true);

  // Send the needed startup notifications to the datareporting service
  // to ensure that it has been initialized.
  if (HAS_DATAREPORTINGSERVICE) {
    gDatareportingService.observe(null, "app-startup", null);
    gDatareportingService.observe(null, "profile-after-change", null);
  }

  // Make it look like we've previously failed to lock a profile a couple
  // of times.
  write_fake_failedprofilelocks_file();

  // Make it look like we've shut down before.
  write_fake_shutdown_file();

  let currentMaxNumberOfThreads = Telemetry.maximalNumberOfConcurrentThreads;
  do_check_true(currentMaxNumberOfThreads > 0);

  // Try to raise the maximal number of concurrent threads by launching a few
  // more than the current maximum.
  let threads = [];
  try {
    for (let i = 0; i < currentMaxNumberOfThreads + 10; ++i) {
      threads.push(Services.tm.newThread(0));
    }
  } catch (ex) {
    // If memory is too low, it is possible that not all threads will be launched.
  }
  gNumberOfThreadsLaunched = threads.length;

  do_check_true(Telemetry.maximalNumberOfConcurrentThreads >= gNumberOfThreadsLaunched);

  do_register_cleanup(function() {
    threads.forEach(function(thread) {
      thread.shutdown();
    });
  });

  Telemetry.asyncFetchTelemetryData(wrapWithExceptionHandler(run_next_test));
}

add_task(function* asyncSetup() {
  yield TelemetrySession.setup();
  yield TelemetryPing.setup();

  if (HAS_DATAREPORTINGSERVICE) {
    // Force getSessionRecorder() == undefined to check the payload's activeTicks.
    gDatareportingService.simulateNoSessionRecorder();
  }

  // When there is no DRS, or no DRS.getSessionRecorder(), activeTicks should be -1.
  do_check_eq(TelemetrySession.getPayload().simpleMeasurements.activeTicks, -1);

  if (HAS_DATAREPORTINGSERVICE) {
    // Restore the normal behavior of getSessionRecorder().
    gDatareportingService.simulateRestoreSessionRecorder();

    gDataReportingClientID = yield gDatareportingService.getClientID();

    // We should have cached the client id now. Let's confirm that by
    // checking it before the async ping setup is finished.
    let promisePingSetup = TelemetryPing.reset();
    do_check_eq(TelemetryPing.clientID, gDataReportingClientID);
    yield promisePingSetup;
  }
});

// Ensures that expired histograms are not part of the payload.
add_task(function* test_expiredHistogram() {
  let histogram_id = "FOOBAR";
  // The second argument, "30", is the histogram's expiration version.
  let dummy = Telemetry.newHistogram(histogram_id, "30", Telemetry.HISTOGRAM_EXPONENTIAL, 1, 2, 3);

  dummy.add(1);

  do_check_eq(TelemetrySession.getPayload()["histograms"][histogram_id], undefined);
  do_check_eq(TelemetrySession.getPayload()["histograms"]["TELEMETRY_TEST_EXPIRED"], undefined);
});
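
// By contrast, a registered histogram that has not expired does show up in
// the payload once it has data; a minimal sketch using one of the test
// histograms exercised elsewhere in this file:
//
//   Telemetry.getHistogramById("TELEMETRY_TEST_COUNT").add(1);
//   do_check_neq(TelemetrySession.getPayload()["histograms"]["TELEMETRY_TEST_COUNT"],
//                undefined);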

// Checks that an invalid histogram file is deleted if TelemetryFile fails to parse it.
add_task(function* test_runInvalidJSON() {
  let pingFile = getSavedPingFile("invalid-histograms.dat");

  writeStringToFile(pingFile, "this.is.invalid.JSON");
  do_check_true(pingFile.exists());

  yield TelemetryFile.testLoadHistograms(pingFile);
  do_check_false(pingFile.exists());
});

// Sends a ping to a non-existing server. If we remove this test, we won't get
// all the histograms we need in the main ping.
add_task(function* test_noServerPing() {
  yield sendPing();
  // We need two pings in order to make sure the STARTUP_MEMORY_STORAGE_SQLITE
  // histograms are initialized. See bug 1131585.
  yield sendPing();
});

// Checks that a sent ping is correctly received by a dummy http server.
add_task(function* test_simplePing() {
  gHttpServer.start(-1);
  gServerStarted = true;
  gRequestIterator = Iterator(new Request());

  yield sendPing();
  let request = yield gRequestIterator.next();
  let ping = decodeRequestPayload(request);

  checkPingFormat(ping, PING_TYPE, true, true);
});

// Saves the current session histograms, reloads them, performs a ping
// and checks that the dummy http server received both the previously
// saved histograms and the new ones.
add_task(function* test_saveLoadPing() {
  let histogramsFile = getSavedPingFile("saved-histograms.dat");

  setupTestData();
  yield TelemetrySession.testSaveHistograms(histogramsFile);
  yield TelemetryFile.testLoadHistograms(histogramsFile);
  yield sendPing();

  // Get the requests received by the dummy server.
  let request1 = yield gRequestIterator.next();
  let request2 = yield gRequestIterator.next();

  Assert.equal(request1.getHeader("content-type"), "application/json; charset=UTF-8",
               "The request must have the correct content-type.");
  Assert.equal(request2.getHeader("content-type"), "application/json; charset=UTF-8",
               "The request must have the correct content-type.");

  // We decode both requests to check for the |reason|.
  let ping1 = decodeRequestPayload(request1);
  let ping2 = decodeRequestPayload(request2);

  checkPingFormat(ping1, PING_TYPE, true, true);
  checkPingFormat(ping2, PING_TYPE, true, true);

  // Check that we have the correct two requests. Ordering is not guaranteed.
  if (ping1.payload.info.reason === "test-ping") {
    // Until we change MainPing according to bug 1120982, the common ping
    // payload will contain another nested payload.
    checkPayload(ping1.payload, "test-ping", 1);
    checkPayload(ping2.payload, "saved-session", 1);
  } else {
    checkPayload(ping1.payload, "saved-session", 1);
    checkPayload(ping2.payload, "test-ping", 1);
  }
});

add_task(function* test_checkSubsession() {
  const COUNT_ID = "TELEMETRY_TEST_COUNT";
  const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
  const count = Telemetry.getHistogramById(COUNT_ID);
  const keyed = Telemetry.getKeyedHistogramById(KEYED_ID);
  const registeredIds =
    new Set(Telemetry.registeredHistograms(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, []));

  const stableHistograms = new Set([
    "TELEMETRY_TEST_FLAG",
    "TELEMETRY_TEST_COUNT",
    "TELEMETRY_TEST_RELEASE_OPTOUT",
    "TELEMETRY_TEST_RELEASE_OPTIN",
    "STARTUP_CRASH_DETECTED",
  ]);

  const stableKeyedHistograms = new Set([
    "TELEMETRY_TEST_KEYED_FLAG",
    "TELEMETRY_TEST_KEYED_COUNT",
    "TELEMETRY_TEST_KEYED_RELEASE_OPTIN",
    "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT",
  ]);

  // Compare the two sets of histograms.
  // The "subsession" histograms should match the registered
  // "classic" histograms. However, histograms can change
  // between us collecting the different payloads, so we only
  // check for deep equality on known stable histograms.
  let checkHistograms = (classic, subsession) => {
    for (let id of Object.keys(classic)) {
      if (!registeredIds.has(id)) {
        continue;
      }

      Assert.ok(id in subsession);
      if (stableHistograms.has(id)) {
        Assert.deepEqual(classic[id],
                         subsession[id]);
      } else {
        Assert.equal(classic[id].histogram_type,
                     subsession[id].histogram_type);
      }
    }
  };

  // Same as above, except for keyed histograms.
  let checkKeyedHistograms = (classic, subsession) => {
    for (let id of Object.keys(classic)) {
      if (!registeredIds.has(id)) {
        continue;
      }

      Assert.ok(id in subsession);
      if (stableKeyedHistograms.has(id)) {
        Assert.deepEqual(classic[id],
                         subsession[id]);
      }
    }
  };

  // Both classic and subsession payload histograms should start the same.
  // The payloads should be identical for now, except for the reason.
  count.clear();
  keyed.clear();
  let classic = TelemetrySession.getPayload();
  let subsession = TelemetrySession.getPayload("environment-change");

  Assert.equal(classic.info.reason, "gather-payload");
  Assert.equal(subsession.info.reason, "environment-change");
  Assert.ok(!(COUNT_ID in classic.histograms));
  Assert.ok(!(COUNT_ID in subsession.histograms));
  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.deepEqual(classic.keyedHistograms[KEYED_ID], {});
  Assert.deepEqual(subsession.keyedHistograms[KEYED_ID], {});

  checkHistograms(classic.histograms, subsession.histograms);
  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);

  // Adding values should get picked up in both.
  count.add(1);
  keyed.add("a", 1);
  keyed.add("b", 1);
  classic = TelemetrySession.getPayload();
  subsession = TelemetrySession.getPayload("environment-change");

  Assert.ok(COUNT_ID in classic.histograms);
  Assert.ok(COUNT_ID in subsession.histograms);
  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.equal(classic.histograms[COUNT_ID].sum, 1);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);

  checkHistograms(classic.histograms, subsession.histograms);
  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);

  // Values should still reset properly.
  count.clear();
  keyed.clear();
  classic = TelemetrySession.getPayload();
  subsession = TelemetrySession.getPayload("environment-change");

  Assert.ok(!(COUNT_ID in classic.histograms));
  Assert.ok(!(COUNT_ID in subsession.histograms));
  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.deepEqual(classic.keyedHistograms[KEYED_ID], {});

  checkHistograms(classic.histograms, subsession.histograms);
  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);

  // Adding values should get picked up in both.
  count.add(1);
  keyed.add("a", 1);
  keyed.add("b", 1);
  classic = TelemetrySession.getPayload();
  subsession = TelemetrySession.getPayload("environment-change");

  Assert.ok(COUNT_ID in classic.histograms);
  Assert.ok(COUNT_ID in subsession.histograms);
  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.equal(classic.histograms[COUNT_ID].sum, 1);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);

  checkHistograms(classic.histograms, subsession.histograms);
  checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);

  // We should be able to reset only the subsession histograms.
  count.clear(true);
  keyed.clear(true);
  classic = TelemetrySession.getPayload();
  subsession = TelemetrySession.getPayload("environment-change");

  Assert.ok(COUNT_ID in classic.histograms);
  Assert.ok(COUNT_ID in subsession.histograms);
  Assert.equal(classic.histograms[COUNT_ID].sum, 1);
  Assert.equal(subsession.histograms[COUNT_ID].sum, 0);

  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
  Assert.deepEqual(subsession.keyedHistograms[KEYED_ID], {});

  // Adding values should get picked up in both again.
  count.add(1);
  keyed.add("a", 1);
  keyed.add("b", 1);
  classic = TelemetrySession.getPayload();
  subsession = TelemetrySession.getPayload("environment-change");

  Assert.ok(COUNT_ID in classic.histograms);
  Assert.ok(COUNT_ID in subsession.histograms);
  Assert.equal(classic.histograms[COUNT_ID].sum, 2);
  Assert.equal(subsession.histograms[COUNT_ID].sum, 1);

  Assert.ok(KEYED_ID in classic.keyedHistograms);
  Assert.ok(KEYED_ID in subsession.keyedHistograms);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 2);
  Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 2);
  Assert.equal(subsession.keyedHistograms[KEYED_ID]["a"].sum, 1);
  Assert.equal(subsession.keyedHistograms[KEYED_ID]["b"].sum, 1);
});
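
// In short, the subsession semantics exercised above, for a count
// histogram h (sums are illustrative):
//
//   h.add(1);      // classic sum: 1, subsession sum: 1
//   h.clear(true); // classic sum: 1, subsession sum: 0 -- subsession only
//   h.add(1);      // classic sum: 2, subsession sum: 1
//   h.clear();     // both views reset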

// Checks that an expired histogram file is deleted when loaded.
add_task(function* test_runOldPingFile() {
  let histogramsFile = getSavedPingFile("old-histograms.dat");

  yield TelemetrySession.testSaveHistograms(histogramsFile);
  do_check_true(histogramsFile.exists());
  let mtime = histogramsFile.lastModifiedTime;
  // Make the file look older than the 14-day expiry, plus one minute.
  histogramsFile.lastModifiedTime = mtime - (14 * 24 * 60 * 60 * 1000 + 60000);

  yield TelemetryFile.testLoadHistograms(histogramsFile);
  do_check_false(histogramsFile.exists());
});

add_task(function* test_savedSessionClientID() {
  // Assure that we store the ping properly when saving sessions on shutdown.
  // We make TelemetrySession shut down to trigger a session save.
  const dir = TelemetryFile.pingDirectoryPath;
  yield OS.File.removeDir(dir, {ignoreAbsent: true});
  yield OS.File.makeDir(dir);
  yield TelemetrySession.shutdown();

  yield TelemetryFile.loadSavedPings();
  Assert.equal(TelemetryFile.pingsLoaded, 1);
  let ping = TelemetryFile.popPendingPings().next();
  Assert.equal(ping.value.clientId, gDataReportingClientID);
});

add_task(function* stopServer() {
  gHttpServer.stop(do_test_finished);
});

// An iterable sequence of http requests.
function Request() {
  let defers = [];
  let current = 0;

  function RequestIterator() {}

  // Returns a promise that resolves to the next http request.
  RequestIterator.prototype.next = function() {
    let deferred = defers[current++];
    return deferred.promise;
  };

  this.__iterator__ = function() {
    return new RequestIterator();
  };

  registerPingHandler((request, response) => {
    let deferred = defers[defers.length - 1];
    defers.push(Promise.defer());
    deferred.resolve(request);
  });

  defers.push(Promise.defer());
}
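
// Typical usage, as in the tasks above: wrap a Request in the legacy
// Iterator() helper and yield on next() to wait for each incoming ping:
//
//   gRequestIterator = Iterator(new Request());
//   yield sendPing();
//   let request = yield gRequestIterator.next();
//   let ping = decodeRequestPayload(request);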

@ -34,5 +34,6 @@ generated-files =
[test_ThirdPartyCookieProbe.js]
[test_TelemetrySendOldPings.js]
skip-if = debug == true || os == "android" # Disabled due to intermittent orange on Android
[test_TelemetrySession.js]
[test_ThreadHangStats.js]
run-sequentially = Bug 1046307, test can fail intermittently when CPU load is high

@ -65,7 +65,7 @@ add_task(function* actualTest() {
  do_check_true(simpleMeasurements.bar > 1); // bar was included
  do_check_eq(undefined, simpleMeasurements.baz); // baz wasn't included since it wasn't added

  yield TelemetrySession.shutdown();
  yield TelemetrySession.shutdown(false);

  do_test_finished();
});
@ -514,5 +514,5 @@ add_test(function overrides_retrieved() {
});

add_test(function test_shutdown() {
  TelemetrySession.shutdown().then(run_next_test);
  TelemetrySession.shutdown(false).then(run_next_test);
});