Bug 749889: Webrtc import of rev 2047, with most of third_party and test/data removed rs=ted

Randell Jesup 2012-06-21 07:34:58 -04:00
parent c64a09bff4
commit a9df4a23d9
3166 changed files with 782367 additions and 0 deletions

media/webrtc/.gclient
@@ -0,0 +1,10 @@
solutions = [
{ "name" : "trunk",
"url" : "http://webrtc.googlecode.com/svn/trunk/peerconnection",
"deps_file" : "DEPS",
"managed" : True,
"custom_deps" : {
},
"safesync_url": "",
},
]

@@ -0,0 +1,31 @@
entries = {
'trunk': 'http://webrtc.googlecode.com/svn/trunk/peerconnection',
'trunk/build': 'http://src.chromium.org/svn/trunk/src/build@120526',
'trunk/src': 'http://webrtc.googlecode.com/svn/trunk/src@1538',
'trunk/test': 'http://webrtc.googlecode.com/svn/trunk/test@1538',
'trunk/testing': 'http://src.chromium.org/svn/trunk/src/testing@120526',
'trunk/testing/gmock': 'http://googlemock.googlecode.com/svn/trunk@386',
'trunk/testing/gtest': 'http://googletest.googlecode.com/svn/trunk@573',
'trunk/third_party/expat/': 'http://src.chromium.org/svn/trunk/src/third_party/expat@120526',
'trunk/third_party/google-gflags': 'http://webrtc.googlecode.com/svn/trunk/third_party/google-gflags@1538',
'trunk/third_party/google-gflags/src': 'http://google-gflags.googlecode.com/svn/trunk/src@45',
'trunk/third_party/jsoncpp/': 'http://src.chromium.org/svn/trunk/src/third_party/jsoncpp@120526',
'trunk/third_party/jsoncpp/source': 'http://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/trunk/jsoncpp@248',
'trunk/third_party/libjingle/source': 'http://libjingle.googlecode.com/svn/trunk@115',
'trunk/third_party/libjpeg/': 'http://src.chromium.org/svn/trunk/src/third_party/libjpeg@120526',
'trunk/third_party/libjpeg_turbo/': 'http://src.chromium.org/svn/trunk/deps/third_party/libjpeg_turbo@119959',
'trunk/third_party/libsrtp/': 'http://src.chromium.org/svn/trunk/deps/third_party/libsrtp@119742',
'trunk/third_party/libvpx': 'http://webrtc.googlecode.com/svn/trunk/third_party/libvpx@1538',
'trunk/third_party/libvpx/source/libvpx': 'http://git.chromium.org/webm/libvpx.git@e479379a',
'trunk/third_party/libyuv': 'http://libyuv.googlecode.com/svn/trunk@121',
'trunk/third_party/protobuf/': 'http://src.chromium.org/svn/trunk/src/third_party/protobuf@120526',
'trunk/third_party/yasm/': 'http://src.chromium.org/svn/trunk/src/third_party/yasm@120526',
'trunk/third_party/yasm/binaries': 'http://src.chromium.org/svn/trunk/deps/third_party/yasm/binaries@74228',
'trunk/third_party/yasm/source/patched-yasm': 'http://src.chromium.org/svn/trunk/deps/third_party/yasm/patched-yasm@73761',
'trunk/tools': 'http://webrtc.googlecode.com/svn/trunk/tools@1538',
'trunk/tools/clang/scripts': 'http://src.chromium.org/svn/trunk/src/tools/clang/scripts@120526',
'trunk/tools/gyp': 'http://gyp.googlecode.com/svn/trunk@1187',
'trunk/tools/python': 'http://src.chromium.org/svn/trunk/src/tools/python@120526',
'trunk/tools/valgrind': 'http://src.chromium.org/svn/trunk/src/tools/valgrind@120526',
'trunk/tools/win/supalink': 'http://src.chromium.org/svn/trunk/src/tools/win/supalink@120526',
}

media/webrtc/trunk/DEPS
@@ -0,0 +1,132 @@
vars = {
# Use this googlecode_url variable only if there is an internal mirror for it.
# If you do not know, use the full path while defining your new deps entry.
"googlecode_url": "http://%s.googlecode.com/svn",
"chromium_trunk" : "http://src.chromium.org/svn/trunk",
"chromium_revision": "120526",
"webrtc_revision": "2047",
"libjingle_revision": "132",
}
# NOTE: Prefer revision numbers to tags for svn deps. Use http rather than
# https; the latter can cause problems for users behind proxies.
deps = {
# WebRTC deps.
"trunk/src":
(Var("googlecode_url") % "webrtc") + "/trunk/src@" + Var("webrtc_revision"),
"trunk/tools":
(Var("googlecode_url") % "webrtc") + "/trunk/tools@" + Var("webrtc_revision"),
"trunk/test":
(Var("googlecode_url") % "webrtc") + "/trunk/test@" + Var("webrtc_revision"),
"trunk/third_party/google-gflags":
(Var("googlecode_url") % "webrtc") + "/trunk/third_party/google-gflags@" + Var("webrtc_revision"),
"trunk/third_party/libvpx":
(Var("googlecode_url") % "webrtc") + "/trunk/third_party/libvpx@" + Var("webrtc_revision"),
"trunk/build":
Var("chromium_trunk") + "/src/build@" + Var("chromium_revision"),
"trunk/testing":
Var("chromium_trunk") + "/src/testing@" + Var("chromium_revision"),
"trunk/testing/gtest":
(Var("googlecode_url") % "googletest") + "/trunk@573",
"trunk/testing/gmock":
(Var("googlecode_url") % "googlemock") + "/trunk@386",
"trunk/tools/gyp":
(Var("googlecode_url") % "gyp") + "/trunk@1187",
# Needed by build/common.gypi.
"trunk/tools/win/supalink":
Var("chromium_trunk") + "/src/tools/win/supalink@" + Var("chromium_revision"),
"trunk/tools/clang/scripts":
Var("chromium_trunk") + "/src/tools/clang/scripts@" + Var("chromium_revision"),
"trunk/tools/python":
Var("chromium_trunk") + "/src/tools/python@" + Var("chromium_revision"),
"trunk/tools/valgrind":
Var("chromium_trunk") + "/src/tools/valgrind@" + Var("chromium_revision"),
"trunk/third_party/protobuf/":
Var("chromium_trunk") + "/src/third_party/protobuf@" + Var("chromium_revision"),
"trunk/third_party/libvpx/source/libvpx":
"http://git.chromium.org/webm/libvpx.git@e479379a",
"trunk/third_party/libjpeg_turbo/":
Var("chromium_trunk") + "/deps/third_party/libjpeg_turbo@119959",
"trunk/third_party/libjpeg/":
Var("chromium_trunk") + "/src/third_party/libjpeg@" + Var("chromium_revision"),
"trunk/third_party/yasm/":
Var("chromium_trunk") + "/src/third_party/yasm@" + Var("chromium_revision"),
"trunk/third_party/expat/":
Var("chromium_trunk") + "/src/third_party/expat@" + Var("chromium_revision"),
"trunk/third_party/google-gflags/src":
(Var("googlecode_url") % "google-gflags") + "/trunk/src@45",
"trunk/third_party/yasm/source/patched-yasm":
Var("chromium_trunk") + "/deps/third_party/yasm/patched-yasm@73761",
# Used by libjpeg-turbo
"trunk/third_party/yasm/binaries":
Var("chromium_trunk") + "/deps/third_party/yasm/binaries@74228",
# TODO(andrew): roll to 164 after fixing:
# http://code.google.com/p/webrtc/issues/detail?id=267
"trunk/third_party/libyuv":
(Var("googlecode_url") % "libyuv") + "/trunk@121",
# libjingle deps.
"trunk/third_party/libjingle/source":
(Var("googlecode_url") % "libjingle") + "/trunk/@" + Var("libjingle_revision"),
"trunk/third_party/libsrtp/":
Var("chromium_trunk") + "/deps/third_party/libsrtp@119742",
"trunk/third_party/jsoncpp/":
Var("chromium_trunk") + "/src/third_party/jsoncpp@" + Var("chromium_revision"),
"trunk/third_party/jsoncpp/source":
"http://jsoncpp.svn.sourceforge.net/svnroot/jsoncpp/trunk/jsoncpp@248",
}
deps_os = {
"win": {
"trunk/third_party/cygwin/":
Var("chromium_trunk") + "/deps/third_party/cygwin@66844",
}
}
hooks = [
{
# Create a supplement.gypi file under trunk/. This file will be picked up
# by gyp; we use it to set Chromium-related variables (inside_chromium_build)
# to 0 and enable the standalone build.
"pattern": ".",
"action": ["python", "trunk/tools/create_supplement_gypi.py", "trunk/src/supplement.gypi"],
},
{
# Pull clang on mac. If nothing changed, or on non-mac platforms, this takes
# zero seconds to run. If something changed, it downloads a prebuilt clang.
"pattern": ".",
"action": ["python", "trunk/tools/clang/scripts/update.py", "--mac-only"],
},
{
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
"pattern": ".",
"action": ["python", "trunk/build/gyp_chromium", "--depth=trunk", "trunk/peerconnection.gyp"],
},
]
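
A note on the DEPS format above: gclient replaces each Var("name") with the matching entry in the vars dict, so every deps value resolves to a plain URL@revision string (compare the resolved values in the entries snapshot earlier in this commit). Below is a minimal Python sketch of that expansion, for illustration only; deps_vars and the lookup function model, rather than reproduce, gclient's implementation:

deps_vars = {
    "googlecode_url": "http://%s.googlecode.com/svn",
    "webrtc_revision": "2047",
}

def Var(name):
    # For the entries in this DEPS file, gclient's Var() is effectively
    # a lookup into the vars dict defined at the top of the file.
    return deps_vars[name]

# Resolves the "trunk/src" entry from the deps dict above:
url = (Var("googlecode_url") % "webrtc") + "/trunk/src@" + Var("webrtc_revision")
print(url)  # http://webrtc.googlecode.com/svn/trunk/src@2047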

media/webrtc/trunk/Makefile
File diff suppressed because it is too large

@@ -0,0 +1,5 @@
henrike@webrtc.org
mallinath@webrtc.org
perkj@webrtc.org
wu@webrtc.org
tommi@webrtc.org

media/webrtc/trunk/README
@@ -0,0 +1,12 @@
This folder can be used to pull together the Chromium versions of webrtc
and libjingle, and to build the peerconnection sample client and server. The
steps below check out a new repository in which you can build peerconnection_server.
Steps:
1) Create a new directory for the new repository (outside the webrtc repo):
mkdir peerconnection
cd peerconnection
2) gclient config --name trunk http://webrtc.googlecode.com/svn/trunk/peerconnection
3) gclient sync
4) cd trunk
5) make peerconnection_server peerconnection_client

@@ -0,0 +1,15 @@
List of property sheets to be included by projects:
common.vsprops
Not used anymore. No-op. Kept for compatibility with current projects.
debug.vsprops
Enables debug settings. Must be included directly in Debug configuration. Includes internal\essential.vsprops.
external_code.vsprops
Contains settings made to simplify usage of external (non-Google) code. It relaxes the warning levels. Should be included after debug.vsprops or release.vsprops to override their settings.
output_dll_copy.rules
Enables automatic copying of DLLs when they are used as input files in a vcproj project.
release.vsprops
Enables release settings. Must be included directly in Release configuration. Includes internal\essential.vsprops. Also includes "internal\release_impl$(CHROME_BUILD_TYPE).vsprops". So the behavior is dependent on the CHROME_BUILD_TYPE environment variable.

@@ -0,0 +1,625 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'All',
'type': 'none',
'xcode_create_dependents_test_runner': 1,
'dependencies': [
'some.gyp:*',
'../base/base.gyp:*',
'../chrome/browser/sync/tools/sync_tools.gyp:*',
'../chrome/chrome.gyp:*',
'../content/content.gyp:*',
'../crypto/crypto.gyp:*',
'../ui/ui.gyp:*',
'../gpu/gpu.gyp:*',
'../gpu/demos/demos.gyp:*',
'../gpu/tools/tools.gyp:*',
'../ipc/ipc.gyp:*',
'../jingle/jingle.gyp:*',
'../media/media.gyp:*',
'../net/net.gyp:*',
'../ppapi/ppapi.gyp:*',
'../ppapi/ppapi_internal.gyp:*',
'../printing/printing.gyp:*',
'../sdch/sdch.gyp:*',
'../skia/skia.gyp:*',
'../sql/sql.gyp:*',
'../testing/gmock.gyp:*',
'../testing/gtest.gyp:*',
'../third_party/bzip2/bzip2.gyp:*',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:*',
'../third_party/cld/cld.gyp:*',
'../third_party/codesighs/codesighs.gyp:*',
'../third_party/ffmpeg/ffmpeg.gyp:*',
'../third_party/iccjpeg/iccjpeg.gyp:*',
'../third_party/icu/icu.gyp:*',
'../third_party/libpng/libpng.gyp:*',
'../third_party/libwebp/libwebp.gyp:*',
'../third_party/libxml/libxml.gyp:*',
'../third_party/libxslt/libxslt.gyp:*',
'../third_party/lzma_sdk/lzma_sdk.gyp:*',
'../third_party/mesa/mesa.gyp:*',
'../third_party/modp_b64/modp_b64.gyp:*',
'../third_party/npapi/npapi.gyp:*',
'../third_party/ots/ots.gyp:*',
'../third_party/sqlite/sqlite.gyp:*',
'../third_party/WebKit/Source/WebKit/chromium/All.gyp:*',
'../third_party/zlib/zlib.gyp:*',
'../v8/tools/gyp/v8.gyp:*',
'../webkit/support/webkit_support.gyp:*',
'../webkit/webkit.gyp:*',
'util/build_util.gyp:*',
'temp_gyp/googleurl.gyp:*',
'<(libjpeg_gyp_path):*',
],
'conditions': [
['os_posix==1 and OS!="android"', {
'dependencies': [
'../third_party/yasm/yasm.gyp:*#host',
'../cloud_print/virtual_driver/virtual_driver_posix.gyp:*',
],
}],
['OS=="mac" or OS=="win"', {
'dependencies': [
'../third_party/nss/nss.gyp:*',
],
}],
['OS=="mac"', {
'dependencies': [
'../third_party/ocmock/ocmock.gyp:*',
],
}],
['OS=="linux"', {
'dependencies': [
'../breakpad/breakpad.gyp:*',
'../courgette/courgette.gyp:*',
'../dbus/dbus.gyp:*',
'../sandbox/sandbox.gyp:*',
],
'conditions': [
['branding=="Chrome"', {
'dependencies': [
'../chrome/chrome.gyp:linux_packages_<(channel)',
],
}],
],
}],
['use_wayland==1', {
'dependencies': [
'../ui/wayland/wayland.gyp:*',
],
}],
['toolkit_uses_gtk==1', {
'dependencies': [
'../tools/gtk_clipboard_dump/gtk_clipboard_dump.gyp:*',
'../tools/xdisplaycheck/xdisplaycheck.gyp:*',
],
}],
['OS=="win"', {
'conditions': [
['win_use_allocator_shim==1', {
'dependencies': [
'../base/allocator/allocator.gyp:*',
],
}],
],
'dependencies': [
'../breakpad/breakpad.gyp:*',
'../chrome_frame/chrome_frame.gyp:*',
'../cloud_print/virtual_driver/virtual_driver.gyp:*',
'../courgette/courgette.gyp:*',
'../rlz/rlz.gyp:*',
'../sandbox/sandbox.gyp:*',
'../third_party/angle/src/build_angle.gyp:*',
'../third_party/bsdiff/bsdiff.gyp:*',
'../third_party/bspatch/bspatch.gyp:*',
'../third_party/gles2_book/gles2_book.gyp:*',
'../tools/memory_watcher/memory_watcher.gyp:*',
],
}, {
'dependencies': [
'../third_party/libevent/libevent.gyp:*',
],
}],
['toolkit_views==1', {
'dependencies': [
'../ui/views/views.gyp:*',
],
}],
['use_aura==1', {
'dependencies': [
'../ui/aura/aura.gyp:*',
'../ash/ash.gyp:*',
],
}],
['remoting==1', {
'dependencies': [
'../remoting/remoting.gyp:*',
],
}],
['use_openssl==0', {
'dependencies': [
'../net/third_party/nss/ssl.gyp:*',
],
}],
],
}, # target_name: All
{
'target_name': 'All_syzygy',
'type': 'none',
'conditions': [
['OS=="win" and fastbuild==0', {
'dependencies': [
'../chrome/installer/mini_installer_syzygy.gyp:*',
],
},
],
],
}, # target_name: All_syzygy
{
'target_name': 'chromium_builder_tests',
'type': 'none',
'dependencies': [
'../base/base.gyp:base_unittests',
'../chrome/chrome.gyp:browser_tests',
'../chrome/chrome.gyp:interactive_ui_tests',
'../chrome/chrome.gyp:safe_browsing_tests',
'../chrome/chrome.gyp:sync_integration_tests',
'../chrome/chrome.gyp:sync_unit_tests',
'../chrome/chrome.gyp:ui_tests',
'../chrome/chrome.gyp:unit_tests',
'../content/content.gyp:content_browsertests',
'../content/content.gyp:content_unittests',
'../crypto/crypto.gyp:crypto_unittests',
'../ui/ui.gyp:gfx_unittests',
'../gpu/gpu.gyp:gpu_unittests',
'../gpu/gles2_conform_support/gles2_conform_support.gyp:gles2_conform_support',
'../ipc/ipc.gyp:ipc_tests',
'../jingle/jingle.gyp:jingle_unittests',
'../media/media.gyp:media_unittests',
'../net/net.gyp:net_unittests',
'../printing/printing.gyp:printing_unittests',
'../remoting/remoting.gyp:remoting_unittests',
'../sql/sql.gyp:sql_unittests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
'temp_gyp/googleurl.gyp:googleurl_unittests',
],
'conditions': [
['OS=="win"', {
'dependencies': [
'../chrome/chrome.gyp:installer_util_unittests',
'../chrome/chrome.gyp:mini_installer_test',
# mini_installer_tests depends on mini_installer. This should be
# defined in installer.gyp.
'../chrome/installer/mini_installer.gyp:mini_installer',
'../chrome_frame/chrome_frame.gyp:chrome_frame_net_tests',
'../chrome_frame/chrome_frame.gyp:chrome_frame_perftests',
'../chrome_frame/chrome_frame.gyp:chrome_frame_reliability_tests',
'../chrome_frame/chrome_frame.gyp:chrome_frame_tests',
'../chrome_frame/chrome_frame.gyp:chrome_frame_unittests',
'../chrome_frame/chrome_frame.gyp:npchrome_frame',
'../courgette/courgette.gyp:courgette_unittests',
'../sandbox/sandbox.gyp:sbox_integration_tests',
'../sandbox/sandbox.gyp:sbox_unittests',
'../sandbox/sandbox.gyp:sbox_validation_tests',
'../webkit/webkit.gyp:pull_in_copy_TestNetscapePlugIn',
'../ui/views/views.gyp:views_unittests',
# TODO(nsylvain) ui_tests.exe depends on test_shell_common.
# This should:
# 1) not be the case. OR.
# 2) be expressed in the ui tests dependencies.
'../webkit/webkit.gyp:test_shell_common',
],
}],
],
}, # target_name: chromium_builder_tests
{
'target_name': 'chromium_2010_builder_tests',
'type': 'none',
'dependencies': [
'chromium_builder_tests',
],
}, # target_name: chromium_2010_builder_tests
{
'target_name': 'chromium_builder_nacl_win_integration',
'type': 'none',
'dependencies': [
'chromium_builder_qa', # needed for pyauto
'chromium_builder_tests',
],
}, # target_name: chromium_builder_nacl_win_integration
{
'target_name': 'chromium_builder_perf',
'type': 'none',
'dependencies': [
'chromium_builder_qa', # needed for pyauto
'../chrome/chrome.gyp:performance_browser_tests',
'../chrome/chrome.gyp:performance_ui_tests',
'../chrome/chrome.gyp:plugin_tests',
'../chrome/chrome.gyp:sync_performance_tests',
'../chrome/chrome.gyp:ui_tests',
],
}, # target_name: chromium_builder_perf
{
'target_name': 'chromium_gpu_builder',
'type': 'none',
'dependencies': [
'../chrome/chrome.gyp:gpu_tests',
'../chrome/chrome.gyp:performance_browser_tests',
'../chrome/chrome.gyp:performance_ui_tests',
'../webkit/webkit.gyp:pull_in_DumpRenderTree',
],
}, # target_name: chromium_gpu_builder
{
'target_name': 'chromium_builder_qa',
'type': 'none',
'dependencies': [
'../chrome/chrome.gyp:chromedriver',
],
'conditions': [
# If you change this condition, make sure you also change it
# in chrome_tests.gypi
['OS=="mac" or OS=="win" or (os_posix==1 and OS != "android" and target_arch==python_arch)', {
'dependencies': [
'../chrome/chrome.gyp:pyautolib',
],
}],
],
}, # target_name: chromium_builder_qa
],
'conditions': [
['OS=="mac"', {
'targets': [
{
# Target to build everything plus the dmg. We don't put the dmg
# in the All target because developers really don't need it.
'target_name': 'all_and_dmg',
'type': 'none',
'dependencies': [
'All',
'../chrome/chrome.gyp:build_app_dmg',
],
},
# These targets are here so the build bots can use them to build
# subsets of a full tree for faster cycle times.
{
'target_name': 'chromium_builder_dbg',
'type': 'none',
'dependencies': [
'../chrome/chrome.gyp:browser_tests',
'../chrome/chrome.gyp:interactive_ui_tests',
'../chrome/chrome.gyp:safe_browsing_tests',
'../chrome/chrome.gyp:sync_integration_tests',
'../chrome/chrome.gyp:sync_unit_tests',
'../chrome/chrome.gyp:ui_tests',
'../chrome/chrome.gyp:unit_tests',
'../content/content.gyp:content_browsertests',
'../content/content.gyp:content_unittests',
'../ui/ui.gyp:gfx_unittests',
'../gpu/gpu.gyp:gpu_unittests',
'../ipc/ipc.gyp:ipc_tests',
'../jingle/jingle.gyp:jingle_unittests',
'../media/media.gyp:media_unittests',
'../printing/printing.gyp:printing_unittests',
'../remoting/remoting.gyp:remoting_unittests',
'../sql/sql.gyp:sql_unittests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
'temp_gyp/googleurl.gyp:googleurl_unittests',
],
},
{
'target_name': 'chromium_builder_rel',
'type': 'none',
'dependencies': [
'../chrome/chrome.gyp:browser_tests',
'../chrome/chrome.gyp:performance_browser_tests',
'../chrome/chrome.gyp:performance_ui_tests',
'../chrome/chrome.gyp:plugin_tests',
'../chrome/chrome.gyp:safe_browsing_tests',
'../chrome/chrome.gyp:sync_integration_tests',
'../chrome/chrome.gyp:sync_unit_tests',
'../chrome/chrome.gyp:ui_tests',
'../chrome/chrome.gyp:unit_tests',
'../content/content.gyp:content_browsertests',
'../content/content.gyp:content_unittests',
'../ui/ui.gyp:gfx_unittests',
'../gpu/gpu.gyp:gpu_unittests',
'../ipc/ipc.gyp:ipc_tests',
'../jingle/jingle.gyp:jingle_unittests',
'../media/media.gyp:media_unittests',
'../printing/printing.gyp:printing_unittests',
'../remoting/remoting.gyp:remoting_unittests',
'../sql/sql.gyp:sql_unittests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
'temp_gyp/googleurl.gyp:googleurl_unittests',
],
},
{
'target_name': 'chromium_builder_dbg_tsan_mac',
'type': 'none',
'dependencies': [
'../base/base.gyp:base_unittests',
'../crypto/crypto.gyp:crypto_unittests',
'temp_gyp/googleurl.gyp:googleurl_unittests',
'../net/net.gyp:net_unittests',
'../ipc/ipc.gyp:ipc_tests',
'../jingle/jingle.gyp:jingle_unittests',
'../media/media.gyp:media_unittests',
'../printing/printing.gyp:printing_unittests',
'../remoting/remoting.gyp:remoting_unittests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
],
},
{
'target_name': 'chromium_builder_dbg_valgrind_mac',
'type': 'none',
'dependencies': [
'../base/base.gyp:base_unittests',
'../crypto/crypto.gyp:crypto_unittests',
'../ipc/ipc.gyp:ipc_tests',
'../media/media.gyp:media_unittests',
'../net/net.gyp:net_unittests',
'../printing/printing.gyp:printing_unittests',
'../remoting/remoting.gyp:remoting_unittests',
'../chrome/chrome.gyp:safe_browsing_tests',
'../chrome/chrome.gyp:sync_unit_tests',
'../chrome/chrome.gyp:unit_tests',
'../chrome/chrome.gyp:ui_tests',
'../content/content.gyp:content_unittests',
'../ui/ui.gyp:gfx_unittests',
'../jingle/jingle.gyp:jingle_unittests',
'../sql/sql.gyp:sql_unittests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
'temp_gyp/googleurl.gyp:googleurl_unittests',
],
},
], # targets
}], # OS="mac"
['OS=="win"', {
'targets': [
# These targets are here so the build bots can use them to build
# subsets of a full tree for faster cycle times.
{
'target_name': 'chromium_builder',
'type': 'none',
'dependencies': [
'../chrome/chrome.gyp:browser_tests',
'../chrome/chrome.gyp:installer_util_unittests',
'../chrome/chrome.gyp:interactive_ui_tests',
'../chrome/chrome.gyp:mini_installer_test',
'../chrome/chrome.gyp:performance_browser_tests',
'../chrome/chrome.gyp:performance_ui_tests',
'../chrome/chrome.gyp:plugin_tests',
'../chrome/chrome.gyp:safe_browsing_tests',
'../chrome/chrome.gyp:sync_integration_tests',
'../chrome/chrome.gyp:sync_unit_tests',
'../chrome/chrome.gyp:ui_tests',
'../chrome/chrome.gyp:unit_tests',
'../content/content.gyp:content_browsertests',
'../content/content.gyp:content_unittests',
# mini_installer_tests depends on mini_installer. This should be
# defined in installer.gyp.
'../chrome/installer/mini_installer.gyp:mini_installer',
'../chrome_frame/chrome_frame.gyp:chrome_frame_net_tests',
'../chrome_frame/chrome_frame.gyp:chrome_frame_perftests',
'../chrome_frame/chrome_frame.gyp:chrome_frame_reliability_tests',
'../chrome_frame/chrome_frame.gyp:chrome_frame_tests',
'../chrome_frame/chrome_frame.gyp:chrome_frame_unittests',
'../chrome_frame/chrome_frame.gyp:npchrome_frame',
'../courgette/courgette.gyp:courgette_unittests',
'../ui/ui.gyp:gfx_unittests',
'../gpu/gpu.gyp:gpu_unittests',
'../ipc/ipc.gyp:ipc_tests',
'../jingle/jingle.gyp:jingle_unittests',
'../media/media.gyp:media_unittests',
'../printing/printing.gyp:printing_unittests',
'../remoting/remoting.gyp:remoting_unittests',
'../sql/sql.gyp:sql_unittests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
'../webkit/webkit.gyp:pull_in_copy_TestNetscapePlugIn',
'../ui/views/views.gyp:views_unittests',
# TODO(nsylvain) ui_tests.exe depends on test_shell_common.
# This should:
# 1) not be the case. OR.
# 2) be expressed in the ui tests dependencies.
'../webkit/webkit.gyp:test_shell_common',
'temp_gyp/googleurl.gyp:googleurl_unittests',
],
},
{
'target_name': 'chromium_builder_dbg_tsan_win',
'type': 'none',
'dependencies': [
'../base/base.gyp:base_unittests',
'../content/content.gyp:content_unittests',
'../crypto/crypto.gyp:crypto_unittests',
'../ipc/ipc.gyp:ipc_tests',
'../jingle/jingle.gyp:jingle_unittests',
'../media/media.gyp:media_unittests',
'../net/net.gyp:net_unittests',
'../printing/printing.gyp:printing_unittests',
'../remoting/remoting.gyp:remoting_unittests',
'../sql/sql.gyp:sql_unittests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
'temp_gyp/googleurl.gyp:googleurl_unittests',
],
},
{
'target_name': 'chromium_builder_dbg_drmemory_win',
'type': 'none',
'dependencies': [
'../base/base.gyp:base_unittests',
'../chrome/chrome.gyp:unit_tests',
'../content/content.gyp:content_unittests',
'../crypto/crypto.gyp:crypto_unittests',
'../ipc/ipc.gyp:ipc_tests',
'../jingle/jingle.gyp:jingle_unittests',
'../media/media.gyp:media_unittests',
'../net/net.gyp:net_unittests',
'../printing/printing.gyp:printing_unittests',
'../remoting/remoting.gyp:remoting_unittests',
'../sql/sql.gyp:sql_unittests',
'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
'temp_gyp/googleurl.gyp:googleurl_unittests',
],
},
{
'target_name': 'webkit_builder_win',
'type': 'none',
'dependencies': [
'../webkit/webkit.gyp:test_shell',
'../webkit/webkit.gyp:test_shell_tests',
'../webkit/webkit.gyp:pull_in_webkit_unit_tests',
'../webkit/webkit.gyp:pull_in_DumpRenderTree',
],
},
], # targets
'conditions': [
['branding=="Chrome"', {
'targets': [
{
'target_name': 'chrome_official_builder',
'type': 'none',
'dependencies': [
'../chrome/chrome.gyp:chromedriver',
'../chrome/chrome.gyp:crash_service',
'../chrome/chrome.gyp:crash_service_win64',
'../chrome/chrome.gyp:performance_ui_tests',
'../chrome/chrome.gyp:policy_templates',
'../chrome/chrome.gyp:pyautolib',
'../chrome/chrome.gyp:reliability_tests',
'../chrome/chrome.gyp:automated_ui_tests',
'../chrome/installer/mini_installer.gyp:mini_installer',
'../chrome_frame/chrome_frame.gyp:npchrome_frame',
'../courgette/courgette.gyp:courgette',
'../courgette/courgette.gyp:courgette64',
'../cloud_print/virtual_driver/virtual_driver.gyp:virtual_driver',
'../remoting/remoting.gyp:remoting_webapp',
'../third_party/adobe/flash/flash_player.gyp:flash_player',
],
'conditions': [
['internal_pdf', {
'dependencies': [
'../pdf/pdf.gyp:pdf',
],
}], # internal_pdf
]
},
], # targets
}], # branding=="Chrome"
], # conditions
}], # OS="win"
['chromeos==1', {
'targets': [
{
'target_name': 'chromeos_builder',
'type': 'none',
'dependencies': [
'../ash/ash.gyp:ash_shell',
'../ash/ash.gyp:aura_shell_unittests',
'../base/base.gyp:base_unittests',
#'../chrome/chrome.gyp:browser_tests',
'../chrome/chrome.gyp:chrome',
#'../chrome/chrome.gyp:interactive_ui_tests',
#'../chrome/chrome.gyp:performance_browser_tests',
#'../chrome/chrome.gyp:performance_ui_tests',
#'../chrome/chrome.gyp:safe_browsing_tests',
#'../chrome/chrome.gyp:sync_integration_tests',
'../chrome/chrome.gyp:sync_unit_tests',
'../chrome/chrome.gyp:ui_tests',
'../chrome/chrome.gyp:unit_tests',
#'../content/content.gyp:content_browsertests',
'../content/content.gyp:content_unittests',
#'../crypto/crypto.gyp:crypto_unittests',
#'../dbus/dbus.gyp:dbus_unittests',
'../ipc/ipc.gyp:ipc_tests',
#'../jingle/jingle.gyp:jingle_unittests',
#'../media/media.gyp:ffmpeg_tests',
#'../media/media.gyp:media_unittests',
#'../net/net.gyp:net_unittests',
#'../printing/printing.gyp:printing_unittests',
#'../remoting/remoting.gyp:remoting_unittests',
'../sql/sql.gyp:sql_unittests',
#'../third_party/cacheinvalidation/cacheinvalidation.gyp:cacheinvalidation_unittests',
#'../third_party/libphonenumber/libphonenumber.gyp:libphonenumber_unittests',
'../ui/aura/aura.gyp:*',
'../ui/gfx/compositor/compositor.gyp:*',
'../ui/ui.gyp:gfx_unittests',
'../ui/views/views.gyp:views',
'../ui/views/views.gyp:views_unittests',
'../webkit/webkit.gyp:pull_in_webkit_unit_tests',
#'temp_gyp/googleurl.gyp:googleurl_unittests',
],
},
], # targets
}], # "chromeos==1"
['use_aura==1', {
'targets': [
{
'target_name': 'aura_builder',
'type': 'none',
'dependencies': [
'../ash/ash.gyp:ash_shell',
'../ash/ash.gyp:aura_shell_unittests',
'../chrome/chrome.gyp:chrome',
'../chrome/chrome.gyp:unit_tests',
'../chrome/chrome.gyp:ui_tests',
'../ui/aura/aura.gyp:*',
'../ui/gfx/compositor/compositor.gyp:*',
'../ui/views/views.gyp:views',
'../ui/views/views.gyp:views_unittests',
'../webkit/webkit.gyp:pull_in_webkit_unit_tests',
],
'conditions': [
['OS=="win"', {
# Remove this when we have the real compositor.
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': ['../third_party/directxsdk/files/dlls/D3DX10d_43.dll']
},
],
'dependencies': [
'../chrome/chrome.gyp:crash_service',
'../chrome/chrome.gyp:crash_service_win64',
],
}],
['OS=="linux"', {
# Tests that currently only work on Linux.
'dependencies': [
'../base/base.gyp:base_unittests',
'../chrome/chrome.gyp:sync_unit_tests',
'../content/content.gyp:content_unittests',
'../ipc/ipc.gyp:ipc_tests',
'../sql/sql.gyp:sql_unittests',
'../ui/ui.gyp:gfx_unittests',
],
}],
['OS=="mac"', {
# Exclude dependencies that are not currently implemented.
'dependencies!': [
'../chrome/chrome.gyp:chrome',
'../chrome/chrome.gyp:unit_tests',
'../chrome/chrome.gyp:ui_tests',
'../ui/views/views.gyp:views_unittests',
],
}],
],
},
], # targets
}], # "use_aura==1"
], # conditions
}
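
For readers unfamiliar with GYP: each entry in the 'conditions' lists above is [condition, settings_if_true] with an optional third settings_if_false dict, and list-valued settings such as 'dependencies' are merged by appending. Here is a rough illustrative model in Python; apply_conditions and the sample target are invented for this sketch, and GYP's real evaluator is more involved:

def apply_conditions(target, variables):
    # Each entry: [cond, true_dict] or [cond, true_dict, false_dict].
    for entry in target.get('conditions', []):
        cond, true_dict = entry[0], entry[1]
        false_dict = entry[2] if len(entry) > 2 else {}
        chosen = true_dict if eval(cond, {}, variables) else false_dict
        for key, value in chosen.items():
            # List-valued settings such as 'dependencies' are appended.
            target.setdefault(key, []).extend(value)

target = {
    'dependencies': ['../base/base.gyp:base_unittests'],
    'conditions': [
        ['OS=="win"',
         {'dependencies': ['../courgette/courgette.gyp:courgette_unittests']}],
    ],
}
apply_conditions(target, {'OS': 'win'})
print(target['dependencies'])  # base_unittests plus the win-only dependency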

@@ -0,0 +1,60 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is the all.gyp file for Android, used to prevent breakage on Android and
# other platforms; it will churn a lot in the short term and will eventually be
# merged into all.gyp.
{
'targets': [
{
'target_name': 'All',
'type': 'none',
'dependencies': [
'util/build_util.gyp:*',
'android_builder_tests',
],
}, # target_name: All
{
# The current list of tests for android. This is temporary
# until the full set is supported. If adding a new test here,
# please also add it to build/android/run_tests.py, otherwise the
# test will not be run.
'target_name': 'android_builder_tests',
'type': 'none',
'dependencies': [
'../base/base.gyp:base_unittests',
'../sql/sql.gyp:sql_unittests',
'../ipc/ipc.gyp:ipc_tests',
'../net/net.gyp:net_unittests',
# From here down: not added to run_tests.py yet.
'../jingle/jingle.gyp:jingle_unittests',
],
},
{
# Experimental / in-progress targets that are expected to fail
# but we still try to compile them on bots (turning the stage
# orange, not red).
'target_name': 'android_experimental',
'type': 'none',
'dependencies': [
'../webkit/webkit.gyp:pull_in_webkit_unit_tests',
'../webkit/webkit.gyp:pull_in_DumpRenderTree',
'../chrome/chrome.gyp:unit_tests',
],
},
{
# In-progress targets that are expected to fail and are NOT run
# on any bot.
'target_name': 'android_in_progress',
'type': 'none',
'dependencies': [
'../content/content.gyp:content_browsertests',
'../content/content.gyp:content_unittests',
'../chrome/chrome.gyp:sync_unit_tests',
'../ui/ui.gyp:gfx_unittests',
],
},
], # targets
}

@@ -0,0 +1,780 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to communicate with the device via the adb command.
Assumes the adb binary is on the system path.
Usage:
python android_commands.py wait-for-pm
"""
import collections
import datetime
import logging
import optparse
import os
import pexpect
import re
import subprocess
import sys
import tempfile
import time
# adb_interface.py is under ../../third_party/android/testrunner/
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..',
'..', 'third_party', 'android', 'testrunner'))
import adb_interface
import cmd_helper
import errors # is under ../../third_party/android/testrunner/errors.py
from run_tests_helper import IsRunningAsBuildbot
# Pattern to search for the next whole line of pexpect output and capture it
# into a match group. We can't use ^ and $ to match line start and end with
# pexpect; see http://www.noah.org/python/pexpect/#doc for an explanation why.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')
# Set the adb shell prompt to be a unique marker that will [hopefully] not
# appear at the start of any line of a command's output.
SHELL_PROMPT = '~+~PQ\x17RS~+~'
# This only works for single core devices.
SCALING_GOVERNOR = '/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor'
DROP_CACHES = '/proc/sys/vm/drop_caches'
# Java properties file
LOCAL_PROPERTIES_PATH = '/data/local.prop'
# Property in /data/local.prop that controls Java assertions.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
BOOT_COMPLETE_RE = re.compile(
re.escape('android.intent.action.MEDIA_MOUNTED path: /mnt/sdcard')
+ '|' + re.escape('PowerManagerService: bootCompleted'))
# Keycode "enum" suitable for passing to AndroidCommands.SendKey().
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82
KEYCODE_BACK = 4
def GetEmulators():
"""Returns a list of emulators. Does not filter by status (e.g. offline).
Both of the devices starting with 'emulator' in the example output below would be returned:
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
List of devices attached
027c10494100b4d7 device
emulator-5554 offline
emulator-5558 device
"""
re_device = re.compile('^emulator-[0-9]+', re.MULTILINE)
devices = re_device.findall(cmd_helper.GetCmdOutput(['adb', 'devices']))
return devices
def GetAttachedDevices():
"""Returns a list of attached, online android devices.
If a preferred device has been set with ANDROID_SERIAL, it will be first in
the returned list.
Example output:
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
List of devices attached
027c10494100b4d7 device
emulator-5554 offline
"""
re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
devices = re_device.findall(cmd_helper.GetCmdOutput(['adb', 'devices']))
preferred_device = os.environ.get("ANDROID_SERIAL")
if preferred_device in devices:
devices.remove(preferred_device)
devices.insert(0, preferred_device)
return devices
def _GetHostFileInfo(file_name):
"""Returns a tuple containing size and modified UTC time for file_name."""
# The time accuracy on the device is only to the minute, so remove the seconds
# and microseconds from the host results.
utc_time = datetime.datetime.utcfromtimestamp(os.path.getmtime(file_name))
time_delta = datetime.timedelta(seconds=utc_time.second,
microseconds=utc_time.microsecond)
return os.path.getsize(file_name), utc_time - time_delta
def ListHostPathContents(path):
"""Lists files in all subdirectories of |path|.
Args:
path: The path to list.
Returns:
A dict of {"name": (size, lastmod), ...}.
"""
if os.path.isfile(path):
return {os.path.basename(path): _GetHostFileInfo(path)}
ret = {}
for root, dirs, files in os.walk(path):
# Prune hidden directories in place so os.walk skips them; removing items
# from a list while iterating over it skips entries, so rebuild dirs instead.
dirs[:] = [d for d in dirs if not d.startswith('.')]
for f in files:
if f.startswith('.'):
continue
full_file_name = os.path.join(root, f)
file_name = os.path.relpath(full_file_name, path)
ret[file_name] = _GetHostFileInfo(full_file_name)
return ret
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
"""Gets a list of files from `ls` command output.
Python's os.walk isn't used because it doesn't work over adb shell.
Args:
path: The path to list.
ls_output: A list of lines returned by an `ls -lR` command.
re_file: A compiled regular expression which parses a line into named groups
consisting of at minimum "filename", "date", "time", "size" and
optionally "timezone".
utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
2-digit string giving the number of UTC offset hours, and MM is a
2-digit string giving the number of UTC offset minutes. If the input
utc_offset is None, will try to look for the value of "timezone" if it
is specified in re_file.
Returns:
A dict of {"name": (size, lastmod), ...} where:
name: The file name relative to |path|'s directory.
size: The file size in bytes (0 for directories).
lastmod: The file last modification date in UTC.
"""
re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
path_dir = os.path.dirname(path)
current_dir = ''
files = {}
for line in ls_output:
directory_match = re_directory.match(line)
if directory_match:
current_dir = directory_match.group('dir')
continue
file_match = re_file.match(line)
if file_match:
filename = os.path.join(current_dir, file_match.group('filename'))
if filename.startswith(path_dir):
filename = filename[len(path_dir)+1:]
lastmod = datetime.datetime.strptime(
file_match.group('date') + ' ' + file_match.group('time')[:5],
'%Y-%m-%d %H:%M')
if not utc_offset and 'timezone' in re_file.groupindex:
utc_offset = file_match.group('timezone')
if isinstance(utc_offset, str) and len(utc_offset) == 5:
utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
minutes=int(utc_offset[3:5]))
if utc_offset[0:1] == '-':
utc_delta = -utc_delta
lastmod -= utc_delta
files[filename] = (int(file_match.group('size')), lastmod)
return files
def GetLogTimestamp(log_line):
"""Returns the timestamp of the given |log_line|."""
try:
return datetime.datetime.strptime(log_line[:18], '%m-%d %H:%M:%S.%f')
except (ValueError, IndexError):
logging.critical('Error reading timestamp from ' + log_line)
return None
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb.
Args:
device: If given, adb commands are only sent to the device with this ID.
Otherwise commands are sent to all attached devices.
wait_for_pm: If true, issues an adb wait-for-device command.
"""
def __init__(self, device=None, wait_for_pm=False):
self._adb = adb_interface.AdbInterface()
if device:
self._adb.SetTargetSerial(device)
if wait_for_pm:
self.WaitForDevicePm()
self._logcat = None
self._original_governor = None
self._pushed_files = []
def Adb(self):
"""Returns our AdbInterface to avoid us wrapping all its methods."""
return self._adb
def WaitForDevicePm(self):
"""Blocks until the device's package manager is available.
To work around http://b/5201039, we restart the shell and retry if the
package manager isn't back after 120 seconds.
Raises:
errors.WaitForResponseTimedOutError after max retries reached.
"""
last_err = None
retries = 3
while retries:
try:
self._adb.WaitForDevicePm()
return # Success
except errors.WaitForResponseTimedOutError as e:
last_err = e
logging.warning('Restarting and retrying after timeout: %s' % str(e))
retries -= 1
self.RestartShell()
raise last_err # Only reached after max retries, re-raise the last error.
def SynchronizeDateTime(self):
"""Synchronize date/time between host and device."""
self._adb.SendShellCommand('date -u %f' % time.time())
def RestartShell(self):
"""Restarts the shell on the device. Does not block for it to return."""
self.RunShellCommand('stop')
self.RunShellCommand('start')
def Reboot(self, full_reboot=True):
"""Reboots the device and waits for the package manager to return.
Args:
full_reboot: Whether to fully reboot the device or just restart the shell.
"""
# TODO(torne): hive can't reboot the device either way without breaking the
# connection; work out if we can handle this better
if os.environ.get('USING_HIVE'):
logging.warning('Ignoring reboot request as we are on hive')
return
if full_reboot:
self._adb.SendCommand('reboot')
else:
self.RestartShell()
self.WaitForDevicePm()
self.StartMonitoringLogcat(timeout=120)
self.WaitForLogMatch(BOOT_COMPLETE_RE)
self.UnlockDevice()
def Uninstall(self, package):
"""Uninstalls the specified package from the device.
Args:
package: Name of the package to remove.
"""
uninstall_command = 'uninstall %s' % package
logging.info('>>> $' + uninstall_command)
self._adb.SendCommand(uninstall_command, timeout_time=60)
def Install(self, package_file_path):
"""Installs the specified package to the device.
Args:
package_file_path: Path to .apk file to install.
"""
assert os.path.isfile(package_file_path)
install_command = 'install %s' % package_file_path
logging.info('>>> $' + install_command)
self._adb.SendCommand(install_command, timeout_time=2*60)
# It is tempting to turn this function into a generator; however, this is not
# possible without using a private (local) adb_shell instance (to ensure no
# other command interleaves usage of it), which would defeat the main aim of
# being able to reuse the adb shell instance across commands.
def RunShellCommand(self, command, timeout_time=20, log_result=True):
"""Send a command to the adb shell and return the result.
Args:
command: String containing the shell command to send. Must not include
the single quotes as we use them to escape the whole command.
timeout_time: Number of seconds to wait for command to respond before
retrying, used by AdbInterface.SendShellCommand.
log_result: Boolean to indicate whether we should log the result of the
shell command.
Returns:
A list containing the lines of output received from running the command.
"""
logging.info('>>> $' + command)
if "'" in command: logging.warning(command + " contains ' quotes")
result = self._adb.SendShellCommand("'%s'" % command,
timeout_time).splitlines()
if log_result:
logging.info('\n>>> '.join(result))
return result
def KillAll(self, process):
"""Android version of killall, connected via adb.
Args:
process: name of the process to kill off
Returns:
the number of processes killed
"""
pids = self.ExtractPid(process)
if pids:
self.RunShellCommand('kill ' + ' '.join(pids))
return len(pids)
def StartActivity(self, package, activity,
action='android.intent.action.VIEW', data=None,
extras=None, trace_file_name=None):
"""Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.android.chrome').
activity: Name of activity (e.g. '.Main' or 'com.android.chrome.Main').
action: The intent action to use (defaults to android.intent.action.VIEW).
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity.
trace_file_name: If used, turns on and saves the trace to this file name.
"""
cmd = 'am start -a %s -n %s/%s' % (action, package, activity)
if data:
cmd += ' -d "%s"' % data
if extras:
cmd += ' -e'
for key in extras:
cmd += ' %s %s' % (key, extras[key])
if trace_file_name:
cmd += ' -S -P ' + trace_file_name
self.RunShellCommand(cmd)
def EnableAdbRoot(self):
"""Enable root on the device."""
self._adb.EnableAdbRoot()
def CloseApplication(self, package):
"""Attempt to close down the application, using increasing violence.
Args:
package: Name of the process to kill off, e.g. com.android.chrome
"""
self.RunShellCommand('am force-stop ' + package)
def ClearApplicationState(self, package):
"""Closes and clears all state for the given |package|."""
self.CloseApplication(package)
self.RunShellCommand('rm -r /data/data/%s/cache/*' % package)
self.RunShellCommand('rm -r /data/data/%s/files/*' % package)
self.RunShellCommand('rm -r /data/data/%s/shared_prefs/*' % package)
def SendKeyEvent(self, keycode):
"""Sends keycode to the device.
Args:
keycode: Numeric keycode to send (see "enum" at top of file).
"""
self.RunShellCommand('input keyevent %d' % keycode)
def PushIfNeeded(self, local_path, device_path):
"""Pushes |local_path| to |device_path|.
Works for files and directories. This method skips copying any paths that
already exist on the device with the same timestamp and size.
All pushed files can be removed by calling RemovePushedFiles().
"""
assert os.path.exists(local_path)
self._pushed_files.append(device_path)
# If the path contents are the same, there's nothing to do.
local_contents = ListHostPathContents(local_path)
device_contents = self.ListPathContents(device_path)
# Only compare the size and timestamp when copying a single file, because
# the file may be renamed on the device.
if os.path.isfile(local_path):
assert len(local_contents) == 1
is_equal = local_contents.values() == device_contents.values()
else:
is_equal = local_contents == device_contents
if is_equal:
logging.info('%s is up-to-date. Skipping file push.' % device_path)
return
# They don't match, so remove everything first and then create it.
if os.path.isdir(local_path):
self.RunShellCommand('rm -r %s' % device_path, timeout_time=2*60)
self.RunShellCommand('mkdir -p %s' % device_path)
# NOTE: We can't use adb_interface.Push() because it hardcodes a timeout of
# 60 seconds which isn't sufficient for a lot of users of this method.
push_command = 'push %s %s' % (local_path, device_path)
logging.info('>>> $' + push_command)
output = self._adb.SendCommand(push_command, timeout_time=30*60)
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)"
# Errors look like this: "failed to copy ... "
if not re.search('^[0-9]', output):
logging.critical('PUSH FAILED: ' + output)
def GetFileContents(self, filename):
"""Gets contents from the file specified by |filename|."""
return self.RunShellCommand('if [ -f "' + filename + '" ]; then cat "' +
filename + '"; fi')
def SetFileContents(self, filename, contents):
"""Writes |contents| to the file specified by |filename|."""
with tempfile.NamedTemporaryFile() as f:
f.write(contents)
f.flush()
self._adb.Push(f.name, filename)
def RemovePushedFiles(self):
"""Removes all files pushed with PushIfNeeded() from the device."""
for p in self._pushed_files:
self.RunShellCommand('rm -r %s' % p, timeout_time=2*60)
def ListPathContents(self, path):
"""Lists files in all subdirectories of |path|.
Args:
path: The path to list.
Returns:
A dict of {"name": (size, lastmod), ...}.
"""
# Example output:
# /foo/bar:
# -rw-r----- 1 user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
re_file = re.compile('^-(?P<perms>[^\s]+)\s+'
'(?P<user>[^\s]+)\s+'
'(?P<group>[^\s]+)\s+'
'(?P<size>[^\s]+)\s+'
'(?P<date>[^\s]+)\s+'
'(?P<time>[^\s]+)\s+'
'(?P<filename>[^\s]+)$')
return _GetFilesFromRecursiveLsOutput(
path, self.RunShellCommand('ls -lR %s' % path), re_file,
self.RunShellCommand('date +%z')[0])
def SetupPerformanceTest(self):
"""Sets up performance tests."""
# Disable CPU scaling to reduce noise in tests
if not self._original_governor:
self._original_governor = self.RunShellCommand('cat ' + SCALING_GOVERNOR)
self.RunShellCommand('echo performance > ' + SCALING_GOVERNOR)
self.DropRamCaches()
def TearDownPerformanceTest(self):
"""Tears down performance tests."""
if self._original_governor:
self.RunShellCommand('echo %s > %s' % (self._original_governor[0],
SCALING_GOVERNOR))
self._original_governor = None
def SetJavaAssertsEnabled(self, enable):
"""Sets or removes the device java assertions property.
Args:
enable: If True the property will be set.
Returns:
True if the file was modified (reboot is required for it to take effect).
"""
# First ensure the desired property is persisted.
temp_props_file = tempfile.NamedTemporaryFile()
properties = ''
if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
properties = file(temp_props_file.name).read()
re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*all\s*$', re.MULTILINE)
if enable != bool(re.search(re_search, properties)):
re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*\w+\s*$', re.MULTILINE)
properties = re.sub(re_replace, '', properties)
if enable:
properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
file(temp_props_file.name, 'w').write(properties)
self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)
# Next, check the current runtime value is what we need, and
# if not, set it and report that a reboot is required.
was_set = 'all' in self.RunShellCommand('getprop ' + JAVA_ASSERT_PROPERTY)
if was_set == enable:
return False
self.RunShellCommand('setprop %s "%s"' % (JAVA_ASSERT_PROPERTY,
enable and 'all' or ''))
return True
def DropRamCaches(self):
"""Drops the filesystem ram caches for performance testing."""
self.RunShellCommand('echo 3 > ' + DROP_CACHES)
def StartMonitoringLogcat(self, clear=True, timeout=10, logfile=None,
filters=[]):
"""Starts monitoring the output of logcat, for use with WaitForLogMatch.
Args:
clear: If True the existing logcat output will be cleared, to avoid
matching historical output lurking in the log.
timeout: How long WaitForLogMatch will wait for the given match.
filters: A list of logcat filters to be used.
"""
if clear:
self.RunShellCommand('logcat -c')
args = ['logcat', '-v', 'threadtime']
if filters:
args.extend(filters)
else:
args.append('*:v')
# Spawn logcat and synchronize with it.
for _ in range(4):
self._logcat = pexpect.spawn('adb', args, timeout=timeout,
logfile=logfile)
self.RunShellCommand('log startup_sync')
if self._logcat.expect(['startup_sync', pexpect.EOF,
pexpect.TIMEOUT]) == 0:
break
self._logcat.close(force=True)
else:
logging.critical('Error reading from logcat: ' + str(self._logcat.match))
sys.exit(1)
def GetMonitoredLogCat(self):
"""Returns an "adb logcat" command as created by pexpected.spawn."""
if not self._logcat:
self.StartMonitoringLogcat(clear=False)
return self._logcat
def WaitForLogMatch(self, search_re):
"""Blocks until a line containing |line_re| is logged or a timeout occurs.
Args:
search_re: The compiled re to search each line for.
Returns:
The re match object.
"""
if not self._logcat:
self.StartMonitoringLogcat(clear=False)
logging.info('<<< Waiting for logcat:' + str(search_re.pattern))
t0 = time.time()
try:
while True:
# Note this will block for up to the timeout _per log line_, so we need
# to calculate the overall timeout remaining since t0.
time_remaining = t0 + self._logcat.timeout - time.time()
if time_remaining < 0: raise pexpect.TIMEOUT(self._logcat)
self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
line = self._logcat.match.group(1)
search_match = search_re.search(line)
if search_match:
return search_match
logging.info('<<< Skipped Logcat Line:' + str(line))
except pexpect.TIMEOUT:
raise pexpect.TIMEOUT(
'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
'to debug)' %
(self._logcat.timeout, search_re.pattern))
def StartRecordingLogcat(self, clear=True, filters=['*:v']):
"""Starts recording logcat output to eventually be saved as a string.
This call should come before a series of tests is run, with either
StopRecordingLogcat or SearchLogcatRecord following the tests.
Args:
clear: True if existing log output should be cleared.
filters: A list of logcat filters to be used.
"""
if clear:
self._adb.SendCommand('logcat -c')
logcat_command = 'adb logcat -v threadtime %s' % ' '.join(filters)
self.logcat_process = subprocess.Popen(logcat_command, shell=True,
stdout=subprocess.PIPE)
def StopRecordingLogcat(self):
"""Stops an existing logcat recording subprocess and returns output.
Returns:
The logcat output as a string or an empty string if logcat was not
being recorded at the time.
"""
if not self.logcat_process:
return ''
# Cannot evaluate poll() directly, as 0 is a possible return value.
# Better to read self.logcat_process.stdout before killing it; otherwise
# communicate() may return incomplete output due to a broken pipe.
if self.logcat_process.poll() is None:
self.logcat_process.kill()
(output, _) = self.logcat_process.communicate()
self.logcat_process = None
return output
def SearchLogcatRecord(self, record, message, thread_id=None, proc_id=None,
log_level=None, component=None):
"""Searches the specified logcat output and returns results.
This method searches through the logcat output specified by record for a
certain message, narrowing results by matching them against any other
specified criteria. It returns all matching lines as described below.
Args:
record: A string generated by Start/StopRecordingLogcat to search.
message: An output string to search for.
thread_id: The thread id that is the origin of the message.
proc_id: The process that is the origin of the message.
log_level: The log level of the message.
component: The name of the component that would create the message.
Returns:
A list of dictionaries representing matching entries, each containing keys
thread_id, proc_id, log_level, component, and message.
"""
if thread_id:
thread_id = str(thread_id)
if proc_id:
proc_id = str(proc_id)
results = []
reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
re.MULTILINE)
log_list = reg.findall(record)
for (tid, pid, log_lev, comp, msg) in log_list:
if ((not thread_id or thread_id == tid) and
(not proc_id or proc_id == pid) and
(not log_level or log_level == log_lev) and
(not component or component == comp) and msg.find(message) > -1):
match = dict({'thread_id': tid, 'proc_id': pid,
'log_level': log_lev, 'component': comp,
'message': msg})
results.append(match)
return results
def ExtractPid(self, process_name):
"""Extracts Process Ids for a given process name from Android Shell.
Args:
process_name: name of the process on the device.
Returns:
List of all the process ids (as strings) that match the given name.
"""
pids = []
for line in self.RunShellCommand('ps'):
data = line.split()
try:
if process_name in data[-1]: # name is in the last column
pids.append(data[1]) # PID is in the second column
except IndexError:
pass
return pids
def GetIoStats(self):
"""Gets cumulative disk IO stats since boot (for all processes).
Returns:
Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
was an error.
"""
# Field definitions.
# http://www.kernel.org/doc/Documentation/iostats.txt
device = 2
num_reads_issued_idx = 3
num_reads_merged_idx = 4
num_sectors_read_idx = 5
ms_spent_reading_idx = 6
num_writes_completed_idx = 7
num_writes_merged_idx = 8
num_sectors_written_idx = 9
ms_spent_writing_idx = 10
num_ios_in_progress_idx = 11
ms_spent_doing_io_idx = 12
ms_spent_doing_io_weighted_idx = 13
for line in self.RunShellCommand('cat /proc/diskstats'):
fields = line.split()
if fields[device] == 'mmcblk0':
return {
'num_reads': int(fields[num_reads_issued_idx]),
'num_writes': int(fields[num_writes_completed_idx]),
'read_ms': int(fields[ms_spent_reading_idx]),
'write_ms': int(fields[ms_spent_writing_idx]),
}
logging.warning('Could not find disk IO stats.')
return None
def GetMemoryUsage(self, package):
"""Returns the memory usage for all processes whose name contains |pacakge|.
Args:
name: A string holding process name to lookup pid list for.
Returns:
Dict of {metric:usage_kb}, summed over all pids associated with |name|.
The metric keys retruned are: Size, Rss, Pss, Shared_Clean, Shared_Dirty,
Private_Clean, Private_Dirty, Referenced, Swap, KernelPageSize,
MMUPageSize.
"""
usage_dict = collections.defaultdict(int)
pid_list = self.ExtractPid(package)
# We used to use the showmap command, but it is currently broken on
# stingray so it's easier to just parse /proc/<pid>/smaps directly.
memory_stat_re = re.compile('^(?P<key>\w+):\s+(?P<value>\d+) kB$')
for pid in pid_list:
for line in self.RunShellCommand('cat /proc/%s/smaps' % pid,
log_result=False):
match = re.match(memory_stat_re, line)
if match: usage_dict[match.group('key')] += int(match.group('value'))
if not usage_dict or not any(usage_dict.values()):
# Presumably the process died between the ps call and reading smaps.
logging.warning('Could not find memory usage for pid ' + str(pid))
return usage_dict
def UnlockDevice(self):
"""Unlocks the screen of the device."""
# Make sure a menu button event will actually unlock the screen.
if IsRunningAsBuildbot():
assert self.RunShellCommand('getprop ro.test_harness')[0].strip() == '1'
# The following keyevent unlocks the screen if locked.
self.SendKeyEvent(KEYCODE_MENU)
# If the screen wasn't locked the previous command will bring up the menu,
# which this will dismiss. Otherwise this shouldn't change anything.
self.SendKeyEvent(KEYCODE_BACK)
def main(argv):
option_parser = optparse.OptionParser()
option_parser.add_option('-w', '--wait_for_pm', action='store_true',
default=False, dest='wait_for_pm',
help='Waits for Device Package Manager to become available')
option_parser.add_option('--enable_asserts', dest='set_asserts',
action='store_true', default=None,
help='Sets the dalvik.vm.enableassertions property to "all"')
option_parser.add_option('--disable_asserts', dest='set_asserts',
action='store_false', default=None,
help='Removes the dalvik.vm.enableassertions property')
options, args = option_parser.parse_args(argv)
commands = AndroidCommands(wait_for_pm=options.wait_for_pm)
if options.set_asserts != None:
if commands.SetJavaAssertsEnabled(options.set_asserts):
commands.Reboot(full_reboot=False)
if __name__ == '__main__':
main(sys.argv)
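
Besides the wait-for-pm command-line entry point above, AndroidCommands is meant to be driven from other scripts. A brief hypothetical usage sketch follows; the emulator serial and apk path are invented for illustration:

# Hypothetical usage of AndroidCommands from another script.
commands = AndroidCommands(device='emulator-5554', wait_for_pm=True)
commands.EnableAdbRoot()
commands.Install('/tmp/ChromeTest.apk')  # made-up apk path
for line in commands.RunShellCommand('getprop ro.build.version.release'):
    print(line)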

@@ -0,0 +1,146 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import android_commands
from chrome_test_server_spawner import SpawningServer
from flag_changer import FlagChanger
import lighttpd_server
import run_tests_helper
FORWARDER_PATH = '/data/local/tmp/forwarder'
# These ports must match up with the constants in net/test/test_server.cc
TEST_SERVER_SPAWNER_PORT = 8001
TEST_SERVER_PORT = 8002
TEST_SYNC_SERVER_PORT = 8003
class BaseTestRunner(object):
"""Base class for running tests on a single device."""
def __init__(self, device):
"""
Args:
device: Tests will run on the device of this ID.
"""
self.device = device
self.adb = android_commands.AndroidCommands(device=device)
# Synchronize date/time between host and device. Otherwise the same file on
# the host and the device may have different timestamps, which may cause
# AndroidCommands.PushIfNeeded to fail, or cause a test that compares a
# timestamp from an HTTP HEAD response against local time to fail.
self.adb.SynchronizeDateTime()
self._http_server = None
self._forwarder = None
self._spawning_server = None
self._spawner_forwarder = None
self._forwarder_device_port = 8000
self.forwarder_base_url = ('http://localhost:%d' %
self._forwarder_device_port)
self.flags = FlagChanger(self.adb)
def RunTests(self):
# TODO(bulach): this should actually do SetUp / RunTestsInternal / TearDown.
# Refactor the various subclasses to expose a RunTestsInternal without
# any params.
raise NotImplementedError
def SetUp(self):
"""Called before tests run."""
pass
def TearDown(self):
"""Called when tests finish running."""
self.ShutdownHelperToolsForTestSuite()
def CopyTestData(self, test_data_paths, dest_dir):
"""Copies |test_data_paths| list of files/directories to |dest_dir|.
Args:
test_data_paths: A list of files or directories relative to |dest_dir|
which should be copied to the device. The paths must exist in
|CHROME_DIR|.
dest_dir: Absolute path to copy to on the device.
"""
for p in test_data_paths:
self.adb.PushIfNeeded(
os.path.join(run_tests_helper.CHROME_DIR, p),
os.path.join(dest_dir, p))
def LaunchTestHttpServer(self, document_root, extra_config_contents=None):
"""Launches an HTTP server to serve HTTP tests.
Args:
document_root: Document root of the HTTP server.
extra_config_contents: Extra config contents for the HTTP server.
"""
self._http_server = lighttpd_server.LighttpdServer(
document_root, extra_config_contents=extra_config_contents)
if self._http_server.StartupHttpServer():
logging.info('http server started: http://localhost:%s',
self._http_server.port)
else:
logging.critical('Failed to start http server')
# Root access needed to make the forwarder executable work.
self.adb.EnableAdbRoot()
self.StartForwarderForHttpServer()
def StartForwarderForHttpServer(self):
"""Starts a forwarder for the HTTP server.
The forwarder forwards HTTP requests and responses between host and device.
"""
# Sometimes the forwarder device port may be already used. We have to kill
# all forwarder processes to ensure that the forwarder can be started since
    # currently we cannot associate the specified port with the related pid.
# TODO(yfriedman/wangxianzhu): This doesn't work as most of the time the
# port is in use but the forwarder is already dead. Killing all forwarders
# is overly destructive and breaks other tests which make use of forwarders.
# if IsDevicePortUsed(self.adb, self._forwarder_device_port):
# self.adb.KillAll('forwarder')
self._forwarder = run_tests_helper.ForwardDevicePorts(
self.adb, [(self._forwarder_device_port, self._http_server.port)])
def RestartHttpServerForwarderIfNecessary(self):
"""Restarts the forwarder if it's not open."""
    # Checks to see if the http server port is being used. If not, restarts
    # the forwarder.
# TODO(dtrainor): This is not always reliable because sometimes the port
# will be left open even after the forwarder has been killed.
if not run_tests_helper.IsDevicePortUsed(self.adb,
self._forwarder_device_port):
self.StartForwarderForHttpServer()
def ShutdownHelperToolsForTestSuite(self):
"""Shuts down the server and the forwarder."""
# Forwarders should be killed before the actual servers they're forwarding
# to as they are clients potentially with open connections and to allow for
# proper hand-shake/shutdown.
if self._forwarder or self._spawner_forwarder:
# Kill all forwarders on the device and then kill the process on the host
# (if it exists)
self.adb.KillAll('forwarder')
if self._forwarder:
self._forwarder.kill()
if self._spawner_forwarder:
self._spawner_forwarder.kill()
if self._http_server:
self._http_server.ShutdownHttpServer()
if self._spawning_server:
self._spawning_server.Stop()
self.flags.Restore()
def LaunchChromeTestServerSpawner(self):
"""Launches test server spawner."""
self._spawning_server = SpawningServer(TEST_SERVER_SPAWNER_PORT,
TEST_SERVER_PORT)
self._spawning_server.Start()
# TODO(yfriedman): Ideally we'll only try to start up a port forwarder if
# there isn't one already running but for now we just get an error message
# and the existing forwarder still works.
self._spawner_forwarder = run_tests_helper.ForwardDevicePorts(
self.adb, [(TEST_SERVER_SPAWNER_PORT, TEST_SERVER_SPAWNER_PORT),
(TEST_SERVER_PORT, TEST_SERVER_PORT)])
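The TODO in RunTests above sketches the intended lifecycle; a hypothetical
subclass (names invented for illustration) would drive it roughly like this:

class MyTestRunner(BaseTestRunner):  # hypothetical subclass
  def RunTests(self):
    self.SetUp()
    try:
      self.LaunchTestHttpServer('chrome/test/data')  # document root is illustrative
      # ... push data with CopyTestData() and execute the suite here ...
    finally:
      self.TearDown()  # kills forwarders, stops servers, restores flags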

@ -0,0 +1,16 @@
#!/bin/bash -ex
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Currently used as the entry point by both trybot and FYI bot.
# TODO(jrg): redirect those bots to buildbot_try_compile.sh and
# buildbot_fyi.sh, then delete this file.
ROOT=$(cd "$(dirname $0)"; pwd)
if [ "${TRYBOT:-0}" = 1 ] ; then
exec $ROOT/buildbot_try_compile.sh
else
exec $ROOT/buildbot_fyi.sh
fi

@ -0,0 +1,105 @@
#!/bin/bash
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Bash functions used by buildbot annotator scripts for the android
# build of chromium. Executing this script should not perform actions
# other than setting variables and defining of functions.
# Number of jobs on the compile line; e.g. make -j"${JOBS}"
JOBS="${JOBS:-4}"
# Clobber build? Overridden by bots with BUILDBOT_CLOBBER.
NEED_CLOBBER="${NEED_CLOBBER:-0}"
# Function to force-green a bot.
function bb_force_bot_green_and_exit {
echo "@@@BUILD_STEP Bot forced green.@@@"
exit 0
}
# Basic setup for all bots to run after a source tree checkout.
# $1: source root.
function bb_baseline_setup {
echo "@@@BUILD_STEP cd into source root@@@"
SRC_ROOT="$1"
if [ ! -d "${SRC_ROOT}" ] ; then
echo "Please specify a valid source root directory as an arg"
echo '@@@STEP_FAILURE@@@'
return 1
fi
cd $SRC_ROOT
if [ ! -f build/android/envsetup.sh ] ; then
echo "No envsetup.sh"
echo "@@@STEP_FAILURE@@@"
return 1
fi
echo "@@@BUILD_STEP Basic setup@@@"
export ANDROID_SDK_ROOT=/usr/local/google/android-sdk-linux
export ANDROID_NDK_ROOT=/usr/local/google/android-ndk-r7
for mandatory_directory in "${ANDROID_SDK_ROOT}" "${ANDROID_NDK_ROOT}" ; do
if [[ ! -d "${mandatory_directory}" ]]; then
echo "Directory ${mandatory_directory} does not exist."
echo "Build cannot continue."
echo "@@@STEP_FAILURE@@@"
return 1
fi
done
if [ ! "$BUILDBOT_CLOBBER" = "" ]; then
NEED_CLOBBER=1
fi
echo "@@@BUILD_STEP Configure with envsetup.sh@@@"
. build/android/envsetup.sh
if [ "$NEED_CLOBBER" -eq 1 ]; then
echo "@@@BUILD_STEP Clobber@@@"
rm -rf "${SRC_ROOT}"/out
if [ -e "${SRC_ROOT}"/out ] ; then
echo "Clobber appeared to fail? ${SRC_ROOT}/out still exists."
echo "@@@STEP_WARNINGS@@@"
fi
fi
echo "@@@BUILD_STEP android_gyp@@@"
android_gyp
}
# Compile step
function bb_compile {
echo "@@@BUILD_STEP Compile@@@"
make -j${JOBS}
}
# Experimental compile step; does not turn the tree red if it fails.
function bb_compile_experimental {
# Linking DumpRenderTree appears to hang forever?
# EXPERIMENTAL_TARGETS="DumpRenderTree webkit_unit_tests"
EXPERIMENTAL_TARGETS="webkit_unit_tests"
for target in ${EXPERIMENTAL_TARGETS} ; do
echo "@@@BUILD_STEP Experimental Compile $target @@@"
set +e
make -j4 "${target}"
if [ $? -ne 0 ] ; then
echo "@@@STEP_WARNINGS@@@"
fi
set -e
done
}
# Run tests on an emulator.
function bb_run_tests_emulator {
echo "@@@BUILD_STEP Run Tests on an Emulator@@@"
build/android/run_tests.py -e --xvfb --verbose
}
# Run tests on an actual device. (Better have one plugged in!)
function bb_run_tests {
echo "@@@BUILD_STEP Run Tests on actual hardware@@@"
build/android/run_tests.py --xvfb --verbose
}
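The @@@...@@@ strings echoed by these functions are buildbot annotator
directives: the master scans the build's stdout for them to create named steps
and to mark warnings or failures. A minimal Python sketch of the same protocol
(the step name and command are illustrative):

import subprocess
import sys

def annotated_step(name, args):
  # Buildbot turns each @@@BUILD_STEP ...@@@ marker into a named step.
  print '@@@BUILD_STEP %s@@@' % name
  sys.stdout.flush()
  if subprocess.call(args) != 0:
    print '@@@STEP_FAILURE@@@'  # marks the step (and the build) as failed

annotated_step('Compile', ['make', '-j4'])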

@ -0,0 +1,20 @@
#!/bin/bash -ex
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Buildbot annotator script for the FYI waterfall. Compile,
# experimental compile, run tests, ...
# SHERIFF: there should be no need to disable this bot.
# The FYI waterfall does not close the tree.
ROOT=$(cd "$(dirname $0)"; pwd)
. "${ROOT}"/buildbot_functions.sh
bb_baseline_setup "${ROOT}"/../..
bb_compile
bb_compile_experimental
bb_run_tests

@ -0,0 +1,18 @@
#!/bin/bash -ex
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Buildbot annotator script for the main waterfall. Compile only.
ROOT=$(cd "$(dirname $0)"; pwd)
. "${ROOT}"/buildbot_functions.sh
# SHERIFF: if you need to quickly turn the main waterfall android bots
# green (preventing tree closures), uncomment the next line (and send
# appropriate email out):
## bb_force_bot_green_and_exit
bb_baseline_setup "${ROOT}"/../..
bb_compile

@ -0,0 +1,16 @@
#!/bin/bash -ex
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Buildbot annotator script for trybots. Compile only.
ROOT=$(cd "$(dirname $0)"; pwd)
. "${ROOT}"/buildbot_functions.sh
# SHERIFF: if you need to quickly turn "android" trybots green,
# uncomment the next line (and send appropriate email out):
## bb_force_bot_green_and_exit
bb_baseline_setup "${ROOT}"/../..
bb_compile

@ -0,0 +1,17 @@
#!/bin/bash -ex
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Buildbot annotator script for trybots. Compile and test.
ROOT=$(cd "$(dirname $0)"; pwd)
. "${ROOT}"/buildbot_functions.sh
# SHERIFF: if you need to quickly turn "android_test" trybots green,
# uncomment the next line (and send appropriate email out):
## bb_force_bot_green_and_exit
bb_baseline_setup "${ROOT}"/../..
bb_compile
bb_run_tests

@ -0,0 +1,114 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A "Test Server Spawner" that handles killing/stopping per-test test servers.
It's used to accept requests from the device to spawn and kill instances of the
chrome test server on the host.
"""
import BaseHTTPServer
import logging
import os
import sys
import threading
import time
import urlparse
# Paths that are needed to import testserver
cr_src = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..')
sys.path.append(os.path.join(cr_src, 'third_party'))
sys.path.append(os.path.join(cr_src, 'third_party', 'tlslite'))
sys.path.append(os.path.join(cr_src, 'third_party', 'pyftpdlib', 'src'))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..',
'..', 'net', 'tools', 'testserver'))
import testserver
_test_servers = []
class SpawningServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""A handler used to process http GET request.
"""
def GetServerType(self, server_type):
"""Returns the server type to use when starting the test server.
    This function translates the command-line argument into the appropriate
numerical constant.
# TODO(yfriedman): Do that translation!
"""
if server_type:
pass
return 0
def do_GET(self):
parsed_path = urlparse.urlparse(self.path)
action = parsed_path.path
params = urlparse.parse_qs(parsed_path.query, keep_blank_values=1)
logging.info('Action is: %s' % action)
if action == '/killserver':
# There should only ever be one test server at a time. This may do the
# wrong thing if we try and start multiple test servers.
_test_servers.pop().Stop()
elif action == '/start':
logging.info('Handling request to spawn a test webserver')
for param in params:
logging.info('%s=%s' % (param, params[param][0]))
s_type = 0
doc_root = None
if 'server_type' in params:
s_type = self.GetServerType(params['server_type'][0])
if 'doc_root' in params:
doc_root = params['doc_root'][0]
self.webserver_thread = threading.Thread(
target=self.SpawnTestWebServer, args=(s_type, doc_root))
self.webserver_thread.setDaemon(True)
self.webserver_thread.start()
self.send_response(200, 'OK')
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head><title>started</title></head></html>')
logging.info('Returned OK!!!')
def SpawnTestWebServer(self, s_type, doc_root):
class Options(object):
log_to_console = True
server_type = s_type
port = self.server.test_server_port
data_dir = doc_root or 'chrome/test/data'
file_root_url = '/files/'
cert = False
policy_keys = None
policy_user = None
startup_pipe = None
options = Options()
logging.info('Listening on %d, type %d, data_dir %s' % (options.port,
options.server_type, options.data_dir))
testserver.main(options, None, server_list=_test_servers)
logging.info('Test-server has died.')
class SpawningServer(object):
"""The class used to start/stop a http server.
"""
def __init__(self, test_server_spawner_port, test_server_port):
logging.info('Creating new spawner %d', test_server_spawner_port)
self.server = testserver.StoppableHTTPServer(('', test_server_spawner_port),
SpawningServerRequestHandler)
self.port = test_server_spawner_port
self.server.test_server_port = test_server_port
def Listen(self):
logging.info('Starting test server spawner')
self.server.serve_forever()
def Start(self):
listener_thread = threading.Thread(target=self.Listen)
listener_thread.setDaemon(True)
listener_thread.start()
time.sleep(1)
def Stop(self):
self.server.Stop()
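Since the spawner speaks plain HTTP GET, a client only needs the /start and
/killserver actions. A hypothetical host-side exercise of the protocol,
assuming a spawner listening on the default port 8001:

import urllib2

spawner = 'http://localhost:8001'  # TEST_SERVER_SPAWNER_PORT
# Ask the spawner to start a test web server.
urllib2.urlopen(spawner + '/start?doc_root=chrome/test/data').read()
# ... exercise the spawned server on TEST_SERVER_PORT (8002) ...
try:
  # The /killserver handler above stops the server without writing an HTTP
  # response, so the connection may simply drop; swallow that here.
  urllib2.urlopen(spawner + '/killserver')
except Exception:
  pass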

@ -0,0 +1,40 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import subprocess
def RunCmd(args, cwd=None):
"""Opens a subprocess to execute a program and returns its return value.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
"""
logging.info(str(args) + ' ' + (cwd or ''))
p = subprocess.Popen(args=args, cwd=cwd)
return p.wait()
def GetCmdOutput(args, cwd=None):
"""Open a subprocess to execute a program and returns its output.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
"""
logging.info(str(args) + ' ' + (cwd or ''))
p = subprocess.Popen(args=args, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if stderr:
logging.critical(stderr)
logging.info(stdout[:4096]) # Truncate output longer than 4k.
return stdout
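Both helpers are thin subprocess wrappers; a short usage sketch (the commands
shown are illustrative):

import cmd_helper

rc = cmd_helper.RunCmd(['ls', '-l'], cwd='/tmp')         # child's exit code
out = cmd_helper.GetCmdOutput(['ls', '-l'], cwd='/tmp')  # captured stdout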

@ -0,0 +1,202 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Collect debug info for a test."""
import datetime
import logging
import os
import re
import shutil
import string
import subprocess
import tempfile
import cmd_helper
TOMBSTONE_DIR = '/data/tombstones/'
class GTestDebugInfo(object):
"""A helper class to get relate debug information for a gtest.
Args:
adb: ADB interface the tests are using.
device: Serial# of the Android device in which the specified gtest runs.
testsuite_name: Name of the specified gtest.
gtest_filter: Test filter used by the specified gtest.
"""
def __init__(self, adb, device, testsuite_name, gtest_filter,
collect_new_crashes=True):
"""Initializes the DebugInfo class for a specified gtest."""
self.adb = adb
self.device = device
self.testsuite_name = testsuite_name
self.gtest_filter = gtest_filter
self.logcat_process = None
self.has_storage = False
self.log_dir = None
self.log_file_name = None
self.collect_new_crashes = collect_new_crashes
self.old_crash_files = self.ListCrashFiles()
def InitStorage(self):
"""Initializes the storage in where we put the debug information."""
if self.has_storage:
return
self.has_storage = True
self.log_dir = tempfile.mkdtemp()
self.log_file_name = os.path.join(self.log_dir,
self._GeneratePrefixName() + '_log.txt')
def CleanupStorage(self):
"""Cleans up the storage in where we put the debug information."""
if not self.has_storage:
return
self.has_storage = False
assert os.path.exists(self.log_dir)
shutil.rmtree(self.log_dir)
self.log_dir = None
self.log_file_name = None
def GetStoragePath(self):
"""Returns the path in where we store the debug information."""
self.InitStorage()
return self.log_dir
def _GetSignatureFromGTestFilter(self):
"""Gets a signature from gtest_filter.
Signature is used to identify the tests from which we collect debug
information.
Returns:
A signature string. Returns 'all' if there is no gtest filter.
"""
if not self.gtest_filter:
return 'all'
filename_chars = "-_()%s%s" % (string.ascii_letters, string.digits)
return ''.join(c for c in self.gtest_filter if c in filename_chars)
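For instance, a filter such as 'HttpTest.Get/*:-Flaky*' (invented here for
illustration) reduces to just its filename-safe characters:

import string

filename_chars = "-_()%s%s" % (string.ascii_letters, string.digits)
gtest_filter = 'HttpTest.Get/*:-Flaky*'
print ''.join(c for c in gtest_filter if c in filename_chars)
# -> HttpTestGet-Flaky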
def _GeneratePrefixName(self):
"""Generates a prefix name for debug information of the test.
The prefix name consists of the following:
(1) root name of test_suite_base.
(2) device serial number.
    (3) filter signature generated from gtest_filter.
(4) date & time when calling this method.
Returns:
Name of the log file.
"""
return (os.path.splitext(self.testsuite_name)[0] + '_' + self.device + '_' +
self._GetSignatureFromGTestFilter() + '_' +
datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S-%f'))
def StartRecordingLog(self, clear=True, filters=['*:v']):
"""Starts recording logcat output to a file.
    This call should come before running the tests; call StopRecordingLog
    after the tests finish.
Args:
clear: True if existing log output should be cleared.
filters: A list of logcat filters to be used.
"""
self.InitStorage()
self.StopRecordingLog()
if clear:
cmd_helper.RunCmd(['adb', 'logcat', '-c'])
logging.info('Start dumping log to %s ...' % self.log_file_name)
command = 'adb logcat -v threadtime %s > %s' % (' '.join(filters),
self.log_file_name)
self.logcat_process = subprocess.Popen(command, shell=True)
def StopRecordingLog(self):
"""Stops an existing logcat recording subprocess."""
if not self.logcat_process:
return
# Cannot evaluate directly as 0 is a possible value.
if self.logcat_process.poll() is None:
self.logcat_process.kill()
self.logcat_process = None
logging.info('Finish log dump.')
def TakeScreenshot(self, identifier_mark):
"""Takes a screen shot from current specified device.
Args:
identifier_mark: A string to identify the screen shot DebugInfo will take.
It will be part of filename of the screen shot. Empty
string is acceptable.
Returns:
Returns True if successfully taking screen shot from device, otherwise
returns False.
"""
self.InitStorage()
assert isinstance(identifier_mark, str)
shot_path = os.path.join(self.log_dir, ''.join([self._GeneratePrefixName(),
identifier_mark,
'_screenshot.png']))
screenshot_path = os.path.join(os.getenv('ANDROID_HOST_OUT'), 'bin',
'screenshot2')
re_success = re.compile(re.escape('Success.'), re.MULTILINE)
if re_success.findall(cmd_helper.GetCmdOutput([screenshot_path, '-s',
self.device, shot_path])):
logging.info("Successfully took a screen shot to %s" % shot_path)
return True
logging.error('Failed to take screen shot from device %s' % self.device)
return False
def ListCrashFiles(self):
"""Collects crash files from current specified device.
Returns:
A dict of crash files in format {"name": (size, lastmod), ...}.
"""
if not self.collect_new_crashes:
return {}
return self.adb.ListPathContents(TOMBSTONE_DIR)
def ArchiveNewCrashFiles(self):
"""Archives the crash files newly generated until calling this method."""
if not self.collect_new_crashes:
return
current_crash_files = self.ListCrashFiles()
files = [f for f in current_crash_files if f not in self.old_crash_files]
logging.info('New crash file(s):%s' % ' '.join(files))
for f in files:
self.adb.Adb().Pull(TOMBSTONE_DIR + f,
os.path.join(self.GetStoragePath(), f))
@staticmethod
def ZipAndCleanResults(dest_dir, dump_file_name, debug_info_list):
"""A helper method to zip all debug information results into a dump file.
Args:
      dest_dir: Directory path where we put the dump file.
dump_file_name: Desired name of the dump file. This method makes sure
'.zip' will be added as ext name.
debug_info_list: List of all debug info objects.
"""
if not dest_dir or not dump_file_name or not debug_info_list:
return
cmd_helper.RunCmd(['mkdir', '-p', dest_dir])
log_basename = os.path.basename(dump_file_name)
log_file = os.path.join(dest_dir,
os.path.splitext(log_basename)[0] + '.zip')
logging.info('Zipping debug dumps into %s ...' % log_file)
for d in debug_info_list:
d.ArchiveNewCrashFiles()
# Add new dumps into the zip file. The zip may exist already if previous
# gtest also dumps the debug information. It's OK since we clean up the old
# dumps in each build step.
cmd_helper.RunCmd(['zip', '-q', '-r', log_file,
' '.join([d.GetStoragePath() for d in debug_info_list])])
assert os.path.exists(log_file)
for debug_info in debug_info_list:
debug_info.CleanupStorage()

@ -0,0 +1,253 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to start and stop Android emulator.
Assumes the system environment variable ANDROID_SDK_ROOT has been set.
Emulator: The class provides the methods to launch/shutdown the emulator with
the android virtual device named 'buildbot'.
"""
import logging
import os
import signal
import subprocess
import sys
import time
import android_commands
# adb_interface.py is under ../../third_party/android/testrunner/
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..',
'..', 'third_party', 'android', 'testrunner'))
import adb_interface
import cmd_helper
import errors
import run_command
class EmulatorLaunchException(Exception):
"""Emulator failed to launch."""
pass
def _KillAllEmulators():
"""Kill all running emulators that look like ones we started.
There are odd 'sticky' cases where there can be no emulator process
  running but a device slot is taken. A little bot trouble and
we're out of room forever.
"""
emulators = android_commands.GetEmulators()
if not emulators:
return
for emu_name in emulators:
cmd_helper.GetCmdOutput(['adb', '-s', emu_name, 'emu', 'kill'])
logging.info('Emulator killing is async; give a few seconds for all to die.')
for i in range(5):
if not android_commands.GetEmulators():
return
time.sleep(1)
class PortPool(object):
"""Pool for emulator port starting position that changes over time."""
_port_min = 5554
_port_max = 5585
_port_current_index = 0
@classmethod
def port_range(cls):
"""Return a range of valid ports for emulator use.
The port must be an even number between 5554 and 5584. Sometimes
a killed emulator "hangs on" to a port long enough to prevent
relaunch. This is especially true on slow machines (like a bot).
Cycling through a port start position helps make us resilient."""
ports = range(cls._port_min, cls._port_max, 2)
n = cls._port_current_index
cls._port_current_index = (n + 1) % len(ports)
return ports[n:] + ports[:n]
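Each call returns the same even ports rotated one position further along, so
back-to-back launches in one process do not all contend for 5554 first.
Assuming this module is importable as emulator, a fresh process would see:

import emulator

print emulator.PortPool.port_range()[:3]  # -> [5554, 5556, 5558]
print emulator.PortPool.port_range()[:3]  # -> [5556, 5558, 5560]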
def _GetAvailablePort():
"""Returns an available TCP port for the console."""
used_ports = []
emulators = android_commands.GetEmulators()
for emulator in emulators:
used_ports.append(emulator.split('-')[1])
for port in PortPool.port_range():
if str(port) not in used_ports:
return port
class Emulator(object):
"""Provides the methods to lanuch/shutdown the emulator.
The emulator has the android virtual device named 'buildbot'.
The emulator could use any even TCP port between 5554 and 5584 for the
console communication, and this port will be part of the device name like
  'emulator-5554'. We assume this is always the case, as the device name is
  the id of the emulator managed by this class.
Attributes:
emulator: Path of Android's emulator tool.
popen: Popen object of the running emulator process.
device: Device name of this emulator.
"""
# Signals we listen for to kill the emulator on
_SIGNALS = (signal.SIGINT, signal.SIGHUP)
# Time to wait for an emulator launch, in seconds. This includes
# the time to launch the emulator and a wait-for-device command.
_LAUNCH_TIMEOUT = 120
  # Timeout interval of wait-for-device command before bouncing to a
# process life check.
_WAITFORDEVICE_TIMEOUT = 5
# Time to wait for a "wait for boot complete" (property set on device).
_WAITFORBOOT_TIMEOUT = 300
def __init__(self, fast_and_loose=False):
"""Init an Emulator.
Args:
fast_and_loose: Loosen up the rules for reliable running for speed.
Intended for quick testing or re-testing.
"""
try:
android_sdk_root = os.environ['ANDROID_SDK_ROOT']
except KeyError:
logging.critical('The ANDROID_SDK_ROOT must be set to run the test on '
'emulator.')
raise
self.emulator = os.path.join(android_sdk_root, 'tools', 'emulator')
self.popen = None
self.device = None
self.fast_and_loose = fast_and_loose
def _DeviceName(self):
"""Return our device name."""
port = _GetAvailablePort()
return ('emulator-%d' % port, port)
def Launch(self):
"""Launches the emulator and waits for package manager to startup.
If fails, an exception will be raised.
"""
_KillAllEmulators() # just to be sure
if not self.fast_and_loose:
self._AggressiveImageCleanup()
(self.device, port) = self._DeviceName()
emulator_command = [
self.emulator,
# Speed up emulator launch by 40%. Really.
'-no-boot-anim',
# The default /data size is 64M.
# That's not enough for 4 unit test bundles and their data.
'-partition-size', '256',
# Use a familiar name and port.
'-avd', 'buildbot',
'-port', str(port)]
if not self.fast_and_loose:
emulator_command.extend([
# Wipe the data. We've seen cases where an emulator
# gets 'stuck' if we don't do this (every thousand runs or
# so).
'-wipe-data',
])
logging.info('Emulator launch command: %s', ' '.join(emulator_command))
self.popen = subprocess.Popen(args=emulator_command,
stderr=subprocess.STDOUT)
self._InstallKillHandler()
self._ConfirmLaunch()
def _AggressiveImageCleanup(self):
"""Aggressive cleanup of emulator images.
Experimentally it looks like our current emulator use on the bot
leaves image files around in /tmp/android-$USER. If a "random"
name gets reused, we choke with a 'File exists' error.
TODO(jrg): is there a less hacky way to accomplish the same goal?
"""
logging.info('Aggressive Image Cleanup')
emulator_imagedir = '/tmp/android-%s' % os.environ['USER']
if not os.path.exists(emulator_imagedir):
return
for image in os.listdir(emulator_imagedir):
full_name = os.path.join(emulator_imagedir, image)
if 'emulator' in full_name:
logging.info('Deleting emulator image %s', full_name)
os.unlink(full_name)
def _ConfirmLaunch(self, wait_for_boot=False):
"""Confirm the emulator launched properly.
Loop on a wait-for-device with a very small timeout. On each
timeout, check the emulator process is still alive.
After confirming a wait-for-device can be successful, make sure
it returns the right answer.
"""
a = android_commands.AndroidCommands(self.device, False)
seconds_waited = 0
number_of_waits = 2 # Make sure we can wfd twice
adb_cmd = "adb -s %s %s" % (self.device, 'wait-for-device')
while seconds_waited < self._LAUNCH_TIMEOUT:
try:
run_command.RunCommand(adb_cmd,
timeout_time=self._WAITFORDEVICE_TIMEOUT,
retry_count=1)
number_of_waits -= 1
if not number_of_waits:
break
except errors.WaitForResponseTimedOutError as e:
seconds_waited += self._WAITFORDEVICE_TIMEOUT
adb_cmd = "adb -s %s %s" % (self.device, 'kill-server')
run_command.RunCommand(adb_cmd)
self.popen.poll()
        if self.popen.returncode is not None:
raise EmulatorLaunchException('EMULATOR DIED')
if seconds_waited >= self._LAUNCH_TIMEOUT:
raise EmulatorLaunchException('TIMEOUT with wait-for-device')
logging.info('Seconds waited on wait-for-device: %d', seconds_waited)
if wait_for_boot:
# Now that we checked for obvious problems, wait for a boot complete.
# Waiting for the package manager is sometimes problematic.
# TODO(jrg): for reasons I don't understand, sometimes this
# gives an "error: device not found" which is only fixed with an
# 'adb kill-server' command. Fix.
a.Adb().SetTargetSerial(self.device)
a.Adb().WaitForBootComplete(self._WAITFORBOOT_TIMEOUT)
def Shutdown(self):
"""Shuts down the process started by launch."""
if self.popen:
self.popen.poll()
      if self.popen.returncode is None:
self.popen.kill()
self.popen = None
def _ShutdownOnSignal(self, signum, frame):
logging.critical('emulator _ShutdownOnSignal')
for sig in self._SIGNALS:
signal.signal(sig, signal.SIG_DFL)
self.Shutdown()
raise KeyboardInterrupt # print a stack
def _InstallKillHandler(self):
"""Install a handler to kill the emulator when we exit unexpectedly."""
for sig in self._SIGNALS:
signal.signal(sig, self._ShutdownOnSignal)
def main(argv):
  Emulator().Launch()
if __name__ == '__main__':
main(sys.argv)

@ -0,0 +1,155 @@
#!/bin/bash
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Sets up environment for building Chromium on Android. Only Android NDK,
# Revision 6b on Linux or Mac is officially supported.
#
# To run this script, the system environment ANDROID_NDK_ROOT must be set
# to Android NDK's root path.
#
# TODO(michaelbai): Develop a standard for NDK/SDK integration.
#
# If current path isn't the Chrome's src directory, CHROME_SRC must be set
# to the Chrome's src directory.
if [ ! -d "${ANDROID_NDK_ROOT}" ]; then
echo "ANDROID_NDK_ROOT must be set to the path of Android NDK, Revision 6b." \
>& 2
echo "which could be installed by" >& 2
echo "<chromium_tree>/src/build/install-build-deps-android.sh" >& 2
return 1
fi
if [ ! -d "${ANDROID_SDK_ROOT}" ]; then
echo "ANDROID_SDK_ROOT must be set to the path of Android SDK, Android 3.2." \
>& 2
echo "which could be installed by" >& 2
echo "<chromium_tree>/src/build/install-build-deps-android.sh" >& 2
return 1
fi
host_os=$(uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
case "${host_os}" in
"linux")
toolchain_dir="linux-x86"
;;
"mac")
toolchain_dir="darwin-x86"
;;
*)
echo "Host platform ${host_os} is not supported" >& 2
return 1
esac
export ANDROID_TOOLCHAIN="${ANDROID_NDK_ROOT}/toolchains/arm-linux-androideabi-4.4.3/prebuilt/${toolchain_dir}/bin/"
# Add Android SDK's platform-tools to system path.
export PATH="${PATH}:${ANDROID_SDK_ROOT}/platform-tools/"
if [ ! -d "${ANDROID_TOOLCHAIN}" ]; then
echo "Can not find Android toolchain in ${ANDROID_TOOLCHAIN}." >& 2
echo "The NDK version might be wrong." >& 2
return 1
fi
if [ -z "${CHROME_SRC}" ]; then
# if $CHROME_SRC was not set, assume current directory is CHROME_SRC.
export CHROME_SRC=$(pwd)
fi
if [ ! -d "${CHROME_SRC}" ]; then
echo "CHROME_SRC must be set to the path of Chrome source code." >& 2
return 1
fi
make() {
# TODO(michaelbai): how to use ccache in NDK.
if [ -n "${USE_CCACHE}" ]; then
if [ -e "${PREBUILT_CCACHE_PATH}" ]; then
use_ccache_var="$PREBUILT_CCACHE_PATH "
else
use_ccache_var=""
fi
fi
# Only cross-compile if the build is being done either from Chromium's src/
# directory, or through WebKit, in which case the WEBKIT_ANDROID_BUILD
# environment variable will be defined. WebKit uses a different directory.
if [ -f "$PWD/build/android/envsetup.sh" ] ||
[ -n "${WEBKIT_ANDROID_BUILD}" ]; then
CC="${use_ccache_var}${CROSS_CC}" CXX="${use_ccache_var}${CROSS_CXX}" \
LINK="${CROSS_LINK}" AR="${CROSS_AR}" RANLIB="${CROSS_RANLIB}" \
command make $*
else
command make $*
fi
}
# Performs a gyp_chromium run to convert gyp->Makefile for android code.
android_gyp() {
"${CHROME_SRC}/build/gyp_chromium" --depth="${CHROME_SRC}"
}
firstword() {
echo "${1}"
}
export CROSS_AR="$(firstword "${ANDROID_TOOLCHAIN}"/*-ar)"
export CROSS_CC="$(firstword "${ANDROID_TOOLCHAIN}"/*-gcc)"
export CROSS_CXX="$(firstword "${ANDROID_TOOLCHAIN}"/*-g++)"
export CROSS_LINK="$(firstword "${ANDROID_TOOLCHAIN}"/*-gcc)"
export CROSS_RANLIB="$(firstword "${ANDROID_TOOLCHAIN}"/*-ranlib)"
export OBJCOPY="$(firstword "${ANDROID_TOOLCHAIN}"/*-objcopy)"
export STRIP="$(firstword "${ANDROID_TOOLCHAIN}"/*-strip)"
# The set of GYP_DEFINES to pass to gyp. Use 'readlink -e' on directories
# to canonicalize them (remove double '/', remove trailing '/', etc).
DEFINES="OS=android"
DEFINES+=" android_build_type=0" # Currently, Only '0' is supportted.
DEFINES+=" host_os=${host_os}"
DEFINES+=" linux_fpic=1"
DEFINES+=" release_optimize=s"
DEFINES+=" linux_use_tcmalloc=0"
DEFINES+=" disable_nacl=1"
DEFINES+=" remoting=0"
DEFINES+=" p2p_apis=0"
DEFINES+=" enable_touch_events=1"
DEFINES+=" build_ffmpegsumo=0"
# TODO(bulach): use "shared_libraries" once the transition from executable
# is over.
DEFINES+=" gtest_target_type=executable"
DEFINES+=" branding=Chromium"
# If the TARGET_PRODUCT wasn't set, use 'full' by default.
if [ -z "${TARGET_PRODUCT}" ]; then
TARGET_PRODUCT="full"
fi
# The following defines will affect ARM code generation of both C/C++ compiler
# and V8 mksnapshot.
case "${TARGET_PRODUCT}" in
"full")
DEFINES+=" target_arch=arm"
DEFINES+=" arm_neon=0 armv7=1 arm_thumb=1 arm_fpu=vfpv3-d16"
;;
*x86*)
DEFINES+=" target_arch=ia32 use_libffmpeg=0"
;;
*)
echo "TARGET_PRODUCT: ${TARGET_PRODUCT} is not supported." >& 2
return 1
esac
export GYP_DEFINES="${DEFINES}"
# Use the "android" flavor of the Makefile generator for both Linux and OS X.
export GYP_GENERATORS="make-android"
# Use our All target as the default
export GYP_GENERATOR_FLAGS="${GYP_GENERATOR_FLAGS} default_target=All"
# We want to use our version of "all" targets.
export CHROMIUM_GYP_FILE="${CHROME_SRC}/build/all_android.gyp"

@ -0,0 +1,49 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Location where chrome reads command line flags from
CHROME_COMMAND_FILE = '/data/local/chrome-command-line'
class FlagChanger(object):
"""Temporarily changes the flags Chrome runs with."""
def __init__(self, android_cmd):
self._android_cmd = android_cmd
self._old_flags = None
def Set(self, flags, append=False):
"""Sets the command line flags used when chrome is started.
Args:
flags: A list of flags to set, eg. ['--single-process'].
append: Whether to append to existing flags or overwrite them.
"""
if flags:
assert flags[0] != 'chrome'
if not self._old_flags:
self._old_flags = self._android_cmd.GetFileContents(CHROME_COMMAND_FILE)
if self._old_flags:
self._old_flags = self._old_flags[0].strip()
if append and self._old_flags:
# Avoid appending flags that are already present.
new_flags = filter(lambda flag: self._old_flags.find(flag) == -1, flags)
self._android_cmd.SetFileContents(CHROME_COMMAND_FILE,
self._old_flags + ' ' +
' '.join(new_flags))
else:
self._android_cmd.SetFileContents(CHROME_COMMAND_FILE,
'chrome ' + ' '.join(flags))
def Restore(self):
"""Restores the flags to their original state."""
    if self._old_flags is None:
return # Set() was never called.
elif self._old_flags:
self._android_cmd.SetFileContents(CHROME_COMMAND_FILE, self._old_flags)
else:
self._android_cmd.RunShellCommand('rm ' + CHROME_COMMAND_FILE)
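A typical lifecycle, assuming an attached device reachable through
AndroidCommands (the device id below is hypothetical):

import android_commands
from flag_changer import FlagChanger

flags = FlagChanger(android_commands.AndroidCommands(device='emulator-5554'))
flags.Set(['--single-process'])  # writes 'chrome --single-process'
# ... run the test ...
flags.Restore()                  # puts back (or removes) the original contents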

@ -0,0 +1,15 @@
# List of suppressions
#
# Automatically generated by run_tests.py
RTLTest.GetTextDirection
ReadOnlyFileUtilTest.ContentsEqual
ReadOnlyFileUtilTest.TextContentsEqual
SharedMemoryTest.OpenExclusive
StackTrace.DebugPrintBacktrace
VerifyPathControlledByUserTest.Symlinks
PathServiceTest.Get
SharedMemoryTest.OpenClose
StringPrintfTest.StringPrintfMisc
StringPrintfTest.StringAppendfString
StringPrintfTest.StringAppendfInt
StringPrintfTest.StringPrintfBounds

@ -0,0 +1,10 @@
# Additional list of suppressions from emulator
#
# Automatically generated by run_tests.py
PathServiceTest.Get
SharedMemoryTest.OpenClose
StringPrintfTest.StringAppendfInt
StringPrintfTest.StringAppendfString
StringPrintfTest.StringPrintfBounds
StringPrintfTest.StringPrintfMisc
VerifyPathControlledByUserTest.Symlinks

@ -0,0 +1,2 @@
# Times out
IPCSyncChannelTest.ChattyServer

@ -0,0 +1,55 @@
# List of suppressions.
# Generated by hand to get net_unittests running initially.
# Likely too aggressive disabling.
CertVerifierTest.*
CookieMonsterTest.*
DirectoryListerTest.*
DiskCacheTest.*
DiskCacheBackendTest.*
DnsConfigServiceTest.*
DnsRRResolverTest.Resolve
GZipUnitTest.*
HostResolverImplTest.*
NetUtilTest.GetNetworkList
ProxyResolverV8Test.*
TransportSecurityStateTest.ParseSidePins*
X509CertificateTest.*
X509CertificateParseTest.*
FtpDirectoryListingParserWindowsTest.Good
HttpNetworkTransactionTest.SOCKS4_HTTP_GET
HttpNetworkTransactionTest.SOCKS4_SSL_GET
HttpNetworkTransactionTest.UploadUnreadableFile
HttpNetworkTransactionTest.UnreadableUploadFileAfterAuthRestart
ProxyResolverJSBindingsTest.MyIpAddress
ProxyScriptFetcherImplTest.*
SOCKSClientSocketTest.*
SSLClientSocketTest.*
PythonUtils.PythonRunTime
URLRequestTestHTTP.*
HTTPSRequestTest.HTTPSMismatchedTest
HTTPSRequestTest.HTTPSExpiredTest
HTTPSRequestTest.HTTPSPreloadedHSTSTest
HTTPSRequestTest.ClientAuthTest
URLRequestTest.FileTest
URLRequestTest.FileDirRedirectNoCrash
URLRequestTest.DelayedCookieCallback
URLRequestTest.DoNotSendCookies
URLRequestTest.DoNotSaveCookies
URLRequestTest.DoNotSendCookies_ViaPolicy
URLRequestTest.DoNotSaveCookies_ViaPolicy
URLRequestTest.DoNotSaveEmptyCookies
URLRequestTest.DoNotSendCookies_ViaPolicy_Async
URLRequestTest.DoNotSaveCookies_ViaPolicy_Async
URLRequestTest.CookiePolicy_ForceSession
URLRequestTest.DoNotOverrideReferrer
WebSocketJobTest.ThrottlingWebSocket
WebSocketJobTest.ThrottlingWebSocketSpdyEnabled
WebSocketJobTest.ThrottlingSpdy
WebSocketJobTest.ThrottlingSpdySpdyEnabled
X509CertificateWeakDigestTest.*
FtpDirectoryListingParserTest.*
*/X509CertificateWeakDigestTest.*
TransportSecurityStateTest.BogusPinsHeaders
TransportSecurityStateTest.ValidPinsHeaders
HTTPSRequestTest.ResumeTest
HTTPSRequestTest.SSLSessionCacheShardTest

@ -0,0 +1,234 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a convenient wrapper for spawning a test lighttpd instance.
Usage:
lighttpd_server PATH_TO_DOC_ROOT
"""
import codecs
import contextlib
import httplib
import os
import pexpect
import random
import shutil
import socket
import sys
import tempfile
class LighttpdServer(object):
"""Wraps lighttpd server, providing robust startup.
Args:
document_root: Path to root of this server's hosted files.
port: TCP port on the _host_ machine that the server will listen on. If
      omitted it will attempt to use 9000, or if unavailable it will find
      a free port from 8005 - 8999.
lighttpd_path, lighttpd_module_path: Optional paths to lighttpd binaries.
base_config_path: If supplied this file will replace the built-in default
lighttpd config file.
extra_config_contents: If specified, this string will be appended to the
base config (default built-in, or from base_config_path).
config_path, error_log, access_log: Optional paths where the class should
      place temporary files for this session.
"""
def __init__(self, document_root, port=None,
lighttpd_path=None, lighttpd_module_path=None,
base_config_path=None, extra_config_contents=None,
config_path=None, error_log=None, access_log=None):
self.temp_dir = tempfile.mkdtemp(prefix='lighttpd_for_chrome_android')
self.document_root = os.path.abspath(document_root)
self.fixed_port = port
self.port = port or 9000
self.server_tag = 'LightTPD ' + str(random.randint(111111, 999999))
self.lighttpd_path = lighttpd_path or '/usr/sbin/lighttpd'
self.lighttpd_module_path = lighttpd_module_path or '/usr/lib/lighttpd'
self.base_config_path = base_config_path
self.extra_config_contents = extra_config_contents
self.config_path = config_path or self._Mktmp('config')
self.error_log = error_log or self._Mktmp('error_log')
self.access_log = access_log or self._Mktmp('access_log')
self.pid_file = self._Mktmp('pid_file')
self.process = None
def _Mktmp(self, name):
return os.path.join(self.temp_dir, name)
def _GetRandomPort(self):
# Ports 8001-8004 are reserved for other test servers. Ensure we don't
# collide with them.
return random.randint(8005, 8999)
def StartupHttpServer(self):
"""Starts up a http server with specified document root and port."""
    # Currently we use lighttpd as the HTTP server in tests.
while True:
if self.base_config_path:
# Read the config
with codecs.open(self.base_config_path, 'r', 'utf-8') as f:
config_contents = f.read()
else:
config_contents = self._GetDefaultBaseConfig()
if self.extra_config_contents:
config_contents += self.extra_config_contents
# Write out the config, filling in placeholders from the members of |self|
with codecs.open(self.config_path, 'w', 'utf-8') as f:
f.write(config_contents % self.__dict__)
if (not os.path.exists(self.lighttpd_path) or
not os.access(self.lighttpd_path, os.X_OK)):
raise EnvironmentError(
'Could not find lighttpd at %s.\n'
'It may need to be installed (e.g. sudo apt-get install lighttpd)'
% self.lighttpd_path)
self.process = pexpect.spawn(self.lighttpd_path,
['-D', '-f', self.config_path,
'-m', self.lighttpd_module_path],
cwd=self.temp_dir)
client_error, server_error = self._TestServerConnection()
if not client_error:
assert int(open(self.pid_file, 'r').read()) == self.process.pid
break
self.process.close()
      if self.fixed_port or 'in use' not in server_error:
print 'Client error:', client_error
print 'Server error:', server_error
return False
self.port = self._GetRandomPort()
return True
def ShutdownHttpServer(self):
"""Shuts down our lighttpd processes."""
if self.process:
self.process.terminate()
shutil.rmtree(self.temp_dir, ignore_errors=True)
def _TestServerConnection(self):
# Wait for server to start
server_msg = ''
for timeout in xrange(1, 5):
client_error = None
try:
with contextlib.closing(httplib.HTTPConnection(
'127.0.0.1', self.port, timeout=timeout)) as http:
http.set_debuglevel(timeout > 3)
http.request('HEAD', '/')
r = http.getresponse()
r.read()
if (r.status == 200 and r.reason == 'OK' and
r.getheader('Server') == self.server_tag):
return (None, server_msg)
client_error = ('Bad response: %s %s version %s\n ' %
(r.status, r.reason, r.version) +
'\n '.join([': '.join(h) for h in r.getheaders()]))
except (httplib.HTTPException, socket.error) as client_error:
pass # Probably too quick connecting: try again
# Check for server startup error messages
ix = self.process.expect([pexpect.TIMEOUT, pexpect.EOF, '.+'],
timeout=timeout)
if ix == 2: # stdout spew from the server
server_msg += self.process.match.group(0)
      elif ix == 1:  # EOF -- server has quit, so give up.
client_error = client_error or 'Server exited'
break
return (client_error or 'Timeout', server_msg)
def _GetDefaultBaseConfig(self):
return """server.tag = "%(server_tag)s"
server.modules = ( "mod_access",
"mod_accesslog",
"mod_alias",
"mod_cgi",
"mod_rewrite" )
# default document root required
#server.document-root = "."
# files to check for if .../ is requested
index-file.names = ( "index.php", "index.pl", "index.cgi",
"index.html", "index.htm", "default.htm" )
# mimetype mapping
mimetype.assign = (
".gif" => "image/gif",
".jpg" => "image/jpeg",
".jpeg" => "image/jpeg",
".png" => "image/png",
".svg" => "image/svg+xml",
".css" => "text/css",
".html" => "text/html",
".htm" => "text/html",
".xhtml" => "application/xhtml+xml",
".xhtmlmp" => "application/vnd.wap.xhtml+xml",
".js" => "application/x-javascript",
".log" => "text/plain",
".conf" => "text/plain",
".text" => "text/plain",
".txt" => "text/plain",
".dtd" => "text/xml",
".xml" => "text/xml",
".manifest" => "text/cache-manifest",
)
# Use the "Content-Type" extended attribute to obtain mime type if possible
mimetype.use-xattr = "enable"
##
# which extensions should not be handled via static-file transfer
#
# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi
static-file.exclude-extensions = ( ".php", ".pl", ".cgi" )
server.bind = "127.0.0.1"
server.port = %(port)s
## virtual directory listings
dir-listing.activate = "enable"
#dir-listing.encoding = "iso-8859-2"
#dir-listing.external-css = "style/oldstyle.css"
## enable debugging
#debug.log-request-header = "enable"
#debug.log-response-header = "enable"
#debug.log-request-handling = "enable"
#debug.log-file-not-found = "enable"
#### SSL engine
#ssl.engine = "enable"
#ssl.pemfile = "server.pem"
# Autogenerated test-specific config follows.
cgi.assign = ( ".cgi" => "/usr/bin/env",
".pl" => "/usr/bin/env",
".asis" => "/bin/cat",
".php" => "/usr/bin/php-cgi" )
server.errorlog = "%(error_log)s"
accesslog.filename = "%(access_log)s"
server.upload-dirs = ( "/tmp" )
server.pid-file = "%(pid_file)s"
server.document-root = "%(document_root)s"
"""
def main(argv):
server = LighttpdServer(*argv[1:])
try:
if server.StartupHttpServer():
raw_input('Server running at http://127.0.0.1:%s -'
' press Enter to exit it.' % server.port)
else:
print 'Server exit code:', server.process.exitstatus
finally:
server.ShutdownHttpServer()
if __name__ == '__main__':
sys.exit(main(sys.argv))
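The placeholder expansion in StartupHttpServer is plain Python %-formatting
against the instance's __dict__, which is why the default config above is full
of %(port)s-style keys. A reduced, self-contained illustration:

class Cfg(object):
  def __init__(self):
    self.port = 9000
    self.document_root = '/tmp/www'

template = 'server.port = %(port)s\nserver.document-root = "%(document_root)s"\n'
print template % Cfg().__dict__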

@ -0,0 +1,56 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
def _EscapePerfResult(s):
"""Escapes |s| for use in a perf result."""
# Colons (:) and equal signs (=) are not allowed, and we chose an arbitrary
# limit of 40 chars.
return re.sub(':|=', '_', s[:40])
def PrintPerfResult(measurement, trace, values, units, important=True,
print_to_stdout=True):
"""Prints numerical data to stdout in the format required by perf tests.
The string args may be empty but they must not contain any colons (:) or
equals signs (=).
Args:
measurement: A description of the quantity being measured, e.g. "vm_peak".
trace: A description of the particular data point, e.g. "reference".
values: A list of numeric measured values.
units: A description of the units of measure, e.g. "bytes".
important: If True, the output line will be specially marked, to notify the
post-processor.
Returns:
    String of the formatted perf result.
"""
important_marker = '*' if important else ''
assert isinstance(values, list)
assert len(values)
assert '/' not in measurement
avg = None
if len(values) > 1:
try:
value = '[%s]' % ','.join([str(v) for v in values])
avg = sum([float(v) for v in values]) / len(values)
except ValueError:
value = ", ".join(values)
else:
value = values[0]
output = '%sRESULT %s: %s= %s %s' % (important_marker,
_EscapePerfResult(measurement),
_EscapePerfResult(trace),
value, units)
  if avg is not None:  # an average of 0 is still worth reporting
output += '\nAvg %s: %d%s' % (measurement, avg, units)
if print_to_stdout:
print output
return output
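So a multi-value measurement yields one RESULT line plus an Avg line. Assuming
this module is importable as perf_tests_helper:

from perf_tests_helper import PrintPerfResult

PrintPerfResult('vm_peak', 'reference', [1024, 2048], 'bytes')
# Prints:
# *RESULT vm_peak: reference= [1024,2048] bytes
# Avg vm_peak: 1536bytes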

@ -0,0 +1,362 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all the native unit tests.
1. Copy over test binary to /data/local on device.
2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
to be deployed to the device (in /data/local/tmp).
3. Environment:
3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named:
/data/local/tmp/chrome/test/data
   3.2. page_cycler_tests have the following requirements:
3.2.1 the following data on host:
<chrome_src_dir>/tools/page_cycler
<chrome_src_dir>/data/page_cycler
3.2.2. two data directories to store above test data on device named:
/data/local/tmp/tools/ (for database perf test)
/data/local/tmp/data/ (for other perf tests)
   3.2.3. an http server to serve http perf tests.
The http root is host's <chrome_src_dir>/data/page_cycler/, port 8000.
3.2.4 a tool named forwarder is also required to run on device to
forward the http request/response between host and device.
3.2.5 Chrome is installed on device.
4. Run the binary in the device and stream the log to the host.
4.1. Optionally, filter specific tests.
4.2. Optionally, rebaseline: run the available tests and update the
suppressions file for failures.
4.3. If we're running a single test suite and we have multiple devices
connected, we'll shard the tests.
5. Clean up the device.
Suppressions:
  Individual tests in a test binary can be suppressed by listing them in
the gtest_filter directory in a file of the same name as the test binary,
one test per line. Here is an example:
$ cat gtest_filter/base_unittests_disabled
DataPackTest.Load
ReadOnlyFileUtilTest.ContentsEqual
This file is generated by the tests running on devices. If running on an
emulator, an additional filter file listing the tests that fail only on the
emulator will be loaded. We don't care about the rare test cases that succeed
on the emulator but fail on a device.
"""
import logging
import os
import re
import signal
import subprocess
import sys
import time
import android_commands
import cmd_helper
import debug_info
import emulator
import run_tests_helper
from single_test_runner import SingleTestRunner
from test_package_executable import TestPackageExecutable
from test_result import BaseTestResult, TestResults
_TEST_SUITES = ['base_unittests', 'sql_unittests', 'ipc_tests', 'net_unittests']
def FullyQualifiedTestSuites():
"""Return a fully qualified list that represents all known suites."""
# If not specified, assume the test suites are in out/Release
test_suite_dir = os.path.abspath(os.path.join(run_tests_helper.CHROME_DIR,
'out', 'Release'))
return [os.path.join(test_suite_dir, t) for t in _TEST_SUITES]
class TimeProfile(object):
"""Class for simple profiling of action, with logging of cost."""
def __init__(self, description):
self._description = description
self.Start()
def Start(self):
self._starttime = time.time()
def Stop(self):
"""Stop profiling and dump a log."""
if self._starttime:
stoptime = time.time()
logging.info('%fsec to perform %s' %
(stoptime - self._starttime, self._description))
self._starttime = None
class Xvfb(object):
"""Class to start and stop Xvfb if relevant. Nop if not Linux."""
def __init__(self):
self._pid = 0
def _IsLinux(self):
"""Return True if on Linux; else False."""
return sys.platform.startswith('linux')
def Start(self):
"""Start Xvfb and set an appropriate DISPLAY environment. Linux only.
Copied from tools/code_coverage/coverage_posix.py
"""
if not self._IsLinux():
return
proc = subprocess.Popen(["Xvfb", ":9", "-screen", "0", "1024x768x24",
"-ac"],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
self._pid = proc.pid
if not self._pid:
raise Exception('Could not start Xvfb')
os.environ['DISPLAY'] = ":9"
# Now confirm, giving a chance for it to start if needed.
for test in range(10):
proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
pid, retcode = os.waitpid(proc.pid, 0)
if retcode == 0:
break
time.sleep(0.25)
if retcode != 0:
raise Exception('Could not confirm Xvfb happiness')
def Stop(self):
"""Stop Xvfb if needed. Linux only."""
if self._pid:
try:
os.kill(self._pid, signal.SIGKILL)
except:
pass
del os.environ['DISPLAY']
self._pid = 0
def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
timeout, performance_test, cleanup_test_files, tool,
log_dump_name, fast_and_loose=False, annotate=False):
"""Runs the tests.
Args:
device: Device to run the tests.
test_suite: A specific test suite to run, empty to run all.
gtest_filter: A gtest_filter flag.
test_arguments: Additional arguments to pass to the test binary.
rebaseline: Whether or not to run tests in isolation and update the filter.
timeout: Timeout for each test.
    performance_test: Whether or not to run performance tests.
cleanup_test_files: Whether or not to cleanup test files on device.
tool: Name of the Valgrind tool.
log_dump_name: Name of log dump file.
fast_and_loose: should we go extra-fast but sacrifice stability
and/or correctness? Intended for quick cycle testing; not for bots!
annotate: should we print buildbot-style annotations?
Returns:
A TestResults object.
"""
results = []
if test_suite:
global _TEST_SUITES
if not os.path.exists(test_suite):
logging.critical('Unrecognized test suite %s, supported: %s' %
(test_suite, _TEST_SUITES))
if test_suite in _TEST_SUITES:
logging.critical('(Remember to include the path: out/Release/%s)',
test_suite)
return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')])
fully_qualified_test_suites = [test_suite]
else:
fully_qualified_test_suites = FullyQualifiedTestSuites()
debug_info_list = []
print 'Known suites: ' + str(_TEST_SUITES)
print 'Running these: ' + str(fully_qualified_test_suites)
for t in fully_qualified_test_suites:
if annotate:
print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(t)
test = SingleTestRunner(device, t, gtest_filter, test_arguments,
timeout, rebaseline, performance_test,
cleanup_test_files, tool, not not log_dump_name,
fast_and_loose=fast_and_loose)
test.RunTests()
results += [test.test_results]
# Collect debug info.
debug_info_list += [test.dump_debug_info]
if rebaseline:
test.UpdateFilter(test.test_results.failed)
elif test.test_results.failed:
test.test_results.LogFull()
# Zip all debug info outputs into a file named by log_dump_name.
debug_info.GTestDebugInfo.ZipAndCleanResults(
os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release',
'debug_info_dumps'),
log_dump_name, [d for d in debug_info_list if d])
if annotate:
if test.test_results.timed_out:
print '@@@STEP_WARNINGS@@@'
elif test.test_results.failed:
print '@@@STEP_FAILURE@@@'
else:
print 'Step success!' # No annotation needed
return TestResults.FromTestResults(results)
def _RunATestSuite(options):
"""Run a single test suite.
Helper for Dispatch() to allow stop/restart of the emulator across
test bundles. If using the emulator, we start it on entry and stop
it on exit.
Args:
options: options for running the tests.
Returns:
0 if successful, number of failing tests otherwise.
"""
attached_devices = []
buildbot_emulator = None
if options.use_emulator:
t = TimeProfile('Emulator launch')
buildbot_emulator = emulator.Emulator(options.fast_and_loose)
buildbot_emulator.Launch()
t.Stop()
attached_devices.append(buildbot_emulator.device)
else:
attached_devices = android_commands.GetAttachedDevices()
if not attached_devices:
logging.critical('A device must be attached and online.')
return 1
test_results = RunTests(attached_devices[0], options.test_suite,
options.gtest_filter, options.test_arguments,
options.rebaseline, options.timeout,
options.performance_test,
options.cleanup_test_files, options.tool,
options.log_dump,
fast_and_loose=options.fast_and_loose,
annotate=options.annotate)
if buildbot_emulator:
buildbot_emulator.Shutdown()
  # Another chance if we timed out? At this point it is safe(r) to
# run fast and loose since we just uploaded all the test data and
# binary.
if test_results.timed_out and options.repeat:
logging.critical('Timed out; repeating in fast_and_loose mode.')
options.fast_and_loose = True
options.repeat = options.repeat - 1
logging.critical('Repeats left: ' + str(options.repeat))
return _RunATestSuite(options)
return len(test_results.failed)
def Dispatch(options):
"""Dispatches the tests, sharding if possible.
If options.use_emulator is True, all tests will be run in a new emulator
instance.
Args:
options: options for running the tests.
Returns:
0 if successful, number of failing tests otherwise.
"""
if options.test_suite == 'help':
ListTestSuites()
return 0
if options.use_xvfb:
xvfb = Xvfb()
xvfb.Start()
if options.test_suite:
all_test_suites = [options.test_suite]
else:
all_test_suites = FullyQualifiedTestSuites()
failures = 0
for suite in all_test_suites:
options.test_suite = suite
failures += _RunATestSuite(options)
if options.use_xvfb:
xvfb.Stop()
return failures
def ListTestSuites():
"""Display a list of available test suites
"""
print 'Available test suites are:'
for test_suite in _TEST_SUITES:
print test_suite
def main(argv):
option_parser = run_tests_helper.CreateTestRunnerOptionParser(None,
default_timeout=0)
option_parser.add_option('-s', '--suite', dest='test_suite',
help='Executable name of the test suite to run '
'(use -s help to list them)')
option_parser.add_option('-r', dest='rebaseline',
help='Rebaseline and update *testsuite_disabled',
action='store_true',
default=False)
option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
help='gtest filter')
option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
help='Additional arguments to pass to the test')
option_parser.add_option('-p', dest='performance_test',
help='Indicator of performance test',
action='store_true',
default=False)
option_parser.add_option('-L', dest='log_dump',
                           help='file name of the log dump, which will be put '
                                'in the subfolder debug_info_dumps under the '
                                'directory where the test suite exists.')
option_parser.add_option('-e', '--emulator', dest='use_emulator',
help='Run tests in a new instance of emulator',
action='store_true',
default=False)
option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
action='store_true', default=False,
help='Use Xvfb around tests (ignored if not Linux)')
option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose',
action='store_true', default=False,
help='Go faster (but be less stable), '
'for quick testing. Example: when tracking down '
'tests that hang to add to the disabled list, '
'there is no need to redeploy the test binary '
'or data to the device again. '
'Don\'t use on bots by default!')
option_parser.add_option('--repeat', dest='repeat', type='int',
default=2,
help='Repeat count on test timeout')
option_parser.add_option('--annotate', default=True,
help='Print buildbot-style annotate messages '
'for each test suite. Default=True')
options, args = option_parser.parse_args(argv)
if len(args) > 1:
print 'Unknown argument:', args[1:]
option_parser.print_usage()
sys.exit(1)
run_tests_helper.SetLogLevel(options.verbose_count)
return Dispatch(options)
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@ -0,0 +1,133 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions common to native test runners."""
import logging
import optparse
import os
import subprocess
import sys
# TODO(michaelbai): Move constant definitions like below to a common file.
FORWARDER_PATH = '/data/local/tmp/forwarder'
CHROME_DIR = os.path.abspath(os.path.join(sys.path[0], '..', '..'))
def IsRunningAsBuildbot():
"""Returns True if we are currently running on buildbot; False otherwise."""
return bool(os.getenv('BUILDBOT_BUILDERNAME'))
def ReportBuildbotLink(label, url):
"""Adds a link with name |label| linking to |url| to current buildbot step.
Args:
label: A string with the name of the label.
url: A string of the URL.
"""
if IsRunningAsBuildbot():
print '@@@STEP_LINK@%s@%s@@@' % (label, url)
def ReportBuildbotMsg(msg):
"""Appends |msg| to the current buildbot step text.
Args:
msg: String to be appended.
"""
if IsRunningAsBuildbot():
print '@@@STEP_TEXT@%s@@@' % msg
def ReportBuildbotError():
"""Marks the current step as failed."""
if IsRunningAsBuildbot():
print '@@@STEP_FAILURE@@@'
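# A minimal, hedged sketch of how the annotator helpers above compose around
# a test step (the wrapper function and its arguments are illustrative, not
# part of this module):
#
#   def _AnnotateSuiteResult(suite_name, failures, log_url):
#     ReportBuildbotMsg('%s: %d failures' % (suite_name, failures))
#     ReportBuildbotLink('log', log_url)
#     if failures:
#       ReportBuildbotError()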
def GetExpectations(file_name):
"""Returns a list of test names in the |file_name| test expectations file."""
if not file_name or not os.path.exists(file_name):
return []
return [x for x in [x.strip() for x in file(file_name).readlines()]
if x and x[0] != '#']
def SetLogLevel(verbose_count):
"""Sets log level as |verbose_count|."""
log_level = logging.WARNING # Default.
if verbose_count == 1:
log_level = logging.INFO
elif verbose_count >= 2:
log_level = logging.DEBUG
logging.getLogger().setLevel(log_level)
def CreateTestRunnerOptionParser(usage=None, default_timeout=60):
"""Returns a new OptionParser with arguments applicable to all tests."""
option_parser = optparse.OptionParser(usage=usage)
option_parser.add_option('-t', dest='timeout',
help='Timeout to wait for each test',
type='int',
default=default_timeout)
option_parser.add_option('-c', dest='cleanup_test_files',
help='Cleanup test files on the device after run',
action='store_true',
default=False)
option_parser.add_option('-v',
'--verbose',
dest='verbose_count',
default=0,
action='count',
help='Verbose level (multiple times for more)')
option_parser.add_option('--tool',
dest='tool',
help='Run the test under a tool '
'(use --tool help to list them)')
return option_parser
def ForwardDevicePorts(adb, ports, host_name='127.0.0.1'):
"""Forwards a TCP port on the device back to the host.
Works like adb forward, but in reverse.
Args:
adb: Instance of AndroidCommands for talking to the device.
ports: A list of tuples (device_port, host_port) to forward.
host_name: Optional. Address to forward to, must be addressable from the
host machine. Usually this is omitted and loopback is used.
Returns:
subprocess instance connected to the forwarder process on the device.
"""
adb.PushIfNeeded(
os.path.join(CHROME_DIR, 'out', 'Release', 'forwarder'), FORWARDER_PATH)
forward_string = ['%d:%d:%s' %
(device, host, host_name) for device, host in ports]
logging.info("Forwarding ports: %s" % (forward_string))
return subprocess.Popen(
['adb', '-s', adb._adb.GetSerialNumber(),
'shell', '%s -D %s' % (FORWARDER_PATH, ' '.join(forward_string))])
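# For reference, a hedged example of the forwarder invocation built above:
# with ports=[(8000, 8080)] and the default host_name, the spawned command is
# roughly (device serial illustrative):
#
#   adb -s emulator-5554 shell /data/local/tmp/forwarder -D 8000:8080:127.0.0.1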
def IsDevicePortUsed(adb, device_port):
"""Checks whether the specified device port is used or not.
Args:
adb: Instance of AndroidCommands for talking to the device.
device_port: Port on device we want to check.
Returns:
True if the port on device is already used, otherwise returns False.
"""
base_url = '127.0.0.1:%d' % device_port
netstat_results = adb.RunShellCommand('netstat')
for single_connect in netstat_results:
# Column 3 is the local address which we want to check with.
if single_connect.split()[3] == base_url:
return True
return False
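# A hedged example of the netstat parsing above: for an output line such as
#
#   tcp    0    0 127.0.0.1:5000    0.0.0.0:*    LISTEN
#
# split()[3] is '127.0.0.1:5000', so IsDevicePortUsed(adb, 5000) returns True.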

View File

@ -0,0 +1,316 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
from base_test_runner import BaseTestRunner
import debug_info
import run_tests_helper
from test_package_executable import TestPackageExecutable
from test_result import TestResults
class SingleTestRunner(BaseTestRunner):
"""Single test suite attached to a single device.
Args:
device: Device to run the tests.
test_suite: A specific test suite to run, empty to run all.
gtest_filter: A gtest_filter flag.
test_arguments: Additional arguments to pass to the test binary.
timeout: Timeout for each test.
rebaseline: Whether or not to run tests in isolation and update the filter.
performance_test: Whether or not performance test(s).
cleanup_test_files: Whether or not to cleanup test files on device.
tool: Name of the Valgrind tool.
dump_debug_info: Whether or not to dump debug information.
"""
def __init__(self, device, test_suite, gtest_filter, test_arguments, timeout,
rebaseline, performance_test, cleanup_test_files, tool,
dump_debug_info=False,
fast_and_loose=False):
BaseTestRunner.__init__(self, device)
self._running_on_emulator = self.device.startswith('emulator')
self._gtest_filter = gtest_filter
self._test_arguments = test_arguments
self.test_results = TestResults()
if dump_debug_info:
self.dump_debug_info = debug_info.GTestDebugInfo(self.adb, device,
os.path.basename(test_suite), gtest_filter)
else:
self.dump_debug_info = None
self.fast_and_loose = fast_and_loose
self.test_package = TestPackageExecutable(self.adb, device,
test_suite, timeout, rebaseline, performance_test, cleanup_test_files,
tool, self.dump_debug_info)
def _GetHttpServerDocumentRootForTestSuite(self):
"""Returns the document root needed by the test suite."""
if self.test_package.test_suite_basename == 'page_cycler_tests':
return os.path.join(run_tests_helper.CHROME_DIR, 'data', 'page_cycler')
return None
def _TestSuiteRequiresMockTestServer(self):
"""Returns True if the test suite requires mock test server."""
return False
# TODO(yfriedman): Disabled because of flakiness.
# (self.test_package.test_suite_basename == 'unit_tests' or
# self.test_package.test_suite_basename == 'net_unittests' or
# False)
def _GetFilterFileName(self):
"""Returns the filename of gtest filter."""
filter_dir = os.path.join(sys.path[0], 'gtest_filter')
filter_name = self.test_package.test_suite_basename + '_disabled'
disabled_filter = os.path.join(filter_dir, filter_name)
return disabled_filter
def _GetAdditionalEmulatorFilterName(self):
"""Returns the filename of additional gtest filter for emulator."""
filter_dir = os.path.join(sys.path[0], 'gtest_filter')
filter_name = '%s%s' % (self.test_package.test_suite_basename,
'_emulator_additional_disabled')
disabled_filter = os.path.join(filter_dir, filter_name)
return disabled_filter
def GetDisabledTests(self):
"""Returns a list of disabled tests.
Returns:
A list of disabled tests obtained from gtest_filter/test_suite_disabled.
"""
disabled_tests = run_tests_helper.GetExpectations(self._GetFilterFileName())
if self._running_on_emulator:
# Append emulator's filter file.
disabled_tests.extend(run_tests_helper.GetExpectations(
self._GetAdditionalEmulatorFilterName()))
return disabled_tests
def UpdateFilter(self, failed_tests):
"""Updates test_suite_disabled file with the new filter (deletes if empty).
If running in Emulator, only the failed tests which are not in the normal
filter returned by _GetFilterFileName() are written to emulator's
additional filter file.
Args:
failed_tests: A sorted list of failed tests.
"""
disabled_tests = []
if not self._running_on_emulator:
filter_file_name = self._GetFilterFileName()
else:
filter_file_name = self._GetAdditionalEmulatorFilterName()
disabled_tests.extend(
run_tests_helper.GetExpectations(self._GetFilterFileName()))
      logging.info('About to update emulator\'s additional filter (%s).'
                   % filter_file_name)
new_failed_tests = []
if failed_tests:
for test in failed_tests:
if test.name not in disabled_tests:
new_failed_tests.append(test.name)
if not new_failed_tests:
if os.path.exists(filter_file_name):
os.unlink(filter_file_name)
return
filter_file = file(filter_file_name, 'w')
if self._running_on_emulator:
      filter_file.write('# Additional list of suppressions from emulator\n')
else:
filter_file.write('# List of suppressions\n')
filter_file.write("""This file was automatically generated by run_tests.py
""")
filter_file.write('\n'.join(sorted(new_failed_tests)))
filter_file.write('\n')
filter_file.close()
def GetDataFilesForTestSuite(self):
"""Returns a list of data files/dirs needed by the test suite."""
    # Ideally, we'd just push all test data. However, it is >100MB, and a lot
    # of the files are not relevant (some are used for browser_tests, others
    # for features not supported, etc.).
if self.test_package.test_suite_basename in ['base_unittests',
'sql_unittests',
'unit_tests']:
return [
'net/data/cache_tests/insert_load1',
'net/data/cache_tests/dirty_entry5',
'ui/base/test/data/data_pack_unittest',
'chrome/test/data/bookmarks/History_with_empty_starred',
'chrome/test/data/bookmarks/History_with_starred',
'chrome/test/data/extensions/json_schema_test.js',
'chrome/test/data/History/',
'chrome/test/data/json_schema_validator/',
'chrome/test/data/serializer_nested_test.js',
'chrome/test/data/serializer_test.js',
'chrome/test/data/serializer_test_nowhitespace.js',
'chrome/test/data/top_sites/',
'chrome/test/data/web_database',
'chrome/test/data/zip',
]
elif self.test_package.test_suite_basename == 'net_unittests':
return [
'net/data/cache_tests',
'net/data/filter_unittests',
'net/data/ftp',
'net/data/proxy_resolver_v8_unittest',
'net/data/ssl/certificates',
]
elif self.test_package.test_suite_basename == 'ui_tests':
return [
'chrome/test/data/dromaeo',
'chrome/test/data/json2.js',
'chrome/test/data/sunspider',
'chrome/test/data/v8_benchmark',
'chrome/test/ui/sunspider_uitest.js',
'chrome/test/ui/v8_benchmark_uitest.js',
]
elif self.test_package.test_suite_basename == 'page_cycler_tests':
data = [
'tools/page_cycler',
'data/page_cycler',
]
for d in data:
if not os.path.exists(d):
raise Exception('Page cycler data not found.')
return data
elif self.test_package.test_suite_basename == 'webkit_unit_tests':
return [
'third_party/WebKit/Source/WebKit/chromium/tests/data',
]
return []
def LaunchHelperToolsForTestSuite(self):
"""Launches helper tools for the test suite.
Sometimes one test may need to run some helper tools first in order to
successfully complete the test.
"""
document_root = self._GetHttpServerDocumentRootForTestSuite()
if document_root:
self.LaunchTestHttpServer(document_root)
if self._TestSuiteRequiresMockTestServer():
self.LaunchChromeTestServerSpawner()
def StripAndCopyFiles(self):
"""Strips and copies the required data files for the test suite."""
self.test_package.StripAndCopyExecutable()
self.test_package.tool.CopyFiles()
test_data = self.GetDataFilesForTestSuite()
if test_data and not self.fast_and_loose:
if self.test_package.test_suite_basename == 'page_cycler_tests':
        # Since the test data for page cycler is huge (around 200M), we use
        # the sdcard to store the data and create symbolic links to map it to
        # /data/local/tmp/ later.
self.CopyTestData(test_data, '/sdcard/')
for p in [os.path.dirname(d) for d in test_data if os.path.isdir(d)]:
mapped_device_path = '/data/local/tmp/' + p
# Unlink the mapped_device_path at first in case it was mapped to
          # a wrong path. Add option '-r' because the old path could be a dir.
self.adb.RunShellCommand('rm -r %s' % mapped_device_path)
self.adb.RunShellCommand(
'ln -s /sdcard/%s %s' % (p, mapped_device_path))
else:
self.CopyTestData(test_data, '/data/local/tmp/')
def RunTestsWithFilter(self):
"""Runs a tests via a small, temporary shell script."""
self.test_package.CreateTestRunnerScript(self._gtest_filter,
self._test_arguments)
self.test_results = self.test_package.RunTestsAndListResults()
def RebaselineTests(self):
"""Runs all available tests, restarting in case of failures."""
if self._gtest_filter:
all_tests = set(self._gtest_filter.split(':'))
else:
all_tests = set(self.test_package.GetAllTests())
failed_results = set()
executed_results = set()
while True:
executed_names = set([f.name for f in executed_results])
self._gtest_filter = ':'.join(all_tests - executed_names)
self.RunTestsWithFilter()
failed_results.update(self.test_results.crashed,
self.test_results.failed)
executed_results.update(self.test_results.crashed,
self.test_results.failed,
self.test_results.ok)
executed_names = set([f.name for f in executed_results])
logging.info('*' * 80)
logging.info(self.device)
logging.info('Executed: ' + str(len(executed_names)) + ' of ' +
str(len(all_tests)))
logging.info('Failed so far: ' + str(len(failed_results)) + ' ' +
str([f.name for f in failed_results]))
logging.info('Remaining: ' + str(len(all_tests - executed_names)) + ' ' +
str(all_tests - executed_names))
logging.info('*' * 80)
if executed_names == all_tests:
break
self.test_results = TestResults.FromOkAndFailed(list(executed_results -
failed_results),
list(failed_results))
def _RunTestsForSuiteInternal(self):
"""Runs all tests (in rebaseline mode, run each test in isolation).
Returns:
A TestResults object.
"""
if self.test_package.rebaseline:
self.RebaselineTests()
else:
if not self._gtest_filter:
self._gtest_filter = ('-' + ':'.join(self.GetDisabledTests()) + ':' +
':'.join(['*.' + x + '*' for x in
self.test_package.GetDisabledPrefixes()]))
self.RunTestsWithFilter()
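  # A hedged example of the negative filter built above: with disabled tests
  # ['FooTest.Bar'] and the standard prefixes, the filter becomes
  #   '-FooTest.Bar:*.DISABLED_*:*.FLAKY_*:*.FAILS_*'
  # i.e. gtest runs everything except the listed and prefixed tests.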
def SetUp(self):
"""Sets up necessary test enviroment for the test suite."""
super(SingleTestRunner, self).SetUp()
if self.test_package.performance_test:
if run_tests_helper.IsRunningAsBuildbot():
self.adb.SetJavaAssertsEnabled(enable=False)
self.adb.Reboot(full_reboot=False)
self.adb.SetupPerformanceTest()
if self.dump_debug_info:
self.dump_debug_info.StartRecordingLog(True)
self.StripAndCopyFiles()
self.LaunchHelperToolsForTestSuite()
self.test_package.tool.SetupEnvironment()
def TearDown(self):
"""Cleans up the test enviroment for the test suite."""
super(SingleTestRunner, self).TearDown()
self.test_package.tool.CleanUpEnvironment()
if self.test_package.cleanup_test_files:
self.adb.RemovePushedFiles()
if self.dump_debug_info:
self.dump_debug_info.StopRecordingLog()
if self.test_package.performance_test:
self.adb.TearDownPerformanceTest()
def RunTests(self):
"""Runs the tests and cleans up the files once finished.
Returns:
A TestResults object.
"""
self.SetUp()
try:
self._RunTestsForSuiteInternal()
finally:
self.TearDown()
return self.test_results

View File

@ -0,0 +1,24 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'ssl',
'type': 'none',
'direct_dependent_settings': {
'defines': [
'USE_OPENSSL',
],
'include_dirs': [
'../../third_party/openssl/openssl/include',
'../../third_party/openssl/config/android',
],
},
'dependencies': [
'../../third_party/openssl/openssl.gyp:openssl',
],
},
],
}

View File

@ -0,0 +1,168 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import re
import os
import pexpect
from perf_tests_helper import PrintPerfResult
from test_result import BaseTestResult, TestResults
from valgrind_tools import CreateTool
# TODO(bulach): TestPackage, TestPackageExecutable and
# TestPackageApk are a work in progress related to making the native tests
# run as a NDK-app from an APK rather than a stand-alone executable.
class TestPackage(object):
"""A helper base class for both APK and stand-alone executables.
Args:
adb: ADB interface the tests are using.
device: Device to run the tests.
test_suite: A specific test suite to run, empty to run all.
timeout: Timeout for each test.
rebaseline: Whether or not to run tests in isolation and update the filter.
performance_test: Whether or not performance test(s).
cleanup_test_files: Whether or not to cleanup test files on device.
tool: Name of the Valgrind tool.
dump_debug_info: A debug_info object.
"""
def __init__(self, adb, device, test_suite, timeout, rebaseline,
performance_test, cleanup_test_files, tool, dump_debug_info):
self.adb = adb
self.device = device
self.test_suite = os.path.splitext(test_suite)[0]
self.test_suite_basename = os.path.basename(self.test_suite)
self.test_suite_dirname = os.path.dirname(self.test_suite)
self.rebaseline = rebaseline
self.performance_test = performance_test
self.cleanup_test_files = cleanup_test_files
self.tool = CreateTool(tool, self.adb)
if timeout == 0:
if self.test_suite_basename == 'page_cycler_tests':
timeout = 900
else:
timeout = 60
# On a VM (e.g. chromium buildbots), this timeout is way too small.
if os.environ.get('BUILDBOT_SLAVENAME'):
timeout = timeout * 2
self.timeout = timeout * self.tool.GetTimeoutScale()
self.dump_debug_info = dump_debug_info
def _BeginGetIOStats(self):
"""Gets I/O statistics before running test.
Return:
Tuple of (I/O stats object, flag of ready to continue). When encountering
error, ready-to-continue flag is False, True otherwise. The I/O stats
object may be None if the test is not performance test.
"""
initial_io_stats = None
# Try to get the disk I/O statistics for all performance tests.
if self.performance_test and not self.rebaseline:
initial_io_stats = self.adb.GetIoStats()
# Get rid of the noise introduced by launching Chrome for page cycler.
if self.test_suite_basename == 'page_cycler_tests':
try:
chrome_launch_done_re = re.compile(
re.escape('Finish waiting for browser launch!'))
self.adb.WaitForLogMatch(chrome_launch_done_re)
initial_io_stats = self.adb.GetIoStats()
except pexpect.TIMEOUT:
          logging.error('Test terminated because the Chrome launcher has no '
                        'response after 120 seconds.')
return (None, False)
finally:
if self.dump_debug_info:
self.dump_debug_info.TakeScreenshot('_Launch_Chrome_')
return (initial_io_stats, True)
def _EndGetIOStats(self, initial_io_stats):
"""Gets I/O statistics after running test and calcuate the I/O delta.
Args:
initial_io_stats: I/O stats object got from _BeginGetIOStats.
Return:
String for formated diso I/O statistics.
"""
disk_io = ''
if self.performance_test and initial_io_stats:
final_io_stats = self.adb.GetIoStats()
for stat in final_io_stats:
disk_io += '\n' + PrintPerfResult(stat, stat,
[final_io_stats[stat] -
initial_io_stats[stat]],
stat.split('_')[1], True, False)
logging.info(disk_io)
return disk_io
def GetDisabledPrefixes(self):
return ['DISABLED_', 'FLAKY_', 'FAILS_']
def _ParseGTestListTests(self, all_tests):
ret = []
current = ''
disabled_prefixes = self.GetDisabledPrefixes()
for test in all_tests:
if not test:
continue
if test[0] != ' ':
current = test
continue
if 'YOU HAVE' in test:
break
test_name = test[2:]
if not any([test_name.startswith(x) for x in disabled_prefixes]):
ret += [current + test_name]
return ret
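  # A hedged example of the parsing above: --gtest_list_tests output such as
  #
  #   FooTest.
  #     Bar
  #     DISABLED_Baz
  #
  # yields ['FooTest.Bar']; DISABLED_Baz is dropped by the prefix check.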
def _WatchTestOutput(self, p):
"""Watches the test output.
Args:
p: the process generating output as created by pexpect.spawn.
"""
ok_tests = []
failed_tests = []
timed_out = False
    re_run = re.compile('\[ RUN      \] ?(.*)\r\n')
    re_fail = re.compile('\[  FAILED  \] ?(.*)\r\n')
    re_ok = re.compile('\[       OK \] ?(.*)\r\n')
(io_stats_before, ready_to_continue) = self._BeginGetIOStats()
while ready_to_continue:
found = p.expect([re_run, pexpect.EOF], timeout=self.timeout)
if found == 1: # matched pexpect.EOF
break
if self.dump_debug_info:
self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
full_test_name = p.match.group(1)
found = p.expect([re_ok, re_fail, pexpect.EOF, pexpect.TIMEOUT],
timeout=self.timeout)
if found == 0: # re_ok
ok_tests += [BaseTestResult(full_test_name.replace('\r', ''),
p.before)]
continue
failed_tests += [BaseTestResult(full_test_name.replace('\r', ''),
p.before)]
if found >= 2:
# The test crashed / bailed out (i.e., didn't print OK or FAIL).
if found == 3: # pexpect.TIMEOUT
logging.error('Test terminated after %d second timeout.',
self.timeout)
timed_out = True
break
p.close()
if not self.rebaseline and ready_to_continue:
ok_tests += self._EndGetIOStats(io_stats_before)
ret_code = self._GetGTestReturnCode()
if ret_code:
failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code,
'pexpect.before: %s'
'\npexpect.after: %s'
% (p.before,
p.after))]
return TestResults.FromOkAndFailed(ok_tests, failed_tests, timed_out)

View File

@ -0,0 +1,156 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import pexpect
import shutil
import sys
import tempfile
import cmd_helper
from test_package import TestPackage
class TestPackageExecutable(TestPackage):
"""A helper class for running stand-alone executables."""
_TEST_RUNNER_RET_VAL_FILE = '/data/local/tmp/gtest_retval'
def __init__(self, adb, device, test_suite, timeout, rebaseline,
performance_test, cleanup_test_files, tool, dump_debug_info,
symbols_dir=None):
"""
Args:
adb: ADB interface the tests are using.
device: Device to run the tests.
test_suite: A specific test suite to run, empty to run all.
timeout: Timeout for each test.
rebaseline: Whether or not to run tests in isolation and update the
filter.
performance_test: Whether or not performance test(s).
cleanup_test_files: Whether or not to cleanup test files on device.
tool: Name of the Valgrind tool.
dump_debug_info: A debug_info object.
symbols_dir: Directory to put the stripped binaries.
"""
TestPackage.__init__(self, adb, device, test_suite, timeout,
rebaseline, performance_test, cleanup_test_files,
tool, dump_debug_info)
self.symbols_dir = symbols_dir
def _GetGTestReturnCode(self):
ret = None
ret_code = 1 # Assume failure if we can't find it
ret_code_file = tempfile.NamedTemporaryFile()
try:
if not self.adb.Adb().Pull(
TestPackageExecutable._TEST_RUNNER_RET_VAL_FILE, ret_code_file.name):
logging.critical('Unable to pull gtest ret val file %s',
ret_code_file.name)
raise ValueError
ret_code = file(ret_code_file.name).read()
ret = int(ret_code)
except ValueError:
logging.critical('Error reading gtest ret val file %s [%s]',
ret_code_file.name, ret_code)
ret = 1
return ret
def _AddNativeCoverageExports(self):
    # export GCOV_PREFIX sets the path for native coverage results.
    # export GCOV_PREFIX_STRIP indicates how many initial directory
    # names to strip off the hardwired absolute paths.
# This value is calculated in buildbot.sh and
# depends on where the tree is built.
# Ex: /usr/local/google/code/chrome will become
# /code/chrome if GCOV_PREFIX_STRIP=3
try:
depth = os.environ['NATIVE_COVERAGE_DEPTH_STRIP']
except KeyError:
logging.info('NATIVE_COVERAGE_DEPTH_STRIP is not defined: '
'No native coverage.')
return ''
export_string = 'export GCOV_PREFIX="/data/local/gcov"\n'
export_string += 'export GCOV_PREFIX_STRIP=%s\n' % depth
return export_string
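  # A hedged example: with NATIVE_COVERAGE_DEPTH_STRIP=3, the method above
  # returns the fragment
  #   export GCOV_PREFIX="/data/local/gcov"
  #   export GCOV_PREFIX_STRIP=3
  # which the test runner script prepends before launching the binary.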
def GetAllTests(self):
"""Returns a list of all tests available in the test suite."""
all_tests = self.adb.RunShellCommand(
'/data/local/%s --gtest_list_tests' % self.test_suite_basename)
return self._ParseGTestListTests(all_tests)
def CreateTestRunnerScript(self, gtest_filter, test_arguments):
"""Creates a test runner script and pushes to the device.
Args:
gtest_filter: A gtest_filter flag.
test_arguments: Additional arguments to pass to the test binary.
"""
tool_wrapper = self.tool.GetTestWrapper()
sh_script_file = tempfile.NamedTemporaryFile()
# We need to capture the exit status from the script since adb shell won't
    # propagate it to us.
sh_script_file.write('cd /data/local\n'
'%s'
'%s /data/local/%s --gtest_filter=%s %s\n'
'echo $? > %s' %
(self._AddNativeCoverageExports(),
tool_wrapper, self.test_suite_basename,
gtest_filter, test_arguments,
TestPackageExecutable._TEST_RUNNER_RET_VAL_FILE))
sh_script_file.flush()
cmd_helper.RunCmd(['chmod', '+x', sh_script_file.name])
self.adb.PushIfNeeded(sh_script_file.name,
'/data/local/chrome_test_runner.sh')
    logging.info('Contents of the test runner script: ')
for line in open(sh_script_file.name).readlines():
logging.info(' ' + line.rstrip())
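  # Putting it together, a hedged example of the generated script for a
  # hypothetical suite, filter and arguments (no tool wrapper, no coverage):
  #   cd /data/local
  #   /data/local/base_unittests --gtest_filter=FooTest.* --foo
  #   echo $? > /data/local/tmp/gtest_retval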
def RunTestsAndListResults(self):
"""Runs all the tests and checks for failures.
Returns:
A TestResults object.
"""
args = ['adb', '-s', self.device, 'shell', 'sh',
'/data/local/chrome_test_runner.sh']
logging.info(args)
p = pexpect.spawn(args[0], args[1:], logfile=sys.stdout)
return self._WatchTestOutput(p)
def StripAndCopyExecutable(self):
"""Strips and copies the executable to the device."""
if self.tool.NeedsDebugInfo():
target_name = self.test_suite
elif self.test_suite_basename == 'webkit_unit_tests':
      # webkit_unit_tests has been stripped in the build step.
target_name = self.test_suite
else:
target_name = self.test_suite + '_' + self.device + '_stripped'
should_strip = True
if os.path.isfile(target_name):
logging.info('Found target file %s' % target_name)
target_mtime = os.stat(target_name).st_mtime
source_mtime = os.stat(self.test_suite).st_mtime
if target_mtime > source_mtime:
logging.info('Target mtime (%d) is newer than source (%d), assuming '
'no change.' % (target_mtime, source_mtime))
should_strip = False
if should_strip:
logging.info('Did not find up-to-date stripped binary. Generating a '
'new one (%s).' % target_name)
# Whenever we generate a stripped binary, copy to the symbols dir. If we
# aren't stripping a new binary, assume it's there.
if self.symbols_dir:
if not os.path.exists(self.symbols_dir):
os.makedirs(self.symbols_dir)
shutil.copy(self.test_suite, self.symbols_dir)
strip = os.environ['STRIP']
cmd_helper.RunCmd([strip, self.test_suite, '-o', target_name])
test_binary = '/data/local/' + self.test_suite_basename
self.adb.PushIfNeeded(target_name, test_binary)

View File

@ -0,0 +1,110 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
# Language values match constants in Sponge protocol buffer (sponge.proto).
JAVA = 5
PYTHON = 7
class BaseTestResult(object):
"""A single result from a unit test."""
def __init__(self, name, log):
self.name = name
self.log = log
class SingleTestResult(BaseTestResult):
"""Result information for a single test.
Args:
full_name: Full name of the test.
start_date: Date in milliseconds when the test began running.
dur: Duration of the test run in milliseconds.
lang: Language of the test (JAVA or PYTHON).
log: An optional string listing any errors.
error: A tuple of a short error message and a longer version used by Sponge
if test resulted in a fail or error. An empty tuple implies a pass.
"""
def __init__(self, full_name, start_date, dur, lang, log='', error=()):
BaseTestResult.__init__(self, full_name, log)
name_pieces = full_name.rsplit('#')
    # rsplit always returns at least one piece, so a real '#' split needs > 1.
    if len(name_pieces) > 1:
self.test_name = name_pieces[1]
self.class_name = name_pieces[0]
else:
self.class_name = full_name
self.test_name = full_name
self.start_date = start_date
self.dur = dur
self.error = error
self.lang = lang
class TestResults(object):
"""Results of a test run."""
def __init__(self):
self.ok = []
self.failed = []
self.crashed = []
self.unknown = []
self.disabled = []
self.unexpected_pass = []
self.timed_out = False
@staticmethod
def FromOkAndFailed(ok, failed, timed_out=False):
ret = TestResults()
ret.ok = ok
ret.failed = failed
ret.timed_out = timed_out
return ret
@staticmethod
def FromTestResults(results):
"""Combines a list of results in a single TestResults object."""
ret = TestResults()
for t in results:
ret.ok += t.ok
ret.failed += t.failed
ret.crashed += t.crashed
ret.unknown += t.unknown
ret.disabled += t.disabled
ret.unexpected_pass += t.unexpected_pass
if t.timed_out:
ret.timed_out = True
return ret
def _Log(self, sorted_list):
for t in sorted_list:
logging.critical(t.name)
if t.log:
logging.critical(t.log)
def GetAllBroken(self):
"""Returns the all broken tests including failed, crashed, unknown."""
return self.failed + self.crashed + self.unknown
def LogFull(self):
"""Output all broken tests or 'passed' if none broken"""
logging.critical('*' * 80)
logging.critical('Final result')
if self.failed:
logging.critical('Failed:')
self._Log(sorted(self.failed))
if self.crashed:
logging.critical('Crashed:')
self._Log(sorted(self.crashed))
if self.unknown:
logging.critical('Unknown:')
self._Log(sorted(self.unknown))
if not self.GetAllBroken():
logging.critical('Passed')
logging.critical('*' * 80)

View File

@ -0,0 +1,185 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Classes in this file define additional actions that need to be taken to run a
test under some kind of runtime error detection tool.
The interface is intended to be used as follows.
1. For tests that simply run a native process (i.e. no activity is spawned):
Call tool.CopyFiles().
Prepend test command line with tool.GetTestWrapper().
2. For tests that spawn an activity:
Call tool.CopyFiles().
Call tool.SetupEnvironment().
Run the test as usual.
Call tool.CleanUpEnvironment().
"""
import os.path
import sys
from run_tests_helper import CHROME_DIR
class BaseTool(object):
"""A tool that does nothing."""
def __init__(self, *args, **kwargs):
pass
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ''
def CopyFiles(self):
"""Copies tool-specific files to the device, create directories, etc."""
pass
def SetupEnvironment(self):
"""Sets up the system environment for a test.
This is a good place to set system properties.
"""
pass
def CleanUpEnvironment(self):
"""Cleans up environment."""
pass
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 1.0
def NeedsDebugInfo(self):
"""Whether this tool requires debug info.
Returns True if this tool can not work with stripped binaries.
"""
return False
class ValgrindTool(BaseTool):
"""Base abstract class for Valgrind tools."""
VG_DIR = '/data/local/tmp/valgrind'
VGLOGS_DIR = '/data/local/tmp/vglogs'
def __init__(self, adb, renderer=False):
self.adb = adb
if renderer:
# exactly 31 chars, SystemProperties::PROP_NAME_MAX
self.wrap_property = 'wrap.com.android.chrome:sandbox'
else:
self.wrap_property = 'wrap.com.android.chrome'
def CopyFiles(self):
"""Copies Valgrind tools to the device."""
self.adb.RunShellCommand('rm -r %s; mkdir %s' %
(ValgrindTool.VG_DIR, ValgrindTool.VG_DIR))
self.adb.RunShellCommand('rm -r %s; mkdir %s' %
(ValgrindTool.VGLOGS_DIR, ValgrindTool.VGLOGS_DIR))
files = self.GetFilesForTool()
for f in files:
self.adb.PushIfNeeded(os.path.join(CHROME_DIR, f),
os.path.join(ValgrindTool.VG_DIR,
os.path.basename(f)))
def SetupEnvironment(self):
"""Sets up device environment."""
self.adb.RunShellCommand('chmod 777 /data/local/tmp')
self.adb.RunShellCommand('setprop %s "logwrapper %s"' % (
self.wrap_property, self.GetTestWrapper()))
self.adb.RunShellCommand('setprop chrome.timeout_scale %f' % (
self.GetTimeoutScale()))
def CleanUpEnvironment(self):
"""Cleans up device environment."""
self.adb.RunShellCommand('setprop %s ""' % (self.wrap_property,))
self.adb.RunShellCommand('setprop chrome.timeout_scale ""')
def GetFilesForTool(self):
"""Returns a list of file names for the tool."""
raise NotImplementedError()
def NeedsDebugInfo(self):
"""Whether this tool requires debug info.
Returns True if this tool can not work with stripped binaries.
"""
return True
class MemcheckTool(ValgrindTool):
"""Memcheck tool."""
def __init__(self, adb, renderer=False):
super(MemcheckTool, self).__init__(adb, renderer)
def GetFilesForTool(self):
"""Returns a list of file names for the tool."""
return ['tools/valgrind/android/vg-chrome-wrapper.sh',
'tools/valgrind/memcheck/suppressions.txt',
'tools/valgrind/memcheck/suppressions_android.txt']
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper.sh'
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 30
class TSanTool(ValgrindTool):
"""ThreadSanitizer tool. See http://code.google.com/p/data-race-test ."""
def __init__(self, adb, renderer=False):
super(TSanTool, self).__init__(adb, renderer)
def GetFilesForTool(self):
"""Returns a list of file names for the tool."""
return ['tools/valgrind/android/vg-chrome-wrapper-tsan.sh',
'tools/valgrind/tsan/suppressions.txt',
'tools/valgrind/tsan/suppressions_android.txt',
'tools/valgrind/tsan/ignores.txt']
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper-tsan.sh'
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 30
TOOL_REGISTRY = {
'memcheck': lambda x: MemcheckTool(x, False),
'memcheck-renderer': lambda x: MemcheckTool(x, True),
'tsan': lambda x: TSanTool(x, False),
'tsan-renderer': lambda x: TSanTool(x, True)
}
def CreateTool(tool_name, adb):
"""Creates a tool with the specified tool name.
Args:
tool_name: Name of the tool to create.
adb: ADB interface the tool will use.
"""
if not tool_name:
return BaseTool()
ctor = TOOL_REGISTRY.get(tool_name)
if ctor:
return ctor(adb)
else:
print 'Unknown tool %s, available tools: %s' % (
tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))
sys.exit(1)

View File

@ -0,0 +1,45 @@
#!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO: remove this script when GYP has for loops
import sys
import optparse
def main(argv):
parser = optparse.OptionParser()
usage = 'usage: %s [options ...] format_string locale_list'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-d', dest='dash_to_underscore', action="store_true",
default=False,
help='map "en-US" to "en" and "-" to "_" in locales')
(options, arglist) = parser.parse_args(argv)
if len(arglist) < 3:
print 'ERROR: need string and list of locales'
return 1
str_template = arglist[1]
locales = arglist[2:]
results = []
for locale in locales:
# For Cocoa to find the locale at runtime, it needs to use '_' instead
# of '-' (http://crbug.com/20441). Also, 'en-US' should be represented
# simply as 'en' (http://crbug.com/19165, http://crbug.com/25578).
if options.dash_to_underscore:
if locale == 'en-US':
locale = 'en'
locale = locale.replace('-', '_')
results.append(str_template.replace('ZZLOCALE', locale))
# Quote each element so filename spaces don't mess up GYP's attempt to parse
# it into a list.
print ' '.join(["'%s'" % x for x in results])
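# A hedged example: invoked with the arguments
#   -d 'ZZLOCALE.lproj' en-US fr
# the loop above maps en-US -> en, substitutes ZZLOCALE, and prints
#   'en.lproj' 'fr.lproj'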
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@ -0,0 +1,22 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file lists symbols that should not be stripped by Xcode from the binaries
# built for Mac OS X using AddressSanitizer
# (http://dev.chromium.org/developers/testing/addresssanitizer).
___asan_init
___asan_register_global
___asan_register_globals
___asan_unregister_globals
___asan_report_load1
___asan_report_load2
___asan_report_load4
___asan_report_load8
___asan_report_load16
___asan_report_store1
___asan_report_store2
___asan_report_store4
___asan_report_store8
___asan_report_store16

View File

@ -0,0 +1,51 @@
#!/bin/sh
# Copyright (c) 2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is a wrapper for fetching values from the BRANDING files. Pass the
# value of GYP's branding variable followed by the key you want and the right
# file is checked.
#
# branding_value.sh Chromium COPYRIGHT
# branding_value.sh Chromium PRODUCT_FULLNAME
#
set -e
if [ $# -ne 2 ] ; then
echo "error: expect two arguments, branding and key" >&2
exit 1
fi
BUILD_BRANDING=$1
THE_KEY=$2
pushd $(dirname "${0}") > /dev/null
BUILD_DIR=$(pwd)
popd > /dev/null
TOP="${BUILD_DIR}/.."
case ${BUILD_BRANDING} in
Chromium)
BRANDING_FILE="${TOP}/chrome/app/theme/chromium/BRANDING"
;;
Chrome)
BRANDING_FILE="${TOP}/chrome/app/theme/google_chrome/BRANDING"
;;
*)
echo "error: unknown branding: ${BUILD_BRANDING}" >&2
exit 1
;;
esac
BRANDING_VALUE=$(sed -n -e "s/^${THE_KEY}=\(.*\)\$/\1/p" "${BRANDING_FILE}")
if [ -z "${BRANDING_VALUE}" ] ; then
echo "error: failed to find key '${THE_KEY}'" >&2
exit 1
fi
echo "${BRANDING_VALUE}"

View File

@ -0,0 +1,143 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file adds defines about the platform we're currently building on.
// Operating System:
// OS_WIN / OS_MACOSX / OS_LINUX / OS_POSIX (MACOSX or LINUX)
// Compiler:
// COMPILER_MSVC / COMPILER_GCC
// Processor:
// ARCH_CPU_X86 / ARCH_CPU_X86_64 / ARCH_CPU_X86_FAMILY (X86 or X86_64)
// ARCH_CPU_32_BITS / ARCH_CPU_64_BITS
#ifndef BUILD_BUILD_CONFIG_H_
#define BUILD_BUILD_CONFIG_H_
// A set of macros to use for platform detection.
#if defined(__APPLE__)
#define OS_MACOSX 1
#elif defined(ANDROID)
#define OS_ANDROID 1
#elif defined(__native_client__)
#define OS_NACL 1
#elif defined(__linux__)
#define OS_LINUX 1
// Use TOOLKIT_GTK on linux if TOOLKIT_VIEWS isn't defined.
#if !defined(TOOLKIT_VIEWS)
#define TOOLKIT_GTK
#endif
#elif defined(_WIN32)
#define OS_WIN 1
#define TOOLKIT_VIEWS 1
#elif defined(__FreeBSD__)
#define OS_FREEBSD 1
#define TOOLKIT_GTK
#elif defined(__OpenBSD__)
#define OS_OPENBSD 1
#define TOOLKIT_GTK
#elif defined(__sun)
#define OS_SOLARIS 1
#define TOOLKIT_GTK
#else
#error Please add support for your platform in build/build_config.h
#endif
#if defined(USE_OPENSSL) && defined(USE_NSS)
#error Cannot use both OpenSSL and NSS
#endif
// For access to standard BSD features, use OS_BSD instead of a
// more specific macro.
#if defined(OS_FREEBSD) || defined(OS_OPENBSD)
#define OS_BSD 1
#endif
// For access to standard POSIXish features, use OS_POSIX instead of a
// more specific macro.
#if defined(OS_MACOSX) || defined(OS_LINUX) || defined(OS_FREEBSD) || \
defined(OS_OPENBSD) || defined(OS_SOLARIS) || defined(OS_ANDROID) || \
defined(OS_NACL)
#define OS_POSIX 1
#endif
#if defined(OS_POSIX) && !defined(OS_MACOSX) && !defined(OS_ANDROID) && \
!defined(OS_NACL)
#define USE_X11 1 // Use X for graphics.
#endif
// Use tcmalloc
#if (defined(OS_WIN) || defined(OS_LINUX)) && !defined(NO_TCMALLOC)
#define USE_TCMALLOC 1
#endif
// Compiler detection.
#if defined(__GNUC__)
#define COMPILER_GCC 1
#elif defined(_MSC_VER)
#define COMPILER_MSVC 1
#else
#error Please add support for your compiler in build/build_config.h
#endif
// Processor architecture detection. For more info on what's defined, see:
// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
// http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
#define ARCH_CPU_X86_FAMILY 1
#define ARCH_CPU_X86_64 1
#define ARCH_CPU_64_BITS 1
#define ARCH_CPU_LITTLE_ENDIAN 1
#elif defined(_M_IX86) || defined(__i386__)
#define ARCH_CPU_X86_FAMILY 1
#define ARCH_CPU_X86 1
#define ARCH_CPU_32_BITS 1
#define ARCH_CPU_LITTLE_ENDIAN 1
#elif defined(__ARMEL__)
#define ARCH_CPU_ARM_FAMILY 1
#define ARCH_CPU_ARMEL 1
#define ARCH_CPU_32_BITS 1
#define ARCH_CPU_LITTLE_ENDIAN 1
#define WCHAR_T_IS_UNSIGNED 1
#elif defined(__pnacl__)
#define ARCH_CPU_32_BITS 1
#else
#error Please add support for your architecture in build/build_config.h
#endif
// Type detection for wchar_t.
#if defined(OS_WIN)
#define WCHAR_T_IS_UTF16
#elif defined(OS_POSIX) && defined(COMPILER_GCC) && \
defined(__WCHAR_MAX__) && \
(__WCHAR_MAX__ == 0x7fffffff || __WCHAR_MAX__ == 0xffffffff)
#define WCHAR_T_IS_UTF32
#elif defined(OS_POSIX) && defined(COMPILER_GCC) && \
defined(__WCHAR_MAX__) && \
(__WCHAR_MAX__ == 0x7fff || __WCHAR_MAX__ == 0xffff)
// On Posix, we'll detect short wchar_t, but projects aren't guaranteed to
// compile in this mode (in particular, Chrome doesn't). This is intended for
// other projects using base who manage their own dependencies and make sure
// short wchar works for them.
#define WCHAR_T_IS_UTF16
#else
#error Please add support for your compiler in build/build_config.h
#endif
#if defined(OS_CHROMEOS)
// Single define to trigger whether CrOS fonts have BCI on.
// In that case font sizes/deltas should be adjusted.
//define CROS_FONTS_USING_BCI
#endif
#if defined(OS_ANDROID)
// The compiler thinks std::string::const_iterator and "const char*" are
// equivalent types.
#define STD_STRING_ITERATOR_IS_CHAR_POINTER
// The compiler thinks base::string16::const_iterator and "char16*" are
// equivalent types.
#define BASE_STRING16_ITERATOR_IS_CHAR16_POINTER
#endif
#endif // BUILD_BUILD_CONFIG_H_

View File

@ -0,0 +1,127 @@
# -*- python -*-
# Crocodile config file for Chromium - settings common to all platforms
#
# This should be specified before the platform-specific config, for example:
# croc -c chrome_common.croc -c linux/chrome_linux.croc
{
# List of root directories, applied in order
'roots' : [
# Sub-paths we specifically care about and want to call out
{
'root' : '_/src',
'altname' : 'CHROMIUM',
},
],
# List of rules, applied in order
# Note that any 'include':0 rules here will be overridden by the 'include':1
# rules in the platform-specific configs.
'rules' : [
# Don't scan for executable lines in uninstrumented C++ header files
{
'regexp' : '.*\\.(h|hpp)$',
'add_if_missing' : 0,
},
# Groups
{
'regexp' : '',
'group' : 'source',
},
{
'regexp' : '.*_(test|unittest|uitest|browsertest)\\.',
'group' : 'test',
},
# Languages
{
'regexp' : '.*\\.(c|h)$',
'language' : 'C',
},
{
'regexp' : '.*\\.(cc|cpp|hpp)$',
'language' : 'C++',
},
# Files/paths to include. Specify these before the excludes, since rules
# are in order.
{
'regexp' : '^CHROMIUM/(base|media|net|printing|remoting|chrome|content|webkit/glue|native_client)/',
'include' : 1,
},
# Don't include subversion or mercurial SCM dirs
{
'regexp' : '.*/(\\.svn|\\.hg)/',
'include' : 0,
},
# Don't include output dirs
{
'regexp' : '.*/(Debug|Release|sconsbuild|out|xcodebuild)/',
'include' : 0,
},
# Don't include third-party source
{
'regexp' : '.*/third_party/',
'include' : 0,
},
# We don't run the V8 test suite, so we don't care about V8 coverage.
{
'regexp' : '.*/v8/',
'include' : 0,
},
],
# Paths to add source from
'add_files' : [
'CHROMIUM'
],
# Statistics to print
'print_stats' : [
{
'stat' : 'files_executable',
'format' : '*RESULT FilesKnown: files_executable= %d files',
},
{
'stat' : 'files_instrumented',
'format' : '*RESULT FilesInstrumented: files_instrumented= %d files',
},
{
'stat' : '100.0 * files_instrumented / files_executable',
'format' : '*RESULT FilesInstrumentedPercent: files_instrumented_percent= %g',
},
{
'stat' : 'lines_executable',
'format' : '*RESULT LinesKnown: lines_known= %d lines',
},
{
'stat' : 'lines_instrumented',
'format' : '*RESULT LinesInstrumented: lines_instrumented= %d lines',
},
{
'stat' : 'lines_covered',
'format' : '*RESULT LinesCoveredSource: lines_covered_source= %d lines',
'group' : 'source',
},
{
'stat' : 'lines_covered',
'format' : '*RESULT LinesCoveredTest: lines_covered_test= %d lines',
'group' : 'test',
},
{
'stat' : '100.0 * lines_covered / lines_executable',
'format' : '*RESULT PercentCovered: percent_covered= %g',
},
{
'stat' : '100.0 * lines_covered / lines_executable',
'format' : '*RESULT PercentCoveredSource: percent_covered_source= %g',
'group' : 'source',
},
{
'stat' : '100.0 * lines_covered / lines_executable',
'format' : '*RESULT PercentCoveredTest: percent_covered_test= %g',
'group' : 'test',
},
],
}

File diff suppressed because it is too large

View File

@ -0,0 +1,49 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compiler version checking tool for gcc
Print gcc version as XY if you are running gcc X.Y.*.
This is used to tweak build flags for gcc 4.4.
"""
import os
import re
import subprocess
import sys
def GetVersion(compiler):
try:
# Note that compiler could be something tricky like "distcc g++".
compiler = compiler + " -dumpversion"
pipe = subprocess.Popen(compiler, stdout=subprocess.PIPE, shell=True)
gcc_output = pipe.communicate()[0]
result = re.match(r"(\d+)\.(\d+)", gcc_output)
return result.group(1) + result.group(2)
except Exception, e:
print >> sys.stderr, "compiler_version.py failed to execute:", compiler
print >> sys.stderr, e
return ""
def main():
# Check if CXX environment variable exists and
# if it does use that compiler.
cxx = os.getenv("CXX", None)
if cxx:
cxxversion = GetVersion(cxx)
if cxxversion != "":
print cxxversion
return 0
else:
# Otherwise we check the g++ version.
gccversion = GetVersion("g++")
if gccversion != "":
print gccversion
return 0
return 1
if __name__ == "__main__":
sys.exit(main())

View File

@ -0,0 +1,18 @@
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Copy File.
This module works much like the cp posix command - it takes 2 arguments:
(src, dst) and copies the file with path |src| to |dst|.
"""
import shutil
import sys
def Main(src, dst):
return shutil.copyfile(src, dst)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1], sys.argv[2]))

View File

@ -0,0 +1,15 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Writes True if the argument is a directory."""
import os.path
import sys
def main():
sys.stdout.write(str(os.path.isdir(sys.argv[1])))
return 0
if __name__ == '__main__':
sys.exit(main())

View File

@ -0,0 +1,33 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Shim to run nacl toolchain download script only if there is a nacl dir."""
import os
import sys
def Main(args):
# Exit early if disable_nacl=1.
if 'disable_nacl=1' in os.environ.get('GYP_DEFINES', ''):
return 0
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(script_dir)
nacl_dir = os.path.join(src_dir, 'native_client')
nacl_build_dir = os.path.join(nacl_dir, 'build')
download_script = os.path.join(nacl_build_dir, 'download_toolchains.py')
if not os.path.exists(download_script):
print "Can't find '%s'" % download_script
print 'Presumably you are intentionally building without NativeClient.'
print 'Skipping NativeClient toolchain download.'
sys.exit(0)
sys.path.insert(0, nacl_build_dir)
import download_toolchains
download_toolchains.Main(args)
return 0
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))

View File

@ -0,0 +1,56 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert any unicode characters found in the input file to C literals."""
import codecs
import optparse
import os
import sys
def main(argv):
parser = optparse.OptionParser()
usage = 'Usage: %prog -o <output_dir> <input_file>'
parser.set_usage(usage)
parser.add_option('-o', dest='output_dir')
options, arglist = parser.parse_args(argv)
if not options.output_dir:
print "output_dir required"
return 1
if len(arglist) != 2:
print "input_file required"
return 1
in_filename = arglist[1]
if not in_filename.endswith('.utf8'):
print "input_file should end in .utf8"
return 1
out_filename = os.path.join(options.output_dir, os.path.basename(
os.path.splitext(in_filename)[0]))
WriteEscapedFile(in_filename, out_filename)
return 0
def WriteEscapedFile(in_filename, out_filename):
input_data = codecs.open(in_filename, 'r', 'utf8').read()
with codecs.open(out_filename, 'w', 'ascii') as out_file:
for i, char in enumerate(input_data):
if ord(char) > 127:
out_file.write(repr(char.encode('utf8'))[1:-1])
if input_data[i + 1:i + 2] in '0123456789abcdefABCDEF':
out_file.write('""')
else:
out_file.write(char.encode('ascii'))
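# A hedged example of the escaping above: the input u'\u03c0' ("pi") followed
# by '1' is written as the ASCII-safe sequence
#   \xcf\x80""1
# where the '""' splice stops the hex escape from absorbing the digit.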
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@ -0,0 +1,56 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extracts a single file from a CAB archive."""
import os
import shutil
import subprocess
import sys
import tempfile
def main():
if len(sys.argv) != 4:
print 'Usage: extract_from_cab.py cab_path archived_file output_dir'
return 1
[cab_path, archived_file, output_dir] = sys.argv[1:]
# Expand.exe does its work in a fixed-named temporary directory created within
# the given output directory. This is a problem for concurrent extractions, so
# create a unique temp dir within the desired output directory to work around
# this limitation.
temp_dir = tempfile.mkdtemp(dir=output_dir)
try:
# Invoke the Windows expand utility to extract the file.
level = subprocess.call(
['expand', cab_path, '-F:' + archived_file, temp_dir])
if level == 0:
# Move the output file into place, preserving expand.exe's behavior of
# paving over any preexisting file.
output_file = os.path.join(output_dir, archived_file)
try:
os.remove(output_file)
except OSError:
pass
os.rename(os.path.join(temp_dir, archived_file), output_file)
finally:
shutil.rmtree(temp_dir, True)
if level != 0:
return level
# The expand utility preserves the modification date and time of the archived
# file. Touch the extracted file. This helps build systems that compare the
# modification times of input and output files to determine whether to do an
# action.
os.utime(os.path.join(output_dir, archived_file), None)
return 0
if __name__ == '__main__':
sys.exit(main())

View File

@ -0,0 +1,86 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This gypi file defines the patterns used for determining whether a
# file is excluded from the build on a given platform. It is
# included by common.gypi for chromium_code.
{
'conditions': [
['OS!="win"', {
'sources/': [ ['exclude', '_win(_unittest)?\\.(h|cc)$'],
['exclude', '(^|/)win/'],
['exclude', '(^|/)win_[^/]*\\.(h|cc)$'] ],
}],
['OS!="mac"', {
'sources/': [ ['exclude', '_(cocoa|mac)(_unittest)?\\.(h|cc)$'],
['exclude', '(^|/)(cocoa|mac)/'],
['exclude', '\\.mm?$' ] ],
}],
# Do not exclude the linux files on *BSD since most of them can be
# shared at this point.
# In case a file is not needed, it is going to be excluded later on.
# TODO(evan): the above is not correct; we shouldn't build _linux
# files on non-linux.
['OS!="linux" and OS!="openbsd" and OS!="freebsd"', {
'sources/': [
['exclude', '_linux(_unittest)?\\.(h|cc)$'],
['exclude', '(^|/)linux/'],
],
}],
['OS!="android"', {
'sources/': [
['exclude', '_android(_unittest)?\\.cc$'],
['exclude', '(^|/)android/'],
],
}],
['OS=="win"', {
'sources/': [ ['exclude', '_posix(_unittest)?\\.(h|cc)$'] ],
}],
['chromeos!=1', {
'sources/': [ ['exclude', '_chromeos\\.(h|cc)$'] ]
}],
['OS!="linux" and OS!="openbsd" and OS!="freebsd"', {
'sources/': [
['exclude', '_xdg(_unittest)?\\.(h|cc)$'],
],
}],
['use_x11!=1', {
'sources/': [
['exclude', '_(chromeos|x|x11)(_unittest)?\\.(h|cc)$'],
['exclude', '(^|/)x11_[^/]*\\.(h|cc)$'],
],
}],
['toolkit_uses_gtk!=1', {
'sources/': [
['exclude', '_gtk(_unittest)?\\.(h|cc)$'],
['exclude', '(^|/)gtk/'],
['exclude', '(^|/)gtk_[^/]*\\.(h|cc)$'],
],
}],
['toolkit_views==0', {
'sources/': [ ['exclude', '_views\\.(h|cc)$'] ]
}],
['use_aura==0', {
'sources/': [ ['exclude', '_aura(_unittest)?\\.(h|cc)$'],
['exclude', '(^|/)aura/'],
]
}],
['use_aura==0 or use_x11==0', {
'sources/': [ ['exclude', '_aurax11\\.(h|cc)$'] ]
}],
['use_aura==0 or OS!="win"', {
'sources/': [ ['exclude', '_aurawin\\.(h|cc)$'] ]
}],
['use_wayland!=1', {
'sources/': [
['exclude', '_(wayland)(_unittest)?\\.(h|cc)$'],
['exclude', '(^|/)wayland/'],
['exclude', '(^|/)(wayland)_[^/]*\\.(h|cc)$'],
],
}],
]
}

View File

@ -0,0 +1,31 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is meant to be included into an action to invoke grit in a
# consistent manner. To use this the following variables need to be
# defined:
# grit_grd_file: string: grd file path
# grit_out_dir: string: the output directory path
# It would be really nice to do this with a rule instead of actions, but it
# would need to determine inputs and outputs via grit_info on a per-file
# basis. GYP rules don't currently support that. They could be extended to
# do this, but then every generator would need to be updated to handle this.
{
'variables': {
'grit_cmd': ['python', '<(DEPTH)/tools/grit/grit.py'],
},
'inputs': [
'<!@pymod_do_main(grit_info <@(grit_defines) --inputs <(grit_grd_file))',
],
'outputs': [
'<!@pymod_do_main(grit_info <@(grit_defines) --outputs \'<(grit_out_dir)\' <(grit_grd_file))',
],
'action': ['<@(grit_cmd)',
'-i', '<(grit_grd_file)', 'build',
'-o', '<(grit_out_dir)',
'<@(grit_defines)' ],
'message': 'Generating resources from <(grit_grd_file)',
}

View File

@ -0,0 +1,30 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is meant to be included into a target that will have one or more
# uses of grit_action.gypi. To use this the following variables need to be
# defined:
# grit_out_dir: string: the output directory path
# NOTE: This file is optional, not all targets that use grit include it, some
# do their own custom directives instead.
{
'conditions': [
# If the target is a direct binary, it needs to be able to find the header,
    # otherwise it is probably a supporting target just for grit, so the include
# dir needs to be set on anything that depends on this action.
['_type=="executable" or _type=="shared_library" or \
_type=="loadable_module" or _type=="static_library"', {
'include_dirs': [
'<(grit_out_dir)',
],
}, {
'direct_dependent_settings': {
'include_dirs': [
'<(grit_out_dir)',
],
},
}],
],
}

View File

@ -0,0 +1,171 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script is wrapper for Chromium that adds some support for how GYP
# is invoked by Chromium beyond what can be done in the gclient hooks.
import glob
import os
import shlex
import subprocess
import sys
script_dir = os.path.dirname(__file__)
chrome_src = os.path.abspath(os.path.join(script_dir, os.pardir))
sys.path.insert(0, os.path.join(chrome_src, 'tools', 'gyp', 'pylib'))
import gyp
# Add paths so that pymod_do_main(...) can import files.
sys.path.insert(1, os.path.join(chrome_src, 'tools', 'grit'))
sys.path.insert(1, os.path.join(chrome_src, 'chrome', 'tools', 'build'))
# On Windows, Psyco shortens warm runs of build/gyp_chromium by about
# 20 seconds on a z600 machine with 12 GB of RAM, from 90 down to 70
# seconds. Conversely, memory usage of build/gyp_chromium with Psyco
# maxes out at about 158 MB vs. 132 MB without it.
#
# Psyco uses native libraries, so we need to load a different
# installation depending on which OS we are running under. It has not
# been tested whether using Psyco on our Mac and Linux builds is worth
# it (the GYP running time is a lot shorter, so the JIT startup cost
# may not be worth it).
if sys.platform == 'win32':
try:
sys.path.insert(0, os.path.join(chrome_src, 'third_party', 'psyco_win32'))
import psyco
except:
psyco = None
else:
psyco = None
def apply_gyp_environment(file_path=None):
"""
Reads in a *.gyp_env file and applies the valid keys to os.environ.
"""
if not file_path or not os.path.exists(file_path):
return
file_contents = open(file_path).read()
try:
file_data = eval(file_contents, {'__builtins__': None}, None)
except SyntaxError, e:
e.filename = os.path.abspath(file_path)
raise
supported_vars = ( 'CHROMIUM_GYP_FILE',
'CHROMIUM_GYP_SYNTAX_CHECK',
'GYP_DEFINES',
'GYP_GENERATOR_FLAGS',
'GYP_GENERATOR_OUTPUT', )
for var in supported_vars:
val = file_data.get(var)
if val:
if var in os.environ:
print 'INFO: Environment value for "%s" overrides value in %s.' % (
var, os.path.abspath(file_path)
)
else:
os.environ[var] = val
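# For illustration (hypothetical values): a chromium.gyp_env file is a Python
# dict literal that the eval() above can parse, e.g.:
#   { 'GYP_DEFINES': 'component=shared_library',
#     'GYP_GENERATOR_FLAGS': 'output_dir=out' }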
def additional_include_files(args=[]):
"""
Returns a list of additional (.gypi) files to include, without
duplicating ones that are already specified on the command line.
"""
# Determine the include files specified on the command line.
# This doesn't cover all the different option formats you can use,
# but it's mainly intended to avoid duplicating flags on the automatic
# makefile regeneration which only uses this format.
specified_includes = set()
for arg in args:
if arg.startswith('-I') and len(arg) > 2:
specified_includes.add(os.path.realpath(arg[2:]))
result = []
def AddInclude(path):
if os.path.realpath(path) not in specified_includes:
result.append(path)
# Always include common.gypi.
AddInclude(os.path.join(script_dir, 'common.gypi'))
# Optionally add supplemental .gypi files if present.
supplements = glob.glob(os.path.join(chrome_src, '*', 'supplement.gypi'))
for supplement in supplements:
AddInclude(supplement)
return result
if __name__ == '__main__':
args = sys.argv[1:]
# Use the Psyco JIT if available.
if psyco:
psyco.profile()
print "Enabled Psyco JIT."
# Fall back on hermetic python if we happen to get run under cygwin.
# TODO(bradnelson): take this out once this issue is fixed:
# http://code.google.com/p/gyp/issues/detail?id=177
if sys.platform == 'cygwin':
python_dir = os.path.join(chrome_src, 'third_party', 'python_26')
env = os.environ.copy()
env['PATH'] = python_dir + os.pathsep + env.get('PATH', '')
p = subprocess.Popen(
[os.path.join(python_dir, 'python.exe')] + sys.argv,
env=env, shell=False)
p.communicate()
sys.exit(p.returncode)
if 'SKIP_CHROMIUM_GYP_ENV' not in os.environ:
# Update the environment based on chromium.gyp_env
gyp_env_path = os.path.join(os.path.dirname(chrome_src), 'chromium.gyp_env')
apply_gyp_environment(gyp_env_path)
# This could give false positives since it doesn't actually do real option
# parsing. Oh well.
gyp_file_specified = False
for arg in args:
if arg.endswith('.gyp'):
gyp_file_specified = True
break
# If we didn't get a file, check an env var, and then fall back to
# assuming 'all.gyp' from the same directory as the script.
if not gyp_file_specified:
gyp_file = os.environ.get('CHROMIUM_GYP_FILE')
if gyp_file:
# Note that CHROMIUM_GYP_FILE values can't have backslashes as
# path separators even on Windows due to the use of shlex.split().
args.extend(shlex.split(gyp_file))
else:
args.append(os.path.join(script_dir, 'all.gyp'))
args.extend(['-I' + i for i in additional_include_files(args)])
# There shouldn't be a circular dependency relationship between .gyp files,
# but in Chromium's .gyp files, on non-Mac platforms, circular relationships
# currently exist. The check for circular dependencies is currently
# bypassed on other platforms, but is left enabled on the Mac, where a
# violation of the rule causes Xcode to misbehave badly.
# TODO(mark): Find and kill remaining circular dependencies, and remove this
# option. http://crbug.com/35878.
# TODO(tc): Fix circular dependencies in ChromiumOS then add linux2 to the
# list.
if sys.platform not in ('darwin',):
args.append('--no-circular-check')
# If CHROMIUM_GYP_SYNTAX_CHECK is set to 1, it will invoke gyp with --check
# to enforce syntax checking.
syntax_check = os.environ.get('CHROMIUM_GYP_SYNTAX_CHECK')
if syntax_check and int(syntax_check):
args.append('--check')
print 'Updating projects from gyp files...'
sys.stdout.flush()
# Off we go...
sys.exit(gyp.main(args))

View File

@ -0,0 +1,123 @@
#!/bin/bash
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -e
# This script installs the Android SDK and NDK for building Chromium on
# Android, and doesn't need to run as root.
# Using Android 4.0, API Level: 14 (ice cream sandwich). The SDK package is
# about 25M.
SDK_FILE_NAME="android-sdk_r16-linux.tgz"
SDK_DOWNLOAD_URL="http://dl.google.com/android/${SDK_FILE_NAME}"
SDK_MD5SUM="3ba457f731d51da3741c29c8830a4583"
# Using "ANDROID_SDK_ROOT/tools/android list targets" to get the matching target
# id which will be loaded in simulator for testing.
# For example: the output of the listed the target could be below, and the
# 'android-13' is the SDK_TARGET_ID in this case.
# id: 9 or "android-13"
# Name: Android 3.2
# Type: Platform
# API level: 13
# Revision: 1
# Skins: WXGA (default)
SDK_TARGET_ID=android-14
# Using NDK r7; the package is about 64M.
NDK_FILE_NAME="android-ndk-r7-linux-x86.tar.bz2"
NDK_DOWNLOAD_URL="http://dl.google.com/android/ndk/${NDK_FILE_NAME}"
NDK_MD5SUM="bf15e6b47bf50824c4b96849bf003ca3"
# The temporary directory used to store the downloaded file.
TEMPDIR=$(mktemp -d)
cleanup() {
local status=${?}
trap - EXIT
rm -rf "${TEMPDIR}"
exit ${status}
}
trap cleanup EXIT
##########################################################
# Download and install a tgz package by wget and tar -xvf.
# The current directory is changed in this function.
# Arguments:
# local_file_name, the name of downloaded file.
# download_url, the url to download the package.
# md5, the package's md5 which could be found in download page.
# install_path, where the package should be installed.
# Returns:
# None
##########################################################
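# Example (as invoked below for the SDK):
#   install_dev_kit "${SDK_FILE_NAME}" "${SDK_DOWNLOAD_URL}" "${SDK_MD5SUM}" \
#     "$(dirname "${ANDROID_SDK_ROOT}")"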
install_dev_kit() {
local local_file_name="${1}"
local download_url="${2}"
local md5="${3}"
local install_path="${4}"
cd "${TEMPDIR}"
wget "${download_url}"
local computed_md5=$(md5sum "${local_file_name}" | cut -d' ' -f1)
if [[ "${computed_md5}" != "${md5}" ]]; then
echo "Downloaded ${local_file_name} has bad md5sum, which is expected" >& 2
echo "to be ${md5} but was ${computed_md5}" >& 2
exit 1
fi
echo "Install ${local_file_name}"
mv "${local_file_name}" "${install_path}"
cd "${install_path}"
tar -xvf "${local_file_name}"
}
if [[ -z "${ANDROID_SDK_ROOT}" ]]; then
echo "Please set ANDROID_SDK_ROOT to where they should installed to." >& 2
echo "For example: /usr/local/android-sdk-linux_x86" >& 2
exit 1
fi
if [[ -z "${ANDROID_NDK_ROOT}" ]]; then
echo "Please set ANDROID_NDK_ROOT to where they should installed to." >& 2
echo "For example: /usr/local/android-ndk-r6b" >& 2
exit 1
fi
# Install Android SDK if it doesn't exist.
if [[ ! -d "${ANDROID_SDK_ROOT}" ]]; then
echo 'Install ANDROID SDK ...'
(install_dev_kit "${SDK_FILE_NAME}" "${SDK_DOWNLOAD_URL}" "${SDK_MD5SUM}" \
$(dirname "${ANDROID_SDK_ROOT}"))
fi
# Install the target if it doesn't exist. The package installed above contains
# no platform, platform-tool or tool; all of those should be installed by
# ${ANDROID_SDK_ROOT}/tools/android.
if [[ ! $("${ANDROID_SDK_ROOT}/tools/android" list targets \
| grep -q "${SDK_TARGET_ID}") ]]; then
# Updates the SDK by installing the necessary components.
# From current configuration, all android platforms will be installed.
# This may take a while.
echo "Install platform, platform-tool and tool ..."
"${ANDROID_SDK_ROOT}"/tools/android update sdk -o --no-ui \
--filter platform,platform-tool,tool,system-image
fi
# Create an Android Virtual Device named 'buildbot' with default hardware
# configuration and override the existing one, since there is no easy way to
# check whether the current AVD has the correct configuration, and it takes almost no
# time to create a new one.
"${ANDROID_SDK_ROOT}/tools/android" --silent create avd --name buildbot \
--target ${SDK_TARGET_ID} --force <<< "no"
# Install Android NDK if it doesn't exist.
if [[ ! -d "${ANDROID_NDK_ROOT}" ]]; then
echo 'Install ANDROID NDK ...'
(install_dev_kit "${NDK_FILE_NAME}" "${NDK_DOWNLOAD_URL}" "${NDK_MD5SUM}" \
$(dirname "${ANDROID_NDK_ROOT}"))
fi

View File

@ -0,0 +1,513 @@
#!/bin/bash -e
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script to install everything needed to build chromium (well, ideally, anyway)
# See http://code.google.com/p/chromium/wiki/LinuxBuildInstructions
# and http://code.google.com/p/chromium/wiki/LinuxBuild64Bit
usage() {
echo "Usage: $0 [--options]"
echo "Options:"
echo "--[no-]syms: enable or disable installation of debugging symbols"
echo "--[no-]gold: enable or disable installation of gold linker"
echo "--[no-]lib32: enable or disable installation of 32 bit libraries"
echo "--[no-]restore-usr-bin-ld: enable or disable restoring /usr/bin/ld to"
echo " ld.bfd if it is currently gold"
echo "Script will prompt interactively if options not given."
exit 1
}
while test "$1" != ""
do
case "$1" in
--syms) do_inst_syms=1;;
--no-syms) do_inst_syms=0;;
--gold) do_inst_gold=1;;
--no-gold) do_inst_gold=0;;
--lib32) do_inst_lib32=1;;
--no-lib32) do_inst_lib32=0;;
--restore-usr-bin-ld) do_restore_usr_bin_ld=1;;
--no-restore-usr-bin-ld) do_restore_usr_bin_ld=0;;
*) usage;;
esac
shift
done
install_gold() {
# Gold is optional; it's a faster replacement for ld,
# and makes life on 2GB machines much more pleasant.
# First make sure root can access this directory, as that's tripped
# up some folks.
if sudo touch xyz.$$
then
sudo rm xyz.$$
else
echo root cannot write to the current directory, not installing gold
return
fi
BINUTILS=binutils-2.21.1
BINUTILS_URL=http://ftp.gnu.org/gnu/binutils/$BINUTILS.tar.bz2
BINUTILS_SHA1=525255ca6874b872540c9967a1d26acfbc7c8230
test -f $BINUTILS.tar.bz2 || wget $BINUTILS_URL
if test "`sha1sum $BINUTILS.tar.bz2|cut -d' ' -f1`" != "$BINUTILS_SHA1"
then
echo Bad sha1sum for $BINUTILS.tar.bz2
exit 1
fi
tar -xjvf $BINUTILS.tar.bz2
cd $BINUTILS
./configure --prefix=/usr/local/gold --enable-gold=default --enable-threads \
--enable-bfd=yes
NCPU=`cat /proc/cpuinfo |grep ^processor|wc -l`
make maybe-all-binutils maybe-all-gold maybe-all-ld -j${NCPU}
if sudo make maybe-install-binutils maybe-install-gold maybe-install-ld
then
# Still need to figure out a graceful way of pointing gyp to use
# /usr/local/gold/bin/ld without requiring the user to set environment
# variables.
sudo strip /usr/local/gold/bin/ld.gold
sudo strip /usr/local/gold/bin/ld.bfd
else
echo "make install failed, not installing gold"
fi
}
if ! egrep -q \
'Ubuntu (10\.04|10\.10|11\.04|11\.10|lucid|maverick|natty|oneiric)' \
/etc/issue; then
echo "Only Ubuntu 10.04 (lucid) through 11.10 (oneiric) are currently" \
"supported" >&2
exit 1
fi
if ! uname -m | egrep -q "i686|x86_64"; then
echo "Only x86 architectures are currently supported" >&2
exit
fi
if [ "x$(id -u)" != x0 ]; then
echo "Running as non-root user."
echo "You might have to enter your password one or more times for 'sudo'."
echo
fi
# Packages needed for chromeos only
chromeos_dev_list="libpulse-dev"
# Packages needed for development
dev_list="apache2.2-bin bison curl elfutils fakeroot flex g++ gperf
language-pack-fr libapache2-mod-php5 libasound2-dev libbz2-dev
libcairo2-dev libcups2-dev libcurl4-gnutls-dev libdbus-glib-1-dev
libelf-dev libgconf2-dev libgl1-mesa-dev libglib2.0-dev
libglu1-mesa-dev libgnome-keyring-dev libgtk2.0-dev libjpeg62-dev
libkrb5-dev libnspr4-dev libnss3-dev libpam0g-dev libsctp-dev
libsqlite3-dev libssl-dev libudev-dev libwww-perl libxslt1-dev
libxss-dev libxt-dev libxtst-dev mesa-common-dev msttcorefonts patch
perl php5-cgi pkg-config python python-cherrypy3 python-dev
python-psutil rpm ruby subversion ttf-dejavu-core ttf-indic-fonts
ttf-kochi-gothic ttf-kochi-mincho ttf-thai-tlwg wdiff
$chromeos_dev_list"
# Run-time libraries required by chromeos only
chromeos_lib_list="libpulse0 libbz2-1.0 libcurl4-gnutls-dev"
# Full list of required run-time libraries
lib_list="libatk1.0-0 libc6 libasound2 libcairo2 libcups2 libdbus-glib-1-2
libexpat1 libfontconfig1 libfreetype6 libglib2.0-0 libgnome-keyring0
libgtk2.0-0 libpam0g libpango1.0-0 libpcre3 libpixman-1-0
libpng12-0 libstdc++6 libsqlite3-0 libudev0 libx11-6 libxau6 libxcb1
libxcomposite1 libxcursor1 libxdamage1 libxdmcp6 libxext6 libxfixes3
libxi6 libxinerama1 libxrandr2 libxrender1 libxtst6 zlib1g
$chromeos_lib_list"
# Debugging symbols for all of the run-time libraries
dbg_list="libatk1.0-dbg libc6-dbg libcairo2-dbg libdbus-glib-1-2-dbg
libfontconfig1-dbg libglib2.0-0-dbg libgtk2.0-0-dbg
libpango1.0-0-dbg libpcre3-dbg libpixman-1-0-dbg
libsqlite3-0-dbg
libx11-6-dbg libxau6-dbg libxcb1-dbg libxcomposite1-dbg
libxcursor1-dbg libxdamage1-dbg libxdmcp6-dbg libxext6-dbg
libxfixes3-dbg libxi6-dbg libxinerama1-dbg libxrandr2-dbg
libxrender1-dbg libxtst6-dbg zlib1g-dbg"
# Plugin lists needed for tests.
plugin_list="flashplugin-installer"
# Some NSS packages were renamed in Natty.
if egrep -q 'Ubuntu (10\.04|10\.10)' /etc/issue; then
dbg_list="${dbg_list} libnspr4-0d-dbg libnss3-1d-dbg"
lib_list="${lib_list} libnspr4-0d libnss3-1d"
else
dbg_list="${dbg_list} libnspr4-dbg libnss3-dbg"
lib_list="${lib_list} libnspr4 libnss3"
fi
# Waits for the user to press 'Y' or 'N'. Either uppercase or lowercase is
# accepted. Returns 0 for 'Y' and 1 for 'N'. If an optional parameter has
# been provided to yes_no(), the function also accepts RETURN as a user input.
# The parameter specifies the exit code that should be returned in that case.
# The function will echo the user's selection followed by a newline character.
# Users can abort the function by pressing CTRL-C. This will call "exit 1".
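# Example (as used below): default to "no" when the user just presses RETURN:
#   echo -n "Install the gold linker? (y/N) "
#   if yes_no 1; then
#     do_inst_gold=1
#   fi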
yes_no() {
local c
while :; do
c="$(trap 'stty echo -iuclc icanon 2>/dev/null' EXIT INT TERM QUIT
stty -echo iuclc -icanon 2>/dev/null
dd count=1 bs=1 2>/dev/null | od -An -tx1)"
case "$c" in
" 0a") if [ -n "$1" ]; then
[ $1 -eq 0 ] && echo "Y" || echo "N"
return $1
fi
;;
" 79") echo "Y"
return 0
;;
" 6e") echo "N"
return 1
;;
"") echo "Aborted" >&2
exit 1
;;
*) # The user pressed an unrecognized key. As we are not echoing
# any incorrect user input, alert the user by ringing the bell.
(tput bel) 2>/dev/null
;;
esac
done
}
if test "$do_inst_syms" = ""
then
echo "This script installs all tools and libraries needed to build Chromium."
echo ""
echo "For most of the libraries, it can also install debugging symbols, which"
echo "will allow you to debug code in the system libraries. Most developers"
echo "won't need these symbols."
echo -n "Do you want me to install them for you (y/N) "
if yes_no 1; then
do_inst_syms=1
fi
fi
if test "$do_inst_syms" = "1"; then
echo "Installing debugging symbols."
else
echo "Skipping installation of debugging symbols."
dbg_list=
fi
sudo apt-get update
# We initially run "apt-get" with the --reinstall option and parse its output.
# This way, we can find all the packages that need to be newly installed
# without accidentally promoting any packages from "auto" to "manual".
# We then re-run "apt-get" with just the list of missing packages.
echo "Finding missing packages..."
packages="${dev_list} ${lib_list} ${dbg_list} ${plugin_list}"
# Intentionally leaving $packages unquoted so it's more readable.
echo "Packages required: " $packages
echo
new_list_cmd="sudo apt-get install --reinstall $(echo $packages)"
if new_list="$(yes n | LANG=C $new_list_cmd)"; then
# We probably never hit the following line.
echo "No missing packages, and the packages are up-to-date."
elif [ $? -eq 1 ]; then
# We expect apt-get to have exit status of 1.
# This indicates that we canceled the install with "yes n|".
new_list=$(echo "$new_list" |
sed -e '1,/The following NEW packages will be installed:/d;s/^ //;t;d')
new_list=$(echo "$new_list" | sed 's/ *$//')
if [ -z "$new_list" ] ; then
echo "No missing packages, and the packages are up-to-date."
else
echo "Installing missing packages: $new_list."
sudo apt-get install ${new_list}
fi
echo
else
# An apt-get exit status of 100 indicates that a real error has occurred.
# I am intentionally leaving out the '"'s around new_list_cmd,
# as this makes it easier to cut and paste the output
echo "The following command failed: " ${new_list_cmd}
echo
echo "It produces the following output:"
yes n | $new_list_cmd || true
echo
echo "You will have to install the above packages yourself."
echo
exit 100
fi
# Some operating systems already ship gold (on recent Debian and
# Ubuntu you can do "apt-get install binutils-gold" to get it), but
# older releases didn't. Additionally, gold 2.20 (included in Ubuntu
# Lucid) makes binaries that just segfault, and 2.20.1 does not support
# --map-whole-files.
# So install from source if we don't have a good version.
case `ld --version` in
*gold*2.2[1-9].*)
echo "*** Warning ***"
echo "If the default linker is gold, linking may fail for:"
echo "the Linux kernel, kernel modules, Valgrind, and Wine."
echo "If you previously installed gold as the default linker,"
echo "you can restore the original linker by running:"
echo "'cd /usr/bin; sudo rm ld; sudo mv ld.orig ld'"
echo
if [ "$do_restore_usr_bin_ld" = "" ]
then
echo -n "Restore /usr/bin/ld to the original linker? (Y/n) "
if yes_no 0
then
do_restore_usr_bin_ld=1
fi
echo
fi
if [ "$do_restore_usr_bin_ld" = "1" ]
then
if sudo mv /usr/bin/ld.orig /usr/bin/ld
then
echo "Restored /usr/bin/ld.orig as /usr/bin/ld"
else
echo "Failed to restore /usr/bin/ld.orig as /usr/bin/ld"
fi
echo
fi
;;
esac
# Check the gold version first.
gold_up_to_date="1"
if [ -x "/usr/local/gold/bin/ld" ]
then
case `/usr/local/gold/bin/ld --version` in
*gold*2.2[1-9].*) ;;
* )
gold_up_to_date="0"
esac
fi
# Then check and make sure ld.bfd exists.
if [ "$gold_up_to_date" = "1" ] && [ ! -x "/usr/local/gold/bin/ld.bfd" ]
then
gold_up_to_date="0"
fi
if [ "$gold_up_to_date" = "0" ]
then
if test "$do_inst_gold" = ""
then
echo "Gold is a new linker that links Chrome 5x faster than GNU ld."
echo -n "*** To use the gold linker, "
echo "you must pass -B/usr/local/gold/bin/ to g++ ***"
echo -n "Install the gold linker? (y/N) "
if yes_no 1; then
do_inst_gold=1
fi
fi
if test "$do_inst_gold" = "1"
then
echo "Building binutils with gold..."
install_gold || exit 99
else
echo "Not installing gold."
fi
fi
# Install 32bit backwards compatibility support for 64bit systems
if [ "$(uname -m)" = "x86_64" ]; then
if test "$do_inst_lib32" = ""
then
echo "Installing 32bit libraries not already provided by the system"
echo
echo "This is only needed to build a 32-bit Chrome on your 64-bit system."
echo
echo "While we only need to install a relatively small number of library"
echo "files, we temporarily need to download a lot of large *.deb packages"
echo "that contain these files. We will create new *.deb packages that"
echo "include just the 32bit libraries. These files will then be found on"
echo "your system in places like /lib32, /usr/lib32, /usr/lib/debug/lib32,"
echo "/usr/lib/debug/usr/lib32. If you ever need to uninstall these files,"
echo "look for packages named *-ia32.deb."
echo "Do you want me to download all packages needed to build new 32bit"
echo -n "package files (y/N) "
if yes_no 1; then
do_inst_lib32=1
fi
fi
if test "$do_inst_lib32" != "1"
then
echo "Exiting without installing any 32bit libraries."
exit 0
fi
# Standard 32bit compatibility libraries
echo "First, installing the limited existing 32-bit support..."
cmp_list="ia32-libs lib32asound2-dev lib32stdc++6 lib32z1
lib32z1-dev libc6-dev-i386 libc6-i386 g++-multilib"
if [ -n "`apt-cache search lib32readline-gplv2-dev 2>/dev/null`" ]; then
cmp_list="${cmp_list} lib32readline-gplv2-dev"
else
cmp_list="${cmp_list} lib32readline5-dev"
fi
sudo apt-get install $cmp_list
tmp=/tmp/install-32bit.$$
trap 'rm -rf "${tmp}"' EXIT INT TERM QUIT
mkdir -p "${tmp}/apt/lists/partial" "${tmp}/cache" "${tmp}/partial"
touch "${tmp}/status"
[ -r /etc/apt/apt.conf ] && cp /etc/apt/apt.conf "${tmp}/apt/"
cat >>"${tmp}/apt/apt.conf" <<EOF
Apt::Architecture "i386";
Dir::Cache "${tmp}/cache";
Dir::Cache::Archives "${tmp}/";
Dir::State::Lists "${tmp}/apt/lists/";
Dir::State::status "${tmp}/status";
EOF
# Download 32bit packages
echo "Computing list of available 32bit packages..."
sudo apt-get -c="${tmp}/apt/apt.conf" update
echo "Downloading available 32bit packages..."
sudo apt-get -c="${tmp}/apt/apt.conf" \
--yes --download-only --force-yes --reinstall install \
${lib_list} ${dbg_list}
# Open packages, remove everything that is not a library, move the
# library to a lib32 directory and package everything as a *.deb file.
echo "Repackaging and installing 32bit packages for use on 64bit systems..."
for i in ${lib_list} ${dbg_list}; do
orig="$(echo "${tmp}/${i}"_*_i386.deb)"
compat="$(echo "${orig}" |
sed -e 's,\(_[^_/]*_\)i386\(.deb\),-ia32\1amd64\2,')"
rm -rf "${tmp}/staging"
msg="$(fakeroot -u sh -exc '
# Unpack 32bit Debian archive
umask 022
mkdir -p "'"${tmp}"'/staging/dpkg/DEBIAN"
cd "'"${tmp}"'/staging"
ar x "'${orig}'"
tar zCfx dpkg data.tar.gz
tar zCfx dpkg/DEBIAN control.tar.gz
# Create a posix extended regular expression fragment that will
# recognize the includes which have changed. Should be rare,
# will almost always be empty.
includes=`sed -n -e "s/^[0-9a-z]* //g" \
-e "\,usr/include/,p" dpkg/DEBIAN/md5sums |
xargs -n 1 -I FILE /bin/sh -c \
"cmp -s dpkg/FILE /FILE || echo FILE" |
tr "\n" "|" |
sed -e "s,|$,,"`
# If empty, set it to not match anything.
test -z "$includes" && includes="^//"
# Turn the conflicts into an extended RE for removal from the
# Provides line.
conflicts=`sed -n -e "/Conflicts/s/Conflicts: *//;T;s/, */|/g;p" \
dpkg/DEBIAN/control`
# Rename package, change architecture, remove conflicts and dependencies
sed -r -i \
-e "/Package/s/$/-ia32/" \
-e "/Architecture/s/:.*$/: amd64/" \
-e "/Depends/s/:.*/: ia32-libs/" \
-e "/Provides/s/($conflicts)(, *)?//g;T1;s/, *$//;:1" \
-e "/Recommends/d" \
-e "/Conflicts/d" \
dpkg/DEBIAN/control
# Only keep files that live in "lib" directories or the includes
# that have changed.
sed -r -i \
-e "/\/lib64\//d" -e "/\/.?bin\//d" \
-e "\,$includes,s,[ /]include/,&32/,g;s,include/32/,include32/,g" \
-e "s, lib/, lib32/,g" \
-e "s,/lib/,/lib32/,g" \
-e "t;d" \
-e "\,^/usr/lib32/debug\(.*/lib32\),s,^/usr/lib32/debug,/usr/lib/debug," \
dpkg/DEBIAN/md5sums
# Re-run ldconfig after installation/removal
{ echo "#!/bin/sh"; echo "[ \"x\$1\" = xconfigure ]&&ldconfig||:"; } \
>dpkg/DEBIAN/postinst
{ echo "#!/bin/sh"; echo "[ \"x\$1\" = xremove ]&&ldconfig||:"; } \
>dpkg/DEBIAN/postrm
chmod 755 dpkg/DEBIAN/postinst dpkg/DEBIAN/postrm
# Remove any other control files
find dpkg/DEBIAN -mindepth 1 "(" -name control -o -name md5sums -o \
-name postinst -o -name postrm ")" -o -print |
xargs -r rm -rf
# Remove any files/dirs that live outside of "lib" directories,
# or are not in our list of changed includes.
find dpkg -mindepth 1 -regextype posix-extended \
"(" -name DEBIAN -o -name lib -o -regex "dpkg/($includes)" ")" \
-prune -o -print | tac |
xargs -r -n 1 sh -c "rm \$0 2>/dev/null || rmdir \$0 2>/dev/null || : "
find dpkg -name lib64 -o -name bin -o -name "?bin" |
tac | xargs -r rm -rf
# Remove any symbolic links that were broken by the above steps.
find -L dpkg -type l -print | tac | xargs -r rm -rf
# Rename lib to lib32, but keep debug symbols in /usr/lib/debug/usr/lib32
# That is where gdb looks for them.
find dpkg -type d -o -path "*/lib/*" -print |
xargs -r -n 1 sh -c "
i=\$(echo \"\${0}\" |
sed -e s,/lib/,/lib32/,g \
-e s,/usr/lib32/debug\\\\\(.*/lib32\\\\\),/usr/lib/debug\\\\1,);
mkdir -p \"\${i%/*}\";
mv \"\${0}\" \"\${i}\""
# Rename include to include32.
[ -d "dpkg/usr/include" ] && mv "dpkg/usr/include" "dpkg/usr/include32"
# Prune any empty directories
find dpkg -type d | tac | xargs -r -n 1 rmdir 2>/dev/null || :
# Create our own Debian package
cd ..
dpkg --build staging/dpkg .' 2>&1)"
compat="$(eval echo $(echo "${compat}" |
sed -e 's,_[^_/]*_amd64.deb,_*_amd64.deb,'))"
[ -r "${compat}" ] || {
echo "${msg}" >&2
echo "Failed to build new Debian archive!" >&2
exit 1
}
msg="$(sudo dpkg -i "${compat}" 2>&1)" && {
echo "Installed ${compat##*/}"
} || {
# echo "${msg}" >&2
echo "Skipped ${compat##*/}"
}
done
# Add symbolic links for developing 32bit code
echo "Adding missing symbolic links, enabling 32bit code development..."
for i in $(find /lib32 /usr/lib32 -maxdepth 1 -name \*.so.\* |
sed -e 's/[.]so[.][0-9].*/.so/' |
sort -u); do
[ "x${i##*/}" = "xld-linux.so" ] && continue
[ -r "$i" ] && continue
j="$(ls "$i."* | sed -e 's/.*[.]so[.]\([^.]*\)$/\1/;t;d' |
sort -n | tail -n 1)"
[ -r "$i.$j" ] || continue
sudo ln -s "${i##*/}.$j" "$i"
done
fi

View File

@ -0,0 +1,325 @@
#!/bin/bash -e
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script installs Debian-derived distributions in a chroot environment.
# It can for example be used to have an accurate 32bit build and test
# environment when otherwise working on a 64bit machine.
# N. B. it is unlikely that this script will ever work on anything other than a
# Debian-derived system.
usage() {
echo "usage: ${0##*/} [-m mirror] [-g group,...] [-s] [-c]"
echo "-g group,... groups that can use the chroot unauthenticated"
echo " Default: 'admin' and current user's group ('$(id -gn)')"
echo "-m mirror an alternate repository mirror for package downloads"
echo "-s configure default deb-srcs"
echo "-c always copy 64bit helper binaries to 32bit chroot"
echo "-h this help message"
}
process_opts() {
local OPTNAME OPTIND OPTERR OPTARG
while getopts ":g:m:sch" OPTNAME; do
case "$OPTNAME" in
g)
[ -n "${OPTARG}" ] &&
chroot_groups="${chroot_groups}${chroot_groups:+,}${OPTARG}"
;;
m)
if [ -n "${mirror}" ]; then
echo "You can only specify exactly one mirror location"
usage
exit 1
fi
mirror="$OPTARG"
;;
s)
add_srcs="y"
;;
c)
copy_64="y"
;;
h)
usage
exit 0
;;
\:)
echo "'-$OPTARG' needs an argument."
usage
exit 1
;;
*)
echo "invalid command-line option: $OPTARG"
usage
exit 1
;;
esac
done
if [ $# -ge ${OPTIND} ]; then
eval echo "Unexpected command line argument: \${${OPTIND}}"
usage
exit 1
fi
}
# Check that we are running as a regular user
[ "$(id -nu)" = root ] && {
echo "Run this script as a regular user and provide your \"sudo\"" \
"password if requested" >&2
exit 1
}
mkdir -p "$HOME/chroot/"
process_opts "$@"
# Error handler
trap 'exit 1' INT TERM QUIT
trap 'sudo apt-get clean; tput bel; echo; echo Failed' EXIT
# Install any missing applications that this script relies on. If these packages
# are already installed, don't force another "apt-get install". That would
# prevent them from being auto-removed, if they ever become eligible for that.
# And as this script only needs the packages once, there is no good reason to
# introduce a hard dependency on things such as dchroot and debootstrap.
dep=
for i in dchroot debootstrap; do
[ -d /usr/share/doc/"$i" ] || dep="$dep $i"
done
[ -n "$dep" ] && sudo apt-get -y install $dep
sudo apt-get -y install schroot
# Create directory for chroot
sudo mkdir -p /var/lib/chroot
# Find chroot environments that can be installed with debootstrap
targets="$(cd /usr/share/debootstrap/scripts
ls | grep '^[a-z]*$')"
# Ask user to pick one of the available targets
echo "The following targets are available to be installed in a chroot:"
j=1; for i in $targets; do
printf '%4d: %s\n' "$j" "$i"
j=$(($j+1))
done
while :; do
printf "Which target would you like to install: "
read n
[ "$n" -gt 0 -a "$n" -lt "$j" ] >&/dev/null && break
done
j=1; for i in $targets; do
[ "$j" -eq "$n" ] && { distname="$i"; break; }
j=$(($j+1))
done
# On x86-64, ask whether the user wants to install x86-32 or x86-64
archflag=
arch=
if [ "$(uname -m)" = x86_64 ]; then
while :; do
echo "You are running a 64bit kernel. This allows you to install either a"
printf "32bit or a 64bit chroot environment. %s" \
"Which one do you want (32, 64) "
read arch
[ "${arch}" == 32 -o "${arch}" == 64 ] && break
done
[ "${arch}" == 32 ] && archflag="--arch i386" || archflag="--arch amd64"
arch="${arch}bit"
fi
target="${distname}${arch}"
# Don't overwrite an existing installation
[ -d /var/lib/chroot/"${target}" ] && {
echo "This chroot already exists on your machine." >&2
echo "Delete /var/lib/chroot/${target} if you want to start over." >&2
exit 1
}
sudo mkdir -p /var/lib/chroot/"${target}"
# Offer to include additional standard repositories for Ubuntu-based chroots.
alt_repos=
grep ubuntu.com /usr/share/debootstrap/scripts/"${distname}" >&/dev/null && {
while :; do
echo "Would you like to add ${distname}-updates and ${distname}-security "
echo -n "to the chroot's sources.list (y/n)? "
read alt_repos
case "${alt_repos}" in
y|Y)
alt_repos="y"
break
;;
n|N)
break
;;
esac
done
}
# Remove stale entry from /etc/schroot/schroot.conf. Entries start
# with the target name in square brackets, followed by an arbitrary
# number of lines. The entry stops when either the end of file has
# been reached, or when the beginning of a new target is encountered.
# This means, we cannot easily match for a range of lines in
# "sed". Instead, we actually have to iterate over each line and check
# whether it is the beginning of a new entry.
sudo sed -ni '/^[[]'"${target%bit}"']$/,${:1;n;/^[[]/b2;b1;:2;p;n;b2};p' \
/etc/schroot/schroot.conf
# Download base system. This takes some time
if [ -z "${mirror}" ]; then
grep ubuntu.com /usr/share/debootstrap/scripts/"${distname}" >&/dev/null &&
mirror="http://archive.ubuntu.com/ubuntu" ||
mirror="http://ftp.us.debian.org/debian"
fi
sudo debootstrap ${archflag} "${distname}" /var/lib/chroot/"${target}" \
"$mirror"
# Add new entry to /etc/schroot/schroot.conf
grep ubuntu.com /usr/share/debootstrap/scripts/"${distname}" >&/dev/null &&
brand="Ubuntu" || brand="Debian"
if [ -z "${chroot_groups}" ]; then
chroot_groups="admin,$(id -gn)"
fi
sudo sh -c 'cat >>/etc/schroot/schroot.conf' <<EOF
[${target%bit}]
description=${brand} ${distname} ${arch}
type=directory
directory=/var/lib/chroot/${target}
priority=3
users=root
groups=${chroot_groups}
root-groups=${chroot_groups}
personality=linux$([ "${arch}" != 64bit ] && echo 32)
script-config=script-${target}
EOF
# Set up a special directory that changes contents depending on the target
# that is executing.
sed '/^FSTAB=/s,/mount-defaults",/mount-'"${target}"'",' \
/etc/schroot/script-defaults |
sudo sh -c 'cat >/etc/schroot/script-'"${target}"
sudo cp /etc/schroot/mount-defaults /etc/schroot/mount-"${target}"
echo "$HOME/chroot/.${target} $HOME/chroot none rw,bind 0 0" |
sudo sh -c 'cat >>/etc/schroot/mount-'"${target}"
mkdir -p "$HOME/chroot/.${target}"
# Install a helper script to launch commands in the chroot
sudo sh -c 'cat >/usr/local/bin/'"${target%bit}" <<EOF
#!/bin/bash
if [ \$# -eq 0 ]; then
exec schroot -c ${target%bit} -p
else
p="\$1"; shift
exec schroot -c ${target%bit} -p "\$p" -- "\$@"
fi
exit 1
EOF
sudo chown root:root /usr/local/bin/"${target%bit}"
sudo chmod 755 /usr/local/bin/"${target%bit}"
# Add the standard Ubuntu update repositories if requested.
[ "${alt_repos}" = "y" -a \
-r "/var/lib/chroot/${target}/etc/apt/sources.list" ] &&
sudo sed -i '/^deb .* [^ -]\+ main$/p
s/^\(deb .* [^ -]\+\) main/\1-security main/
p
t1
d
:1;s/-security main/-updates main/
t
d' "/var/lib/chroot/${target}/etc/apt/sources.list"
# Add a few more repositories to the chroot
[ "${add_srcs}" = "y" -a \
-r "/var/lib/chroot/${target}/etc/apt/sources.list" ] &&
sudo sed -i 's/ main$/ main restricted universe multiverse/
p
t1
d
:1;s/^deb/deb-src/
t
d' "/var/lib/chroot/${target}/etc/apt/sources.list"
# Update packages
sudo schroot -c "${target%bit}" -p -- /bin/sh -c '
apt-get update; apt-get -y dist-upgrade' || :
# Install a couple of missing packages
for i in debian-keyring ubuntu-keyring locales sudo; do
[ -d "/var/lib/chroot/${target}/usr/share/doc/$i" ] ||
sudo schroot -c "${target%bit}" -p -- apt-get -y install "$i" || :
done
# Configure locales
sudo schroot -c "${target%bit}" -p -- /bin/sh -c '
l='"${LANG:-en_US}"'; l="${l%%.*}"
[ -r /etc/locale.gen ] &&
sed -i "s/^# \($l\)/\1/" /etc/locale.gen
locale-gen $LANG en_US en_US.UTF-8' || :
# Configure "sudo" package
sudo schroot -c "${target%bit}" -p -- /bin/sh -c '
egrep '"'^$(id -nu) '"' /etc/sudoers >/dev/null 2>&1 ||
echo '"'$(id -nu) ALL=(ALL) ALL'"' >>/etc/sudoers'
# Install a few more commonly used packages
sudo schroot -c "${target%bit}" -p -- apt-get -y install \
autoconf automake1.9 dpkg-dev g++-multilib gcc-multilib gdb less libtool \
strace
# If running a 32bit environment on a 64bit machine, install a few binaries
# as 64bit. This is only done automatically if the chroot distro is the same as
# the host, otherwise there might be incompatibilities in build settings or
# runtime dependencies. The user can force it with the '-c' flag.
host_distro=$(grep DISTRIB_CODENAME /etc/lsb-release 2>/dev/null | \
cut -d "=" -f 2)
if [ "${copy_64}" = "y" -o \
"${host_distro}" = "${distname}" -a "${arch}" = 32bit ] && \
file /bin/bash 2>/dev/null | grep -q x86-64; then
readlinepkg=$(sudo schroot -c "${target%bit}" -p -- sh -c \
'apt-cache search "lib64readline.\$" | sort | tail -n 1 | cut -d " " -f 1')
sudo schroot -c "${target%bit}" -p -- apt-get -y install \
lib64expat1 lib64ncurses5 ${readlinepkg} lib64z1
dep=
for i in binutils gdb strace; do
[ -d /usr/share/doc/"$i" ] || dep="$dep $i"
done
[ -n "$dep" ] && sudo apt-get -y install $dep
sudo cp /usr/bin/gdb "/var/lib/chroot/${target}/usr/local/bin/"
sudo cp /usr/bin/ld "/var/lib/chroot/${target}/usr/local/bin/"
for i in libbfd libpython; do
lib="$({ ldd /usr/bin/ld; ldd /usr/bin/gdb; } |
grep "$i" | awk '{ print $3 }')"
if [ -n "$lib" -a -r "$lib" ]; then
sudo cp "$lib" "/var/lib/chroot/${target}/usr/lib64/"
fi
done
for lib in libssl libcrypt; do
sudo cp /usr/lib/$lib* "/var/lib/chroot/${target}/usr/lib64/" || :
done
fi
# Clean up package files
sudo schroot -c "${target%bit}" -p -- apt-get clean
sudo apt-get clean
# Let the user know what we did
trap '' INT TERM QUIT
trap '' EXIT
cat <<EOF
Successfully installed ${distname} ${arch}
You can run programs inside of the chroot by invoking the "${target%bit}"
command.
Your home directory is shared between the host and the chroot. But I configured
$HOME/chroot to be private to the chroot environment. You can use it
for files that need to differ between environments.
EOF

View File

@ -0,0 +1,24 @@
Internal property sheets:
essential.vsprops
Contains the common settings used throughout the projects. Is included by either ..\debug.vsprops or ..\release.vsprops, so in general, it is not included directly.
release_defaults.vsprops
Included by ..\release.vsprops. Its settings are overridden by release_impl$(CHROME_BUILD_TYPE).vsprops. Uses the default VS setting which is "Maximize Speed". Results in a relatively fast build with a reasonable optimization level but without whole program optimization, to reduce build time.
release_impl.vsprops
Included by ..\release.vsprops by default when CHROME_BUILD_TYPE is undefined. Includes release_defaults.vsprops.
release_impl_checksenabled.vsprops
Included by ..\release.vsprops when CHROME_BUILD_TYPE=_checksenabled. Matches what release_defaults.vsprops does, but doesn't actually inherit from it as we couldn't quite get that working. The only difference is that _DEBUG is set instead of NDEBUG. Used for keeping debug checks enabled with a build that is fast enough to dogfood with.
release_impl_official.vsprops
Included by ..\release.vsprops when CHROME_BUILD_TYPE=_official. Includes release_defaults.vsprops. Enables Whole Program Optimizations (WPO), which doubles the build time. Results in much more optimized build. Uses "Full Optimization" and "Flavor small code".
release_impl_pgo_instrument.vsprops
Included by ..\release.vsprops when CHROME_BUILD_TYPE=_pgo_instrument. Includes release_defaults.vsprops. Enables Profile Guided Optimization (PGO) instrumentation (first pass). Uses "Full Optimization" and "Flavor small code".
release_impl_pgo_optimize.vsprops
Included by ..\release.vsprops when CHROME_BUILD_TYPE=_pgo_optimize. Includes release_defaults.vsprops. Enables Profile Guided Optimization (PGO) optimization (second pass). Uses "Full Optimization" and "Flavor small code".
release_impl_purify.vsprops
Included by ..\release.vsprops when CHROME_BUILD_TYPE=_purify. Includes release_defaults.vsprops. Disables optimizations. Used with Purify to test without debug tools and without optimization; i.e. NDEBUG is defined but the compiler doesn't optimize the binary.

View File

@ -0,0 +1,18 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'msvs_settings': {
'VCCLCompilerTool': {
'StringPooling': 'true',
},
'VCLinkerTool': {
# No incremental linking.
'LinkIncremental': '1',
# Eliminate Unreferenced Data (/OPT:REF).
'OptimizeReferences': '2',
# Folding on (/OPT:ICF).
'EnableCOMDATFolding': '2',
},
},
}

View File

@ -0,0 +1,6 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': ['release_defaults.gypi'],
}

View File

@ -0,0 +1,37 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': ['release_defaults.gypi'],
'defines': ['OFFICIAL_BUILD'],
'msvs_settings': {
'VCCLCompilerTool': {
'InlineFunctionExpansion': '2',
'EnableIntrinsicFunctions': 'true',
'OmitFramePointers': 'true',
'EnableFiberSafeOptimizations': 'true',
},
'VCLibrarianTool': {
'AdditionalOptions': [
'/ltcg',
'/expectedoutputsize:120000000'
],
},
'VCLinkerTool': {
'AdditionalOptions': [
'/time',
# This may reduce memory fragmentation during linking.
# The expected size is 40*1024*1024, which gives us about 10M of
# headroom as of Dec 16, 2011.
'/expectedoutputsize:41943040',
],
'LinkTimeCodeGeneration': '1',
# The /PROFILE flag causes the linker to add a "FIXUP" debug stream to
# the generated PDB. According to MSDN documentation, this flag is only
# available (or perhaps supported) in the Enterprise (team development)
# version of Visual Studio. If this blocks your official build, simply
# comment out this line, then re-run "gclient runhooks".
'Profile': 'true',
},
},
}

View File

@ -0,0 +1,59 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
# When including this gypi, the following variables must be set:
# json_schema_files: an array of json files that comprise the api model.
# cc_dir: path to generated files
# root_namespace: the C++ namespace that all generated files go under
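# For reference (hypothetical values), an including target would set e.g.:
#   'json_schema_files': ['tabs.json'],
#   'cc_dir': 'chrome/common/extensions/api',
#   'root_namespace': 'extensions::api',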
'api_gen_dir': '<(DEPTH)/tools/json_schema_compiler',
'api_gen': '<(api_gen_dir)/compiler.py',
},
'rules': [
{
'rule_name': 'genapi',
'extension': 'json',
'inputs': [
'<(api_gen_dir)/code.py',
'<(api_gen_dir)/compiler.py',
'<(api_gen_dir)/model.py',
'<(api_gen_dir)/cc_generator.py',
'<(api_gen_dir)/h_generator.py',
'<(api_gen_dir)/cpp_type_generator.py',
'<@(json_schema_files)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/<(cc_dir)/<(RULE_INPUT_ROOT).cc',
'<(SHARED_INTERMEDIATE_DIR)/<(cc_dir)/<(RULE_INPUT_ROOT).h',
],
'action': [
'python',
'<(api_gen)',
'<(RULE_INPUT_PATH)',
'--root=<(DEPTH)',
'--destdir=<(SHARED_INTERMEDIATE_DIR)',
'--namespace=<(root_namespace)',
'<@(json_schema_files)',
],
'message': 'Generating C++ code from <(RULE_INPUT_PATH) jsons',
'process_outputs_as_sources': 1,
},
],
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)',
'<(DEPTH)',
],
'dependencies':[
'<(DEPTH)/tools/json_schema_compiler/api_gen_util.gyp:api_gen_util',
],
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)',
]
},
# This target exports a hard dependency because it generates header
# files.
'hard_dependency': 1,
}

View File

@ -0,0 +1,29 @@
# -*- python -*-
# Crocodile config file for Chromium linux
# TODO(jhawkins): We'll need to add a chromeos.croc once we get a coverage bot
# for that platform.
{
# List of rules, applied in order
'rules' : [
# Specify inclusions before exclusions, since rules are in order.
# Don't include non-Linux platform dirs
{
'regexp' : '.*/(chromeos|views)/',
'include' : 0,
},
# Don't include chromeos, windows, or mac specific files
{
'regexp' : '.*(_|/)(chromeos|mac|win|views)(\\.|_)',
'include' : 0,
},
# Groups
{
'regexp' : '.*_test_linux\\.',
'group' : 'test',
},
],
}

View File

@ -0,0 +1,36 @@
#!/bin/sh
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Helper script to run dump_syms on Chrome Linux executables and strip
# them if needed.
set -e
usage() {
echo -n "$0 <dump_syms_exe> <strip_binary> " >&2
echo "<binary_with_symbols> <symbols_output>" >&2
}
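# Example (hypothetical paths): dump symbols from a release binary, then strip it:
#   ./dump_app_syms ./dump_syms 1 out/Release/chrome chrome.breakpad.sym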
if [ $# -ne 4 ]; then
usage
exit 1
fi
SCRIPTDIR="$(readlink -f "$(dirname "$0")")"
DUMPSYMS="$1"
STRIP_BINARY="$2"
INFILE="$3"
OUTFILE="$4"
# Dump the symbols from the given binary.
if [ ! -e "$OUTFILE" -o "$INFILE" -nt "$OUTFILE" ]; then
"$DUMPSYMS" "$INFILE" > "$OUTFILE"
fi
if [ "$STRIP_BINARY" != "0" ]; then
strip "$INFILE"
fi

View File

@ -0,0 +1,37 @@
#!/bin/bash
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This program wraps around pkg-config to generate the correct include and
# library paths when cross-compiling using a sysroot.
# The assumption is that the sysroot contains the .pc files in usr/lib/pkgconfig
# and usr/share/pkgconfig (relative to the sysroot) and that they output paths
# relative to some parent path of the sysroot.
# This assumption is valid for a range of sysroots, in particular: a
# LSB-compliant root filesystem mounted at the sysroot, and a board build
# directory of a Chromium OS chroot.
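# Example invocation (hypothetical sysroot path):
#   ./pkg-config-wrapper /path/to/sysroot --cflags --libs gtk+-2.0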
root="$1"
if [ -z "$root" ]
then
echo "usage: $0 /path/to/sysroot [pkg-config-arguments] package" >&2
exit 1
fi
rewrite=`dirname $0`/rewrite_dirs.py
package=${!#}
shift
config_path=$root/usr/lib/pkgconfig:$root/usr/share/pkgconfig
set -e
# Some sysroots, like the Chromium OS ones, may generate paths that are not
# relative to the sysroot. For example,
# /path/to/chroot/build/x86-generic/usr/lib/pkgconfig/pkg.pc may have all paths
# relative to /path/to/chroot (i.e. prefix=/build/x86-generic/usr) instead of
# relative to /path/to/chroot/build/x86-generic (i.e. prefix=/usr).
# To support this correctly, it's necessary to extract the prefix to strip from
# pkg-config's |prefix| variable.
prefix=`PKG_CONFIG_PATH=$config_path pkg-config --variable=prefix "$package" | sed -e 's|/usr$||'`
result=`PKG_CONFIG_PATH=$config_path pkg-config "$@"`
echo "$result"| $rewrite --sysroot "$root" --strip-prefix "$prefix"

View File

@ -0,0 +1,42 @@
#!/bin/sh
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This figures out the architecture of the version of Python we are building
# pyautolib against.
#
# python_arch.sh /usr/lib/libpython2.5.so.1.0
# python_arch.sh /path/to/sysroot/usr/lib/libpython2.4.so.1.0
#
python=$(readlink -f "$1")
if [ ! -r "$python" ]; then
echo unknown
exit 0
fi
file_out=$(file "$python")
if [ $? -ne 0 ]; then
echo unknown
exit 0
fi
echo $file_out | grep -qs "ARM"
if [ $? -eq 0 ]; then
echo arm
exit 0
fi
echo $file_out | grep -qs "x86-64"
if [ $? -eq 0 ]; then
echo x64
exit 0
fi
echo $file_out | grep -qs "Intel 80386"
if [ $? -eq 0 ]; then
echo ia32
exit 0
fi
exit 1

View File

@ -0,0 +1,71 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Rewrites paths in -I, -L and other option to be relative to a sysroot."""
import sys
import os
import optparse
REWRITE_PREFIX = ['-I',
'-idirafter',
'-imacros',
'-imultilib',
'-include',
'-iprefix',
'-iquote',
'-isystem',
'-L']
def RewritePath(path, opts):
"""Rewrites a path by stripping the prefix and prepending the sysroot."""
sysroot = opts.sysroot
prefix = opts.strip_prefix
if os.path.isabs(path) and not path.startswith(sysroot):
if path.startswith(prefix):
path = path[len(prefix):]
path = path.lstrip('/')
return os.path.join(sysroot, path)
else:
return path
def RewriteLine(line, opts):
"""Rewrites all the paths in recognized options."""
args = line.split()
count = len(args)
i = 0
while i < count:
for prefix in REWRITE_PREFIX:
# The option can be either in the form "-I /path/to/dir" or
# "-I/path/to/dir" so handle both.
if args[i] == prefix:
i += 1
try:
args[i] = RewritePath(args[i], opts)
except IndexError:
sys.stderr.write('Missing argument following %s\n' % prefix)
break
elif args[i].startswith(prefix):
args[i] = prefix + RewritePath(args[i][len(prefix):], opts)
i += 1
return ' '.join(args)
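# For example (hypothetical paths): with --sysroot=/sysroot and
# --strip-prefix=/build/x86, the input line "-I/build/x86/usr/include -lfoo"
# is rewritten to "-I/sysroot/usr/include -lfoo" ("-lfoo" is untouched since
# "-l" is not in REWRITE_PREFIX).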
def main(argv):
parser = optparse.OptionParser()
parser.add_option('-s', '--sysroot', default='/', help='sysroot to prepend')
parser.add_option('-p', '--strip-prefix', default='', help='prefix to strip')
opts, args = parser.parse_args(argv[1:])
for line in sys.stdin.readlines():
line = RewriteLine(line.strip(), opts)
print line
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@ -0,0 +1,651 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'conditions': [
['sysroot!=""', {
'variables': {
'pkg-config': './pkg-config-wrapper "<(sysroot)"',
},
}, {
'variables': {
'pkg-config': 'pkg-config'
},
}],
[ 'os_posix==1 and OS!="mac"', {
'variables': {
# We use our own copy of libssl3, although we still need to link against
# the rest of NSS.
'use_system_ssl%': 0,
},
}, {
'variables': {
'use_system_ssl%': 1,
},
}],
],
'targets': [
{
'target_name': 'gtk',
'type': 'none',
'toolsets': ['host', 'target'],
'conditions': [
['_toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags gtk+-2.0 gthread-2.0)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other gtk+-2.0 gthread-2.0)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l gtk+-2.0 gthread-2.0)',
],
},
}, {
'direct_dependent_settings': {
'cflags': [
'<!@(pkg-config --cflags gtk+-2.0 gthread-2.0)',
],
},
'link_settings': {
'ldflags': [
'<!@(pkg-config --libs-only-L --libs-only-other gtk+-2.0 gthread-2.0)',
],
'libraries': [
'<!@(pkg-config --libs-only-l gtk+-2.0 gthread-2.0)',
],
},
}],
['chromeos==1', {
'link_settings': {
'libraries': [ '-lXtst' ]
}
}],
],
},
{
'target_name': 'gtkprint',
'type': 'none',
'conditions': [
['_toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags gtk+-unix-print-2.0)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other gtk+-unix-print-2.0)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l gtk+-unix-print-2.0)',
],
},
}],
],
},
{
'target_name': 'ssl',
'type': 'none',
'conditions': [
['_toolset=="target"', {
'conditions': [
['use_openssl==1', {
'dependencies': [
'../../third_party/openssl/openssl.gyp:openssl',
],
}],
['use_openssl==0 and use_system_ssl==0', {
'dependencies': [
'../../net/third_party/nss/ssl.gyp:libssl',
'../../third_party/zlib/zlib.gyp:zlib',
],
'direct_dependent_settings': {
'include_dirs+': [
# We need for our local copies of the libssl3 headers to come
# before other includes, as we are shadowing system headers.
'<(DEPTH)/net/third_party/nss/ssl',
],
'cflags': [
'<!@(<(pkg-config) --cflags nss)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other nss)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l nss | sed -e "s/-lssl3//")',
],
},
}],
['use_openssl==0 and use_system_ssl==1', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags nss)',
],
'defines': [
'USE_SYSTEM_SSL',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other nss)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l nss)',
],
},
}],
]
}],
],
},
{
'target_name': 'freetype2',
'type': 'none',
'conditions': [
['_toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags freetype2)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other freetype2)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l freetype2)',
],
},
}],
],
},
{
'target_name': 'fontconfig',
'type': 'none',
'conditions': [
['_toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags fontconfig)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other fontconfig)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l fontconfig)',
],
},
}],
],
},
{
'target_name': 'gdk',
'type': 'none',
'conditions': [
['_toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags gdk-2.0)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other gdk-2.0)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l gdk-2.0)',
],
},
}],
],
},
{
'target_name': 'gconf',
'type': 'none',
'conditions': [
['use_gconf==1 and _toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags gconf-2.0)',
],
'defines': [
'USE_GCONF',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other gconf-2.0)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l gconf-2.0)',
],
},
}],
],
},
{
'target_name': 'gio',
'type': 'none',
'conditions': [
['use_gio==1 and _toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags gio-2.0)',
],
'defines': [
'USE_GIO',
],
'conditions': [
['linux_link_gsettings==0', {
'defines': ['DLOPEN_GSETTINGS'],
}],
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other gio-2.0)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l gio-2.0)',
],
'conditions': [
['linux_link_gsettings==0 and OS=="linux"', {
'libraries': [
'-ldl',
],
}],
],
},
}],
],
},
{
'target_name': 'x11',
'type': 'none',
'toolsets': ['host', 'target'],
'conditions': [
['_toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags x11)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other x11 xi)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l x11 xi)',
],
},
}, {
'direct_dependent_settings': {
'cflags': [
'<!@(pkg-config --cflags x11)',
],
},
'link_settings': {
'ldflags': [
'<!@(pkg-config --libs-only-L --libs-only-other x11 xi)',
],
'libraries': [
'<!@(pkg-config --libs-only-l x11 xi)',
],
},
}],
],
},
{
'target_name': 'xext',
'type': 'none',
'conditions': [
['_toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags xext)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other xext)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l xext)',
],
},
}],
],
},
{
'target_name': 'xfixes',
'type': 'none',
'conditions': [
['_toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags xfixes)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other xfixes)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l xfixes)',
],
},
}],
],
},
{
'target_name': 'libgcrypt',
'type': 'none',
'conditions': [
['_toolset=="target" and use_cups==1', {
'direct_dependent_settings': {
'cflags': [
'<!@(libgcrypt-config --cflags)',
],
},
'link_settings': {
'libraries': [
'<!@(libgcrypt-config --libs)',
],
},
}],
],
},
{
'target_name': 'selinux',
'type': 'none',
'conditions': [
['_toolset=="target"', {
'link_settings': {
'libraries': [
'-lselinux',
],
},
}],
],
},
{
'target_name': 'gnome_keyring',
'type': 'none',
'conditions': [
['use_gnome_keyring==1', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags gnome-keyring-1)',
],
'defines': [
'USE_GNOME_KEYRING',
],
'conditions': [
['linux_link_gnome_keyring==0', {
'defines': ['DLOPEN_GNOME_KEYRING'],
}],
],
},
'conditions': [
['linux_link_gnome_keyring!=0', {
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other gnome-keyring-1)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l gnome-keyring-1)',
],
},
}, {
'conditions': [
['OS=="linux"', {
'link_settings': {
'libraries': [
'-ldl',
],
},
}],
],
}],
],
}],
],
},
{
# The unit tests use a few convenience functions from the GNOME
# Keyring library directly. We ignore linux_link_gnome_keyring and
# link directly in this version of the target to allow this.
# *** Do not use this target in the main binary! ***
'target_name': 'gnome_keyring_direct',
'type': 'none',
'conditions': [
['use_gnome_keyring==1', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags gnome-keyring-1)',
],
'defines': [
'USE_GNOME_KEYRING',
],
'conditions': [
['linux_link_gnome_keyring==0', {
'defines': ['DLOPEN_GNOME_KEYRING'],
}],
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other gnome-keyring-1)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l gnome-keyring-1)',
],
},
}],
],
},
{
'target_name': 'dbus',
'type': 'none',
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags dbus-1)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other dbus-1)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l dbus-1)',
],
},
},
{
# TODO(satorux): Remove this once dbus-glib clients are gone.
'target_name': 'dbus-glib',
'type': 'none',
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags dbus-glib-1)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other dbus-glib-1)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l dbus-glib-1)',
],
},
},
{
'target_name': 'glib',
'type': 'none',
'toolsets': ['host', 'target'],
'conditions': [
['_toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags glib-2.0 gobject-2.0 gthread-2.0)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other glib-2.0 gobject-2.0 gthread-2.0)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l glib-2.0 gobject-2.0 gthread-2.0)',
],
},
}, {
'direct_dependent_settings': {
'cflags': [
'<!@(pkg-config --cflags glib-2.0 gobject-2.0 gthread-2.0)',
],
},
'link_settings': {
'ldflags': [
'<!@(pkg-config --libs-only-L --libs-only-other glib-2.0 gobject-2.0 gthread-2.0)',
],
'libraries': [
'<!@(pkg-config --libs-only-l glib-2.0 gobject-2.0 gthread-2.0)',
],
},
}],
['chromeos==1', {
'link_settings': {
'libraries': [ '-lXtst' ]
}
}],
],
},
{
'target_name': 'pangocairo',
'type': 'none',
'toolsets': ['host', 'target'],
'conditions': [
['_toolset=="target"', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags pangocairo)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other pangocairo)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l pangocairo)',
],
},
}, {
'direct_dependent_settings': {
'cflags': [
'<!@(pkg-config --cflags pangocairo)',
],
},
'link_settings': {
'ldflags': [
'<!@(pkg-config --libs-only-L --libs-only-other pangocairo)',
],
'libraries': [
'<!@(pkg-config --libs-only-l pangocairo)',
],
},
}],
],
},
{
'target_name': 'libresolv',
'type': 'none',
'link_settings': {
'libraries': [
'-lresolv',
],
},
},
{
'target_name': 'ibus',
'type': 'none',
'conditions': [
['use_ibus==1', {
'variables': {
'ibus_min_version': '1.3.99.20110425',
},
'direct_dependent_settings': {
'defines': ['HAVE_IBUS=1'],
'cflags': [
'<!@(<(pkg-config) --cflags "ibus-1.0 >= <(ibus_min_version)")',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other "ibus-1.0 >= <(ibus_min_version)")',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l "ibus-1.0 >= <(ibus_min_version)")',
],
},
}],
],
},
{
'target_name': 'wayland',
'type': 'none',
'conditions': [
['use_wayland == 1', {
'cflags': [
'<!@(<(pkg-config) --cflags cairo wayland-client wayland-egl xkbcommon)',
],
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags cairo wayland-client wayland-egl xkbcommon)',
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other wayland-client wayland-egl xkbcommon)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l wayland-client wayland-egl xkbcommon)',
],
},
}],
],
},
{
'target_name': 'udev',
'type': 'none',
'conditions': [
# libudev is not available on *BSD
['_toolset=="target" and os_bsd!=1', {
'direct_dependent_settings': {
'cflags': [
'<!@(<(pkg-config) --cflags libudev)'
],
},
'link_settings': {
'ldflags': [
'<!@(<(pkg-config) --libs-only-L --libs-only-other libudev)',
],
'libraries': [
'<!@(<(pkg-config) --libs-only-l libudev)',
],
},
}],
],
},
],
}
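Each target above follows the same pattern: '<!@(...)' is gyp's command
expansion, which runs the command at gyp time and splits its stdout on
whitespace into a list, so the compile and link flags track whatever the
system's pkg-config .pc files report ('<(pkg-config)' is a variable naming the
pkg-config binary; the host-toolset branches fall back to plain pkg-config).
A rough Python model of the observable behavior, not gyp's actual
implementation:

import subprocess

def expand_command_list(command):
  # Rough model of gyp's '<!@(...)': run |command| at gyp time and split
  # its stdout on whitespace into a list of flags.
  proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
  return proc.communicate()[0].split()

# On a typical Linux box, expand_command_list('pkg-config --cflags dbus-1')
# might return something like ['-I/usr/include/dbus-1.0', ...].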

View File

@ -0,0 +1,2 @@
mark@chromium.org
thomasvl@chromium.org

View File

@ -0,0 +1,273 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Usage: change_mach_o_flags.py [--executable-heap] [--no-pie] <executablepath>
Arranges for the executable at |executable_path| to have its data (heap)
pages protected to prevent execution on Mac OS X 10.7 ("Lion"), and to have
the PIE (position independent executable) bit set to enable ASLR (address
space layout randomization). With --executable-heap or --no-pie, the
respective bits are cleared instead of set, making the heap executable or
disabling PIE/ASLR.
This script is able to operate on thin (single-architecture) Mach-O files
and fat (universal, multi-architecture) files. When operating on fat files,
it will set or clear the bits for each architecture contained therein.
NON-EXECUTABLE HEAP
Traditionally in Mac OS X, 32-bit processes did not have data pages set to
prohibit execution. Although user programs could call mprotect and
mach_vm_protect to deny execution of code in data pages, the kernel would
silently ignore such requests without updating the page tables, and the
hardware would happily execute code on such pages. 64-bit processes were
always given proper hardware protection of data pages. This behavior was
controllable on a system-wide level via the vm.allow_data_exec sysctl, which
is set by default to 1. The bit with value 1 (set by default) allows code
execution on data pages for 32-bit processes, and the bit with value 2
(clear by default) does the same for 64-bit processes.
In Mac OS X 10.7, executables can "opt in" to having hardware protection
against code execution on data pages applied. This is done by setting a new
bit in the |flags| field of an executable's |mach_header|. When
MH_NO_HEAP_EXECUTION is set, proper protections will be applied, regardless
of the setting of vm.allow_data_exec. See xnu-1699.22.73/osfmk/vm/vm_map.c
override_nx and xnu-1699.22.73/bsd/kern/mach_loader.c load_machfile.
The Apple toolchain has been revised to set the MH_NO_HEAP_EXECUTION when
producing executables, provided that -allow_heap_execute is not specified
at link time. Only linkers shipping with Xcode 4.0 and later (ld64-123.2 and
later) have this ability. See ld64-123.2.1/src/ld/Options.cpp
Options::reconfigureDefaults() and
ld64-123.2.1/src/ld/HeaderAndLoadCommands.hpp
HeaderAndLoadCommandsAtom<A>::flags().
This script sets the MH_NO_HEAP_EXECUTION bit on Mach-O executables. It is
intended for use with executables produced by a linker that predates Apple's
modifications to set this bit itself. It is also useful for setting this bit
for non-i386 executables, including x86_64 executables. Apple's linker only
sets it for 32-bit i386 executables, presumably under the assumption that
the value of vm.allow_data_exec is set in stone. However, if someone were to
change vm.allow_data_exec to 2 or 3, 64-bit x86_64 executables would run
without hardware protection against code execution on data pages. This
script can set the bit for x86_64 executables, guaranteeing that they run
with appropriate protection even when vm.allow_data_exec has been tampered
with.
POSITION-INDEPENDENT EXECUTABLES/ADDRESS SPACE LAYOUT RANDOMIZATION
This script sets or clears the MH_PIE bit in an executable's Mach-O header,
enabling or disabling position independence on Mac OS X 10.5 and later.
Processes running position-independent executables have varying levels of
ASLR protection depending on the OS release. The main executable's load
address, shared library load addresses, and the heap and stack base
addresses may be randomized. Position-independent executables are produced
by supplying the -pie flag to the linker (or defeated by supplying -no_pie).
Executables linked with a deployment target of 10.7 or higher have PIE on
by default.
This script is never strictly needed during the build to enable PIE, as all
linkers used are recent enough to support -pie. However, it's used to
disable the PIE bit as needed on already-linked executables.
"""
import optparse
import os
import struct
import sys
# <mach-o/fat.h>
FAT_MAGIC = 0xcafebabe
FAT_CIGAM = 0xbebafeca
# <mach-o/loader.h>
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
MH_EXECUTE = 0x2
MH_PIE = 0x00200000
MH_NO_HEAP_EXECUTION = 0x01000000
class MachOError(Exception):
"""A class for exceptions thrown by this module."""
pass
def CheckedSeek(file, offset):
"""Seeks the file-like object at |file| to offset |offset| and raises a
MachOError if anything funny happens."""
file.seek(offset, os.SEEK_SET)
new_offset = file.tell()
if new_offset != offset:
raise MachOError, \
'seek: expected offset %d, observed %d' % (offset, new_offset)
def CheckedRead(file, count):
"""Reads |count| bytes from the file-like |file| object, raising a
MachOError if any other number of bytes is read."""
bytes = file.read(count)
if len(bytes) != count:
raise MachOError, \
'read: expected length %d, observed %d' % (count, len(bytes))
return bytes
def ReadUInt32(file, endian):
"""Reads an unsinged 32-bit integer from the file-like |file| object,
treating it as having endianness specified by |endian| (per the |struct|
module), and returns it as a number. Raises a MachOError if the proper
length of data can't be read from |file|."""
bytes = CheckedRead(file, 4)
(uint32,) = struct.unpack(endian + 'I', bytes)
return uint32
def ReadMachHeader(file, endian):
"""Reads an entire |mach_header| structure (<mach-o/loader.h>) from the
file-like |file| object, treating it as having endianness specified by
|endian| (per the |struct| module), and returns a 7-tuple of its members
as numbers. Raises a MachOError if the proper length of data can't be read
from |file|."""
bytes = CheckedRead(file, 28)
magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \
struct.unpack(endian + '7I', bytes)
return magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags
def ReadFatArch(file):
"""Reads an entire |fat_arch| structure (<mach-o/fat.h>) from the file-like
  |file| object. |fat_arch| structures are always big-endian (per
  <mach-o/fat.h>), so no endianness argument is needed. Returns a 5-tuple
  of its members as numbers.
Raises a MachOError if the proper length of data can't be read from
|file|."""
bytes = CheckedRead(file, 20)
cputype, cpusubtype, offset, size, align = struct.unpack('>5I', bytes)
return cputype, cpusubtype, offset, size, align
def WriteUInt32(file, uint32, endian):
"""Writes |uint32| as an unsinged 32-bit integer to the file-like |file|
object, treating it as having endianness specified by |endian| (per the
|struct| module)."""
bytes = struct.pack(endian + 'I', uint32)
assert len(bytes) == 4
file.write(bytes)
def HandleMachOFile(file, options, offset=0):
"""Seeks the file-like |file| object to |offset|, reads its |mach_header|,
and rewrites the header's |flags| field if appropriate. The header's
endianness is detected. Both 32-bit and 64-bit Mach-O headers are supported
(mach_header and mach_header_64). Raises MachOError if used on a header that
does not have a known magic number or is not of type MH_EXECUTE. The
MH_PIE and MH_NO_HEAP_EXECUTION bits are set or cleared in the |flags| field
according to |options| and written to |file| if any changes need to be made.
If already set or clear as specified by |options|, nothing is written."""
CheckedSeek(file, offset)
magic = ReadUInt32(file, '<')
if magic == MH_MAGIC or magic == MH_MAGIC_64:
endian = '<'
elif magic == MH_CIGAM or magic == MH_CIGAM_64:
endian = '>'
else:
raise MachOError, \
        'Mach-O file at offset %d has an unknown magic number' % offset
CheckedSeek(file, offset)
magic, cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = \
ReadMachHeader(file, endian)
assert magic == MH_MAGIC or magic == MH_MAGIC_64
if filetype != MH_EXECUTE:
raise MachOError, \
'Mach-O file at offset %d is type 0x%x, expected MH_EXECUTE' % \
(offset, filetype)
original_flags = flags
if options.no_heap_execution:
flags |= MH_NO_HEAP_EXECUTION
else:
flags &= ~MH_NO_HEAP_EXECUTION
if options.pie:
flags |= MH_PIE
else:
flags &= ~MH_PIE
if flags != original_flags:
CheckedSeek(file, offset + 24)
WriteUInt32(file, flags, endian)
def HandleFatFile(file, options, fat_offset=0):
"""Seeks the file-like |file| object to |offset| and loops over its
|fat_header| entries, calling HandleMachOFile for each."""
CheckedSeek(file, fat_offset)
magic = ReadUInt32(file, '>')
assert magic == FAT_MAGIC
nfat_arch = ReadUInt32(file, '>')
for index in xrange(0, nfat_arch):
cputype, cpusubtype, offset, size, align = ReadFatArch(file)
assert size >= 28
# HandleMachOFile will seek around. Come back here after calling it, in
# case it sought.
fat_arch_offset = file.tell()
HandleMachOFile(file, options, offset)
CheckedSeek(file, fat_arch_offset)
def main(me, args):
parser = optparse.OptionParser('%prog [options] <executable_path>')
parser.add_option('--executable-heap', action='store_false',
dest='no_heap_execution', default=True,
help='Clear the MH_NO_HEAP_EXECUTION bit')
parser.add_option('--no-pie', action='store_false',
dest='pie', default=True,
help='Clear the MH_PIE bit')
(options, loose_args) = parser.parse_args(args)
if len(loose_args) != 1:
parser.print_usage()
return 1
executable_path = loose_args[0]
executable_file = open(executable_path, 'rb+')
magic = ReadUInt32(executable_file, '<')
if magic == FAT_CIGAM:
# Check FAT_CIGAM and not FAT_MAGIC because the read was little-endian.
HandleFatFile(executable_file, options)
elif magic == MH_MAGIC or magic == MH_CIGAM or \
magic == MH_MAGIC_64 or magic == MH_CIGAM_64:
HandleMachOFile(executable_file, options)
else:
raise MachOError, '%s is not a Mach-O or fat file' % executable_file
executable_file.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[0], sys.argv[1:]))
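For reference, the edit the script makes boils down to independent bit
operations on the |flags| word stored at offset 24 of the header. A standalone
sketch, not part of the imported file; 0x85 is an arbitrary example value for
the other flag bits:

MH_PIE = 0x00200000
MH_NO_HEAP_EXECUTION = 0x01000000

def updated_flags(flags, pie=True, no_heap_execution=True):
  # Mirrors HandleMachOFile: set or clear each bit independently, leaving
  # all other flag bits untouched.
  if no_heap_execution:
    flags |= MH_NO_HEAP_EXECUTION
  else:
    flags &= ~MH_NO_HEAP_EXECUTION
  if pie:
    flags |= MH_PIE
  else:
    flags &= ~MH_PIE
  return flags

assert updated_flags(0x00000085) == 0x01200085
assert updated_flags(0x01200085, pie=False, no_heap_execution=False) == 0x85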

View File

@ -0,0 +1,15 @@
#!/bin/sh
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is a small wrapper script around change_mach_o_flags.py allowing it to
# be invoked easily from Xcode. change_mach_o_flags.py expects its arguments
# on the command line, but Xcode puts its parameters in the environment.
set -e
exec "$(dirname "${0}")/change_mach_o_flags.py" \
"${@}" \
"${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}"

View File

@ -0,0 +1,36 @@
# -*- python -*-
# Crocodile config file for Chromium mac
{
# List of rules, applied in order
'rules' : [
# Specify inclusions before exclusions, since rules are in order.
# Don't include chromeos, linux, or windows specific files
{
'regexp' : '.*(_|/)(chromeos|linux|win|views)(\\.|_)',
'include' : 0,
},
# Don't include ChromeOS dirs
{
'regexp' : '.*/chromeos/',
'include' : 0,
},
# Groups
{
'regexp' : '.*_test_mac\\.',
'group' : 'test',
},
# Languages
{
'regexp' : '.*\\.m$',
'language' : 'ObjC',
},
{
'regexp' : '.*\\.mm$',
'language' : 'ObjC++',
},
],
}

View File

@ -0,0 +1,62 @@
#!/bin/bash
# Copyright (c) 2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is a handy wrapper script that figures out how to call the strip
# utility (strip_save_dsym in this case), if it even needs to be called at all,
# and then does it. This script should be called by a post-link phase in
# targets that might generate Mach-O executables, dynamic libraries, or
# loadable bundles.
#
# An example "Strip If Needed" build phase placed after "Link Binary With
# Libraries" would do:
# exec "${XCODEPROJ_DEPTH}/build/mac/strip_from_xcode"
if [ "${CONFIGURATION}" != "Release" ] ; then
# Only strip in release mode.
exit 0
fi
declare -a FLAGS
# MACH_O_TYPE is not set for a command-line tool, so check PRODUCT_TYPE too.
# Weird.
if [ "${MACH_O_TYPE}" = "mh_execute" ] || \
[ "${PRODUCT_TYPE}" = "com.apple.product-type.tool" ] ; then
  # Strip everything (no special flags); the "true" below is just a no-op.
true
elif [ "${MACH_O_TYPE}" = "mh_dylib" ] || \
[ "${MACH_O_TYPE}" = "mh_bundle" ]; then
# Strip debugging symbols and local symbols
FLAGS[${#FLAGS[@]}]=-S
FLAGS[${#FLAGS[@]}]=-x
elif [ "${MACH_O_TYPE}" = "staticlib" ] ; then
# Don't strip static libraries.
exit 0
else
# Warn, but don't treat this as an error.
echo $0: warning: unrecognized MACH_O_TYPE ${MACH_O_TYPE}
exit 0
fi
if [ -n "${STRIPFLAGS}" ] ; then
# Pick up the standard STRIPFLAGS Xcode setting, used for "Additional Strip
# Flags".
for stripflag in "${STRIPFLAGS}" ; do
FLAGS[${#FLAGS[@]}]="${stripflag}"
done
fi
if [ -n "${CHROMIUM_STRIP_SAVE_FILE}" ] ; then
  # An Xcode project can communicate a file listing symbols to be saved in this
# environment variable by setting it as a build setting. This isn't a
# standard Xcode setting. It's used in preference to STRIPFLAGS to
# eliminate quoting ambiguity concerns.
FLAGS[${#FLAGS[@]}]=-s
FLAGS[${#FLAGS[@]}]="${CHROMIUM_STRIP_SAVE_FILE}"
fi
exec "$(dirname ${0})/strip_save_dsym" "${FLAGS[@]}" \
"${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}"

View File

@ -0,0 +1,341 @@
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Usage: strip_save_dsym <whatever-arguments-you-would-pass-to-strip>
#
# strip_save_dsym is a wrapper around the standard strip utility. Given an
# input Mach-O file, strip_save_dsym will save a copy of the file in a "fake"
# .dSYM bundle for debugging, and then call strip to strip the Mach-O file.
# Note that the .dSYM file is a "fake" in that it's not a self-contained
# .dSYM bundle, it just contains a copy of the original (unstripped) Mach-O
# file, and therefore contains references to object files on the filesystem.
# The generated .dSYM bundle is therefore unsuitable for debugging in the
# absence of these .o files.
#
# If a .dSYM already exists and has a newer timestamp than the Mach-O file,
# this utility does nothing. That allows strip_save_dsym to be run on a file
# that has already been stripped without trashing the .dSYM.
#
# Rationale: the "right" way to generate dSYM bundles, dsymutil, is incredibly
# slow. On the other hand, doing a file copy (which is really all that
# dsymutil does) is comparatively fast. Since we usually just want to strip
# a release-mode executable but still be able to debug it, and we don't care
# so much about generating a hermetic dSYM bundle, we'll prefer the file copy.
# If a real dSYM is ever needed, it's still possible to create one by running
# dsymutil and pointing it at the original Mach-O file inside the "fake"
# bundle, provided that the object files are available.
import errno
import os
import re
import shutil
import subprocess
import sys
import time
# Returns a list of architectures contained in a Mach-O file. The file can be
# a universal (fat) file, in which case there will be one list element for
# each contained architecture, or it can be a thin single-architecture Mach-O
# file, in which case the list will contain a single element identifying the
# architecture. On error, returns an empty list. Determines the architecture
# list by calling file.
def macho_archs(macho):
macho_types = ["executable",
"dynamically linked shared library",
"bundle"]
macho_types_re = "Mach-O (?:64-bit )?(?:" + "|".join(macho_types) + ")"
file_cmd = subprocess.Popen(["/usr/bin/file", "-b", "--", macho],
stdout=subprocess.PIPE)
archs = []
type_line = file_cmd.stdout.readline()
type_match = re.match("^%s (.*)$" % macho_types_re, type_line)
if type_match:
    archs.append(type_match.group(1))
else:
type_match = re.match("^Mach-O universal binary with (.*) architectures$",
type_line)
if type_match:
for i in range(0, int(type_match.group(1))):
arch_line = file_cmd.stdout.readline()
arch_match = re.match(
"^.* \(for architecture (.*)\):\t%s .*$" % macho_types_re,
arch_line)
if arch_match:
archs.append(arch_match.group(1))
if file_cmd.wait() != 0:
archs = []
if len(archs) == 0:
print >> sys.stderr, "No architectures in %s" % macho
return archs
# Returns a dictionary mapping architectures contained in the file as returned
# by macho_archs to the LC_UUID load command for that architecture.
# Architectures with no LC_UUID load command are omitted from the dictionary.
# Determines the UUID value by calling otool.
def macho_uuids(macho):
uuids = {}
archs = macho_archs(macho)
if len(archs) == 0:
return uuids
for arch in archs:
if arch == "":
continue
otool_cmd = subprocess.Popen(["/usr/bin/otool", "-arch", arch, "-l", "-",
macho],
stdout=subprocess.PIPE)
# state 0 is when nothing UUID-related has been seen yet. State 1 is
# entered after a load command begins, but it may not be an LC_UUID load
# command. States 2, 3, and 4 are intermediate states while reading an
# LC_UUID command. State 5 is the terminal state for a successful LC_UUID
# read. State 6 is the error state.
state = 0
uuid = ""
for otool_line in otool_cmd.stdout:
if state == 0:
if re.match("^Load command .*$", otool_line):
state = 1
elif state == 1:
if re.match("^ cmd LC_UUID$", otool_line):
state = 2
else:
state = 0
elif state == 2:
if re.match("^ cmdsize 24$", otool_line):
state = 3
else:
state = 6
elif state == 3:
# The UUID display format changed in the version of otool shipping
# with the Xcode 3.2.2 prerelease. The new format is traditional:
# uuid 4D7135B2-9C56-C5F5-5F49-A994258E0955
        # and with Xcode 3.2.6, the line is indented one more space:
# uuid 4D7135B2-9C56-C5F5-5F49-A994258E0955
# The old format, from cctools-750 and older's otool, breaks the UUID
# up into a sequence of bytes:
# uuid 0x4d 0x71 0x35 0xb2 0x9c 0x56 0xc5 0xf5
# 0x5f 0x49 0xa9 0x94 0x25 0x8e 0x09 0x55
new_uuid_match = re.match("^ {3,4}uuid (.{8}-.{4}-.{4}-.{4}-.{12})$",
otool_line)
if new_uuid_match:
uuid = new_uuid_match.group(1)
# Skip state 4, there is no second line to read.
state = 5
else:
old_uuid_match = re.match("^ uuid 0x(..) 0x(..) 0x(..) 0x(..) "
"0x(..) 0x(..) 0x(..) 0x(..)$",
otool_line)
if old_uuid_match:
state = 4
uuid = old_uuid_match.group(1) + old_uuid_match.group(2) + \
old_uuid_match.group(3) + old_uuid_match.group(4) + "-" + \
old_uuid_match.group(5) + old_uuid_match.group(6) + "-" + \
old_uuid_match.group(7) + old_uuid_match.group(8) + "-"
else:
state = 6
elif state == 4:
old_uuid_match = re.match("^ 0x(..) 0x(..) 0x(..) 0x(..) "
"0x(..) 0x(..) 0x(..) 0x(..)$",
otool_line)
if old_uuid_match:
state = 5
uuid += old_uuid_match.group(1) + old_uuid_match.group(2) + "-" + \
old_uuid_match.group(3) + old_uuid_match.group(4) + \
old_uuid_match.group(5) + old_uuid_match.group(6) + \
old_uuid_match.group(7) + old_uuid_match.group(8)
else:
state = 6
if otool_cmd.wait() != 0:
state = 6
if state == 5:
uuids[arch] = uuid.upper()
if len(uuids) == 0:
print >> sys.stderr, "No UUIDs in %s" % macho
return uuids
# Given a path to a Mach-O file and possible information from the environment,
# determines the desired path to the .dSYM.
def dsym_path(macho):
# If building a bundle, the .dSYM should be placed next to the bundle. Use
# WRAPPER_NAME to make this determination. If called from xcodebuild,
# WRAPPER_NAME will be set to the name of the bundle.
dsym = ""
if "WRAPPER_NAME" in os.environ:
if "BUILT_PRODUCTS_DIR" in os.environ:
dsym = os.path.join(os.environ["BUILT_PRODUCTS_DIR"],
os.environ["WRAPPER_NAME"])
else:
dsym = os.environ["WRAPPER_NAME"]
else:
dsym = macho
dsym += ".dSYM"
return dsym
# Creates a fake .dSYM bundle at dsym for macho, a Mach-O image, recording the
# architectures and LC_UUID values found in the image itself.
def make_fake_dsym(macho, dsym):
uuids = macho_uuids(macho)
if len(uuids) == 0:
return False
dwarf_dir = os.path.join(dsym, "Contents", "Resources", "DWARF")
dwarf_file = os.path.join(dwarf_dir, os.path.basename(macho))
try:
os.makedirs(dwarf_dir)
except OSError, (err, error_string):
if err != errno.EEXIST:
raise
shutil.copyfile(macho, dwarf_file)
# info_template is the same as what dsymutil would have written, with the
# addition of the fake_dsym key.
info_template = \
'''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleIdentifier</key>
<string>com.apple.xcode.dsym.%(root_name)s</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundlePackageType</key>
<string>dSYM</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleVersion</key>
<string>1</string>
<key>dSYM_UUID</key>
<dict>
%(uuid_dict)s </dict>
<key>fake_dsym</key>
<true/>
</dict>
</plist>
'''
root_name = os.path.basename(dsym)[:-5] # whatever.dSYM without .dSYM
uuid_dict = ""
for arch in sorted(uuids):
uuid_dict += "\t\t\t<key>" + arch + "</key>\n"\
"\t\t\t<string>" + uuids[arch] + "</string>\n"
info_dict = {
"root_name": root_name,
"uuid_dict": uuid_dict,
}
info_contents = info_template % info_dict
info_file = os.path.join(dsym, "Contents", "Info.plist")
info_fd = open(info_file, "w")
info_fd.write(info_contents)
info_fd.close()
return True
# For a Mach-O file, determines where the .dSYM bundle should be located. If
# the bundle does not exist or has a modification time older than the Mach-O
# file, calls make_fake_dsym to create a fake .dSYM bundle there, then strips
# the Mach-O file and sets the modification time on the .dSYM bundle and Mach-O
# file to be identical.
def strip_and_make_fake_dsym(macho):
dsym = dsym_path(macho)
macho_stat = os.stat(macho)
dsym_stat = None
try:
dsym_stat = os.stat(dsym)
except OSError, (err, error_string):
if err != errno.ENOENT:
raise
if dsym_stat is None or dsym_stat.st_mtime < macho_stat.st_mtime:
# Make a .dSYM bundle
if not make_fake_dsym(macho, dsym):
return False
# Strip the Mach-O file
remove_dsym = True
try:
strip_path = ""
if "SYSTEM_DEVELOPER_BIN_DIR" in os.environ:
strip_path = os.environ["SYSTEM_DEVELOPER_BIN_DIR"]
else:
strip_path = "/usr/bin"
strip_path = os.path.join(strip_path, "strip")
strip_cmdline = [strip_path] + sys.argv[1:]
strip_cmd = subprocess.Popen(strip_cmdline)
if strip_cmd.wait() == 0:
remove_dsym = False
finally:
if remove_dsym:
shutil.rmtree(dsym)
# Update modification time on the Mach-O file and .dSYM bundle
now = time.time()
os.utime(macho, (now, now))
os.utime(dsym, (now, now))
return True
def main(argv=None):
if argv is None:
argv = sys.argv
# This only supports operating on one file at a time. Look at the arguments
# to strip to figure out what the source to be stripped is. Arguments are
# processed in the same way that strip does, although to reduce complexity,
# this doesn't do all of the same checking as strip. For example, strip
# has no -Z switch and would treat -Z on the command line as an error. For
  # the purposes at hand, that's fine.
macho = None
process_switches = True
ignore_argument = False
for arg in argv[1:]:
if ignore_argument:
ignore_argument = False
continue
if process_switches:
if arg == "-":
process_switches = False
      # strip has these switches that accept an argument:
if arg in ["-s", "-R", "-d", "-o", "-arch"]:
ignore_argument = True
if arg[0] == "-":
continue
if macho is None:
macho = arg
else:
print >> sys.stderr, "Too many things to strip"
return 1
if macho is None:
print >> sys.stderr, "Nothing to strip"
return 1
if not strip_and_make_fake_dsym(macho):
return 1
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
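To make the WRAPPER_NAME/BUILT_PRODUCTS_DIR logic in dsym_path concrete, a
small self-contained sketch with hypothetical paths:

import os

def expected_dsym_path(macho, environ):
  # Mirrors dsym_path() above: bundles get "<bundle>.dSYM" next to the
  # bundle; bare executables get "<executable>.dSYM" next to themselves.
  if "WRAPPER_NAME" in environ:
    base = environ["WRAPPER_NAME"]
    if "BUILT_PRODUCTS_DIR" in environ:
      base = os.path.join(environ["BUILT_PRODUCTS_DIR"], base)
  else:
    base = macho
  return base + ".dSYM"

assert expected_dsym_path("out/app", {}) == "out/app.dSYM"
assert expected_dsym_path(
    "ignored", {"WRAPPER_NAME": "Foo.app", "BUILT_PRODUCTS_DIR": "out"}
) == os.path.join("out", "Foo.app.dSYM")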

View File

@ -0,0 +1,96 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is meant to be included into a target to create a unittest that
# invokes a set of no-compile tests. A no-compile test is a test that asserts
# a particular construct will not compile.
#
# Also see:
# http://dev.chromium.org/developers/testing/no-compile-tests
#
# To use this, create a gyp target with the following form:
# {
# 'target_name': 'my_module_nc_unittests',
# 'type': 'executable',
# 'sources': [
# 'nc_testset_1.nc',
# 'nc_testset_2.nc',
# ],
# 'includes': ['path/to/this/gypi/file'],
# }
#
# The .nc files are C++ files that contain code we wish to assert will not
# compile. Each individual test case in the file should be put in its own
# #ifdef section. The expected output should be appended with a C++-style
# comment that holds a Python list of regular expressions. This will likely
# be longer than 80 characters. Giving a precise expected output is
# important so that random compile failures do not cause the test to pass.
#
# Example .nc file:
#
# #if defined(TEST_NEEDS_SEMICOLON) // [r"expected ',' or ';' at end of input"]
#
# int a = 1
#
# #elif defined(TEST_NEEDS_CAST) // [r"invalid conversion from 'void*' to 'char*'"]
#
# void* a = NULL;
# char* b = a;
#
# #endif
#
# If we needed to disable TEST_NEEDS_SEMICOLON, we would change the defines to:
#
# DISABLE_TEST_NEEDS_SEMICOLON
# TEST_NEEDS_CAST
#
# The lines above are parsed by a regexp so avoid getting creative with the
# formatting or ifdef logic; it will likely just not work.
#
# Implementation notes:
# The .nc files are actually processed by a python script which executes the
# compiler and generates a .cc file that is empty on success, or will have a
# series of #error lines on failure, and a set of trivially passing gunit
# TEST() functions on success. This allows us to fail at the compile step when
# something goes wrong, and know during the unittest run that the test was at
# least processed when things go right.
{
# TODO(awong): Disabled until http://crbug.com/105388 is resolved.
'sources/': [['exclude', '\\.nc$']],
'conditions': [
[ 'OS=="linux" and clang==0', {
'rules': [
{
'variables': {
'nocompile_driver': '<(DEPTH)/tools/nocompile_driver.py',
'nc_result_path': ('<(INTERMEDIATE_DIR)/<(module_dir)/'
'<(RULE_INPUT_ROOT)_nc.cc'),
},
'rule_name': 'run_nocompile',
'extension': 'nc',
'inputs': [
'<(nocompile_driver)',
],
'outputs': [
'<(nc_result_path)'
],
'action': [
'python',
'<(nocompile_driver)',
'4', # number of compilers to invoke in parallel.
'<(RULE_INPUT_PATH)',
'-Wall -Werror -Wfatal-errors -I<(DEPTH)',
'<(nc_result_path)',
],
'message': 'Generating no compile results for <(RULE_INPUT_PATH)',
'process_outputs_as_sources': 1,
},
],
}, {
'sources/': [['exclude', '\\.nc$']]
}], # 'OS=="linux" and clang=="0"'
],
}
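The actual parsing lives in tools/nocompile_driver.py, which is referenced but
not included here. Purely to illustrate the expectation-comment format
described above, a hypothetical parser might look like:

import ast
import re

# Matches expectation lines of the form:
#   #if defined(TEST_FOO)  // [r"some error regex"]
_EXPECTATION_RE = re.compile(
    r'^#(?:el)?if\s+defined\((\w+)\)\s*//\s*(\[.*\])\s*$')

def parse_expectation(line):
  # Returns (test_name, [compiled regexes]) or None. The comment payload is
  # a Python list literal of raw strings, so ast.literal_eval reads it.
  match = _EXPECTATION_RE.match(line)
  if not match:
    return None
  name, payload = match.groups()
  return name, [re.compile(p) for p in ast.literal_eval(payload)]

print(parse_expectation(
    '#if defined(TEST_NEEDS_SEMICOLON)  '
    "// [r\"expected ',' or ';' at end of input\"]"))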

View File

@ -0,0 +1,17 @@
<?xml version="1.0" encoding="utf-8"?>
<VisualStudioToolFile
Name="Output DLL copy"
Version="8.00"
>
<Rules>
<CustomBuildRule
Name="Output DLL copy"
CommandLine="xcopy /R /C /Y $(InputPath) $(OutDir)"
Outputs="$(OutDir)\$(InputFileName)"
FileExtensions="*.dll"
>
<Properties>
</Properties>
</CustomBuildRule>
</Rules>
</VisualStudioToolFile>

View File

@ -0,0 +1,7 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Precompiled header generator for Windows builds. No include is needed
// in this file as the PCH include is forced via the "Forced Include File"
// flag in the projects generated by GYP.

View File

@ -0,0 +1,108 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Precompiled header for Chromium project on Windows, not used by
// other build configurations. Using precompiled headers speeds the
// build up significantly, cutting build time by roughly a quarter on
// VS 2010 on an HP Z600 with 12 GB of memory.
//
// Numeric comments beside includes are the number of times they were
// included under src/chrome/browser on 2011/8/20, which was used as a
// baseline for deciding what to include in the PCH. It may be
// possible to tweak the speed of the build by commenting out or
// removing some of the less frequently used headers.
#if defined(BUILD_PRECOMPILE_H_)
#error You shouldn't include the precompiled header file more than once.
#endif
#define BUILD_PRECOMPILE_H_
// The Windows header needs to come before almost all the other
// Windows-specific headers.
#include <Windows.h>
#include <dwmapi.h>
#include <shellapi.h>
#include <wincrypt.h> // 4
#include <wtypes.h> // 2
// TODO(joi): Defines in atlbase.h cause conflicts; need to figure out
// if/how this family of headers can be included in the PCH; several
// of them are used quite frequently.
/*
#include <atlbase.h>
#include <atlapp.h>
#include <atlcom.h>
#include <atlcrack.h> // 2
#include <atlctrls.h> // 2
#include <atlmisc.h> // 2
#include <atlsafe.h> // 1
#include <atltheme.h> // 1
#include <atlwin.h> // 2
*/
// TODO(joi): Objbase.h and other files that rely on it bring in
// [ #define interface struct ] which can cause problems in a
// multi-platform build like Chrome's. Tried #undef-ing it, but
// there are currently 118 targets that break if we do this, so
// leaving out of the precompiled header for now.
//#include <commctrl.h> // 2
//#include <commdlg.h> // 3
//#include <cryptuiapi.h> // 2
//#include <Objbase.h> // 2
//#include <objidl.h> // 1
//#include <ole2.h> // 1
//#include <oleacc.h> // 2
//#include <oleauto.h> // 1
//#include <oleidl.h> // 1
//#include <propkey.h> // 2
//#include <propvarutil.h> // 2
//#include <pstore.h> // 2
//#include <shlguid.h> // 1
//#include <shlwapi.h> // 1
//#include <shobjidl.h> // 4
//#include <urlhist.h> // 2
// TODO(joi): Caused other conflicts in addition to the 'interface' issue
// above, see if they can be resolved.
//#include <shlobj.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h> // 4
#include <math.h>
#include <memory.h> // 1
#include <signal.h>
#include <stdarg.h> // 1
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h> // 4
#include <algorithm>
#include <bitset> // 3
#include <cmath>
#include <cstddef>
#include <cstdio> // 3
#include <cstdlib> // 2
#include <cstring>
#include <deque>
#include <fstream> // 3
#include <functional>
#include <iomanip> // 2
#include <iosfwd> // 2
#include <iterator>
#include <limits>
#include <list>
#include <map>
#include <numeric> // 2
#include <ostream>
#include <queue>
#include <set>
#include <sstream>
#include <stack>
#include <string>
#include <utility>
#include <vector>

View File

@ -0,0 +1,92 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is meant to be included into a target to provide a rule
# to invoke protoc in a consistent manner.
#
# To use this, create a gyp target with the following form:
# {
# 'target_name': 'my_proto_lib',
# 'type': 'static_library',
# 'sources': [
# 'foo.proto',
# 'bar.proto',
# ],
# 'variables': {
# # Optional, see below: 'proto_in_dir': '.'
# 'proto_out_dir': 'dir/for/my_proto_lib'
# },
# 'includes': ['path/to/this/gypi/file'],
# }
# If necessary, you may add normal .cc files to the sources list or other gyp
# dependencies. The proto headers are guaranteed to be generated before any
# source files, even within this target, are compiled.
#
# The 'proto_in_dir' variable must be the relative path to the
# directory containing the .proto files. If left out, it defaults to '.'.
#
# The 'proto_out_dir' variable specifies the path suffix that output
# files are generated under. Targets that gyp-depend on my_proto_lib
# will be able to include the resulting proto headers with an include
# like:
# #include "dir/for/my_proto_lib/foo.pb.h"
#
# Implementation notes:
# A proto_out_dir of foo/bar produces
# <(SHARED_INTERMEDIATE_DIR)/protoc_out/foo/bar/{file1,file2}.pb.{cc,h}
# <(SHARED_INTERMEDIATE_DIR)/pyproto/foo/bar/{file1,file2}_pb2.py
{
'variables': {
'protoc': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)protoc<(EXECUTABLE_SUFFIX)',
'cc_dir': '<(SHARED_INTERMEDIATE_DIR)/protoc_out/<(proto_out_dir)',
'py_dir': '<(PRODUCT_DIR)/pyproto/<(proto_out_dir)',
'proto_in_dir%': '.',
},
'rules': [
{
'rule_name': 'genproto',
'extension': 'proto',
'inputs': [
'<(protoc)',
],
'outputs': [
'<(py_dir)/<(RULE_INPUT_ROOT)_pb2.py',
'<(cc_dir)/<(RULE_INPUT_ROOT).pb.cc',
'<(cc_dir)/<(RULE_INPUT_ROOT).pb.h',
],
'action': [
'<(protoc)',
'--proto_path=<(proto_in_dir)',
# Naively you'd use <(RULE_INPUT_PATH) here, but protoc requires
        # that --proto_path be a strict prefix of the path given as an argument.
'<(proto_in_dir)/<(RULE_INPUT_ROOT)<(RULE_INPUT_EXT)',
'--cpp_out=<(cc_dir)',
'--python_out=<(py_dir)',
],
'message': 'Generating C++ and Python code from <(RULE_INPUT_PATH)',
'process_outputs_as_sources': 1,
},
],
'dependencies': [
'<(DEPTH)/third_party/protobuf/protobuf.gyp:protoc#host',
'<(DEPTH)/third_party/protobuf/protobuf.gyp:protobuf_lite',
],
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/protoc_out',
],
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)/protoc_out',
]
},
'export_dependent_settings': [
# The generated headers reference headers within protobuf_lite,
# so dependencies must be able to find those headers too.
'<(DEPTH)/third_party/protobuf/protobuf.gyp:protobuf_lite',
],
# This target exports a hard dependency because it generates header
# files.
'hard_dependency': 1,
}
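Concretely, for the example target above (proto_in_dir '.', proto_out_dir
'dir/for/my_proto_lib'), each foo.proto maps to one generated Python module
and a .pb.cc/.pb.h pair. A sketch of the mapping the rule's outputs encode,
with hypothetical output directories:

import os

def protoc_outputs(proto, cc_dir, py_dir):
  # Mirrors the rule's 'outputs' list: RULE_INPUT_ROOT is the source file
  # name without its extension.
  root = os.path.splitext(os.path.basename(proto))[0]
  return [os.path.join(py_dir, root + '_pb2.py'),
          os.path.join(cc_dir, root + '.pb.cc'),
          os.path.join(cc_dir, root + '.pb.h')]

print(protoc_outputs('foo.proto',
                     'gen/protoc_out/dir/for/my_proto_lib',
                     'out/pyproto/dir/for/my_proto_lib'))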

View File

@ -0,0 +1,17 @@
{
'conditions': [
# Handle build types.
['buildtype=="Dev"', {
'includes': ['internal/release_impl.gypi'],
}],
['buildtype=="Official"', {
'includes': ['internal/release_impl_official.gypi'],
}],
# TODO(bradnelson): may also need:
# checksenabled
# coverage
# dom_stats
# pgo_instrument
# pgo_optimize
],
}

View File

@ -0,0 +1,35 @@
#!/bin/echo Use sanitize-mac-build-log.sh or sed -f
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Use this sed script to reduce a Mac build log into something readable.
# Drop uninformative lines.
/^distcc/d
/^Check dependencies/d
/^ setenv /d
/^ cd /d
/^make: Nothing to be done/d
/^$/d
# Xcode prints a short "compiling foobar.o" line followed by the lengthy
# full command line. These deletions drop the command line.
\|^ /Developer/usr/bin/|d
\|^ /Developer/Library/PrivateFrameworks/DevToolsCore.framework/|d
\|^ /Developer/Library/Xcode/Plug-ins/CoreBuildTasks.xcplugin/|d
# Drop any goma command lines as well.
\|^ .*/gomacc |d
# And, if you've overridden something from your own bin directory, remove those
# full command lines, too.
\|^ /Users/[^/]*/bin/|d
# There's already a nice note for bindings, don't need the command line.
\|^python scripts/rule_binding.py|d
# Shorten the "compiling foobar.o" line.
s|^Distributed-CompileC \(.*\) normal i386 c++ com.apple.compilers.gcc.4_2| CC \1|
s|^CompileC \(.*\) normal i386 c++ com.apple.compilers.gcc.4_2| CC \1|

View File

@ -0,0 +1,6 @@
#!/bin/sh
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
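# Derive the sed script's path from this wrapper's own name: `basename
# "${0}" sh` strips the trailing "sh" from sanitize-mac-build-log.sh, and
# appending "sed" selects the sanitize-mac-build-log.sed script that lives
# next to this file.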
sed -f `dirname "${0}"`/`basename "${0}" sh`sed

View File

@ -0,0 +1,14 @@
#!/bin/echo Use sanitize-win-build-log.sh or sed -f
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Use this sed script to reduce a Windows build log into something
# machine-parsable.
# Drop uninformative lines.
/The operation completed successfully./d
# Drop parallelization indicators on lines.
s/^[0-9]\+>//

View File

@ -0,0 +1,6 @@
#!/bin/sh
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
sed -f `dirname "${0}"`/`basename "${0}" sh`sed

View File

@ -0,0 +1,24 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'some',
'type': 'none',
'dependencies': [
# This file is intended to be locally modified. List the targets you use
        # regularly. The generated some.sln will contain projects for only
        # those targets and the targets they transitively depend on. This
# can result in a solution that loads and unloads faster in Visual
# Studio.
#
# Tip: Create a dummy CL to hold your local edits to this file, so they
# don't accidentally get added to another CL that you are editing.
#
# Example:
# '../chrome/chrome.gyp:chrome',
],
},
],
}

View File

@ -0,0 +1,3 @@
This directory will be removed once the files in it are committed upstream and
Chromium imports an upstream revision with these files. Contact mark for
details.

View File

@ -0,0 +1,101 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(mark): Upstream this file to googleurl.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'googleurl',
'type': '<(component)',
'dependencies': [
'../../base/base.gyp:base',
'../../third_party/icu/icu.gyp:icudata',
'../../third_party/icu/icu.gyp:icui18n',
'../../third_party/icu/icu.gyp:icuuc',
],
'sources': [
'../../googleurl/src/gurl.cc',
'../../googleurl/src/gurl.h',
'../../googleurl/src/url_canon.h',
'../../googleurl/src/url_canon_etc.cc',
'../../googleurl/src/url_canon_fileurl.cc',
'../../googleurl/src/url_canon_host.cc',
'../../googleurl/src/url_canon_icu.cc',
'../../googleurl/src/url_canon_icu.h',
'../../googleurl/src/url_canon_internal.cc',
'../../googleurl/src/url_canon_internal.h',
'../../googleurl/src/url_canon_internal_file.h',
'../../googleurl/src/url_canon_ip.cc',
'../../googleurl/src/url_canon_ip.h',
'../../googleurl/src/url_canon_mailtourl.cc',
'../../googleurl/src/url_canon_path.cc',
'../../googleurl/src/url_canon_pathurl.cc',
'../../googleurl/src/url_canon_query.cc',
'../../googleurl/src/url_canon_relative.cc',
'../../googleurl/src/url_canon_stdstring.h',
'../../googleurl/src/url_canon_stdurl.cc',
'../../googleurl/src/url_file.h',
'../../googleurl/src/url_parse.cc',
'../../googleurl/src/url_parse.h',
'../../googleurl/src/url_parse_file.cc',
'../../googleurl/src/url_parse_internal.h',
'../../googleurl/src/url_util.cc',
'../../googleurl/src/url_util.h',
],
'direct_dependent_settings': {
'include_dirs': [
'../..',
],
},
'conditions': [
['component=="shared_library"', {
'defines': [
'GURL_DLL',
'GURL_IMPLEMENTATION=1',
],
'direct_dependent_settings': {
'defines': [
'GURL_DLL',
],
},
}],
],
},
{
'target_name': 'googleurl_unittests',
'type': 'executable',
'dependencies': [
'googleurl',
'../../base/base.gyp:base_i18n',
'../../base/base.gyp:test_support_base',
'../../testing/gtest.gyp:gtest',
'../../third_party/icu/icu.gyp:icuuc',
],
'sources': [
'../../googleurl/src/gurl_unittest.cc',
'../../googleurl/src/url_canon_unittest.cc',
'../../googleurl/src/url_parse_unittest.cc',
'../../googleurl/src/url_test_utils.h',
'../../googleurl/src/url_util_unittest.cc',
# Make sure base and ICU are started up the 'Chromium way' since the
# build is using the Chromium base & ICU.
'../../base/test/run_all_unittests.cc',
],
'conditions': [
['os_posix==1 and OS!="mac"', {
'conditions': [
['linux_use_tcmalloc==1', {
'dependencies': [
'../../base/allocator/allocator.gyp:allocator',
],
}],
],
}],
],
},
],
}

View File

@ -0,0 +1,40 @@
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'pdfsqueeze',
'type': 'executable',
'sources': [
'../../third_party/pdfsqueeze/pdfsqueeze.m',
],
'defines': [
# Use defines to map the full path names that will be used for
# the vars into the short forms expected by pdfsqueeze.m.
'______third_party_pdfsqueeze_ApplyGenericRGB_qfilter=ApplyGenericRGB_qfilter',
'______third_party_pdfsqueeze_ApplyGenericRGB_qfilter_len=ApplyGenericRGB_qfilter_len',
],
'include_dirs': [
'<(INTERMEDIATE_DIR)',
],
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
'$(SDKROOT)/System/Library/Frameworks/Quartz.framework',
],
'actions': [
{
'action_name': 'Generate inline filter data',
'inputs': [
'../../third_party/pdfsqueeze/ApplyGenericRGB.qfilter',
],
'outputs': [
'<(INTERMEDIATE_DIR)/ApplyGenericRGB.h',
],
'action': ['xxd', '-i', '<@(_inputs)', '<@(_outputs)'],
},
],
},
],
}
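The odd-looking defines above exist because xxd -i names the generated C
symbols after the input path, replacing every non-alphanumeric character with
an underscore. A quick sketch of that mangling:

import re

def xxd_identifier(path):
  # Approximates how xxd -i derives the C array name from the input path;
  # the defines in the target map the mangled names back to the short ones
  # pdfsqueeze.m expects.
  return re.sub(r'[^0-9A-Za-z]', '_', path)

assert (xxd_identifier('../../third_party/pdfsqueeze/ApplyGenericRGB.qfilter')
        == '______third_party_pdfsqueeze_ApplyGenericRGB_qfilter')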

View File

@ -0,0 +1,9 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Whether the Mac port uses Skia (instead of Core Graphics) by default.
# This overrides the setting in common.gypi .
{
'use_skia_on_mac%': 1,
}

View File

@ -0,0 +1,36 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'lastchange',
'type': 'none',
'variables': {
'lastchange_out_path': '<(DEPTH)/build/util/LASTCHANGE',
'default_lastchange_path': '../LASTCHANGE.in',
},
'actions': [
{
'action_name': 'lastchange',
'inputs': [
# Note: <(default_lastchange_path) is optional,
# so it doesn't show up in inputs.
'./lastchange.py',
],
'outputs': [
'<(lastchange_out_path)',
],
'action': [
'python', '<@(_inputs)',
'-o', '<(lastchange_out_path)',
'-d', '<(default_lastchange_path)',
],
'message': 'Extracting last change to <(lastchange_out_path)',
'process_outputs_as_sources': '1',
},
],
},
]
}

View File

@ -0,0 +1,230 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
lastchange.py -- Chromium revision fetching utility.
"""
import re
import optparse
import os
import subprocess
import sys
_GIT_SVN_ID_REGEX = re.compile(r'.*git-svn-id:\s*([^@]*)@([0-9]+)', re.DOTALL)
class VersionInfo(object):
def __init__(self, url, revision):
self.url = url
self.revision = revision
def FetchSVNRevision(directory, svn_url_regex):
"""
Fetch the Subversion branch and revision for a given directory.
Errors are swallowed.
Returns:
A VersionInfo object or None on error.
"""
try:
proc = subprocess.Popen(['svn', 'info'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=directory,
shell=(sys.platform=='win32'))
except OSError:
# command is apparently either not installed or not executable.
return None
if not proc:
return None
attrs = {}
for line in proc.stdout:
line = line.strip()
if not line:
continue
key, val = line.split(': ', 1)
attrs[key] = val
try:
match = svn_url_regex.search(attrs['URL'])
if match:
url = match.group(2)
else:
url = ''
revision = attrs['Revision']
except KeyError:
return None
return VersionInfo(url, revision)
def RunGitCommand(directory, command):
"""
Launches git subcommand.
Errors are swallowed.
Returns:
A process object or None.
"""
command = ['git'] + command
# Force shell usage under cygwin. This is a workaround for
# mysterious loss of cwd while invoking cygwin's git.
# We can't just pass shell=True to Popen, as under win32 this will
# cause CMD to be used, while we explicitly want a cygwin shell.
if sys.platform == 'cygwin':
command = ['sh', '-c', ' '.join(command)]
try:
proc = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=directory,
shell=(sys.platform=='win32'))
return proc
except OSError:
return None
def FetchGitRevision(directory):
"""
Fetch the Git hash for a given directory.
Errors are swallowed.
Returns:
A VersionInfo object or None on error.
"""
proc = RunGitCommand(directory, ['rev-parse', 'HEAD'])
if proc:
output = proc.communicate()[0].strip()
if proc.returncode == 0 and output:
return VersionInfo('git', output[:7])
return None
def FetchGitSVNURLAndRevision(directory, svn_url_regex):
"""
Fetch the Subversion URL and revision through Git.
Errors are swallowed.
Returns:
A tuple containing the Subversion URL and revision.
"""
proc = RunGitCommand(directory, ['log', '-1',
'--grep=git-svn-id', '--format=%b'])
if proc:
output = proc.communicate()[0].strip()
if proc.returncode == 0 and output:
# Extract the latest SVN revision and the SVN URL.
# The target line is the last "git-svn-id: ..." line like this:
# git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85528 0039d316....
match = _GIT_SVN_ID_REGEX.search(output)
if match:
revision = match.group(2)
url_match = svn_url_regex.search(match.group(1))
if url_match:
url = url_match.group(2)
else:
url = ''
return url, revision
return None, None
def FetchGitSVNRevision(directory, svn_url_regex):
"""
Fetch the Git-SVN identifier for the local tree.
Errors are swallowed.
"""
url, revision = FetchGitSVNURLAndRevision(directory, svn_url_regex)
if url and revision:
return VersionInfo(url, revision)
return None
def FetchVersionInfo(default_lastchange, directory=None,
directory_regex_prior_to_src_url='chrome|svn'):
"""
  Returns the last change, in the form of a (branch, revision) tuple,
  from some appropriate revision control system.
"""
svn_url_regex = re.compile(
r'.*/(' + directory_regex_prior_to_src_url + r')(/.*)')
version_info = (FetchSVNRevision(directory, svn_url_regex) or
FetchGitSVNRevision(directory, svn_url_regex) or
FetchGitRevision(directory))
if not version_info:
if default_lastchange and os.path.exists(default_lastchange):
revision = open(default_lastchange, 'r').read().strip()
version_info = VersionInfo(None, revision)
else:
version_info = VersionInfo(None, None)
return version_info
def WriteIfChanged(file_name, contents):
"""
Writes the specified contents to the specified file_name
iff the contents are different than the current contents.
"""
try:
old_contents = open(file_name, 'r').read()
except EnvironmentError:
pass
else:
if contents == old_contents:
return
os.unlink(file_name)
open(file_name, 'w').write(contents)
def main(argv=None):
if argv is None:
argv = sys.argv
parser = optparse.OptionParser(usage="lastchange.py [options]")
parser.add_option("-d", "--default-lastchange", metavar="FILE",
help="default last change input FILE")
parser.add_option("-o", "--output", metavar="FILE",
help="write last change to FILE")
parser.add_option("--revision-only", action='store_true',
help="just print the SVN revision number")
opts, args = parser.parse_args(argv[1:])
out_file = opts.output
  if args and out_file is None:
    out_file = args.pop(0)
if args:
sys.stderr.write('Unexpected arguments: %r\n\n' % args)
parser.print_help()
sys.exit(2)
version_info = FetchVersionInfo(opts.default_lastchange,
os.path.dirname(sys.argv[0]))
  if version_info.revision is None:
version_info.revision = '0'
if opts.revision_only:
print version_info.revision
else:
contents = "LASTCHANGE=%s\n" % version_info.revision
if out_file:
WriteIfChanged(out_file, contents)
else:
sys.stdout.write(contents)
return 0
if __name__ == '__main__':
sys.exit(main())
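A minimal example of driving the module programmatically (hypothetical file
names; the build normally runs it through the lastchange gyp action above):

from lastchange import FetchVersionInfo, WriteIfChanged

# Fall back to a checked-in LASTCHANGE.in when no svn/git metadata is
# available, mirroring the -d/--default-lastchange option.
info = FetchVersionInfo('LASTCHANGE.in', directory='.')
WriteIfChanged('LASTCHANGE', 'LASTCHANGE=%s\n' % (info.revision or '0'))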

View File

@ -0,0 +1,50 @@
Copyright (c) 2011 The Chromium Authors. All rights reserved.
Use of this useless file is governed by a BSD-style license that can be
found in the LICENSE file.
This file is used for making non-code changes to trigger buildbot cycles. Make
any modification below this line.
============================================================================
Let's make a story. Add one sentence for each commit:
CHAPTER 1:
It was a dark and stormy night; the rain fell in torrents--except at
occasional intervals, when it was checked by a violent gust of wind which
swept up the streets (for it is in London that our scene lies), rattling along
the housetops, and fiercely agitating the scanty flame of the lamps that
struggled against the darkness. A dark figure emerged.
It was a Domo-Kun. "What took you so long?", inquired his wife.
Silence. Not noticing his silence, she continued, "Did Mr. Usagi enjoy the
waffles you brought him?" "You know him, he's not one to forego a waffle,
no matter how burnt", he snickered.
The pause was filled with the sound of thunder.
CHAPTER 2:
The syrup was as dark as night, and just as runny.
The Domo-Kun shuddered, remembering the way Mr. Usagi had speared his waffles
with his fork, watching the runny syrup spread and pool across his plate,
like the blood of a dying fawn. "It reminds me of the time --" he started, as
his wife cut in quickly: "-- please. I can't bear to hear it." A flurry of
images coming from the past flowed through his mind.
"You recall what happened on Mulholland drive?" The ceiling fan rotated slowly
overhead, barely disturbing the thick cigarette smoke. No doubt was left about
when the fan was last cleaned.
There was a poignant pause.
CHAPTER 3:
Mr. Usagi felt that something wasn't right. Shortly after the Domo-Kun left he
began feeling sick. He thought out loud to himself, "No, he wouldn't have done
that to me." He considered that perhaps he shouldn't have pushed him so far.
Perhaps he shouldn't have been so cold and sarcastic, after the unimaginable
horror that had occurred, just the week before.
Next time, there won't be any sushi. Why sushi with waffles anyway? It's like
salmon in a cereal bowl.
CHAPTER 4:

View File

@ -0,0 +1,26 @@
# -*- python -*-
# Crocodile config file for Chromium windows
{
# List of rules, applied in order
'rules' : [
# Specify inclusions before exclusions, since rules are in order.
# Don't include chromeos, posix, or linux specific files
{
'regexp' : '.*(_|/)(chromeos|linux|posix)(\\.|_)',
'include' : 0,
},
# Don't include ChromeOS dirs
{
'regexp' : '.*/chromeos/',
'include' : 0,
},
# Groups
{
'regexp' : '.*_test_win\\.',
'group' : 'test',
},
],
}

View File

@ -0,0 +1,47 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil
import sys
import os
def patch_msbuild():
"""VS2010 MSBuild has a ULDI bug that we patch here. See http://goo.gl/Pn8tj.
"""
source_path = os.path.join(os.environ['ProgramFiles(x86)'],
"MSBuild",
"Microsoft.Cpp",
"v4.0",
"Microsoft.CppBuild.targets")
backup_path = source_path + ".backup"
if not os.path.exists(backup_path):
try:
print "Backing up %s..." % source_path
shutil.copyfile(source_path, backup_path)
except IOError:
print "Could not back up %s to %s. Run as Administrator?" % (
source_path, backup_path)
return 1
source = open(source_path).read()
base = ('''<Target Name="GetResolvedLinkObjs" Returns="@(ObjFullPath)" '''
'''DependsOnTargets="$(CommonBuildOnlyTargets);ComputeCLOutputs;'''
'''ResolvedLinkObjs"''')
find = base + '>'
replace = base + ''' Condition="'$(ConfigurationType)'=='StaticLibrary'">'''
result = source.replace(find, replace)
if result != source:
open(source_path, "w").write(result)
print "Patched %s." % source_path
return 0
def main():
return patch_msbuild()
if __name__ == "__main__":
sys.exit(main())

View File

@ -0,0 +1,20 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
def main():
if len(sys.argv) != 2 or sys.argv[1] != '--win-only':
return 1
if sys.platform in ('win32', 'cygwin'):
self_dir = os.path.dirname(sys.argv[0])
mount_path = os.path.join(self_dir, "../../third_party/cygwin")
batch_path = os.path.join(mount_path, "setup_mount.bat")
return os.system(os.path.normpath(batch_path) + ">nul")
return 0
if __name__ == "__main__":
sys.exit(main())

View File

@ -0,0 +1,20 @@
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Include this file to make targets in your .gyp use the default
# precompiled header on Windows. Debug builds only: the official builders
# blow up (out of memory) if precompiled headers are used for release
# builds.
{
'conditions': [
['OS=="win" and chromium_win_pch==1', {
'target_defaults': {
'msvs_precompiled_header': '<(DEPTH)/build/precompile.h',
'msvs_precompiled_source': '<(DEPTH)/build/precompile.cc',
'sources': ['<(DEPTH)/build/precompile.cc'],
}
}],
],
}

View File

@ -0,0 +1,6 @@
# This file is generated by gyp; do not edit.
export builddir_name ?= trunk/out
.PHONY: all
all:
$(MAKE) peerconnection_server peerconnection_client

View File

@ -0,0 +1,113 @@
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'includes': [ 'src/build/common.gypi', ],
'variables': {
'peerconnection_sample': 'third_party/libjingle/source/talk/examples/peerconnection',
},
'targets': [
{
'target_name': 'peerconnection_server',
'type': 'executable',
'sources': [
'<(peerconnection_sample)/server/data_socket.cc',
'<(peerconnection_sample)/server/data_socket.h',
'<(peerconnection_sample)/server/main.cc',
'<(peerconnection_sample)/server/peer_channel.cc',
'<(peerconnection_sample)/server/peer_channel.h',
'<(peerconnection_sample)/server/utils.cc',
'<(peerconnection_sample)/server/utils.h',
],
'include_dirs': [
'third_party/libjingle/source',
],
},
],
'conditions': [
['OS=="win"', {
'targets': [
{
'target_name': 'peerconnection_client',
'type': 'executable',
'sources': [
'<(peerconnection_sample)/client/conductor.cc',
'<(peerconnection_sample)/client/conductor.h',
'<(peerconnection_sample)/client/defaults.cc',
'<(peerconnection_sample)/client/defaults.h',
'<(peerconnection_sample)/client/main.cc',
'<(peerconnection_sample)/client/main_wnd.cc',
'<(peerconnection_sample)/client/main_wnd.h',
'<(peerconnection_sample)/client/peer_connection_client.cc',
'<(peerconnection_sample)/client/peer_connection_client.h',
'third_party/libjingle/source/talk/base/win32socketinit.cc',
'third_party/libjingle/source/talk/base/win32socketserver.cc',
],
'msvs_settings': {
'VCLinkerTool': {
'SubSystem': '2', # Windows
},
},
'dependencies': [
'third_party/libjingle/libjingle.gyp:libjingle_app',
],
'include_dirs': [
'src',
'src/modules/interface',
'third_party/libjingle/source',
],
},
], # targets
}, ], # OS="win"
['OS=="linux"', {
'targets': [
{
'target_name': 'peerconnection_client',
'type': 'executable',
'sources': [
'<(peerconnection_sample)/client/conductor.cc',
'<(peerconnection_sample)/client/conductor.h',
'<(peerconnection_sample)/client/defaults.cc',
'<(peerconnection_sample)/client/defaults.h',
'<(peerconnection_sample)/client/linux/main.cc',
'<(peerconnection_sample)/client/linux/main_wnd.cc',
'<(peerconnection_sample)/client/linux/main_wnd.h',
'<(peerconnection_sample)/client/peer_connection_client.cc',
'<(peerconnection_sample)/client/peer_connection_client.h',
],
'dependencies': [
'third_party/libjingle/libjingle.gyp:libjingle_app',
# TODO(tommi): Switch to this and remove specific gtk dependency
# sections below for cflags and link_settings.
# '<(DEPTH)/build/linux/system.gyp:gtk',
],
'include_dirs': [
'src',
'src/modules/interface',
'third_party/libjingle/source',
],
'cflags': [
'<!@(pkg-config --cflags gtk+-2.0)',
],
'link_settings': {
'ldflags': [
'<!@(pkg-config --libs-only-L --libs-only-other gtk+-2.0 gthread-2.0)',
],
'libraries': [
'<!@(pkg-config --libs-only-l gtk+-2.0 gthread-2.0)',
'-lX11',
'-lXext',
],
},
},
], # targets
}, ], # OS="linux"
],
}

View File

@ -0,0 +1,239 @@
# This file is generated by gyp; do not edit.
TOOLSET := target
TARGET := peerconnection_client
DEFS_Debug := '-D_FILE_OFFSET_BITS=64' \
'-DCHROMIUM_BUILD' \
'-DUSE_NSS=1' \
'-DTOOLKIT_USES_GTK=1' \
'-DGTK_DISABLE_SINGLE_INCLUDES=1' \
'-DENABLE_REMOTING=1' \
'-DENABLE_P2P_APIS=1' \
'-DENABLE_CONFIGURATION_POLICY' \
'-DENABLE_INPUT_SPEECH' \
'-DENABLE_NOTIFICATIONS' \
'-DENABLE_GPU=1' \
'-DENABLE_EGLIMAGE=1' \
'-DUSE_SKIA=1' \
'-DENABLE_REGISTER_PROTOCOL_HANDLER=1' \
'-DENABLE_WEB_INTENTS=1' \
'-DENABLE_PLUGIN_INSTALLATION=1' \
'-DWEBRTC_TARGET_PC' \
'-DWEBRTC_LINUX' \
'-DWEBRTC_THREAD_RR' \
'-DFEATURE_ENABLE_SSL' \
'-DFEATURE_ENABLE_VOICEMAIL' \
'-DEXPAT_RELATIVE_PATH' \
'-DWEBRTC_RELATIVE_PATH' \
'-DLINUX' \
'-DPOSIX' \
'-D__STDC_FORMAT_MACROS' \
'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
'-D_DEBUG'
# Flags passed to all source files.
CFLAGS_Debug := -Werror \
-pthread \
-fno-exceptions \
-fno-strict-aliasing \
-Wall \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-fvisibility=hidden \
-pipe \
-fPIC \
-Wextra \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-pthread \
-I/usr/include/gtk-2.0 \
-I/usr/lib64/gtk-2.0/include \
-I/usr/include/atk-1.0 \
-I/usr/include/cairo \
-I/usr/include/gdk-pixbuf-2.0 \
-I/usr/include/pango-1.0 \
-I/usr/include/glib-2.0 \
-I/usr/lib64/glib-2.0/include \
-I/usr/include/pixman-1 \
-I/usr/include/freetype2 \
-I/usr/include/libpng12 \
-O0 \
-g
# Flags passed to only C files.
CFLAGS_C_Debug :=
# Flags passed to only C++ files.
CFLAGS_CC_Debug := -fno-rtti \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare
INCS_Debug := -Isrc \
-I. \
-Isrc/modules/interface \
-Ithird_party/libjingle/source
DEFS_Release := '-D_FILE_OFFSET_BITS=64' \
'-DCHROMIUM_BUILD' \
'-DUSE_NSS=1' \
'-DTOOLKIT_USES_GTK=1' \
'-DGTK_DISABLE_SINGLE_INCLUDES=1' \
'-DENABLE_REMOTING=1' \
'-DENABLE_P2P_APIS=1' \
'-DENABLE_CONFIGURATION_POLICY' \
'-DENABLE_INPUT_SPEECH' \
'-DENABLE_NOTIFICATIONS' \
'-DENABLE_GPU=1' \
'-DENABLE_EGLIMAGE=1' \
'-DUSE_SKIA=1' \
'-DENABLE_REGISTER_PROTOCOL_HANDLER=1' \
'-DENABLE_WEB_INTENTS=1' \
'-DENABLE_PLUGIN_INSTALLATION=1' \
'-DWEBRTC_TARGET_PC' \
'-DWEBRTC_LINUX' \
'-DWEBRTC_THREAD_RR' \
'-DFEATURE_ENABLE_SSL' \
'-DFEATURE_ENABLE_VOICEMAIL' \
'-DEXPAT_RELATIVE_PATH' \
'-DWEBRTC_RELATIVE_PATH' \
'-DLINUX' \
'-DPOSIX' \
'-D__STDC_FORMAT_MACROS' \
'-DNDEBUG' \
'-DNVALGRIND' \
'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
# Flags passed to all source files.
CFLAGS_Release := -Werror \
-pthread \
-fno-exceptions \
-fno-strict-aliasing \
-Wall \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-fvisibility=hidden \
-pipe \
-fPIC \
-Wextra \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-pthread \
-I/usr/include/gtk-2.0 \
-I/usr/lib64/gtk-2.0/include \
-I/usr/include/atk-1.0 \
-I/usr/include/cairo \
-I/usr/include/gdk-pixbuf-2.0 \
-I/usr/include/pango-1.0 \
-I/usr/include/glib-2.0 \
-I/usr/lib64/glib-2.0/include \
-I/usr/include/pixman-1 \
-I/usr/include/freetype2 \
-I/usr/include/libpng12 \
-O2 \
-fno-ident \
-fdata-sections \
-ffunction-sections
# Flags passed to only C files.
CFLAGS_C_Release :=
# Flags passed to only C++ files.
CFLAGS_CC_Release := -fno-rtti \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare
INCS_Release := -Isrc \
-I. \
-Isrc/modules/interface \
-Ithird_party/libjingle/source
OBJS := $(obj).target/$(TARGET)/third_party/libjingle/source/talk/examples/peerconnection/client/conductor.o \
$(obj).target/$(TARGET)/third_party/libjingle/source/talk/examples/peerconnection/client/defaults.o \
$(obj).target/$(TARGET)/third_party/libjingle/source/talk/examples/peerconnection/client/linux/main.o \
$(obj).target/$(TARGET)/third_party/libjingle/source/talk/examples/peerconnection/client/linux/main_wnd.o \
$(obj).target/$(TARGET)/third_party/libjingle/source/talk/examples/peerconnection/client/peer_connection_client.o
# Add to the list of files we specially track dependencies for.
all_deps += $(OBJS)
# Make sure our dependencies are built before any of us.
$(OBJS): | $(obj).target/third_party/libjingle/libjingle_app.a $(obj).target/third_party/expat/expat.stamp $(obj).target/third_party/jsoncpp/libjsoncpp.a $(obj).target/third_party/libsrtp/libsrtp.a $(obj).target/src/modules/libvideo_capture_module.a $(obj).target/src/modules/libwebrtc_utility.a $(obj).target/src/modules/libaudio_coding_module.a $(obj).target/src/modules/libCNG.a $(obj).target/src/common_audio/libsignal_processing.a $(obj).target/src/modules/libG711.a $(obj).target/src/modules/libG722.a $(obj).target/src/modules/libiLBC.a $(obj).target/src/modules/libiSAC.a $(obj).target/src/modules/libiSACFix.a $(obj).target/src/modules/libPCM16B.a $(obj).target/src/modules/libNetEq.a $(obj).target/src/common_audio/libresampler.a $(obj).target/src/common_audio/libvad.a $(obj).target/src/system_wrappers/source/libsystem_wrappers.a $(obj).target/src/modules/libwebrtc_video_coding.a $(obj).target/src/modules/libwebrtc_i420.a $(obj).target/src/modules/libwebrtc_vp8.a $(obj).target/src/common_video/libwebrtc_libyuv.a $(obj).target/third_party/libyuv/libyuv.a $(obj).target/third_party/libvpx/libvpx.a $(obj).target/src/modules/libvideo_render_module.a $(obj).target/src/video_engine/libvideo_engine_core.a $(obj).target/src/common_video/libwebrtc_jpeg.a $(obj).target/third_party/libjpeg_turbo/libjpeg_turbo.a $(obj).target/src/modules/libmedia_file.a $(obj).target/src/modules/librtp_rtcp.a $(obj).target/src/modules/libudp_transport.a $(obj).target/src/modules/libvideo_processing.a $(obj).target/src/modules/libvideo_processing_sse2.a $(obj).target/src/voice_engine/libvoice_engine_core.a $(obj).target/src/modules/libaudio_conference_mixer.a $(obj).target/src/modules/libaudio_processing.a $(obj).target/src/modules/libaec.a $(obj).target/src/modules/libapm_util.a $(obj).target/src/modules/libaec_sse2.a $(obj).target/src/modules/libaecm.a $(obj).target/src/modules/libagc.a $(obj).target/src/modules/libns.a $(obj).target/src/modules/libaudioproc_debug_proto.a $(obj).target/third_party/protobuf/libprotobuf_lite.a $(obj).target/src/modules/libaudio_device.a $(obj).target/third_party/libjingle/libjingle_p2p.a $(obj).target/third_party/libjingle/libjingle.a
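# The "|" above marks the archives as order-only prerequisites: they must be
# built before any of the objects, but touching them does not by itself force
# the objects to recompile. See "Types of Prerequisites" in the GNU Make manual.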
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.
$(OBJS): TOOLSET := $(TOOLSET)
$(OBJS): GYP_CFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_C_$(BUILDTYPE))
$(OBJS): GYP_CXXFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_CC_$(BUILDTYPE))
# Suffix rules, putting all outputs into $(obj).
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
# Try building from generated source, too.
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
# End of this set of suffix rules
### Rules for final target.
LDFLAGS_Debug := -pthread \
-Wl,-z,noexecstack \
-fPIC \
-B$(builddir)/../../third_party/gold
LDFLAGS_Release := -pthread \
-Wl,-z,noexecstack \
-fPIC \
-B$(builddir)/../../third_party/gold \
-Wl,-O1 \
-Wl,--as-needed \
-Wl,--gc-sections
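# Note that -Wl,--gc-sections pairs with the -ffunction-sections and
# -fdata-sections compile flags above: each function and datum gets its own
# section, so the linker can drop unreferenced ones from the Release binary.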
LIBS := -lgtk-x11-2.0 \
-lgdk-x11-2.0 \
-latk-1.0 \
-lgio-2.0 \
-lpangoft2-1.0 \
-lpangocairo-1.0 \
-lgdk_pixbuf-2.0 \
-lpng12 \
-lm \
-lcairo \
-lpango-1.0 \
-lfreetype \
-lfontconfig \
-lgobject-2.0 \
-lgmodule-2.0 \
-lgthread-2.0 \
-lrt \
-lglib-2.0 \
-lX11 \
-lXext \
-lexpat \
-ldl \
-lasound \
-lpulse
$(builddir)/peerconnection_client: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))
$(builddir)/peerconnection_client: LIBS := $(LIBS)
$(builddir)/peerconnection_client: LD_INPUTS := $(OBJS) $(obj).target/third_party/libjingle/libjingle_app.a $(obj).target/third_party/jsoncpp/libjsoncpp.a $(obj).target/third_party/libsrtp/libsrtp.a $(obj).target/src/modules/libvideo_capture_module.a $(obj).target/src/modules/libwebrtc_utility.a $(obj).target/src/modules/libaudio_coding_module.a $(obj).target/src/modules/libCNG.a $(obj).target/src/common_audio/libsignal_processing.a $(obj).target/src/modules/libG711.a $(obj).target/src/modules/libG722.a $(obj).target/src/modules/libiLBC.a $(obj).target/src/modules/libiSAC.a $(obj).target/src/modules/libiSACFix.a $(obj).target/src/modules/libPCM16B.a $(obj).target/src/modules/libNetEq.a $(obj).target/src/common_audio/libresampler.a $(obj).target/src/common_audio/libvad.a $(obj).target/src/system_wrappers/source/libsystem_wrappers.a $(obj).target/src/modules/libwebrtc_video_coding.a $(obj).target/src/modules/libwebrtc_i420.a $(obj).target/src/modules/libwebrtc_vp8.a $(obj).target/src/common_video/libwebrtc_libyuv.a $(obj).target/third_party/libyuv/libyuv.a $(obj).target/third_party/libvpx/libvpx.a $(obj).target/src/modules/libvideo_render_module.a $(obj).target/src/video_engine/libvideo_engine_core.a $(obj).target/src/common_video/libwebrtc_jpeg.a $(obj).target/third_party/libjpeg_turbo/libjpeg_turbo.a $(obj).target/src/modules/libmedia_file.a $(obj).target/src/modules/librtp_rtcp.a $(obj).target/src/modules/libudp_transport.a $(obj).target/src/modules/libvideo_processing.a $(obj).target/src/modules/libvideo_processing_sse2.a $(obj).target/src/voice_engine/libvoice_engine_core.a $(obj).target/src/modules/libaudio_conference_mixer.a $(obj).target/src/modules/libaudio_processing.a $(obj).target/src/modules/libaec.a $(obj).target/src/modules/libapm_util.a $(obj).target/src/modules/libaec_sse2.a $(obj).target/src/modules/libaecm.a $(obj).target/src/modules/libagc.a $(obj).target/src/modules/libns.a $(obj).target/src/modules/libaudioproc_debug_proto.a $(obj).target/third_party/protobuf/libprotobuf_lite.a $(obj).target/src/modules/libaudio_device.a $(obj).target/third_party/libjingle/libjingle_p2p.a $(obj).target/third_party/libjingle/libjingle.a
$(builddir)/peerconnection_client: TOOLSET := $(TOOLSET)
$(builddir)/peerconnection_client: $(OBJS) $(obj).target/third_party/libjingle/libjingle_app.a $(obj).target/third_party/jsoncpp/libjsoncpp.a $(obj).target/third_party/libsrtp/libsrtp.a $(obj).target/src/modules/libvideo_capture_module.a $(obj).target/src/modules/libwebrtc_utility.a $(obj).target/src/modules/libaudio_coding_module.a $(obj).target/src/modules/libCNG.a $(obj).target/src/common_audio/libsignal_processing.a $(obj).target/src/modules/libG711.a $(obj).target/src/modules/libG722.a $(obj).target/src/modules/libiLBC.a $(obj).target/src/modules/libiSAC.a $(obj).target/src/modules/libiSACFix.a $(obj).target/src/modules/libPCM16B.a $(obj).target/src/modules/libNetEq.a $(obj).target/src/common_audio/libresampler.a $(obj).target/src/common_audio/libvad.a $(obj).target/src/system_wrappers/source/libsystem_wrappers.a $(obj).target/src/modules/libwebrtc_video_coding.a $(obj).target/src/modules/libwebrtc_i420.a $(obj).target/src/modules/libwebrtc_vp8.a $(obj).target/src/common_video/libwebrtc_libyuv.a $(obj).target/third_party/libyuv/libyuv.a $(obj).target/third_party/libvpx/libvpx.a $(obj).target/src/modules/libvideo_render_module.a $(obj).target/src/video_engine/libvideo_engine_core.a $(obj).target/src/common_video/libwebrtc_jpeg.a $(obj).target/third_party/libjpeg_turbo/libjpeg_turbo.a $(obj).target/src/modules/libmedia_file.a $(obj).target/src/modules/librtp_rtcp.a $(obj).target/src/modules/libudp_transport.a $(obj).target/src/modules/libvideo_processing.a $(obj).target/src/modules/libvideo_processing_sse2.a $(obj).target/src/voice_engine/libvoice_engine_core.a $(obj).target/src/modules/libaudio_conference_mixer.a $(obj).target/src/modules/libaudio_processing.a $(obj).target/src/modules/libaec.a $(obj).target/src/modules/libapm_util.a $(obj).target/src/modules/libaec_sse2.a $(obj).target/src/modules/libaecm.a $(obj).target/src/modules/libagc.a $(obj).target/src/modules/libns.a $(obj).target/src/modules/libaudioproc_debug_proto.a $(obj).target/third_party/protobuf/libprotobuf_lite.a $(obj).target/src/modules/libaudio_device.a $(obj).target/third_party/libjingle/libjingle_p2p.a $(obj).target/third_party/libjingle/libjingle.a FORCE_DO_CMD
$(call do_cmd,link)
all_deps += $(builddir)/peerconnection_client
# Add target alias
.PHONY: peerconnection_client
peerconnection_client: $(builddir)/peerconnection_client
# Add executable to "all" target.
.PHONY: all
all: $(builddir)/peerconnection_client
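The Debug/Release split in this generated file relies on nothing more than computed variable names keyed off BUILDTYPE, which is assumed to be set by the top-level generated Makefile (not part of this diff). A self-contained sketch of the same trick:

BUILDTYPE ?= Debug
FLAGS_Debug := -O0 -g
FLAGS_Release := -O2 -fno-ident
show-flags:
	@echo $(FLAGS_$(BUILDTYPE))

Running "make show-flags BUILDTYPE=Release" prints the Release flag set; with no argument it falls back to Debug.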


@ -0,0 +1,174 @@
# This file is generated by gyp; do not edit.
TOOLSET := target
TARGET := peerconnection_server
DEFS_Debug := '-D_FILE_OFFSET_BITS=64' \
'-DCHROMIUM_BUILD' \
'-DUSE_NSS=1' \
'-DTOOLKIT_USES_GTK=1' \
'-DGTK_DISABLE_SINGLE_INCLUDES=1' \
'-DENABLE_REMOTING=1' \
'-DENABLE_P2P_APIS=1' \
'-DENABLE_CONFIGURATION_POLICY' \
'-DENABLE_INPUT_SPEECH' \
'-DENABLE_NOTIFICATIONS' \
'-DENABLE_GPU=1' \
'-DENABLE_EGLIMAGE=1' \
'-DUSE_SKIA=1' \
'-DENABLE_REGISTER_PROTOCOL_HANDLER=1' \
'-DENABLE_WEB_INTENTS=1' \
'-DENABLE_PLUGIN_INSTALLATION=1' \
'-DWEBRTC_TARGET_PC' \
'-DWEBRTC_LINUX' \
'-DWEBRTC_THREAD_RR' \
'-D__STDC_FORMAT_MACROS' \
'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
'-D_DEBUG'
# Flags passed to all source files.
CFLAGS_Debug := -Werror \
-pthread \
-fno-exceptions \
-fno-strict-aliasing \
-Wall \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-fvisibility=hidden \
-pipe \
-fPIC \
-Wextra \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-O0 \
-g
# Flags passed to only C files.
CFLAGS_C_Debug :=
# Flags passed to only C++ files.
CFLAGS_CC_Debug := -fno-rtti \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare
INCS_Debug := -Isrc \
-I. \
-Ithird_party/libjingle/source
DEFS_Release := '-D_FILE_OFFSET_BITS=64' \
'-DCHROMIUM_BUILD' \
'-DUSE_NSS=1' \
'-DTOOLKIT_USES_GTK=1' \
'-DGTK_DISABLE_SINGLE_INCLUDES=1' \
'-DENABLE_REMOTING=1' \
'-DENABLE_P2P_APIS=1' \
'-DENABLE_CONFIGURATION_POLICY' \
'-DENABLE_INPUT_SPEECH' \
'-DENABLE_NOTIFICATIONS' \
'-DENABLE_GPU=1' \
'-DENABLE_EGLIMAGE=1' \
'-DUSE_SKIA=1' \
'-DENABLE_REGISTER_PROTOCOL_HANDLER=1' \
'-DENABLE_WEB_INTENTS=1' \
'-DENABLE_PLUGIN_INSTALLATION=1' \
'-DWEBRTC_TARGET_PC' \
'-DWEBRTC_LINUX' \
'-DWEBRTC_THREAD_RR' \
'-D__STDC_FORMAT_MACROS' \
'-DNDEBUG' \
'-DNVALGRIND' \
'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
# Flags passed to all source files.
CFLAGS_Release := -Werror \
-pthread \
-fno-exceptions \
-fno-strict-aliasing \
-Wall \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-fvisibility=hidden \
-pipe \
-fPIC \
-Wextra \
-Wno-unused-parameter \
-Wno-missing-field-initializers \
-O2 \
-fno-ident \
-fdata-sections \
-ffunction-sections
# Flags passed to only C files.
CFLAGS_C_Release :=
# Flags passed to only C++ files.
CFLAGS_CC_Release := -fno-rtti \
-fno-threadsafe-statics \
-fvisibility-inlines-hidden \
-Wsign-compare
INCS_Release := -Isrc \
-I. \
-Ithird_party/libjingle/source
OBJS := $(obj).target/$(TARGET)/third_party/libjingle/source/talk/examples/peerconnection/server/data_socket.o \
$(obj).target/$(TARGET)/third_party/libjingle/source/talk/examples/peerconnection/server/main.o \
$(obj).target/$(TARGET)/third_party/libjingle/source/talk/examples/peerconnection/server/peer_channel.o \
$(obj).target/$(TARGET)/third_party/libjingle/source/talk/examples/peerconnection/server/utils.o
# Add to the list of files we specially track dependencies for.
all_deps += $(OBJS)
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.
$(OBJS): TOOLSET := $(TOOLSET)
$(OBJS): GYP_CFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_C_$(BUILDTYPE))
$(OBJS): GYP_CXXFLAGS := $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) $(CFLAGS_$(BUILDTYPE)) $(CFLAGS_CC_$(BUILDTYPE))
# Suffix rules, putting all outputs into $(obj).
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(srcdir)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
# Try building from generated source, too.
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj).$(TOOLSET)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
$(obj).$(TOOLSET)/$(TARGET)/%.o: $(obj)/%.cc FORCE_DO_CMD
@$(call do_cmd,cxx,1)
# End of this set of suffix rules
### Rules for final target.
LDFLAGS_Debug := -pthread \
-Wl,-z,noexecstack \
-fPIC \
-B$(builddir)/../../third_party/gold
LDFLAGS_Release := -pthread \
-Wl,-z,noexecstack \
-fPIC \
-B$(builddir)/../../third_party/gold \
-Wl,-O1 \
-Wl,--as-needed \
-Wl,--gc-sections
LIBS :=
$(builddir)/peerconnection_server: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))
$(builddir)/peerconnection_server: LIBS := $(LIBS)
$(builddir)/peerconnection_server: LD_INPUTS := $(OBJS)
$(builddir)/peerconnection_server: TOOLSET := $(TOOLSET)
$(builddir)/peerconnection_server: $(OBJS) FORCE_DO_CMD
$(call do_cmd,link)
all_deps += $(builddir)/peerconnection_server
# Add target alias
.PHONY: peerconnection_server
peerconnection_server: $(builddir)/peerconnection_server
# Add executable to "all" target.
.PHONY: all
all: $(builddir)/peerconnection_server
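Both generated files use the same linking pattern: every flag and input is attached to the output path as a target-specific variable, so one generic do_cmd,link recipe (defined in the shared gyp Makefile preamble, which is not part of this diff) can link any target. A minimal sketch of that shape, with a plain recipe standing in for do_cmd:

hello: LDLIBS := -lm
hello: hello.o
	$(CC) $(LDFLAGS) $^ $(LDLIBS) -o $@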


@ -0,0 +1,29 @@
Copyright (c) 2011, The WebRTC project authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Google nor the names of its contributors may
be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Some files were not shown because too many files have changed in this diff.