update OpenHarmony 2.0 Canary

This commit is contained in:
mamingshuai 2021-06-02 00:34:27 +08:00
parent a358ab2586
commit 1cd76e1975
153 changed files with 29701 additions and 61 deletions

26
.clang-format Normal file
View File

@ -0,0 +1,26 @@
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This isn't meant to be authoritative, but it's good enough to be useful.
# Still use your best judgement for formatting decisions: clang-format
# sometimes makes strange choices.
BasedOnStyle: Google
AllowShortFunctionsOnASingleLine: Inline
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
ConstructorInitializerAllOnOneLineOrOnePerLine: false
Cpp11BracedListStyle: false
IndentCaseLabels: false
DerivePointerBinding: false

11
.editorconfig Normal file
View File

@ -0,0 +1,11 @@
root = true
[*]
charset = utf-8
indent_style = space
indent_size = 2
insert_final_newline = true
end_of_line = lf
[CMakeLists.txt]
indent_style = tab

15
.gitattributes vendored Normal file
View File

@ -0,0 +1,15 @@
*.tgz filter=lfs diff=lfs merge=lfs -text
*.trp filter=lfs diff=lfs merge=lfs -text
*.apk filter=lfs diff=lfs merge=lfs -text
*.jar filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.asm filter=lfs diff=lfs merge=lfs -text
*.8svn filter=lfs diff=lfs merge=lfs -text
*.9svn filter=lfs diff=lfs merge=lfs -text
*.dylib filter=lfs diff=lfs merge=lfs -text
*.exe filter=lfs diff=lfs merge=lfs -text
*.a filter=lfs diff=lfs merge=lfs -text
*.so filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.dll filter=lfs diff=lfs merge=lfs -text

59
.github/workflows/linux.yml vendored Normal file
View File

@ -0,0 +1,59 @@
name: Linux
on:
pull_request:
push:
release:
types: published
jobs:
build:
runs-on: [ubuntu-latest]
container:
image: centos:7
steps:
- uses: actions/checkout@v2
- name: Install dependencies
run: |
curl -L -O https://github.com/Kitware/CMake/releases/download/v3.16.4/cmake-3.16.4-Linux-x86_64.sh
chmod +x cmake-3.16.4-Linux-x86_64.sh
./cmake-3.16.4-Linux-x86_64.sh --skip-license --prefix=/usr/local
curl -L -O https://www.mirrorservice.org/sites/dl.fedoraproject.org/pub/epel/7/x86_64/Packages/p/p7zip-16.02-10.el7.x86_64.rpm
curl -L -O https://www.mirrorservice.org/sites/dl.fedoraproject.org/pub/epel/7/x86_64/Packages/p/p7zip-plugins-16.02-10.el7.x86_64.rpm
rpm -U --quiet p7zip-16.02-10.el7.x86_64.rpm
rpm -U --quiet p7zip-plugins-16.02-10.el7.x86_64.rpm
yum install -y make gcc-c++
- name: Build ninja
shell: bash
run: |
cmake -DCMAKE_BUILD_TYPE=Release -B build
cmake --build build --parallel --config Release
strip build/ninja
- name: Test ninja
run: ./ninja_test
working-directory: build
- name: Create ninja archive
run: |
mkdir artifact
7z a artifact/ninja-linux.zip ./build/ninja
# Upload ninja binary archive as an artifact
- name: Upload artifact
uses: actions/upload-artifact@v1
with:
name: ninja-binary-archives
path: artifact
- name: Upload release asset
if: github.event.action == 'published'
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./artifact/ninja-linux.zip
asset_name: ninja-linux.zip
asset_content_type: application/zip

53
.github/workflows/macos.yml vendored Normal file
View File

@ -0,0 +1,53 @@
name: macOS
on:
pull_request:
push:
release:
types: published
jobs:
build:
runs-on: macOS-latest
steps:
- uses: actions/checkout@v2
- name: Install dependencies
run: brew install re2c p7zip cmake
- name: Build ninja
shell: bash
env:
MACOSX_DEPLOYMENT_TARGET: 10.12
run: |
cmake -DCMAKE_BUILD_TYPE=Release -B build
cmake --build build --parallel --config Release
- name: Test ninja
run: ctest -vv
working-directory: build
- name: Create ninja archive
shell: bash
run: |
mkdir artifact
7z a artifact/ninja-mac.zip ./build/ninja
# Upload ninja binary archive as an artifact
- name: Upload artifact
uses: actions/upload-artifact@v1
with:
name: ninja-binary-archives
path: artifact
- name: Upload release asset
if: github.event.action == 'published'
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./artifact/ninja-mac.zip
asset_name: ninja-mac.zip
asset_content_type: application/zip

51
.github/workflows/windows.yml vendored Normal file
View File

@ -0,0 +1,51 @@
name: Windows
on:
pull_request:
push:
release:
types: published
jobs:
build:
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install dependencies
run: choco install re2c
- name: Build ninja
shell: bash
run: |
cmake -DCMAKE_BUILD_TYPE=Release -B build
cmake --build build --parallel --config Release
- name: Test ninja
run: .\ninja_test.exe
working-directory: build/Release
- name: Create ninja archive
shell: bash
run: |
mkdir artifact
7z a artifact/ninja-win.zip ./build/Release/ninja.exe
# Upload ninja binary archive as an artifact
- name: Upload artifact
uses: actions/upload-artifact@v1
with:
name: ninja-binary-archives
path: artifact
- name: Upload release asset
if: github.event.action == 'published'
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ github.event.release.upload_url }}
asset_path: ./artifact/ninja-win.zip
asset_name: ninja-win.zip
asset_content_type: application/zip

40
.gitignore vendored Normal file
View File

@ -0,0 +1,40 @@
*.pyc
*.obj
*.exe
*.pdb
*.ilk
/build*/
/build.ninja
/ninja
/ninja.bootstrap
/build_log_perftest
/canon_perftest
/clparser_perftest
/depfile_parser_perftest
/hash_collision_bench
/ninja_test
/manifest_parser_perftest
/graph.png
/doc/manual.html
/doc/doxygen
*.patch
.DS_Store
# Eclipse project files
.project
.cproject
# SublimeText project files
*.sublime-project
*.sublime-workspace
# Ninja output
.ninja_deps
.ninja_log
# Visual Studio Code project files
/.vscode/
/.ccls-cache/
# Qt Creator project files
/CMakeLists.txt.user

36
.travis.yml Normal file
View File

@ -0,0 +1,36 @@
matrix:
include:
- os: linux
dist: precise
compiler: gcc
- os: linux
dist: precise
compiler: clang
- os: linux
dist: trusty
compiler: gcc
- os: linux
dist: trusty
compiler: clang
- os: linux
dist: xenial
compiler: gcc
- os: linux
dist: xenial
compiler: clang
- os: osx
osx_image: xcode10
- os: osx
osx_image: xcode10.1
sudo: false
language: cpp
before_install:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install re2c ; fi
- if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install re2c python ; fi
script:
- ./misc/ci.py
- python3 configure.py --bootstrap
- ./ninja all
- ./ninja_test --gtest_filter=-SubprocessTest.SetWithLots
- ./misc/ninja_syntax_test.py
- ./misc/output_test.py

140
CMakeLists.txt Normal file
View File

@ -0,0 +1,140 @@
cmake_minimum_required(VERSION 3.15)
project(ninja)
# --- optional link-time optimization
if(CMAKE_BUILD_TYPE MATCHES "Release")
include(CheckIPOSupported)
check_ipo_supported(RESULT lto_supported OUTPUT error)
if(lto_supported)
message(STATUS "IPO / LTO enabled")
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
else()
message(STATUS "IPO / LTO not supported: <${error}>")
endif()
endif()
# --- compiler flags
if(MSVC)
set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
string(APPEND CMAKE_CXX_FLAGS " /W4 /GR- /Zc:__cplusplus")
else()
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag(-Wno-deprecated flag_no_deprecated)
if(flag_no_deprecated)
string(APPEND CMAKE_CXX_FLAGS " -Wno-deprecated")
endif()
check_cxx_compiler_flag(-fdiagnostics-color flag_color_diag)
if(flag_color_diag)
string(APPEND CMAKE_CXX_FLAGS " -fdiagnostics-color")
endif()
endif()
# --- optional re2c
find_program(RE2C re2c)
if(RE2C)
# the depfile parser and ninja lexers are generated using re2c.
function(re2c IN OUT)
add_custom_command(DEPENDS ${IN} OUTPUT ${OUT}
COMMAND ${RE2C} -b -i --no-generation-date -o ${OUT} ${IN}
)
endfunction()
re2c(${PROJECT_SOURCE_DIR}/src/depfile_parser.in.cc ${PROJECT_BINARY_DIR}/depfile_parser.cc)
re2c(${PROJECT_SOURCE_DIR}/src/lexer.in.cc ${PROJECT_BINARY_DIR}/lexer.cc)
add_library(libninja-re2c OBJECT ${PROJECT_BINARY_DIR}/depfile_parser.cc ${PROJECT_BINARY_DIR}/lexer.cc)
else()
message(WARNING "re2c was not found; changes to src/*.in.cc will not affect your build.")
add_library(libninja-re2c OBJECT src/depfile_parser.cc src/lexer.cc)
endif()
target_include_directories(libninja-re2c PRIVATE src)
# Core source files all build into ninja library.
add_library(libninja OBJECT
src/build_log.cc
src/build.cc
src/clean.cc
src/clparser.cc
src/dyndep.cc
src/dyndep_parser.cc
src/debug_flags.cc
src/deps_log.cc
src/disk_interface.cc
src/edit_distance.cc
src/eval_env.cc
src/graph.cc
src/graphviz.cc
src/line_printer.cc
src/manifest_parser.cc
src/metrics.cc
src/parser.cc
src/state.cc
src/string_piece_util.cc
src/util.cc
src/version.cc
)
if(WIN32)
target_sources(libninja PRIVATE
src/subprocess-win32.cc
src/includes_normalize-win32.cc
src/msvc_helper-win32.cc
src/msvc_helper_main-win32.cc
src/getopt.c
)
if(MSVC)
target_sources(libninja PRIVATE src/minidump-win32.cc)
endif()
else()
target_sources(libninja PRIVATE src/subprocess-posix.cc)
endif()
#Fixes GetActiveProcessorCount on MinGW
if(MINGW)
target_compile_definitions(libninja PRIVATE _WIN32_WINNT=0x0601 __USE_MINGW_ANSI_STDIO=1)
endif()
# Main executable is library plus main() function.
add_executable(ninja src/ninja.cc)
target_link_libraries(ninja PRIVATE libninja libninja-re2c)
# Tests all build into ninja_test executable.
add_executable(ninja_test
src/build_log_test.cc
src/build_test.cc
src/clean_test.cc
src/clparser_test.cc
src/depfile_parser_test.cc
src/deps_log_test.cc
src/disk_interface_test.cc
src/dyndep_parser_test.cc
src/edit_distance_test.cc
src/graph_test.cc
src/lexer_test.cc
src/manifest_parser_test.cc
src/ninja_test.cc
src/state_test.cc
src/string_piece_util_test.cc
src/subprocess_test.cc
src/test.cc
src/util_test.cc
)
if(WIN32)
target_sources(ninja_test PRIVATE src/includes_normalize_test.cc src/msvc_helper_test.cc)
endif()
target_link_libraries(ninja_test PRIVATE libninja libninja-re2c)
foreach(perftest
build_log_perftest
canon_perftest
clparser_perftest
depfile_parser_perftest
hash_collision_bench
manifest_parser_perftest
)
add_executable(${perftest} src/${perftest}.cc)
target_link_libraries(${perftest} PRIVATE libninja libninja-re2c)
endforeach()
enable_testing()
add_test(NinjaTest ninja_test)
install(TARGETS ninja DESTINATION bin)

34
CONTRIBUTING.md Normal file
View File

@ -0,0 +1,34 @@
# How to successfully make changes to Ninja
We're very wary of changes that increase the complexity of Ninja (in particular,
new build file syntax or command-line flags) or increase the maintenance burden
of Ninja. Ninja is already successfully used by hundreds of developers for large
projects and it already achieves (most of) the goals we set out for it to do.
It's probably best to discuss new feature ideas on the
[mailing list](https://groups.google.com/forum/#!forum/ninja-build) or in an
issue before creating a PR.
## Coding guidelines
Generally it's the
[Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) with
a few additions:
* Any code merged into the Ninja codebase which will be part of the main
executable must compile as C++03. You may use C++11 features in a test or an
unimportant tool if you guard your code with `#if __cplusplus >= 201103L`.
* We have used `using namespace std;` a lot in the past. For new contributions,
please try to avoid relying on it and instead whenever possible use `std::`.
However, please do not change existing code simply to add `std::` unless your
contribution already needs to change that line of code anyway.
* All source files should have the Google Inc. license header.
* Use `///` for [Doxygen](http://www.doxygen.nl/) (use `\a` to refer to
arguments).
* It's not necessary to document each argument, especially when they're
relatively self-evident (e.g. in
`CanonicalizePath(string* path, string* err)`, the arguments are hopefully
obvious).
If you're unsure about code formatting, please use
[clang-format](https://clang.llvm.org/docs/ClangFormat.html). However, please do
not format code that is not otherwise part of your contribution.

202
COPYING Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

66
OAT.xml Normal file
View File

@ -0,0 +1,66 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Copyright (c) 2021 Huawei Device Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Notes:
This is project config file for OpenHarmony OSS Audit Tool, if you have any questions or concerns, please email chenyaxun.
-->
<!-- OAT(OSS Audit Tool) configuration guide:
basedir: Root dir, the basedir + project path is the real source file location.
licensefile:
1.If the project don't have "LICENSE" in root dir, please define all the license files in this project in <licensefile>, OAT will check license files according to this rule.
tasklist(only for batch mode):
1. task: Define oat check thread, each task will start a new thread.
2. task name: Only an name, no practical effect.
3. task policy: Default policy for projects under this task, this field is required and the specified policy must defined in policylist.
4. task filter: Default filefilter for projects under this task, this field is required and the specified filefilter must defined in filefilterlist.
5. task project: Projects to be checked, the path field define the source root dir of the project.
policyList:
1. policy: All policyitems will be merged to default OAT.xml rules, the name of policy doesn't affect OAT check process.
2. policyitem: The fields type, name, path, desc is required, and the fields rule, group, filefilter is optional,the default value is:
<policyitem type="" name="" path="" desc="" rule="may" group="defaultGroup" filefilter="defaultPolicyFilter"/>
3. policyitem type:
"compatibility" is used to check license compatibility in the specified path;
"license" is used to check source license header in the specified path;
"copyright" is used to check source copyright header in the specified path;
"import" is used to check source dependency in the specified path, such as import ... ,include ...
"filetype" is used to check file type in the specified path, supported file types: archive, binary
"filename" is used to check whether the specified file exists in the specified path(support projectroot in default OAT.xml), supported file names: LICENSE, README, README.OpenSource
4. policyitem name: This field is used for define the license, copyright, "*" means match all, the "!" prefix means could not match this value. For example, "!GPL" means can not use GPL license.
5. policyitem path: This field is used for define the source file scope to apply this policyitem, the "!" prefix means exclude the files. For example, "!.*/lib/.*" means files in lib dir will be exclude while process this policyitem.
6. policyitem rule and group: These two fields are used together to merge policy results. "may" policyitems in the same group means any one in this group passed, the result will be passed.
7. policyitem filefilter: Used to bind filefilter which define filter rules.
8. filefilter: Filter rules, the type filename is used to filter file name, the type filepath is used to filter file path.
Note:If the text contains special characters, please escape them according to the following rules:
" == &gt;
& == &gt;
' == &gt;
< == &gt;
> == &gt;
-->
<configuration>
<oatconfig>
<licensefile>COPYING</licensefile>
<filefilterlist>
<filefilter name="defaultPolicyFilter" desc="">
<filteritem type="filepath" name="misc/packaging/ninja.spec" desc="ninja spec file, not license statement."/>
</filefilter>
</filefilterlist>
</oatconfig>
</configuration>

11
README.OpenSource Normal file
View File

@ -0,0 +1,11 @@
[
{
"Name": "ninja",
"License": "Apache License V2.0",
"License File": "COPYING",
"Version Number": "1.10.1",
"Owner": "wangweichao2@huawei.com",
"Upstream URL": "https://ninja-build.org/",
"Description": "a small build system with a focus on speed"
}
]

View File

@ -1,36 +0,0 @@
# third_party_ninja
#### Description
Third-party open-source software ninja | 三方开源软件ninja
#### Software Architecture
Software architecture description
#### Installation
1. xxxx
2. xxxx
3. xxxx
#### Instructions
1. xxxx
2. xxxx
3. xxxx
#### Contribution
1. Fork the repository
2. Create Feat_xxx branch
3. Commit your code
4. Create Pull Request
#### Gitee Feature
1. You can use Readme\_XXX.md to support different languages, such as Readme\_en.md, Readme\_zh.md
2. Gitee blog [blog.gitee.com](https://blog.gitee.com)
3. Explore open source project [https://gitee.com/explore](https://gitee.com/explore)
4. The most valuable open source project [GVP](https://gitee.com/gvp)
5. The manual of Gitee [https://gitee.com/help](https://gitee.com/help)
6. The most popular members [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)

View File

@ -1,37 +1,51 @@
# third_party_ninja
# Ninja
#### 介绍
Third-party open-source software ninja | 三方开源软件ninja
Ninja is a small build system with a focus on speed.
https://ninja-build.org/
#### 软件架构
软件架构说明
See [the manual](https://ninja-build.org/manual.html) or
`doc/manual.asciidoc` included in the distribution for background
and more details.
Binaries for Linux, Mac, and Windows are available at
[GitHub](https://github.com/ninja-build/ninja/releases).
Run `./ninja -h` for Ninja help.
#### 安装教程
Installation is not necessary because the only required file is the
resulting ninja binary. However, to enable features like Bash
completion and Emacs and Vim editing modes, some files in misc/ must be
copied to appropriate locations.
1. xxxx
2. xxxx
3. xxxx
If you're interested in making changes to Ninja, read
[CONTRIBUTING.md](CONTRIBUTING.md) first.
#### 使用说明
## Building Ninja itself
1. xxxx
2. xxxx
3. xxxx
You can either build Ninja via the custom generator script written in Python or
via CMake. For more details see
[the wiki](https://github.com/ninja-build/ninja/wiki).
#### 参与贡献
### Python
1. Fork 本仓库
2. 新建 Feat_xxx 分支
3. 提交代码
4. 新建 Pull Request
```
./configure.py --bootstrap
```
This will generate the `ninja` binary and a `build.ninja` file you can now use
to build Ninja with itself.
#### 特技
### CMake
1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md
2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com)
3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目
4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目
5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help)
6. Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)
```
cmake -Bbuild-cmake -H.
cmake --build build-cmake
```
The `ninja` binary will now be inside the `build-cmake` directory (you can
choose any other name you like).
To run the unit tests:
```
./build-cmake/ninja_test
```

33
RELEASING Normal file
View File

@ -0,0 +1,33 @@
Notes to myself on all the steps to make for a Ninja release.
Push new release branch:
1. Run afl-fuzz for a day or so and run ninja_test
2. Consider sending a heads-up to the ninja-build mailing list first
3. Make sure branches 'master' and 'release' are synced up locally
4. Update src/version.cc with new version (with ".git"), then
git commit -am 'mark this 1.5.0.git'
5. git checkout release; git merge master
6. Fix version number in src/version.cc (it will likely conflict in the above)
7. Fix version in doc/manual.asciidoc (exists only on release branch)
8. commit, tag, push (don't forget to push --tags)
git commit -am v1.5.0; git push origin release
git tag v1.5.0; git push --tags
# Push the 1.5.0.git change on master too:
git checkout master; git push origin master
9. Construct release notes from prior notes
credits: git shortlog -s --no-merges REV..
Release on github:
1. https://github.com/blog/1547-release-your-software
Add binaries to https://github.com/ninja-build/ninja/releases
Make announcement on mailing list:
1. copy old mail
Update website:
1. Make sure your ninja checkout is on the v1.5.0 tag
2. Clone https://github.com/ninja-build/ninja-build.github.io
3. In that repo, `./update-docs.sh`
4. Update index.html with newest version and link to release notes
5. git commit -m 'run update-docs.sh, 1.5.0 release'
6. git push origin master

61
appveyor.yml Normal file
View File

@ -0,0 +1,61 @@
version: 1.0.{build}
image:
- Visual Studio 2017
- Ubuntu1804
environment:
CLICOLOR_FORCE: 1
CHERE_INVOKING: 1 # Tell Bash to inherit the current working directory
matrix:
- MSYSTEM: MINGW64
- MSYSTEM: MSVC
- MSYSTEM: LINUX
matrix:
exclude:
- image: Visual Studio 2017
MSYSTEM: LINUX
- image: Ubuntu1804
MSYSTEM: MINGW64
- image: Ubuntu1804
MSYSTEM: MSVC
for:
-
matrix:
only:
- MSYSTEM: MINGW64
build_script:
ps: "C:\\msys64\\usr\\bin\\bash -lc @\"\n
pacman -S --quiet --noconfirm --needed re2c 2>&1\n
./configure.py --bootstrap --platform mingw 2>&1\n
./ninja all\n
./ninja_test 2>&1\n
./misc/ninja_syntax_test.py 2>&1\n\"@"
-
matrix:
only:
- MSYSTEM: MSVC
build_script:
- cmd: >-
call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvars64.bat"
python configure.py --bootstrap
ninja.bootstrap.exe all
ninja_test
python misc/ninja_syntax_test.py
- matrix:
only:
- image: Ubuntu1804
build_script:
- ./configure.py --bootstrap
- ./ninja all
- ./ninja_test
- misc/ninja_syntax_test.py
- misc/output_test.py
test: off

709
configure.py Executable file
View File

@ -0,0 +1,709 @@
#!/usr/bin/env python
#
# Copyright 2001 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script that generates the build.ninja for ninja itself.
Projects that use ninja themselves should either write a similar script
or use a meta-build system that supports Ninja output."""
from __future__ import print_function
from optparse import OptionParser
import os
import pipes
import string
import subprocess
import sys
sourcedir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(sourcedir, 'misc'))
import ninja_syntax
class Platform(object):
    """Represents a host/target platform and its specific build attributes.

    Pass an explicit platform name, or None to auto-detect: the value of
    sys.platform is then normalized to one of the canonical names
    returned by known_platforms().
    """

    def __init__(self, platform):
        self._platform = platform
        if self._platform is not None:
            return

        # Auto-detect: map a sys.platform prefix onto a canonical name.
        # Names with no matching branch (e.g. 'darwin') pass through as-is.
        self._platform = sys.platform
        if self._platform.startswith('linux'):
            self._platform = 'linux'
        elif self._platform.startswith('freebsd'):
            self._platform = 'freebsd'
        elif self._platform.startswith('gnukfreebsd'):
            self._platform = 'freebsd'
        elif self._platform.startswith('openbsd'):
            self._platform = 'openbsd'
        elif self._platform.startswith('solaris') or self._platform == 'sunos5':
            self._platform = 'solaris'
        elif self._platform.startswith('mingw'):
            self._platform = 'mingw'
        elif self._platform.startswith('win'):
            self._platform = 'msvc'
        elif self._platform.startswith('bitrig'):
            self._platform = 'bitrig'
        elif self._platform.startswith('netbsd'):
            self._platform = 'netbsd'
        elif self._platform.startswith('aix'):
            self._platform = 'aix'
        elif self._platform.startswith('os400'):
            self._platform = 'os400'
        elif self._platform.startswith('dragonfly'):
            self._platform = 'dragonfly'

    @staticmethod
    def known_platforms():
        """Names accepted by --platform/--host.

        Fix: 'os400' is detected and handled above (see is_os400_pase)
        but was missing from this list, so it could never be selected
        explicitly on the command line.
        """
        return ['linux', 'darwin', 'freebsd', 'openbsd', 'solaris', 'sunos5',
                'mingw', 'msvc', 'gnukfreebsd', 'bitrig', 'netbsd', 'aix',
                'os400', 'dragonfly']

    def platform(self):
        return self._platform

    def is_linux(self):
        return self._platform == 'linux'

    def is_mingw(self):
        return self._platform == 'mingw'

    def is_msvc(self):
        return self._platform == 'msvc'

    def msvc_needs_fs(self):
        """Probe cl.exe's help output for the /FS flag (newer MSVC only)."""
        popen = subprocess.Popen(['cl', '/nologo', '/?'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        out, err = popen.communicate()
        return b'/FS' in out

    def is_windows(self):
        return self.is_mingw() or self.is_msvc()

    def is_solaris(self):
        return self._platform == 'solaris'

    def is_aix(self):
        return self._platform == 'aix'

    def is_os400_pase(self):
        # PASE can also be recognized via uname when building on the
        # system itself with an 'aix'-flavored Python.
        return self._platform == 'os400' or os.uname().sysname.startswith('OS400')

    def uses_usr_local(self):
        """BSD-family systems keep third-party headers/libs in /usr/local."""
        return self._platform in ('freebsd', 'openbsd', 'bitrig', 'dragonfly',
                                  'netbsd')

    def supports_ppoll(self):
        return self._platform in ('freebsd', 'linux', 'openbsd', 'bitrig',
                                  'dragonfly')

    def supports_ninja_browse(self):
        return (not self.is_windows()
                and not self.is_solaris()
                and not self.is_aix())

    def can_rebuild_in_place(self):
        # Windows and AIX cannot overwrite a running executable.
        return not (self.is_windows() or self.is_aix())
class Bootstrap:
    """API shim for ninja_syntax.Writer that instead runs the commands.

    Used to bootstrap Ninja from scratch.  In --bootstrap mode this
    class is used to execute all the commands to build an executable.
    It also proxies all calls to an underlying ninja_syntax.Writer, to
    behave like non-bootstrap mode.
    """

    def __init__(self, writer, verbose=False):
        """writer: the ninja_syntax.Writer being proxied.
        verbose: if true, echo each command line before running it."""
        self.writer = writer
        self.verbose = verbose
        # Map of variable name => expanded variable value.
        self.vars = {}
        # Map of rule name => dict of rule attributes.
        self.rules = {
            'phony': {}
        }

    def comment(self, text):
        return self.writer.comment(text)

    def newline(self):
        return self.writer.newline()

    def variable(self, key, val):
        # In bootstrap mode, we have no ninja process to catch /showIncludes
        # output, so strip that flag from the value we expand commands with.
        self.vars[key] = self._expand(val).replace('/showIncludes', '')
        return self.writer.variable(key, val)

    def rule(self, name, **kwargs):
        self.rules[name] = kwargs
        return self.writer.rule(name, **kwargs)

    def build(self, outputs, rule, inputs=None, **kwargs):
        ruleattr = self.rules[rule]
        cmd = ruleattr.get('command')
        if cmd is None:  # A phony rule, for example.
            return

        # Implement just enough of Ninja variable expansion etc. to
        # make the bootstrap build work.
        local_vars = {
            'in': self._expand_paths(inputs),
            'out': self._expand_paths(outputs)
        }
        for key, val in kwargs.get('variables', []):
            local_vars[key] = ' '.join(ninja_syntax.as_list(val))
        self._run_command(self._expand(cmd, local_vars))

        return self.writer.build(outputs, rule, inputs, **kwargs)

    def default(self, paths):
        return self.writer.default(paths)

    def _expand_paths(self, paths):
        """Expand $vars in an array of paths, e.g. from a 'build' block."""
        paths = ninja_syntax.as_list(paths)
        return ' '.join(map(self._shell_escape, (map(self._expand, paths))))

    def _expand(self, str, local_vars=None):
        """Expand $vars in a string.

        Fix: the previous default of ``local_vars={}`` was a shared
        mutable default argument; replaced with a None sentinel.  The
        parameter name ``str`` (shadowing the builtin) is kept so the
        call signature is unchanged.
        """
        if local_vars is None:
            local_vars = {}
        return ninja_syntax.expand(str, self.vars, local_vars)

    def _shell_escape(self, path):
        """Quote paths containing spaces."""
        return '"%s"' % path if ' ' in path else path

    def _run_command(self, cmdline):
        """Run a subcommand, quietly.  Prints the full command on error."""
        try:
            if self.verbose:
                print(cmdline)
            subprocess.check_call(cmdline, shell=True)
        except subprocess.CalledProcessError:
            print('when running: ', cmdline)
            raise
# ---- Command-line interface of configure.py --------------------------------
parser = OptionParser()
profilers = ['gmon', 'pprof']
parser.add_option('--bootstrap', action='store_true',
                  help='bootstrap a ninja binary from nothing')
parser.add_option('--verbose', action='store_true',
                  help='enable verbose build')
parser.add_option('--platform',
                  help='target platform (' +
                       '/'.join(Platform.known_platforms()) + ')',
                  choices=Platform.known_platforms())
parser.add_option('--host',
                  help='host platform (' +
                       '/'.join(Platform.known_platforms()) + ')',
                  choices=Platform.known_platforms())
parser.add_option('--debug', action='store_true',
                  help='enable debugging extras',)
parser.add_option('--profile', metavar='TYPE',
                  choices=profilers,
                  help='enable profiling (' + '/'.join(profilers) + ')',)
# Kept for backward compatibility with older invocations; gtest is bundled.
parser.add_option('--with-gtest', metavar='PATH', help='ignored')
parser.add_option('--with-python', metavar='EXE',
                  help='use EXE as the Python interpreter',
                  default=os.path.basename(sys.executable))
parser.add_option('--force-pselect', action='store_true',
                  help='ppoll() is used by default where available, '
                       'but some platforms may need to use pselect instead',)
(options, args) = parser.parse_args()
if args:
    print('ERROR: extra unparsed command-line arguments:', args)
    sys.exit(1)

# Target platform: what the produced ninja binary will run on.
platform = Platform(options.platform)
# Host platform: what this build runs on; defaults to the target.
if options.host:
    host = Platform(options.host)
else:
    host = platform
BUILD_FILENAME = 'build.ninja'
ninja_writer = ninja_syntax.Writer(open(BUILD_FILENAME, 'w'))
n = ninja_writer
if options.bootstrap:
# Make the build directory.
try:
os.mkdir('build')
except OSError:
pass
# Wrap ninja_writer with the Bootstrapper, which also executes the
# commands.
print('bootstrapping ninja...')
n = Bootstrap(n, verbose=options.verbose)
n.comment('This file is used to build ninja itself.')
n.comment('It is generated by ' + os.path.basename(__file__) + '.')
n.newline()
n.variable('ninja_required_version', '1.3')
n.newline()
n.comment('The arguments passed to configure.py, for rerunning it.')
configure_args = sys.argv[1:]
if '--bootstrap' in configure_args:
configure_args.remove('--bootstrap')
n.variable('configure_args', ' '.join(configure_args))
env_keys = set(['CXX', 'AR', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS'])
configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys)
if configure_env:
config_str = ' '.join([k + '=' + pipes.quote(configure_env[k])
for k in configure_env])
n.variable('configure_env', config_str + '$ ')
n.newline()
CXX = configure_env.get('CXX', 'g++')
objext = '.o'
if platform.is_msvc():
CXX = 'cl'
objext = '.obj'
def src(filename):
    """Path of a source file under $root/src/."""
    return os.path.join('$root', 'src', filename)
def built(filename):
    """Path of a generated file under $builddir/."""
    return os.path.join('$builddir', filename)
def doc(filename):
    """Path of a documentation file under $root/doc/."""
    return os.path.join('$root', 'doc', filename)
def cc(name, **kwargs):
    """Emit a build edge compiling src/<name>.c into an object file.

    Note: deliberately uses the 'cxx' rule; the same compiler command
    line is applied to the few C sources (e.g. getopt)."""
    return n.build(built(name + objext), 'cxx', src(name + '.c'), **kwargs)
def cxx(name, **kwargs):
    """Emit a build edge compiling src/<name>.cc into an object file."""
    return n.build(built(name + objext), 'cxx', src(name + '.cc'), **kwargs)
def binary(name):
    """Return the file name for executable `name`.

    On Windows the real output is name.exe; a phony alias is added so
    `ninja name` still works without the suffix."""
    if platform.is_windows():
        exe = name + '.exe'
        n.build(name, 'phony', exe)
        return exe
    return name
root = sourcedir
if root == os.getcwd():
# In the common case where we're building directly in the source
# tree, simplify all the paths to just be cwd-relative.
root = '.'
n.variable('root', root)
n.variable('builddir', 'build')
n.variable('cxx', CXX)
if platform.is_msvc():
n.variable('ar', 'link')
else:
n.variable('ar', configure_env.get('AR', 'ar'))
if platform.is_msvc():
cflags = ['/showIncludes',
'/nologo', # Don't print startup banner.
'/Zi', # Create pdb with debug info.
'/W4', # Highest warning level.
'/WX', # Warnings as errors.
'/wd4530', '/wd4100', '/wd4706', '/wd4244',
'/wd4512', '/wd4800', '/wd4702', '/wd4819',
# Disable warnings about constant conditional expressions.
'/wd4127',
# Disable warnings about passing "this" during initialization.
'/wd4355',
# Disable warnings about ignored typedef in DbgHelp.h
'/wd4091',
'/GR-', # Disable RTTI.
# Disable size_t -> int truncation warning.
# We never have strings or arrays larger than 2**31.
'/wd4267',
'/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS',
'/D_HAS_EXCEPTIONS=0',
'/DNINJA_PYTHON="%s"' % options.with_python]
if platform.msvc_needs_fs():
cflags.append('/FS')
ldflags = ['/DEBUG', '/libpath:$builddir']
if not options.debug:
cflags += ['/Ox', '/DNDEBUG', '/GL']
ldflags += ['/LTCG', '/OPT:REF', '/OPT:ICF']
else:
cflags = ['-g', '-Wall', '-Wextra',
'-Wno-deprecated',
'-Wno-missing-field-initializers',
'-Wno-unused-parameter',
'-fno-rtti',
'-fno-exceptions',
'-fvisibility=hidden', '-pipe',
'-DNINJA_PYTHON="%s"' % options.with_python]
if options.debug:
cflags += ['-D_GLIBCXX_DEBUG', '-D_GLIBCXX_DEBUG_PEDANTIC']
cflags.remove('-fno-rtti') # Needed for above pedanticness.
else:
cflags += ['-O2', '-DNDEBUG']
try:
proc = subprocess.Popen(
[CXX, '-fdiagnostics-color', '-c', '-x', 'c++', '/dev/null',
'-o', '/dev/null'],
stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)
if proc.wait() == 0:
cflags += ['-fdiagnostics-color']
except:
pass
if platform.is_mingw():
cflags += ['-D_WIN32_WINNT=0x0601', '-D__USE_MINGW_ANSI_STDIO=1']
ldflags = ['-L$builddir']
if platform.uses_usr_local():
cflags.append('-I/usr/local/include')
ldflags.append('-L/usr/local/lib')
if platform.is_aix():
# printf formats for int64_t, uint64_t; large file support
cflags.append('-D__STDC_FORMAT_MACROS')
cflags.append('-D_LARGE_FILES')
libs = []
if platform.is_mingw():
cflags.remove('-fvisibility=hidden');
ldflags.append('-static')
elif platform.is_solaris():
cflags.remove('-fvisibility=hidden')
elif platform.is_aix():
cflags.remove('-fvisibility=hidden')
elif platform.is_msvc():
pass
else:
if options.profile == 'gmon':
cflags.append('-pg')
ldflags.append('-pg')
elif options.profile == 'pprof':
cflags.append('-fno-omit-frame-pointer')
libs.extend(['-Wl,--no-as-needed', '-lprofiler'])
if platform.supports_ppoll() and not options.force_pselect:
cflags.append('-DUSE_PPOLL')
if platform.supports_ninja_browse():
cflags.append('-DNINJA_HAVE_BROWSE')
# Search for generated headers relative to build dir.
cflags.append('-I.')
def shell_escape(str):
    """Quote str so the shell reads it as a single argument.

    Not a general-purpose escaper -- just enough to make NINJA_PYTHON
    work (a value that may contain double quotes).
    """
    # cmd.exe quoting rules differ entirely; punt on Windows.
    if platform.is_windows():
        return str
    # Nothing to protect: pass plain values through untouched.
    if '"' not in str:
        return str
    # Wrap in single quotes, escaping any embedded single quotes.
    return "'%s'" % str.replace("'", "\\'")
if 'CFLAGS' in configure_env:
cflags.append(configure_env['CFLAGS'])
ldflags.append(configure_env['CFLAGS'])
if 'CXXFLAGS' in configure_env:
cflags.append(configure_env['CXXFLAGS'])
ldflags.append(configure_env['CXXFLAGS'])
n.variable('cflags', ' '.join(shell_escape(flag) for flag in cflags))
if 'LDFLAGS' in configure_env:
ldflags.append(configure_env['LDFLAGS'])
n.variable('ldflags', ' '.join(shell_escape(flag) for flag in ldflags))
n.newline()
if platform.is_msvc():
n.rule('cxx',
command='$cxx $cflags -c $in /Fo$out /Fd' + built('$pdb'),
description='CXX $out',
deps='msvc' # /showIncludes is included in $cflags.
)
else:
n.rule('cxx',
command='$cxx -MMD -MT $out -MF $out.d $cflags -c $in -o $out',
depfile='$out.d',
deps='gcc',
description='CXX $out')
n.newline()
if host.is_msvc():
n.rule('ar',
command='lib /nologo /ltcg /out:$out $in',
description='LIB $out')
elif host.is_mingw():
n.rule('ar',
command='$ar crs $out $in',
description='AR $out')
else:
n.rule('ar',
command='rm -f $out && $ar crs $out $in',
description='AR $out')
n.newline()
if platform.is_msvc():
n.rule('link',
command='$cxx $in $libs /nologo /link $ldflags /out:$out',
description='LINK $out')
else:
n.rule('link',
command='$cxx $ldflags -o $out $in $libs',
description='LINK $out')
n.newline()
objs = []
if platform.supports_ninja_browse():
n.comment('browse_py.h is used to inline browse.py.')
n.rule('inline',
command='"%s"' % src('inline.sh') + ' $varname < $in > $out',
description='INLINE $out')
n.build(built('browse_py.h'), 'inline', src('browse.py'),
implicit=src('inline.sh'),
variables=[('varname', 'kBrowsePy')])
n.newline()
objs += cxx('browse', order_only=built('browse_py.h'))
n.newline()
n.comment('the depfile parser and ninja lexers are generated using re2c.')
def has_re2c():
    """Return True if a usable re2c (>= 0.11.3, which reports its version
    as the integer 1103 or greater via `re2c -V`) is on PATH."""
    try:
        proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE)
        return int(proc.communicate()[0], 10) >= 1103
    except (OSError, ValueError):
        # OSError: re2c isn't installed.  ValueError: `re2c -V` printed
        # something that isn't a plain integer (previously this crashed
        # configure); treat either case as "no usable re2c".
        return False
if has_re2c():
n.rule('re2c',
command='re2c -b -i --no-generation-date -o $out $in',
description='RE2C $out')
# Generate the .cc files in the source directory so we can check them in.
n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc'))
n.build(src('lexer.cc'), 're2c', src('lexer.in.cc'))
else:
print("warning: A compatible version of re2c (>= 0.11.3) was not found; "
"changes to src/*.in.cc will not affect your build.")
n.newline()
n.comment('Core source files all build into ninja library.')
cxxvariables = []
if platform.is_msvc():
cxxvariables = [('pdb', 'ninja.pdb')]
for name in ['build',
'build_log',
'clean',
'clparser',
'debug_flags',
'depfile_parser',
'deps_log',
'disk_interface',
'dyndep',
'dyndep_parser',
'edit_distance',
'eval_env',
'graph',
'graphviz',
'lexer',
'line_printer',
'manifest_parser',
'metrics',
'parser',
'state',
'string_piece_util',
'util',
'version']:
objs += cxx(name, variables=cxxvariables)
if platform.is_windows():
for name in ['subprocess-win32',
'includes_normalize-win32',
'msvc_helper-win32',
'msvc_helper_main-win32']:
objs += cxx(name, variables=cxxvariables)
if platform.is_msvc():
objs += cxx('minidump-win32', variables=cxxvariables)
objs += cc('getopt')
else:
objs += cxx('subprocess-posix')
if platform.is_aix():
objs += cc('getopt')
if platform.is_msvc():
ninja_lib = n.build(built('ninja.lib'), 'ar', objs)
else:
ninja_lib = n.build(built('libninja.a'), 'ar', objs)
n.newline()
if platform.is_msvc():
libs.append('ninja.lib')
else:
libs.append('-lninja')
if platform.is_aix() and not platform.is_os400_pase():
libs.append('-lperfstat')
all_targets = []
n.comment('Main executable is library plus main() function.')
objs = cxx('ninja', variables=cxxvariables)
ninja = n.build(binary('ninja'), 'link', objs, implicit=ninja_lib,
variables=[('libs', libs)])
n.newline()
all_targets += ninja
if options.bootstrap:
# We've built the ninja binary. Don't run any more commands
# through the bootstrap executor, but continue writing the
# build.ninja file.
n = ninja_writer
n.comment('Tests all build into ninja_test executable.')
objs = []
if platform.is_msvc():
cxxvariables = [('pdb', 'ninja_test.pdb')]
for name in ['build_log_test',
'build_test',
'clean_test',
'clparser_test',
'depfile_parser_test',
'deps_log_test',
'dyndep_parser_test',
'disk_interface_test',
'edit_distance_test',
'graph_test',
'lexer_test',
'manifest_parser_test',
'ninja_test',
'state_test',
'string_piece_util_test',
'subprocess_test',
'test',
'util_test']:
objs += cxx(name, variables=cxxvariables)
if platform.is_windows():
for name in ['includes_normalize_test', 'msvc_helper_test']:
objs += cxx(name, variables=cxxvariables)
ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib,
variables=[('libs', libs)])
n.newline()
all_targets += ninja_test
n.comment('Ancillary executables.')
for name in ['build_log_perftest',
'canon_perftest',
'depfile_parser_perftest',
'hash_collision_bench',
'manifest_parser_perftest',
'clparser_perftest']:
if platform.is_msvc():
cxxvariables = [('pdb', name + '.pdb')]
objs = cxx(name, variables=cxxvariables)
all_targets += n.build(binary(name), 'link', objs,
implicit=ninja_lib, variables=[('libs', libs)])
n.newline()
n.comment('Generate a graph using the "graph" tool.')
n.rule('gendot',
command='./ninja -t graph all > $out')
n.rule('gengraph',
command='dot -Tpng $in > $out')
dot = n.build(built('graph.dot'), 'gendot', ['ninja', 'build.ninja'])
n.build('graph.png', 'gengraph', dot)
n.newline()
n.comment('Generate the manual using asciidoc.')
n.rule('asciidoc',
command='asciidoc -b docbook -d book -o $out $in',
description='ASCIIDOC $out')
n.rule('xsltproc',
command='xsltproc --nonet doc/docbook.xsl $in > $out',
description='XSLTPROC $out')
docbookxml = n.build(built('manual.xml'), 'asciidoc', doc('manual.asciidoc'))
manual = n.build(doc('manual.html'), 'xsltproc', docbookxml,
implicit=[doc('style.css'), doc('docbook.xsl')])
n.build('manual', 'phony',
order_only=manual)
n.newline()
n.rule('dblatex',
command='dblatex -q -o $out -p doc/dblatex.xsl $in',
description='DBLATEX $out')
n.build(doc('manual.pdf'), 'dblatex', docbookxml,
implicit=[doc('dblatex.xsl')])
n.comment('Generate Doxygen.')
n.rule('doxygen',
command='doxygen $in',
description='DOXYGEN $in')
n.variable('doxygen_mainpage_generator',
src('gen_doxygen_mainpage.sh'))
n.rule('doxygen_mainpage',
command='$doxygen_mainpage_generator $in > $out',
description='DOXYGEN_MAINPAGE $out')
mainpage = n.build(built('doxygen_mainpage'), 'doxygen_mainpage',
['README.md', 'COPYING'],
implicit=['$doxygen_mainpage_generator'])
n.build('doxygen', 'doxygen', doc('doxygen.config'),
implicit=mainpage)
n.newline()
if not host.is_mingw():
n.comment('Regenerate build files if build script changes.')
n.rule('configure',
command='${configure_env}%s $root/configure.py $configure_args' %
options.with_python,
generator=True)
n.build('build.ninja', 'configure',
implicit=['$root/configure.py',
os.path.normpath('$root/misc/ninja_syntax.py')])
n.newline()
n.default(ninja)
n.newline()
if host.is_linux():
n.comment('Packaging')
n.rule('rpmbuild',
command="misc/packaging/rpmbuild.sh",
description='Building rpms..')
n.build('rpm', 'rpmbuild')
n.newline()
n.build('all', 'phony', all_targets)
n.close()
print('wrote %s.' % BUILD_FILENAME)
if options.bootstrap:
print('bootstrap complete. rebuilding...')
rebuild_args = []
if platform.can_rebuild_in_place():
rebuild_args.append('./ninja')
else:
if platform.is_windows():
bootstrap_exe = 'ninja.bootstrap.exe'
final_exe = 'ninja.exe'
else:
bootstrap_exe = './ninja.bootstrap'
final_exe = './ninja'
if os.path.exists(bootstrap_exe):
os.unlink(bootstrap_exe)
os.rename(final_exe, bootstrap_exe)
rebuild_args.append(bootstrap_exe)
if options.verbose:
rebuild_args.append('-v')
subprocess.check_call(rebuild_args)

11
doc/README.md Normal file
View File

@ -0,0 +1,11 @@
This directory contains the Ninja manual and support files used in
building it. Here's a brief overview of how it works.
The source text, `manual.asciidoc`, is written in the AsciiDoc format.
AsciiDoc can generate HTML but it doesn't look great; instead, we use
AsciiDoc to generate the Docbook XML format and then provide our own
Docbook XSL tweaks to produce HTML from that.
In theory using AsciiDoc and DocBook allows us to produce nice PDF
documentation etc. In reality it's not clear anyone wants that, but the
build rules are in place to generate it if you install dblatex.

7
doc/dblatex.xsl Normal file
View File

@ -0,0 +1,7 @@
<!-- This custom XSL tweaks the dblatex XML settings. -->
<xsl:stylesheet xmlns:xsl='http://www.w3.org/1999/XSL/Transform' version='1.0'>
<!-- These parameters disable the list of collaborators and revisions.
Together remove a useless page from the front matter. -->
<xsl:param name='doc.collab.show'>0</xsl:param>
<xsl:param name='latex.output.revhistory'>0</xsl:param>
</xsl:stylesheet>

34
doc/docbook.xsl Normal file
View File

@ -0,0 +1,34 @@
<!-- This custom XSL tweaks the DocBook XML -> HTML settings to produce
an OK-looking manual. -->
<!DOCTYPE xsl:stylesheet [
<!ENTITY css SYSTEM "style.css">
]>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version='1.0'>
<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl"/>
<!-- Embed our stylesheet as the user-provided <head> content. -->
<xsl:template name="user.head.content"><style>&css;</style></xsl:template>
<!-- Remove the body.attributes block, which specifies a bunch of
useless bgcolor etc. attrs on the <body> tag. -->
<xsl:template name="body.attributes"></xsl:template>
<!-- Specify that in "book" form (which we're using), we only want a
single table of contents at the beginning of the document. -->
<xsl:param name="generate.toc">book toc</xsl:param>
<!-- Don't put the "Chapter 1." prefix on the "chapters". -->
<xsl:param name="chapter.autolabel">0</xsl:param>
<!-- Make builds reproducible by generating the same IDs from the same inputs -->
<xsl:param name="generate.consistent.ids">1</xsl:param>
<!-- Use <ul> for the table of contents. By default DocBook uses a
<dl>, which makes no semantic sense. I imagine they just did
it because it looks nice? -->
<xsl:param name="toc.list.type">ul</xsl:param>
<xsl:output method="html" encoding="utf-8" indent="no"
doctype-public=""/>
</xsl:stylesheet>

1250
doc/doxygen.config Normal file

File diff suppressed because it is too large Load Diff

1173
doc/manual.asciidoc Normal file

File diff suppressed because it is too large Load Diff

29
doc/style.css Normal file
View File

@ -0,0 +1,29 @@
body {
margin: 5ex 10ex;
max-width: 80ex;
line-height: 1.5;
font-family: sans-serif;
}
h1, h2, h3 {
font-weight: normal;
}
pre, code {
font-family: x, monospace;
}
pre {
padding: 1ex;
background: #eee;
border: solid 1px #ddd;
min-width: 0;
font-size: 90%;
}
code {
color: #007;
}
div.chapter {
margin-top: 4em;
border-top: solid 2px black;
}
p {
margin-top: 0;
}

View File

@ -0,0 +1 @@
build

View File

@ -0,0 +1 @@
default

View File

@ -0,0 +1 @@
include

View File

@ -0,0 +1 @@
pool

View File

@ -0,0 +1 @@
rule

View File

@ -0,0 +1 @@
subninja

View File

@ -0,0 +1 @@
a

View File

@ -0,0 +1 @@
b

View File

@ -0,0 +1 @@
:

View File

@ -0,0 +1 @@
$

View File

@ -0,0 +1 @@
$

View File

@ -0,0 +1 @@
=

View File

@ -0,0 +1 @@

View File

@ -0,0 +1 @@
|

View File

@ -0,0 +1 @@
||

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,5 @@
rule b
command = clang -MMD -MF $out.d -o $out -c $in
description = building $out
build a.o: b a.c

57
misc/bash-completion Normal file
View File

@ -0,0 +1,57 @@
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Add the following to your .bashrc to tab-complete ninja targets
# . path/to/ninja/misc/bash-completion
# Complete ninja command lines: target names, plus file/dir arguments
# for -f and -C.
_ninja_target() {
    local cur prev targets dir line targets_command OPTIND

    # When available, use bash_completion to:
    # 1) Complete words when the cursor is in the middle of the word
    # 2) Complete paths with files or directories, as appropriate
    if _get_comp_words_by_ref cur prev &>/dev/null ; then
        case $prev in
            -f)
                _filedir
                return 0
                ;;
            -C)
                _filedir -d
                return 0
                ;;
        esac
    else
        # Fallback when bash_completion helpers aren't installed.
        cur="${COMP_WORDS[COMP_CWORD]}"
    fi

    if [[ "$cur" == "--"* ]]; then
        # there is currently only one argument that takes --
        COMPREPLY=($(compgen -P '--' -W 'version' -- "${cur:2}"))
    else
        dir="."
        # Everything on the command line after `ninja` itself.
        line=$(echo ${COMP_LINE} | cut -d" " -f 2-)
        # filter out all non relevant arguments but keep C for dirs
        while getopts :C:f:j:l:k:nvd:t: opt $line; do
            case $opt in
                # eval for tilde expansion
                C) eval dir="$OPTARG" ;;
            esac
        done;
        # Ask ninja itself for the available targets in that directory.
        targets_command="eval ninja -C \"${dir}\" -t targets all 2>/dev/null | cut -d: -f1"
        COMPREPLY=($(compgen -W '`${targets_command}`' -- "$cur"))
    fi
    return
}
complete -F _ninja_target ninja

41
misc/ci.py Executable file
View File

@ -0,0 +1,41 @@
#!/usr/bin/env python3
import os
# Path prefixes the whitespace lint skips: git metadata, fuzzing corpora,
# binary logs, and generated sources we don't control.
ignores = [
    '.git/',
    'misc/afl-fuzz-tokens/',
    'ninja_deps',
    'src/depfile_parser.cc',
    'src/lexer.cc',
]

# Running count of problems; doubles as the process exit status below.
error_count = 0

def error(path, msg):
    """Record one lint failure and print it highlighted in red (ANSI)."""
    global error_count
    error_count += 1
    print('\x1b[1;31m{}\x1b[0;31m{}\x1b[0m'.format(path, msg))
# Walk the tree and lint every decodable text file for:
#  - a missing newline at end of file,
#  - Windows (CRLF) line endings (stop checking the file after one hit),
#  - trailing whitespace (reported with a line number).
for root, directory, filenames in os.walk('.'):
    for filename in filenames:
        path = os.path.join(root, filename)[2:]  # strip the leading './'
        if any([path.startswith(x) for x in ignores]):
            continue
        with open(path, 'rb') as file:
            line_nr = 1
            try:
                for line in [x.decode() for x in file.readlines()]:
                    if len(line) == 0 or line[-1] != '\n':
                        error(path, ' missing newline at end of file.')
                    if len(line) > 1:
                        if line[-2] == '\r':
                            error(path, ' has Windows line endings.')
                            break
                        if line[-2] == ' ' or line[-2] == '\t':
                            error(path, ':{} has trailing whitespace.'.format(line_nr))
                    line_nr += 1
            except UnicodeError:
                pass # binary file
# Exit status is the number of problems, so CI fails when any were found.
exit(error_count)

23
misc/inherited-fds.ninja Normal file
View File

@ -0,0 +1,23 @@
# This build file prints out a list of open file descriptors in
# Ninja subprocesses, to help verify we don't accidentally leak
# any.
# Because one fd leak was in the code managing multiple subprocesses,
# this test brings up multiple subprocesses and then dumps the fd
# table of the last one.
# Use like: ./ninja -f misc/inherited-fds.ninja
rule sleep
command = sleep 10000
rule dump
command = sleep 1; ls -l /proc/self/fd; exit 1
build all: phony a b c d e
build a: sleep
build b: sleep
build c: sleep
build d: sleep
build e: dump

View File

@ -0,0 +1,38 @@
# An input file for running a "slow" build.
# Use like: ninja -f misc/long-slow-build.ninja all
rule sleep
command = sleep 1
description = SLEEP $out
build 0: sleep README
build 1: sleep README
build 2: sleep README
build 3: sleep README
build 4: sleep README
build 5: sleep README
build 6: sleep README
build 7: sleep README
build 8: sleep README
build 9: sleep README
build 10: sleep 0
build 11: sleep 1
build 12: sleep 2
build 13: sleep 3
build 14: sleep 4
build 15: sleep 5
build 16: sleep 6
build 17: sleep 7
build 18: sleep 8
build 19: sleep 9
build 20: sleep 10
build 21: sleep 11
build 22: sleep 12
build 23: sleep 13
build 24: sleep 14
build 25: sleep 15
build 26: sleep 16
build 27: sleep 17
build 28: sleep 18
build 29: sleep 19
build all: phony 20 21 22 23 24 25 26 27 28 29

56
misc/measure.py Executable file
View File

@ -0,0 +1,56 @@
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""measure the runtime of a command by repeatedly running it.
"""
from __future__ import print_function
import time
import subprocess
import sys
devnull = open('/dev/null', 'w')
def run(cmd, repeat=10):
    """Run cmd `repeat` times, print each sample, and report an estimate.

    cmd: argv list passed to subprocess.call.
    repeat: number of samples to take.

    The estimate is the fastest sample: conceptually the runtime we'd
    see given perfect time slices / warm disk caches.  The mean error
    is printed to make anomalous runs obvious.
    """
    import os  # local: only needed for os.devnull
    print('sampling:', end=' ')
    sys.stdout.flush()
    samples = []
    # Fix: open the null device locally (and portably via os.devnull)
    # instead of relying on a leaked module-level open('/dev/null').
    with open(os.devnull, 'w') as sink:
        for _ in range(repeat):
            start = time.time()
            subprocess.call(cmd, stdout=sink, stderr=sink)
            end = time.time()
            dt = (end - start) * 1000
            print('%dms' % int(dt), end=' ')
            sys.stdout.flush()
            samples.append(dt)
    print()

    # We're interested in the 'pure' runtime of the code, which is
    # conceptually the smallest time we'd see if we ran it enough times
    # such that it got the perfect time slices / disk cache hits.
    best = min(samples)
    # Also print how varied the outputs were in an attempt to make it
    # more obvious if something has gone terribly wrong.
    err = sum(s - best for s in samples) / float(len(samples))
    print('estimate: %dms (mean err %.1fms)' % (best, err))
if __name__ == '__main__':
    # Usage: measure.py command [args...]
    if len(sys.argv) < 2:
        print('usage: measure.py command args...')
        sys.exit(1)
    run(cmd=sys.argv[1:])

85
misc/ninja-mode.el Normal file
View File

@ -0,0 +1,85 @@
;;; ninja-mode.el --- Major mode for editing .ninja files -*- lexical-binding: t -*-
;; Package-Requires: ((emacs "24"))
;; Copyright 2011 Google Inc. All Rights Reserved.
;;
;; Licensed under the Apache License, Version 2.0 (the "License");
;; you may not use this file except in compliance with the License.
;; You may obtain a copy of the License at
;;
;; http://www.apache.org/licenses/LICENSE-2.0
;;
;; Unless required by applicable law or agreed to in writing, software
;; distributed under the License is distributed on an "AS IS" BASIS,
;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
;; See the License for the specific language governing permissions and
;; limitations under the License.
;;; Commentary:
;; Simple emacs mode for editing .ninja files.
;; Just some syntax highlighting for now.
;;; Code:
(defvar ninja-keywords
`((,(concat "^" (regexp-opt '("rule" "build" "subninja" "include"
"pool" "default")
'words))
. font-lock-keyword-face)
("\\([[:alnum:]_]+\\) =" 1 font-lock-variable-name-face)
;; Variable expansion.
("$[[:alnum:]_]+" . font-lock-variable-name-face)
("${[[:alnum:]._]+}" . font-lock-variable-name-face)
;; Rule names
("rule +\\([[:alnum:]_.-]+\\)" 1 font-lock-function-name-face)
;; Build Statement - highlight the rule used,
;; allow for escaped $,: in outputs.
("build +\\(?:[^:$\n]\\|$[:$]\\)+ *: *\\([[:alnum:]_.-]+\\)"
1 font-lock-function-name-face)))
(defvar ninja-mode-syntax-table
(let ((table (make-syntax-table)))
(modify-syntax-entry ?\" "." table)
table)
"Syntax table used in `ninja-mode'.")
(defun ninja-syntax-propertize (start end)
  "Apply syntax-table properties between START and END so that `#'
starts a comment only when it is the first non-blank character of a
line that is not a `$' continuation of the previous line."
  (save-match-data
    (goto-char start)
    (while (search-forward "#" end t)
      (let ((match-pos (match-beginning 0)))
        (when (and
               ;; Is it the first non-white character on the line?
               (eq match-pos (save-excursion (back-to-indentation) (point)))
               (save-excursion
                 (goto-char (line-end-position 0))
                 (or
                  ;; If we're continuing the previous line, it's not a
                  ;; comment.
                  (not (eq ?$ (char-before)))
                  ;; Except if the previous line is a comment as well, as the
                  ;; continuation dollar is ignored then.
                  (nth 4 (syntax-ppss)))))
          ;; Syntax class 11 = comment start.
          (put-text-property match-pos (1+ match-pos) 'syntax-table '(11))
          (let ((line-end (line-end-position)))
            ;; Avoid putting properties past the end of the buffer.
            ;; Otherwise we get an `args-out-of-range' error.
            (unless (= line-end (1+ (buffer-size)))
              ;; Syntax class 12 = comment end (the newline).
              (put-text-property line-end (1+ line-end) 'syntax-table '(12)))))))))
;;;###autoload
(define-derived-mode ninja-mode prog-mode "ninja"
(set (make-local-variable 'comment-start) "#")
(set (make-local-variable 'parse-sexp-lookup-properties) t)
(set (make-local-variable 'syntax-propertize-function) #'ninja-syntax-propertize)
(setq font-lock-defaults '(ninja-keywords)))
;; Run ninja-mode for files ending in .ninja.
;;;###autoload
(add-to-list 'auto-mode-alist '("\\.ninja$" . ninja-mode))
(provide 'ninja-mode)
;;; ninja-mode.el ends here

87
misc/ninja.vim Normal file
View File

@ -0,0 +1,87 @@
" ninja build file syntax.
" Language: ninja build file as described at
" http://ninja-build.org/manual.html
" Version: 1.5
" Last Change: 2018/04/05
" Maintainer: Nicolas Weber <nicolasweber@gmx.de>
" Version 1.4 of this script is in the upstream vim repository and will be
" included in the next vim release. If you change this, please send your change
" upstream.
" ninja lexer and parser are at
" https://github.com/ninja-build/ninja/blob/master/src/lexer.in.cc
" https://github.com/ninja-build/ninja/blob/master/src/manifest_parser.cc
if exists("b:current_syntax")
  finish
endif

" Save the user's compatibility options so the script parses predictably,
" and restore them at the end of the file.
let s:cpo_save = &cpo
set cpo&vim

syn case match

" Comments are only matched when the # is at the beginning of the line (with
" optional whitespace), as long as the prior line didn't end with a $
" continuation.
syn match ninjaComment /\(\$\n\)\@<!\_^\s*#.*$/  contains=@Spell

" Toplevel statements are the ones listed here and
" toplevel variable assignments (ident '=' value).
" lexer.in.cc, ReadToken() and manifest_parser.cc, Parse()
syn match ninjaKeyword "^build\>"
syn match ninjaKeyword "^rule\>"
syn match ninjaKeyword "^pool\>"
syn match ninjaKeyword "^default\>"
syn match ninjaKeyword "^include\>"
syn match ninjaKeyword "^subninja\>"

" Both 'build' and 'rule' begin a variable scope that ends
" on the first line without indent. 'rule' allows only a
" limited set of magic variables, 'build' allows general
" let assignments.
" manifest_parser.cc, ParseRule()
syn region ninjaRule start="^rule" end="^\ze\S" contains=TOP transparent
syn keyword ninjaRuleCommand contained containedin=ninjaRule command
                             \ deps depfile description generator
                             \ pool restat rspfile rspfile_content

syn region ninjaPool start="^pool" end="^\ze\S" contains=TOP transparent
syn keyword ninjaPoolCommand contained containedin=ninjaPool depth

" Strings are parsed as follows:
" lexer.in.cc, ReadEvalString()
" simple_varname = [a-zA-Z0-9_-]+;
" varname = [a-zA-Z0-9_.-]+;
" $$ -> $
" $\n -> line continuation
" '$ ' -> escaped space
" $simple_varname -> variable
" ${varname} -> variable
syn match ninjaDollar "\$\$"
syn match ninjaWrapLineOperator "\$$"
syn match ninjaSimpleVar "\$[a-zA-Z0-9_-]\+"
syn match ninjaVar "\${[a-zA-Z0-9_.-]\+}"

" operators are:
" variable assignment =
" rule definition :
" implicit dependency |
" order-only dependency ||
syn match ninjaOperator "\(=\|:\||\|||\)\ze\s"

hi def link ninjaComment Comment
hi def link ninjaKeyword Keyword
hi def link ninjaRuleCommand Statement
hi def link ninjaPoolCommand Statement
hi def link ninjaDollar ninjaOperator
hi def link ninjaWrapLineOperator ninjaOperator
hi def link ninjaOperator Operator
hi def link ninjaSimpleVar ninjaVar
hi def link ninjaVar Identifier

let b:current_syntax = "ninja"

let &cpo = s:cpo_save
unlet s:cpo_save

197
misc/ninja_syntax.py Normal file
View File

@ -0,0 +1,197 @@
#!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import re
import textwrap
def escape_path(word):
    """Escape a path so Ninja reads it as a single token.

    '$ ' sequences are doubled first so a pre-escaped space survives the
    subsequent space replacement; plain spaces and colons then get their
    '$' escape.
    """
    for raw, cooked in (('$ ', '$$ '), (' ', '$ '), (':', '$:')):
        word = word.replace(raw, cooked)
    return word
class Writer(object):
    """Writes a well-formed .ninja manifest to `output`.

    Lines longer than `width` columns are wrapped with trailing '$'
    continuations, never splitting on an escaped ('$ ') space.
    """

    def __init__(self, output, width=78):
        self.output = output  # any object with .write(); typically a file
        self.width = width    # wrap column used by _line()

    def newline(self):
        """Emit a blank separator line."""
        self.output.write('\n')

    def comment(self, text):
        """Emit `text` as word-wrapped '# ...' comment lines."""
        for line in textwrap.wrap(text, self.width - 2, break_long_words=False,
                                  break_on_hyphens=False):
            self.output.write('# ' + line + '\n')

    def variable(self, key, value, indent=0):
        """Emit 'key = value'; a list value is space-joined.

        None emits nothing, so callers can pass optional settings through
        unconditionally.
        """
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)

    def pool(self, name, depth):
        """Declare a pool with the given job depth."""
        self._line('pool %s' % name)
        self.variable('depth', depth, indent=1)

    def rule(self, name, command, description=None, depfile=None,
             generator=False, pool=None, restat=False, rspfile=None,
             rspfile_content=None, deps=None):
        """Declare a rule; keyword arguments map to the rule's variables."""
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if pool:
            self.variable('pool', pool, indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)
        if deps:
            self.variable('deps', deps, indent=1)

    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None, implicit_outputs=None, pool=None):
        """Emit a build statement; returns the (listified) explicit outputs.

        `implicit` inputs follow '|', `order_only` follow '||', and
        `implicit_outputs` follow '|' on the output side. All paths are
        escaped with escape_path().
        """
        outputs = as_list(outputs)
        out_outputs = [escape_path(x) for x in outputs]
        all_inputs = [escape_path(x) for x in as_list(inputs)]

        if implicit:
            implicit = [escape_path(x) for x in as_list(implicit)]
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            order_only = [escape_path(x) for x in as_list(order_only)]
            all_inputs.append('||')
            all_inputs.extend(order_only)
        if implicit_outputs:
            implicit_outputs = [escape_path(x)
                                for x in as_list(implicit_outputs)]
            out_outputs.append('|')
            out_outputs.extend(implicit_outputs)

        self._line('build %s: %s' % (' '.join(out_outputs),
                                     ' '.join([rule] + all_inputs)))

        if pool is not None:
            self._line('  pool = %s' % pool)

        if variables:
            if isinstance(variables, dict):
                iterator = iter(variables.items())
            else:
                iterator = iter(variables)

            for key, val in iterator:
                self.variable(key, val, indent=1)

        return outputs

    def include(self, path):
        """Emit an 'include' statement (shares the current variable scope)."""
        self._line('include %s' % path)

    def subninja(self, path):
        """Emit a 'subninja' statement (sub-manifest gets its own scope)."""
        self._line('subninja %s' % path)

    def default(self, paths):
        """Declare the default target(s)."""
        self._line('default %s' % ' '.join(as_list(paths)))

    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        # NOTE(review): index 0 is never examined (condition is `> 0`), so a
        # '$' at the very start of the string is not counted — confirm this
        # is intentional before changing it.
        while dollar_index > 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count

    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        # NOTE(review): the two-space indent units below were reconstructed
        # from upstream ninja_syntax.py (the diff stripped whitespace) —
        # confirm against upstream.
        leading_space = '  ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.

            # Find the rightmost space that would obey our width constraint
            # and that's not an escaped space.
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                if (space < 0 or
                    self._count_dollars_before_index(text, space) % 2 == 0):
                    break

            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if (space < 0 or
                        self._count_dollars_before_index(text, space) % 2 == 0):
                        break
            if space < 0:
                # Give up on breaking.
                break

            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]

            # Subsequent lines are continuations, so indent them.
            leading_space = '  ' * (indent+2)

        self.output.write(leading_space + text + '\n')

    def close(self):
        """Close the underlying output stream."""
        self.output.close()
def as_list(input):
    """Normalize None / scalar / list into a list (None becomes [])."""
    if isinstance(input, list):
        return input
    return [] if input is None else [input]
def escape(string):
    """Escape a string so Ninja embeds it without further interpretation."""
    # Ninja has no escape for newlines, so reject them outright.
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # '$' is the sole metacharacter; doubling it escapes it.
    return string.replace('$', '$$')
def expand(string, vars, local_vars=None):
    """Expand a string containing $vars as Ninja would.

    `local_vars` takes precedence over `vars`; unknown variables expand to
    the empty string and '$$' expands to a literal '$'.

    Note: doesn't handle the full Ninja variable syntax, but it's enough
    to make configure.py's use of it work.
    """
    # Was a mutable default argument (local_vars={}); use the None idiom.
    if local_vars is None:
        local_vars = {}

    def exp(m):
        var = m.group(1)
        if var == '$':
            return '$'
        return local_vars.get(var, vars.get(var, ''))
    return re.sub(r'\$(\$|\w*)', exp, string)

191
misc/ninja_syntax_test.py Executable file
View File

@ -0,0 +1,191 @@
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import ninja_syntax
LONGWORD = 'a' * 10
LONGWORDWITHSPACES = 'a'*5 + '$ ' + 'a'*5
INDENT = ' '
class TestLineWordWrap(unittest.TestCase):
    """Exercises Writer._line()'s '$' continuation word-wrapping.

    NOTE(review): the leading whitespace inside the expected triple-quoted
    strings was reconstructed from upstream ninja_syntax_test.py (the diff
    stripped indentation) — confirm against upstream.
    """

    def setUp(self):
        self.out = StringIO()
        # Tiny width so almost everything wraps.
        self.n = ninja_syntax.Writer(self.out, width=8)

    def test_single_long_word(self):
        # We shouldn't wrap a single long word.
        self.n._line(LONGWORD)
        self.assertEqual(LONGWORD + '\n', self.out.getvalue())

    def test_few_long_words(self):
        # We should wrap a line where the second word is overlong.
        self.n._line(' '.join(['x', LONGWORD, 'y']))
        self.assertEqual(' $\n'.join(['x',
                                      INDENT + LONGWORD,
                                      INDENT + 'y']) + '\n',
                         self.out.getvalue())

    def test_comment_wrap(self):
        # Filenames should not be wrapped
        self.n.comment('Hello /usr/local/build-tools/bin')
        self.assertEqual('# Hello\n# /usr/local/build-tools/bin\n',
                         self.out.getvalue())

    def test_short_words_indented(self):
        # Test that indent is taking into account when breaking subsequent
        # lines.  The second line should not be '    to tree', as that's
        # longer than the test layout width of 8.
        self.n._line('line_one to tree')
        self.assertEqual('''\
line_one $
    to $
    tree
''',
                         self.out.getvalue())

    def test_few_long_words_indented(self):
        # Check wrapping in the presence of indenting.
        self.n._line(' '.join(['x', LONGWORD, 'y']), indent=1)
        self.assertEqual(' $\n'.join(['  ' + 'x',
                                      '  ' + INDENT + LONGWORD,
                                      '  ' + INDENT + 'y']) + '\n',
                         self.out.getvalue())

    def test_escaped_spaces(self):
        # '$ ' escaped spaces must never be chosen as break points.
        self.n._line(' '.join(['x', LONGWORDWITHSPACES, 'y']))
        self.assertEqual(' $\n'.join(['x',
                                      INDENT + LONGWORDWITHSPACES,
                                      INDENT + 'y']) + '\n',
                         self.out.getvalue())

    def test_fit_many_words(self):
        self.n = ninja_syntax.Writer(self.out, width=78)
        self.n._line('command = cd ../../chrome; python ../tools/grit/grit/format/repack.py ../out/Debug/obj/chrome/chrome_dll.gen/repack/theme_resources_large.pak ../out/Debug/gen/chrome/theme_resources_large.pak', 1)
        self.assertEqual('''\
  command = cd ../../chrome; python ../tools/grit/grit/format/repack.py $
      ../out/Debug/obj/chrome/chrome_dll.gen/repack/theme_resources_large.pak $
      ../out/Debug/gen/chrome/theme_resources_large.pak
''',
                         self.out.getvalue())

    def test_leading_space(self):
        # Empty list entries are filtered out before joining.
        self.n = ninja_syntax.Writer(self.out, width=14)  # force wrapping
        self.n.variable('foo', ['', '-bar', '-somethinglong'], 0)
        self.assertEqual('''\
foo = -bar $
    -somethinglong
''',
                         self.out.getvalue())

    def test_embedded_dollar_dollar(self):
        self.n = ninja_syntax.Writer(self.out, width=15)  # force wrapping
        self.n.variable('foo', ['a$$b', '-somethinglong'], 0)
        self.assertEqual('''\
foo = a$$b $
    -somethinglong
''',
                         self.out.getvalue())

    def test_two_embedded_dollar_dollars(self):
        self.n = ninja_syntax.Writer(self.out, width=17)  # force wrapping
        self.n.variable('foo', ['a$$b', '-somethinglong'], 0)
        self.assertEqual('''\
foo = a$$b $
    -somethinglong
''',
                         self.out.getvalue())

    def test_leading_dollar_dollar(self):
        self.n = ninja_syntax.Writer(self.out, width=14)  # force wrapping
        self.n.variable('foo', ['$$b', '-somethinglong'], 0)
        self.assertEqual('''\
foo = $$b $
    -somethinglong
''',
                         self.out.getvalue())

    def test_trailing_dollar_dollar(self):
        self.n = ninja_syntax.Writer(self.out, width=14)  # force wrapping
        self.n.variable('foo', ['a$$', '-somethinglong'], 0)
        self.assertEqual('''\
foo = a$$ $
    -somethinglong
''',
                         self.out.getvalue())
class TestBuild(unittest.TestCase):
    """Exercises Writer.build() output formatting."""

    def setUp(self):
        self.out = StringIO()
        self.n = ninja_syntax.Writer(self.out)

    def test_variables_dict(self):
        # Per-build variables supplied as a dict.
        self.n.build('out', 'cc', 'in', variables={'name': 'value'})
        self.assertEqual('''\
build out: cc in
  name = value
''',
                         self.out.getvalue())

    def test_variables_list(self):
        # Per-build variables supplied as (key, value) pairs.
        self.n.build('out', 'cc', 'in', variables=[('name', 'value')])
        self.assertEqual('''\
build out: cc in
  name = value
''',
                         self.out.getvalue())

    def test_implicit_outputs(self):
        # Implicit outputs appear after '|' on the output side.
        self.n.build('o', 'cc', 'i', implicit_outputs='io')
        self.assertEqual('''\
build o | io: cc i
''',
                         self.out.getvalue())
class TestExpand(unittest.TestCase):
    """Exercises ninja_syntax.expand() variable substitution."""

    def test_basic(self):
        vars = {'x': 'X'}
        self.assertEqual('foo', ninja_syntax.expand('foo', vars))

    def test_var(self):
        vars = {'xyz': 'XYZ'}
        self.assertEqual('fooXYZ', ninja_syntax.expand('foo$xyz', vars))

    def test_vars(self):
        vars = {'x': 'X', 'y': 'YYY'}
        self.assertEqual('XYYY', ninja_syntax.expand('$x$y', vars))

    def test_space(self):
        # '$ ' collapses to a plain space.
        vars = {}
        self.assertEqual('x y z', ninja_syntax.expand('x$ y$ z', vars))

    def test_locals(self):
        # Local variables shadow the global ones.
        vars = {'x': 'a'}
        local_vars = {'x': 'b'}
        self.assertEqual('a', ninja_syntax.expand('$x', vars))
        self.assertEqual('b', ninja_syntax.expand('$x', vars, local_vars))

    def test_double(self):
        # '$$' is a literal dollar sign.
        self.assertEqual('a b$c', ninja_syntax.expand('a$ b$$c', {}))


if __name__ == '__main__':
    unittest.main()

115
misc/output_test.py Executable file
View File

@ -0,0 +1,115 @@
#!/usr/bin/env python3
"""Runs ./ninja and checks if the output is correct.
In order to simulate a smart terminal it uses the 'script' command.
"""
import os
import platform
import subprocess
import sys
import tempfile
import unittest
default_env = dict(os.environ)
if 'NINJA_STATUS' in default_env:
del default_env['NINJA_STATUS']
if 'CLICOLOR_FORCE' in default_env:
del default_env['CLICOLOR_FORCE']
default_env['TERM'] = ''
NINJA_PATH = os.path.abspath('./ninja')
def run(build_ninja, flags='', pipe=False, env=default_env):
    """Run ninja over `build_ninja` (manifest text) and return its output.

    With pipe=False the command runs under 'script' to fake a smart
    terminal, so ninja emits its interactive status lines; with pipe=True
    ninja sees a dumb pipe. Raises CalledProcessError (after echoing the
    captured output) if ninja fails.
    """
    with tempfile.TemporaryDirectory() as d:
        # NOTE(review): os.chdir into the temp dir is never undone, so the
        # process is left in a deleted directory after cleanup — harmless
        # for these tests but worth confirming.
        os.chdir(d)
        with open('build.ninja', 'w') as f:
            f.write(build_ninja)
            f.flush()
        ninja_cmd = '{} {}'.format(NINJA_PATH, flags)
        try:
            if pipe:
                output = subprocess.check_output([ninja_cmd], shell=True, env=env)
            elif platform.system() == 'Darwin':
                # macOS 'script' has a different argument order.
                output = subprocess.check_output(['script', '-q', '/dev/null', 'bash', '-c', ninja_cmd],
                                                 env=env)
            else:
                output = subprocess.check_output(['script', '-qfec', ninja_cmd, '/dev/null'],
                                                 env=env)
        except subprocess.CalledProcessError as err:
            sys.stdout.buffer.write(err.output)
            raise err
    final_output = ''
    for line in output.decode('utf-8').splitlines(True):
        # 'script' echoes with CRLF line endings; drop bare-CR lines and
        # strip remaining carriage returns.
        if len(line) > 0 and line[-1] == '\r':
            continue
        final_output += line.replace('\r', '')
    return final_output
@unittest.skipIf(platform.system() == 'Windows', 'These test methods do not work on Windows')
class Output(unittest.TestCase):
    """End-to-end checks of ninja's terminal output behavior.

    NOTE(review): the two-space indentation inside the manifest strings was
    reconstructed (the diff stripped leading whitespace) — confirm against
    upstream output_test.py.
    """

    def test_issue_1418(self):
        # Finished edges must be printed in completion order, not start order.
        self.assertEqual(run(
'''rule echo
  command = sleep $delay && echo $out
  description = echo $out
build a: echo
  delay = 3
build b: echo
  delay = 2
build c: echo
  delay = 1
''', '-j3'),
'''[1/3] echo c\x1b[K
c
[2/3] echo b\x1b[K
b
[3/3] echo a\x1b[K
a
''')

    def test_issue_1214(self):
        print_red = '''rule echo
  command = printf '\x1b[31mred\x1b[0m'
  description = echo $out
build a: echo
'''
        # Only strip color when ninja's output is piped.
        self.assertEqual(run(print_red),
'''[1/1] echo a\x1b[K
\x1b[31mred\x1b[0m
''')
        self.assertEqual(run(print_red, pipe=True),
'''[1/1] echo a
red
''')
        # Even in verbose mode, colors should still only be stripped when piped.
        self.assertEqual(run(print_red, flags='-v'),
'''[1/1] printf '\x1b[31mred\x1b[0m'
\x1b[31mred\x1b[0m
''')
        self.assertEqual(run(print_red, flags='-v', pipe=True),
'''[1/1] printf '\x1b[31mred\x1b[0m'
red
''')

        # CLICOLOR_FORCE=1 can be used to disable escape code stripping.
        env = default_env.copy()
        env['CLICOLOR_FORCE'] = '1'
        self.assertEqual(run(print_red, pipe=True, env=env),
'''[1/1] echo a
\x1b[31mred\x1b[0m
''')

    def test_pr_1685(self):
        # Running those tools without .ninja_deps and .ninja_log shouldn't fail.
        self.assertEqual(run('', flags='-t recompact'), '')
        self.assertEqual(run('', flags='-t restat'), '')

    def test_status(self):
        self.assertEqual(run(''), 'ninja: no work to do.\n')


if __name__ == '__main__':
    unittest.main()

42
misc/packaging/ninja.spec Normal file
View File

@ -0,0 +1,42 @@
# RPM spec for ninja. The ver and rel macros are substituted into this file
# by misc/packaging/rpmbuild.sh before rpmbuild is invoked.
Summary: Ninja is a small build system with a focus on speed.
Name: ninja
Version: %{ver}
Release: %{rel}%{?dist}
Group: Development/Tools
License: Apache 2.0
URL: https://github.com/ninja-build/ninja
Source0: %{name}-%{version}-%{rel}.tar.gz
BuildRoot: %{_tmppath}/%{name}-%{version}-%{rel}
BuildRequires: asciidoc

%description
Ninja is yet another build system. It takes as input the interdependencies of files (typically source code and output executables) and
orchestrates building them, quickly.

Ninja joins a sea of other build systems. Its distinguishing goal is to be fast. It is born from my work on the Chromium browser project,
which has over 30,000 source files and whose other build systems (including one built from custom non-recursive Makefiles) can take ten
seconds to start building after changing one file. Ninja is under a second.

%prep
%setup -q -n %{name}-%{version}-%{rel}

%build
# Bootstrap ninja with itself, then build the HTML manual (needs asciidoc).
echo Building..
./configure.py --bootstrap
./ninja manual

%install
mkdir -p %{buildroot}%{_bindir} %{buildroot}%{_docdir}
cp -p ninja %{buildroot}%{_bindir}/

%files
%defattr(-, root, root)
%doc COPYING README.md doc/manual.html
%{_bindir}/*

%clean
rm -rf %{buildroot}

#The changelog is built automatically from Git history
%changelog

29
misc/packaging/rpmbuild.sh Executable file
View File

@ -0,0 +1,29 @@
#!/bin/bash
# Build ninja source and binary RPMs from the current git checkout.
# The .rpm files land in the working directory; the temporary rpmbuild
# tree is removed on success.

echo Building ninja RPMs..

GITROOT=$(git rev-parse --show-toplevel)
# Quote every path expansion: a checkout under a directory containing
# spaces would otherwise word-split; also bail out if the cd fails.
cd "$GITROOT" || exit 1

VER=1.0
REL=$(git rev-parse --short HEAD)git
RPMTOPDIR=$GITROOT/rpm-build
echo "Ver: $VER, Release: $REL"

# Create tarball
mkdir -p "$RPMTOPDIR"/{SOURCES,SPECS}
git archive --format=tar --prefix=ninja-${VER}-${REL}/ HEAD | gzip -c > "$RPMTOPDIR/SOURCES/ninja-${VER}-${REL}.tar.gz"

# Convert git log to RPM's ChangeLog format (shown with rpm -qp --changelog <rpm file>)
sed -e "s/%{ver}/$VER/" -e "s/%{rel}/$REL/" misc/packaging/ninja.spec > "$RPMTOPDIR/SPECS/ninja.spec"
git log --format="* %cd %aN%n- (%h) %s%d%n" --date=local | sed -r 's/[0-9]+:[0-9]+:[0-9]+ //' >> "$RPMTOPDIR/SPECS/ninja.spec"

# Build SRC and binary RPMs
rpmbuild --quiet \
         --define "_topdir $RPMTOPDIR" \
         --define "_rpmdir $PWD" \
         --define "_srcrpmdir $PWD" \
         --define '_rpmfilename %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm' \
         -ba "$RPMTOPDIR/SPECS/ninja.spec" &&
rm -rf "$RPMTOPDIR" &&
echo Done

View File

@ -0,0 +1,272 @@
#!/usr/bin/env python
"""Writes large manifest files, for manifest parser performance testing.
The generated manifest files are (eerily) similar in appearance and size to the
ones used in the Chromium project.
Usage:
python misc/write_fake_manifests.py outdir # Will run for about 5s.
The program contains a hardcoded random seed, so it will generate the same
output every time it runs. By changing the seed, it's easy to generate many
different sets of manifest files.
"""
import argparse
import contextlib
import os
import random
import sys
import ninja_syntax
def paretoint(avg, alpha):
"""Returns a random integer that's avg on average, following a power law.
alpha determines the shape of the power curve. alpha has to be larger
than 1. The closer alpha is to 1, the higher the variation of the returned
numbers."""
return int(random.paretovariate(alpha) * avg / (alpha / (alpha - 1)))
# Based on http://neugierig.org/software/chromium/class-name-generator.html
# Based on http://neugierig.org/software/chromium/class-name-generator.html
def moar(avg_options, p_suffix):
    """Generate a plausible Chromium-style identifier like 'web_view_host_mac'.

    avg_options controls the average number of middle words; with
    probability p_suffix an OS/test suffix is appended.
    """
    starts = ['render', 'web', 'browser', 'tab', 'content', 'extension', 'url',
              'file', 'sync', 'content', 'http', 'profile']
    options = ['view', 'host', 'holder', 'container', 'impl', 'ref',
               'delegate', 'widget', 'proxy', 'stub', 'context',
               'manager', 'master', 'watcher', 'service', 'file', 'data',
               'resource', 'device', 'info', 'provider', 'internals', 'tracker',
               'api', 'layer']
    suffixes = ['win', 'mac', 'aura', 'linux', 'android', 'unittest', 'browsertest']
    # The original allows the option words to repeat as long as no two
    # consecutive ones repeat; this version forbids any repetition.
    word_count = min(paretoint(avg_options, alpha=4), 5)
    parts = [random.choice(starts)] + random.sample(options, word_count)
    if random.random() < p_suffix:
        parts.append(random.choice(suffixes))
    return '_'.join(parts)
class GenRandom(object):
    """Generates random but plausible target names, paths, sources, defines.

    All randomness comes from the module-level `random` generator, so the
    output is reproducible for a fixed seed.
    """

    def __init__(self, src_dir):
        # Seeding with None lets _unique_string's first draw always run.
        self.seen_names = set([None])
        self.seen_defines = set([None])
        self.src_dir = src_dir

    def _unique_string(self, seen, avg_options=1.3, p_suffix=0.1):
        """Draw names from moar() until one not in `seen`; record and return it."""
        s = None
        while s in seen:
            s = moar(avg_options, p_suffix)
        seen.add(s)
        return s

    def _n_unique_strings(self, n):
        """Return n strings unique within this call (not globally)."""
        seen = set([None])
        # Was xrange(): a NameError under Python 3.
        return [self._unique_string(seen, avg_options=3, p_suffix=0.4)
                for _ in range(n)]

    def target_name(self):
        """A globally-unique target name (no OS suffix)."""
        return self._unique_string(p_suffix=0, seen=self.seen_names)

    def path(self):
        """A random relative directory path, one or more components deep."""
        return os.path.sep.join([
            self._unique_string(self.seen_names, avg_options=1, p_suffix=0)
            for _ in range(1 + paretoint(0.6, alpha=4))])

    def src_obj_pairs(self, path, name):
        """(source .cc path, object file path) pairs for target `name`."""
        num_sources = paretoint(55, alpha=2) + 1
        return [(os.path.join(self.src_dir, path, s + '.cc'),
                 os.path.join('obj', path, '%s.%s.o' % (name, s)))
                for s in self._n_unique_strings(num_sources)]

    def defines(self):
        """A random list of '-DENABLE_*' preprocessor defines."""
        return [
            '-DENABLE_' + self._unique_string(self.seen_defines).upper()
            for _ in range(paretoint(20, alpha=3))]
# Target kinds: static library or executable.
LIB, EXE = 0, 1


class Target(object):
    """One build target: name, directory, sources, output file and deps."""

    def __init__(self, gen, kind):
        self.name = gen.target_name()
        self.dir_path = gen.path()
        self.ninja_file_path = os.path.join(
            'obj', self.dir_path, self.name + '.ninja')
        self.src_obj_pairs = gen.src_obj_pairs(self.dir_path, self.name)
        # Output file name. (The original wrapped these in a single-argument
        # os.path.join(), which is a no-op.)
        if kind == LIB:
            self.output = 'lib' + self.name + '.a'
        elif kind == EXE:
            self.output = self.name
        self.defines = gen.defines()
        self.deps = []
        self.kind = kind
        # ~40% of targets get an extra stamp that all their compiles depend on.
        self.has_compile_depends = random.random() < 0.4
def write_target_ninja(ninja, target, src_dir):
    """Write the per-target .ninja file for `target` via `ninja` (a Writer)."""
    compile_depends = None
    if target.has_compile_depends:
        # Optional stamp that every compile of this target depends on.
        compile_depends = os.path.join(
            'obj', target.dir_path, target.name + '.stamp')
        ninja.build(compile_depends, 'stamp', target.src_obj_pairs[0][0])
        ninja.newline()

    ninja.variable('defines', target.defines)
    ninja.variable('includes', '-I' + src_dir)
    ninja.variable('cflags', ['-Wall', '-fno-rtti', '-fno-exceptions'])
    ninja.newline()

    for src, obj in target.src_obj_pairs:
        ninja.build(obj, 'cxx', src, implicit=compile_depends)
    ninja.newline()

    deps = [dep.output for dep in target.deps]
    libs = [dep.output for dep in target.deps if dep.kind == LIB]
    if target.kind == EXE:
        ninja.variable('libs', libs)
        # NOTE(review): indentation reconstructed — ldflags assumed to be
        # EXE-only here; confirm against upstream write_fake_manifests.py.
        if sys.platform == "darwin":
            ninja.variable('ldflags', '-Wl,-pie')
    link = { LIB: 'alink', EXE: 'link'}[target.kind]
    ninja.build(target.output, link, [obj for _, obj in target.src_obj_pairs],
                implicit=deps)
def write_sources(target, root_dir):
    """Write stub .cc/.h files for every source of `target` under root_dir."""
    # Exactly one source file of an executable gets a main() definition.
    need_main = target.kind == EXE

    includes = []

    # Include siblings.
    for cc_filename, _ in target.src_obj_pairs:
        h_filename = os.path.basename(os.path.splitext(cc_filename)[0] + '.h')
        includes.append(h_filename)

    # Include deps.
    for dep in target.deps:
        for cc_filename, _ in dep.src_obj_pairs:
            h_filename = os.path.basename(
                os.path.splitext(cc_filename)[0] + '.h')
            includes.append("%s/%s" % (dep.dir_path, h_filename))

    for cc_filename, _ in target.src_obj_pairs:
        cc_path = os.path.join(root_dir, cc_filename)
        h_path = os.path.splitext(cc_path)[0] + '.h'
        namespace = os.path.basename(target.dir_path)
        class_ = os.path.splitext(os.path.basename(cc_filename))[0]
        try:
            os.makedirs(os.path.dirname(cc_path))
        except OSError:
            pass  # directory already exists

        with open(h_path, 'w') as f:
            # One struct declaration per header, matching the .cc below.
            f.write('namespace %s { struct %s { %s(); }; }' % (namespace,
                                                               class_, class_))
        with open(cc_path, 'w') as f:
            for include in includes:
                f.write('#include "%s"\n' % include)
            f.write('\n')
            f.write('namespace %s { %s::%s() {} }' % (namespace,
                                                      class_, class_))
            if need_main:
                f.write('int main(int argc, char **argv) {}\n')
                need_main = False
def write_master_ninja(master_ninja, targets):
    """Writes master build.ninja file, referencing all given subninjas."""
    master_ninja.variable('cxx', 'c++')
    master_ninja.variable('ld', '$cxx')
    if sys.platform == 'darwin':
        master_ninja.variable('alink', 'libtool -static')
    else:
        master_ninja.variable('alink', 'ar rcs')
    master_ninja.newline()

    # Cap concurrent link jobs; linking is memory-hungry.
    master_ninja.pool('link_pool', depth=4)
    master_ninja.newline()

    master_ninja.rule('cxx', description='CXX $out',
        command='$cxx -MMD -MF $out.d $defines $includes $cflags -c $in -o $out',
        depfile='$out.d', deps='gcc')
    master_ninja.rule('alink', description='ARCHIVE $out',
        command='rm -f $out && $alink -o $out $in')
    master_ninja.rule('link', description='LINK $out', pool='link_pool',
        command='$ld $ldflags -o $out $in $libs')
    master_ninja.rule('stamp', description='STAMP $out', command='touch $out')
    master_ninja.newline()

    for target in targets:
        master_ninja.subninja(target.ninja_file_path)
    master_ninja.newline()

    # Phony aliases so 'ninja <name>' works even when the output file name
    # differs from the target name (e.g. lib<name>.a).
    master_ninja.comment('Short names for targets.')
    for target in targets:
        if target.name != target.output:
            master_ninja.build(target.name, 'phony', target.output)
    master_ninja.newline()

    master_ninja.build('all', 'phony', [target.output for target in targets])
    master_ninja.default('all')
@contextlib.contextmanager
def FileWriter(path):
    """Context manager for a ninja_syntax object writing to a file.

    Creates parent directories as needed. The file is closed even when the
    with-body raises (the original leaked the handle on error, since an
    exception propagating through yield skipped the close()).
    """
    try:
        os.makedirs(os.path.dirname(path))
    except OSError:
        pass  # directory already exists; open() surfaces real errors
    f = open(path, 'w')
    try:
        yield ninja_syntax.Writer(f)
    finally:
        f.close()
def random_targets(num_targets, src_dir):
    """Return num_targets Targets: N-1 static libraries plus one executable
    that depends on all of them."""
    gen = GenRandom(src_dir)

    # N-1 static libraries, and 1 executable depending on all of them.
    # (Was xrange(): a NameError under Python 3.)
    targets = [Target(gen, LIB) for _ in range(num_targets - 1)]
    for i in range(len(targets)):
        # Sparse random deps on earlier libraries only, keeping a DAG.
        targets[i].deps = [t for t in targets[0:i] if random.random() < 0.05]

    last_target = Target(gen, EXE)
    last_target.deps = targets[:]
    last_target.src_obj_pairs = last_target.src_obj_pairs[0:10]  # Trim.
    targets.append(last_target)

    return targets
def main():
    """Parse arguments, then write all fake manifests (and optional sources)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--sources', nargs="?", const="src",
                        help='write sources to directory (relative to output directory)')
    parser.add_argument('-t', '--targets', type=int, default=1500,
                        help='number of targets (default: 1500)')
    parser.add_argument('-S', '--seed', type=int, help='random seed',
                        default=12345)
    parser.add_argument('outdir', help='output directory')
    args = parser.parse_args()
    root_dir = args.outdir

    # Fixed default seed => identical manifests on every run.
    random.seed(args.seed)

    do_write_sources = args.sources is not None
    src_dir = args.sources if do_write_sources else "src"

    targets = random_targets(args.targets, src_dir)
    for target in targets:
        with FileWriter(os.path.join(root_dir, target.ninja_file_path)) as n:
            write_target_ninja(n, target, src_dir)
        if do_write_sources:
            write_sources(target, root_dir)

    with FileWriter(os.path.join(root_dir, 'build.ninja')) as master_ninja:
        # Wider wrap column for the long rule command lines.
        master_ninja.width = 120
        write_master_ninja(master_ninja, targets)


if __name__ == '__main__':
    sys.exit(main())

72
misc/zsh-completion Normal file
View File

@ -0,0 +1,72 @@
#compdef ninja
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Add the following to your .zshrc to tab-complete ninja targets
# fpath=(path/to/ninja/misc/zsh-completion $fpath)
# Print the available build targets, honoring -C (build dir) and -f
# (manifest file) already typed on the command line.
__get_targets() {
  dir="."
  if [ -n "${opt_args[-C]}" ];
  then
    eval dir="${opt_args[-C]}"
  fi
  file="build.ninja"
  if [ -n "${opt_args[-f]}" ];
  then
    eval file="${opt_args[-f]}"
  fi
  targets_command="ninja -f \"${file}\" -C \"${dir}\" -t targets all"
  # Target lines look like "name: rule"; keep only the name.
  eval ${targets_command} 2>/dev/null | cut -d: -f1
}

# First word of each 'ninja -t list' line, minus the header line.
__get_tools() {
  ninja -t list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2
}

# First word of each 'ninja -d list' line, minus header and trailing line.
__get_modes() {
  ninja -d list 2>/dev/null | while read -r a b; do echo $a; done | tail -n +2 | sed '$d'
}

__modes() {
  local -a modes
  modes=(${(fo)"$(__get_modes)"})
  _describe 'modes' modes
}

__tools() {
  local -a tools
  tools=(${(fo)"$(__get_tools)"})
  _describe 'tools' tools
}

__targets() {
  local -a targets
  targets=(${(fo)"$(__get_targets)"})
  _describe 'targets' targets
}

_arguments \
  {-h,--help}'[Show help]' \
  '--version[Print ninja version]' \
  '-C+[Change to directory before doing anything else]:directories:_directories' \
  '-f+[Specify input build file (default=build.ninja)]:files:_files' \
  '-j+[Run N jobs in parallel (default=number of CPUs available)]:number of jobs' \
  '-l+[Do not start new jobs if the load average is greater than N]:number of jobs' \
  '-k+[Keep going until N jobs fail (default=1)]:number of jobs' \
  '-n[Dry run (do not run commands but act like they succeeded)]' \
  '-v[Show all command lines while building]' \
  '-d+[Enable debugging (use -d list to list modes)]:modes:__modes' \
  '-t+[Run a subtool (use -t list to list subtools)]:tools:__tools' \
  '*::targets:__targets'

78
src/browse.cc Normal file
View File

@ -0,0 +1,78 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "browse.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <vector>
#include "build/browse_py.h"
// Launch the embedded Python web server (kBrowsePy) for 'ninja -t browse':
// the script is delivered to the interpreter over a pipe wired to its stdin.
// Note: `state` is currently unused here.
void RunBrowsePython(State* state, const char* ninja_command,
                     const char* input_file, int argc, char* argv[]) {
  // Fork off a Python process and have it run our code via its stdin.
  // (Actually the Python process becomes the parent.)
  int pipefd[2];
  if (pipe(pipefd) < 0) {
    perror("ninja: pipe");
    return;
  }

  pid_t pid = fork();
  if (pid < 0) {
    perror("ninja: fork");
    return;
  }

  if (pid > 0) {  // Parent.
    close(pipefd[1]);
    do {
      // Make the read end of the pipe stdin, so the exec'd "python -"
      // reads its script from it.
      if (dup2(pipefd[0], 0) < 0) {
        perror("ninja: dup2");
        break;
      }

      std::vector<const char *> command;
      command.push_back(NINJA_PYTHON);
      command.push_back("-");
      command.push_back("--ninja-command");
      command.push_back(ninja_command);
      command.push_back("-f");
      command.push_back(input_file);
      for (int i = 0; i < argc; i++) {
        command.push_back(argv[i]);
      }
      command.push_back(NULL);  // execvp needs a NULL-terminated argv.
      execvp(command[0], (char**)&command[0]);
      if (errno == ENOENT) {
        printf("ninja: %s is required for the browse tool\n", NINJA_PYTHON);
      } else {
        perror("ninja: execvp");
      }
    } while (false);
    _exit(1);  // Only reached if exec failed; skip atexit handlers.
  } else {  // Child.
    close(pipefd[0]);

    // Write the script file into the stdin of the Python process.
    ssize_t len = write(pipefd[1], kBrowsePy, sizeof(kBrowsePy));
    if (len < (ssize_t)sizeof(kBrowsePy))
      perror("ninja: write");
    close(pipefd[1]);
    exit(0);
  }
}

28
src/browse.h Normal file
View File

@ -0,0 +1,28 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_BROWSE_H_
#define NINJA_BROWSE_H_

struct State;

/// Run in "browse" mode, which execs a Python webserver.
/// \a ninja_command is the command used to invoke ninja.
/// \a input_file is the path of the build manifest to browse.
/// \a argc is the number of arguments to be passed to the Python script.
/// \a argv are the arguments to be passed to the Python script.
/// This function does not return if it runs successfully.
void RunBrowsePython(State* state, const char* ninja_command,
                     const char* input_file, int argc, char* argv[]);

#endif  // NINJA_BROWSE_H_

233
src/browse.py Executable file
View File

@ -0,0 +1,233 @@
#!/usr/bin/env python
#
# Copyright 2001 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple web server for browsing dependency graph data.
This script is inlined into the final executable and spawned by
it when needed.
"""
from __future__ import print_function
try:
import http.server as httpserver
import socketserver
except ImportError:
import BaseHTTPServer as httpserver
import SocketServer as socketserver
import argparse
import os
import socket
import subprocess
import sys
import webbrowser
if sys.version_info >= (3, 2):
from html import escape
else:
from cgi import escape
try:
from urllib.request import unquote
except ImportError:
from urllib2 import unquote
from collections import namedtuple
# One browsable view of a build node:
#   inputs:  list of (path, dep_type) pairs; dep_type is None, 'implicit',
#            or 'order-only' (see parse()).
#   rule:    name of the rule of the edge producing this node, or None.
#   target:  the node's own path.
#   outputs: union of outputs across all edges that consume this node.
Node = namedtuple('Node', ['inputs', 'rule', 'target', 'outputs'])

# Ideally we'd allow you to navigate to a build edge or a build node,
# with appropriate views for each.  But there's no way to *name* a build
# edge so we can only display nodes.
#
# For a given node, it has at most one input edge, which has n
# different inputs.  This becomes node.inputs.  (We leave out the
# outputs of the input edge due to what follows.)  The node can have
# multiple dependent output edges.  Rather than attempting to display
# those, they are summarized by taking the union of all their outputs.
#
# This means there's no single view that shows you all inputs and outputs
# of an edge.  But I think it's less confusing than alternatives.
def match_strip(line, prefix):
    """Strip |prefix| from |line| when present.

    Returns a (matched, rest) pair: (True, line minus the prefix) if |line|
    starts with |prefix|, otherwise (False, the original line).
    """
    matched = line.startswith(prefix)
    rest = line[len(prefix):] if matched else line
    return (matched, rest)
def html_escape(text):
    """Escape |text| for embedding in HTML, including quote characters."""
    escaped = escape(text, quote=True)
    return escaped
def parse(text):
    """Parse 'ninja -t query <target>' output into a Node.

    Expected shape (indentation is part of the format):
        target:
          input: rule_name
            plain_input
            | implicit_input
            || order_only_input
          outputs:
            output_path
    A section that is absent (or input truncated mid-stream) simply leaves
    the corresponding Node fields at their defaults.
    """
    lines = iter(text.split('\n'))

    target = None
    rule = None
    inputs = []
    outputs = []

    try:
        target = next(lines)[:-1]  # strip trailing colon

        line = next(lines)
        (match, rule) = match_strip(line, '  input: ')
        if match:
            # Consume the 4-space-indented input list; loop ends on the
            # first line that is not indented as an input.
            (match, line) = match_strip(next(lines), '    ')
            while match:
                type = None
                (match, line) = match_strip(line, '| ')
                if match:
                    type = 'implicit'
                (match, line) = match_strip(line, '|| ')
                if match:
                    type = 'order-only'
                inputs.append((line, type))
                (match, line) = match_strip(next(lines), '    ')

        # |line| now holds the first non-input line; check for outputs.
        match, _ = match_strip(line, '  outputs:')
        if match:
            (match, line) = match_strip(next(lines), '    ')
            while match:
                outputs.append(line)
                (match, line) = match_strip(next(lines), '    ')
    except StopIteration:
        # Ran off the end of the text; return whatever was gathered.
        pass

    return Node(inputs, rule, target, outputs)
def create_page(body):
    """Return a complete HTML page: the shared CSS prologue plus |body|."""
    prologue = '''<!DOCTYPE html>
<style>
body {
    font-family: sans;
    font-size: 0.8em;
    margin: 4ex;
}
h1 {
    font-weight: normal;
    font-size: 140%;
    text-align: center;
    margin: 0;
}
h2 {
    font-weight: normal;
    font-size: 120%;
}
tt {
    font-family: WebKitHack, monospace;
    white-space: nowrap;
}
.filelist {
  -webkit-columns: auto 2;
}
</style>
'''
    return prologue + body
def generate_html(node):
    """Render a Node (see parse()) as the HTML body for its page.

    Inputs and outputs become links of the form '?path' which do_GET
    resolves back to node pages.
    """
    document = ['<h1><tt>%s</tt></h1>' % html_escape(node.target)]

    if node.inputs:
        document.append('<h2>target is built using rule <tt>%s</tt> of</h2>' %
                        html_escape(node.rule))
        # Fix: the original re-checked len(node.inputs) > 0 here, which is
        # always true inside `if node.inputs:` — the redundant branch is gone.
        document.append('<div class=filelist>')
        for input, type in sorted(node.inputs):
            extra = ''
            if type:
                # Annotate implicit / order-only dependencies.
                extra = ' (%s)' % html_escape(type)
            document.append('<tt><a href="?%s">%s</a>%s</tt><br>' %
                            (html_escape(input), html_escape(input), extra))
        document.append('</div>')

    if node.outputs:
        document.append('<h2>dependent edges build:</h2>')
        document.append('<div class=filelist>')
        for output in sorted(node.outputs):
            document.append('<tt><a href="?%s">%s</a></tt><br>' %
                            (html_escape(output), html_escape(output)))
        document.append('</div>')

    return '\n'.join(document)
def ninja_dump(target):
    """Run 'ninja -t query <target>' and return (stdout, stderr, returncode).

    Uses the module-global |args| for the ninja binary path (--ninja-command)
    and the manifest path (-f).
    """
    cmd = [args.ninja_command, '-f', args.f, '-t', 'query', target]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            universal_newlines=True)
    return proc.communicate() + (proc.returncode,)
class RequestHandler(httpserver.BaseHTTPRequestHandler):
    """Serves one node page per request; the query string names the target."""

    def do_GET(self):
        assert self.path[0] == '/'
        target = unquote(self.path[1:])

        if target == '':
            # Bare URL: redirect to the initial target's page.
            self.send_response(302)
            self.send_header('Location', '?' + args.initial_target)
            self.end_headers()
            return

        if not target.startswith('?'):
            # Only '?target' URLs are served; anything else is 404.
            self.send_response(404)
            self.end_headers()
            return
        target = target[1:]

        ninja_output, ninja_error, exit_code = ninja_dump(target)
        if exit_code == 0:
            page_body = generate_html(parse(ninja_output.strip()))
        else:
            # Relay ninja's error message.
            page_body = '<h1><tt>%s</tt></h1>' % html_escape(ninja_error)

        self.send_response(200)
        self.end_headers()
        self.wfile.write(create_page(page_body).encode('utf-8'))

    def log_message(self, format, *args):
        pass  # Swallow console spam.
# Command-line interface.  The --ninja-command/-f flags are forwarded by
# ninja's C++ side (RunBrowsePython in src/browse.cc).
parser = argparse.ArgumentParser(prog='ninja -t browse')
parser.add_argument('--port', '-p', default=8000, type=int,
    help='Port number to use (default %(default)d)')
parser.add_argument('--hostname', '-a', default='localhost', type=str,
    help='Hostname to bind to (default %(default)s)')
parser.add_argument('--no-browser', action='store_true',
    help='Do not open a webbrowser on startup.')

parser.add_argument('--ninja-command', default='ninja',
    help='Path to ninja binary (default %(default)s)')
parser.add_argument('-f', default='build.ninja',
    help='Path to build.ninja file (default %(default)s)')
parser.add_argument('initial_target', default='all', nargs='?',
    help='Initial target to show (default %(default)s)')

class HTTPServer(socketserver.ThreadingMixIn, httpserver.HTTPServer):
    # terminate server immediately when Python exits.
    daemon_threads = True

args = parser.parse_args()
port = args.port
hostname = args.hostname
httpd = HTTPServer((hostname,port), RequestHandler)
try:
    # An empty hostname binds to all interfaces; report the machine name.
    if hostname == "":
        hostname = socket.gethostname()
    print('Web server running on %s:%d, ctl-C to abort...' % (hostname,port) )
    print('Web server pid %d' % os.getpid(), file=sys.stderr )
    if not args.no_browser:
        webbrowser.open_new('http://%s:%s' % (hostname, port) )
    # Serve until interrupted; daemon_threads lets in-flight requests die
    # with the process.
    httpd.serve_forever()
except KeyboardInterrupt:
    print()
    pass  # Swallow console spam.

1136
src/build.cc Normal file

File diff suppressed because it is too large Load Diff

338
src/build.h Normal file
View File

@ -0,0 +1,338 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_BUILD_H_
#define NINJA_BUILD_H_
#include <cstdio>
#include <map>
#include <memory>
#include <queue>
#include <set>
#include <string>
#include <vector>
#include "depfile_parser.h"
#include "graph.h" // XXX needed for DependencyScan; should rearrange.
#include "exit_status.h"
#include "line_printer.h"
#include "metrics.h"
#include "util.h" // int64_t
struct BuildLog;
struct BuildStatus;
struct Builder;
struct DiskInterface;
struct Edge;
struct Node;
struct State;
/// Plan stores the state of a build plan: what we intend to build,
/// which steps we're ready to execute.
struct Plan {
  Plan(Builder* builder = NULL);

  /// Add a target to our plan (including all its dependencies).
  /// Returns false if we don't need to build this target; may
  /// fill in |err| with an error message if there's a problem.
  bool AddTarget(const Node* node, string* err);

  // Pop a ready edge off the queue of edges to build.
  // Returns NULL if there's no work to do.
  Edge* FindWork();

  /// Returns true if there's more work to be done.
  bool more_to_do() const { return wanted_edges_ > 0 && command_edges_ > 0; }

  /// Dumps the current state of the plan.
  void Dump() const;

  /// Outcome of running an edge, passed to EdgeFinished().
  enum EdgeResult {
    kEdgeFailed,
    kEdgeSucceeded
  };

  /// Mark an edge as done building (whether it succeeded or failed).
  /// If any of the edge's outputs are dyndep bindings of their dependents,
  /// this loads dynamic dependencies from the nodes' paths.
  /// Returns 'false' if loading dyndep info fails and 'true' otherwise.
  bool EdgeFinished(Edge* edge, EdgeResult result, string* err);

  /// Clean the given node during the build.
  /// Return false on error.
  bool CleanNode(DependencyScan* scan, Node* node, string* err);

  /// Number of edges with commands to run.
  int command_edge_count() const { return command_edges_; }

  /// Reset state. Clears want and ready sets.
  void Reset();

  /// Update the build plan to account for modifications made to the graph
  /// by information loaded from a dyndep file.
  bool DyndepsLoaded(DependencyScan* scan, const Node* node,
                     const DyndepFile& ddf, string* err);
private:
  bool RefreshDyndepDependents(DependencyScan* scan, const Node* node, string* err);
  void UnmarkDependents(const Node* node, set<Node*>* dependents);
  bool AddSubTarget(const Node* node, const Node* dependent, string* err,
                    set<Edge*>* dyndep_walk);

  /// Update plan with knowledge that the given node is up to date.
  /// If the node is a dyndep binding on any of its dependents, this
  /// loads dynamic dependencies from the node's path.
  /// Returns 'false' if loading dyndep info fails and 'true' otherwise.
  bool NodeFinished(Node* node, string* err);

  /// Enumerate possible steps we want for an edge.
  enum Want
  {
    /// We do not want to build the edge, but we might want to build one of
    /// its dependents.
    kWantNothing,
    /// We want to build the edge, but have not yet scheduled it.
    kWantToStart,
    /// We want to build the edge, have scheduled it, and are waiting
    /// for it to complete.
    kWantToFinish
  };

  void EdgeWanted(const Edge* edge);
  bool EdgeMaybeReady(map<Edge*, Want>::iterator want_e, string* err);

  /// Submits a ready edge as a candidate for execution.
  /// The edge may be delayed from running, for example if it's a member of a
  /// currently-full pool.
  void ScheduleWork(map<Edge*, Want>::iterator want_e);

  /// Keep track of which edges we want to build in this plan.  If this map does
  /// not contain an entry for an edge, we do not want to build the entry or its
  /// dependents.  If it does contain an entry, the enumeration indicates what
  /// we want for the edge.
  map<Edge*, Want> want_;

  /// Edges ready to be run (see ScheduleWork()).
  set<Edge*> ready_;

  Builder* builder_;

  /// Total number of edges that have commands (not phony).
  int command_edges_;

  /// Total remaining number of wanted edges.
  int wanted_edges_;
};
/// CommandRunner is an interface that wraps running the build
/// subcommands.  This allows tests to abstract out running commands.
/// RealCommandRunner is an implementation that actually runs commands.
struct CommandRunner {
  virtual ~CommandRunner() {}
  /// Whether the runner can accept another command right now.
  virtual bool CanRunMore() const = 0;
  /// Start running |edge|'s command.
  virtual bool StartCommand(Edge* edge) = 0;

  /// The result of waiting for a command.
  struct Result {
    Result() : edge(NULL) {}
    Edge* edge;
    ExitStatus status;
    string output;
    bool success() const { return status == ExitSuccess; }
  };
  /// Wait for a command to complete, or return false if interrupted.
  virtual bool WaitForCommand(Result* result) = 0;

  /// Edges currently in flight; default implementation reports none.
  virtual vector<Edge*> GetActiveEdges() { return vector<Edge*>(); }
  virtual void Abort() {}
};
/// Options (e.g. verbosity, parallelism) passed to a build.
struct BuildConfig {
  BuildConfig() : verbosity(NORMAL), dry_run(false), parallelism(1),
                  failures_allowed(1), max_load_average(-0.0f) {}

  enum Verbosity {
    NORMAL,
    QUIET,  // No output -- used when testing.
    VERBOSE
  };
  Verbosity verbosity;
  /// If true, don't actually run commands (presumably ninja -n;
  /// consumers of this flag are outside this header).
  bool dry_run;
  /// Number of commands to allow in flight at once (defaults to 1).
  int parallelism;
  /// Failures tolerated before stopping (defaults to 1).
  int failures_allowed;
  /// The maximum load average we must not exceed.  A negative value
  /// means that we do not have any limit.
  double max_load_average;
  DepfileParserOptions depfile_parser_options;
};
/// Builder wraps the build process: starting commands, updating status.
struct Builder {
  Builder(State* state, const BuildConfig& config,
          BuildLog* build_log, DepsLog* deps_log,
          DiskInterface* disk_interface);
  ~Builder();

  /// Clean up after interrupted commands by deleting output files.
  void Cleanup();

  /// Name-based variant of AddTarget() below.
  Node* AddTarget(const string& name, string* err);

  /// Add a target to the build, scanning dependencies.
  /// @return false on error.
  bool AddTarget(Node* target, string* err);

  /// Returns true if the build targets are already up to date.
  bool AlreadyUpToDate() const;

  /// Run the build.  Returns false on error.
  /// It is an error to call this function when AlreadyUpToDate() is true.
  bool Build(string* err);

  bool StartEdge(Edge* edge, string* err);

  /// Update status ninja logs following a command termination.
  /// @return false if the build can not proceed further due to a fatal error.
  bool FinishCommand(CommandRunner::Result* result, string* err);

  /// Used for tests.
  void SetBuildLog(BuildLog* log) {
    scan_.set_build_log(log);
  }

  /// Load the dyndep information provided by the given node.
  bool LoadDyndeps(Node* node, string* err);

  State* state_;
  const BuildConfig& config_;
  Plan plan_;
#if __cplusplus < 201703L
  auto_ptr<CommandRunner> command_runner_;
#else
  unique_ptr<CommandRunner> command_runner_;  // auto_ptr was removed in C++17.
#endif
  BuildStatus* status_;

private:
  bool ExtractDeps(CommandRunner::Result* result, const string& deps_type,
                   const string& deps_prefix, vector<Node*>* deps_nodes,
                   string* err);

  DiskInterface* disk_interface_;
  DependencyScan scan_;

  // Unimplemented copy ctor and operator= ensure we don't copy the auto_ptr.
  Builder(const Builder &other);        // DO NOT IMPLEMENT
  void operator=(const Builder &other); // DO NOT IMPLEMENT
};
/// Tracks the status of a build: completion fraction, printing updates.
struct BuildStatus {
  explicit BuildStatus(const BuildConfig& config);

  /// Progress callbacks, fired as the plan and individual edges advance.
  void PlanHasTotalEdges(int total);
  void BuildEdgeStarted(const Edge* edge);
  void BuildEdgeFinished(Edge* edge, bool success, const string& output,
                         int* start_time, int* end_time);
  void BuildLoadDyndeps();
  void BuildStarted();
  void BuildFinished();

  enum EdgeStatus {
    kEdgeStarted,
    kEdgeFinished,
  };

  /// Format the progress status string by replacing the placeholders.
  /// See the user manual for more information about the available
  /// placeholders.
  /// @param progress_status_format The format of the progress status.
  /// @param status The status of the edge.
  string FormatProgressStatus(const char* progress_status_format,
                              EdgeStatus status) const;

private:
  void PrintStatus(const Edge* edge, EdgeStatus status);

  const BuildConfig& config_;

  /// Time the build started.
  int64_t start_time_millis_;

  int started_edges_, finished_edges_, total_edges_;

  /// Map of running edge to time the edge started running.
  typedef map<const Edge*, int> RunningEdgeMap;
  RunningEdgeMap running_edges_;

  /// Prints progress output.
  LinePrinter printer_;

  /// The custom progress status format to use.
  const char* progress_status_format_;

  /// Print |rate| into |buf| using |format|, or "?" when the rate is
  /// still unknown (-1).
  template<size_t S>
  void SnprintfRate(double rate, char(&buf)[S], const char* format) const {
    if (rate == -1)
      snprintf(buf, S, "?");
    else
      snprintf(buf, S, format, rate);
  }

  /// Overall edges-per-elapsed-second rate since Restart().
  struct RateInfo {
    RateInfo() : rate_(-1) {}

    void Restart() { stopwatch_.Restart(); }
    double Elapsed() const { return stopwatch_.Elapsed(); }
    double rate() { return rate_; }

    void UpdateRate(int edges) {
      if (edges && stopwatch_.Elapsed())
        rate_ = edges / stopwatch_.Elapsed();
    }

  private:
    double rate_;
    Stopwatch stopwatch_;
  };

  /// Rate over a sliding window of the timestamps of the last N updates.
  struct SlidingRateInfo {
    SlidingRateInfo(int n) : rate_(-1), N(n), last_update_(-1) {}

    void Restart() { stopwatch_.Restart(); }
    double rate() { return rate_; }

    void UpdateRate(int update_hint) {
      // |update_hint| deduplicates repeated calls for the same count.
      if (update_hint == last_update_)
        return;
      last_update_ = update_hint;

      if (times_.size() == N)
        times_.pop();
      times_.push(stopwatch_.Elapsed());
      if (times_.back() != times_.front())
        rate_ = times_.size() / (times_.back() - times_.front());
    }

  private:
    double rate_;
    Stopwatch stopwatch_;
    const size_t N;
    queue<double> times_;
    int last_update_;
  };

  mutable RateInfo overall_rate_;
  mutable SlidingRateInfo current_rate_;
};
#endif // NINJA_BUILD_H_

491
src/build_log.cc Normal file
View File

@ -0,0 +1,491 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// On AIX, inttypes.h gets indirectly included by build_log.h.
// It's easiest just to ask for the printf format macros right away.
#ifndef _WIN32
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#endif
#include "build_log.h"
#include "disk_interface.h"
#include <cassert>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#ifndef _WIN32
#include <inttypes.h>
#include <unistd.h>
#endif
#include "build.h"
#include "graph.h"
#include "metrics.h"
#include "util.h"
#if defined(_MSC_VER) && (_MSC_VER < 1800)
#define strtoll _strtoi64
#endif
// Implementation details:
// Each run's log appends to the log file.
// To load, we run through all log entries in series, throwing away
// older runs.
// Once the number of redundant entries exceeds a threshold, we write
// out a new file and replace the existing one with it.
namespace {

// On-disk log header; %d is filled with the version constants below.
const char kFileSignature[] = "# ninja log v%d\n";
const int kOldestSupportedVersion = 4;
const int kCurrentVersion = 5;

// 64bit MurmurHash2, by Austin Appleby
#if defined(_MSC_VER)
#define BIG_CONSTANT(x) (x)
#else   // defined(_MSC_VER)
#define BIG_CONSTANT(x) (x##LLU)
#endif  // !defined(_MSC_VER)
// Hash |len| bytes at |key|; used below to fingerprint command lines.
inline
uint64_t MurmurHash64A(const void* key, size_t len) {
  static const uint64_t seed = 0xDECAFBADDECAFBADull;
  const uint64_t m = BIG_CONSTANT(0xc6a4a7935bd1e995);
  const int r = 47;
  uint64_t h = seed ^ (len * m);
  const unsigned char* data = (const unsigned char*)key;
  // Mix the input eight bytes at a time (memcpy avoids alignment issues).
  while (len >= 8) {
    uint64_t k;
    memcpy(&k, data, sizeof k);
    k *= m;
    k ^= k >> r;
    k *= m;
    h ^= k;
    h *= m;
    data += 8;
    len -= 8;
  }
  // Fold in the remaining 0-7 tail bytes.
  switch (len & 7)
  {
  case 7: h ^= uint64_t(data[6]) << 48;
          NINJA_FALLTHROUGH;
  case 6: h ^= uint64_t(data[5]) << 40;
          NINJA_FALLTHROUGH;
  case 5: h ^= uint64_t(data[4]) << 32;
          NINJA_FALLTHROUGH;
  case 4: h ^= uint64_t(data[3]) << 24;
          NINJA_FALLTHROUGH;
  case 3: h ^= uint64_t(data[2]) << 16;
          NINJA_FALLTHROUGH;
  case 2: h ^= uint64_t(data[1]) << 8;
          NINJA_FALLTHROUGH;
  case 1: h ^= uint64_t(data[0]);
          h *= m;
  };
  // Final avalanche.
  h ^= h >> r;
  h *= m;
  h ^= h >> r;
  return h;
}
#undef BIG_CONSTANT

}  // namespace
// static
uint64_t BuildLog::LogEntry::HashCommand(StringPiece command) {
  return MurmurHash64A(command.str_, command.len_);
}

// Entry with only the output path known; remaining fields filled in later.
BuildLog::LogEntry::LogEntry(const string& output)
  : output(output) {}

BuildLog::LogEntry::LogEntry(const string& output, uint64_t command_hash,
  int start_time, int end_time, TimeStamp restat_mtime)
  : output(output), command_hash(command_hash),
    start_time(start_time), end_time(end_time), mtime(restat_mtime)
{}

BuildLog::BuildLog()
  : log_file_(NULL), needs_recompaction_(false) {}

BuildLog::~BuildLog() {
  // Flushes and closes any open log file.
  Close();
}
// Prepare |path| for appending.  Recompacts first if Load() flagged too
// much redundant data.  The file itself is opened lazily by
// OpenForWriteIfNeeded() on the first write.
bool BuildLog::OpenForWrite(const string& path, const BuildLogUser& user,
                            string* err) {
  if (needs_recompaction_) {
    if (!Recompact(path, user, err))
      return false;
  }

  assert(!log_file_);
  log_file_path_ = path;  // we don't actually open the file right now, but will
                          // do so on the first write attempt
  return true;
}
// Record one log entry per output of |edge|: update (or create) the
// in-memory entry, then append and flush a line to the log file.
// Returns false on open or write failure.
bool BuildLog::RecordCommand(Edge* edge, int start_time, int end_time,
                             TimeStamp mtime) {
  string command = edge->EvaluateCommand(true);
  uint64_t command_hash = LogEntry::HashCommand(command);
  for (vector<Node*>::iterator out = edge->outputs_.begin();
       out != edge->outputs_.end(); ++out) {
    const string& path = (*out)->path();
    // Reuse any existing entry for this output path.
    Entries::iterator i = entries_.find(path);
    LogEntry* log_entry;
    if (i != entries_.end()) {
      log_entry = i->second;
    } else {
      log_entry = new LogEntry(path);
      entries_.insert(Entries::value_type(log_entry->output, log_entry));
    }
    log_entry->command_hash = command_hash;
    log_entry->start_time = start_time;
    log_entry->end_time = end_time;
    log_entry->mtime = mtime;

    if (!OpenForWriteIfNeeded()) {
      return false;
    }
    if (log_file_) {
      if (!WriteEntry(log_file_, *log_entry))
        return false;
      // Flush per entry so the log survives an interrupted build.
      if (fflush(log_file_) != 0) {
        return false;
      }
    }
  }
  return true;
}
void BuildLog::Close() {
  OpenForWriteIfNeeded();  // create the file even if nothing has been recorded
  if (log_file_)
    fclose(log_file_);
  log_file_ = NULL;
}

// Lazily open the file remembered by OpenForWrite().  Returns true when no
// open is pending or the open (and version-header write) succeeded; on
// false, errno is left set by the failing stdio call.
bool BuildLog::OpenForWriteIfNeeded() {
  if (log_file_path_.empty()) {
    return true;
  }
  log_file_ = fopen(log_file_path_.c_str(), "ab");
  if (!log_file_) {
    return false;
  }
  // Line-buffered so each completed entry reaches the OS promptly.
  setvbuf(log_file_, NULL, _IOLBF, BUFSIZ);
  SetCloseOnExec(fileno(log_file_));

  // Opening a file in append mode doesn't set the file pointer to the file's
  // end on Windows. Do that explicitly.
  fseek(log_file_, 0, SEEK_END);

  // A brand-new (empty) log gets the version signature first.
  if (ftell(log_file_) == 0) {
    if (fprintf(log_file_, kFileSignature, kCurrentVersion) < 0) {
      return false;
    }
  }
  // An empty path marks the lazy open as completed.
  log_file_path_.clear();
  return true;
}
/// Buffered line scanner over a FILE*, used by BuildLog::Load().
/// Reads in large chunks and hands out pointers into the internal buffer,
/// so returned lines are only valid until the next ReadLine() call.
struct LineReader {
  explicit LineReader(FILE* file)
    : file_(file), buf_end_(buf_), line_start_(buf_), line_end_(NULL) {
      memset(buf_, 0, sizeof(buf_));
  }

  // Reads a \n-terminated line from the file passed to the constructor.
  // On return, *line_start points to the beginning of the next line, and
  // *line_end points to the \n at the end of the line. If no newline is seen
  // in a fixed buffer size, *line_end is set to NULL. Returns false on EOF.
  bool ReadLine(char** line_start, char** line_end) {
    if (line_start_ >= buf_end_ || !line_end_) {
      // Buffer empty, refill.
      size_t size_read = fread(buf_, 1, sizeof(buf_), file_);
      if (!size_read)
        return false;
      line_start_ = buf_;
      buf_end_ = buf_ + size_read;
    } else {
      // Advance to next line in buffer.
      line_start_ = line_end_ + 1;
    }

    line_end_ = (char*)memchr(line_start_, '\n', buf_end_ - line_start_);
    if (!line_end_) {
      // No newline. Move rest of data to start of buffer, fill rest.
      size_t already_consumed = line_start_ - buf_;
      size_t size_rest = (buf_end_ - buf_) - already_consumed;
      memmove(buf_, line_start_, size_rest);

      size_t read = fread(buf_ + size_rest, 1, sizeof(buf_) - size_rest, file_);
      buf_end_ = buf_ + size_rest + read;
      line_start_ = buf_;
      line_end_ = (char*)memchr(line_start_, '\n', buf_end_ - line_start_);
    }

    *line_start = line_start_;
    *line_end = line_end_;
    return true;
  }

 private:
  FILE* file_;
  char buf_[256 << 10];      // 256 KiB read buffer.
  char* buf_end_;            // Points one past the last valid byte in |buf_|.

  char* line_start_;
  // Points at the next \n in buf_ after line_start, or NULL.
  char* line_end_;
};
// Load the on-disk log at |path| into |entries_|, keeping only the newest
// record per output.  Tab-separated v4/v5 fields are split in place by
// writing NULs into the read buffer.  Flags |needs_recompaction_| when the
// log is stale-versioned or mostly redundant.
LoadStatus BuildLog::Load(const string& path, string* err) {
  METRIC_RECORD(".ninja_log load");
  FILE* file = fopen(path.c_str(), "r");
  if (!file) {
    if (errno == ENOENT)
      return LOAD_NOT_FOUND;
    *err = strerror(errno);
    return LOAD_ERROR;
  }

  int log_version = 0;
  int unique_entry_count = 0;
  int total_entry_count = 0;

  LineReader reader(file);
  char* line_start = 0;
  char* line_end = 0;
  while (reader.ReadLine(&line_start, &line_end)) {
    if (!log_version) {
      // First line should carry the signature; sscanf leaves 0 otherwise.
      sscanf(line_start, kFileSignature, &log_version);

      if (log_version < kOldestSupportedVersion) {
        *err = ("build log version invalid, perhaps due to being too old; "
                "starting over");
        fclose(file);
        unlink(path.c_str());
        // Don't report this as a failure.  An empty build log will cause
        // us to rebuild the outputs anyway.
        return LOAD_SUCCESS;
      }
    }

    // If no newline was found in this chunk, read the next.
    if (!line_end)
      continue;

    const char kFieldSeparator = '\t';

    // Field 1: start time (ms).  Malformed lines are silently skipped.
    char* start = line_start;
    char* end = (char*)memchr(start, kFieldSeparator, line_end - start);
    if (!end)
      continue;
    *end = 0;

    int start_time = 0, end_time = 0;
    TimeStamp restat_mtime = 0;

    start_time = atoi(start);
    start = end + 1;

    // Field 2: end time (ms).
    end = (char*)memchr(start, kFieldSeparator, line_end - start);
    if (!end)
      continue;
    *end = 0;
    end_time = atoi(start);
    start = end + 1;

    // Field 3: output mtime.
    end = (char*)memchr(start, kFieldSeparator, line_end - start);
    if (!end)
      continue;
    *end = 0;
    restat_mtime = strtoll(start, NULL, 10);
    start = end + 1;

    // Field 4: output path (the lookup key).
    end = (char*)memchr(start, kFieldSeparator, line_end - start);
    if (!end)
      continue;
    string output = string(start, end - start);
    start = end + 1;
    end = line_end;

    // Later entries for the same output overwrite earlier ones.
    LogEntry* entry;
    Entries::iterator i = entries_.find(output);
    if (i != entries_.end()) {
      entry = i->second;
    } else {
      entry = new LogEntry(output);
      entries_.insert(Entries::value_type(entry->output, entry));
      ++unique_entry_count;
    }
    ++total_entry_count;

    entry->start_time = start_time;
    entry->end_time = end_time;
    entry->mtime = restat_mtime;
    // Field 5: v5 stores the command hash in hex; v4 stored the command
    // text itself, so hash it now.
    if (log_version >= 5) {
      char c = *end; *end = '\0';
      entry->command_hash = (uint64_t)strtoull(start, NULL, 16);
      *end = c;
    } else {
      entry->command_hash = LogEntry::HashCommand(StringPiece(start,
                                                              end - start));
    }
  }
  fclose(file);

  if (!line_start) {
    return LOAD_SUCCESS; // file was empty
  }

  // Decide whether it's time to rebuild the log:
  // - if we're upgrading versions
  // - if it's getting large
  int kMinCompactionEntryCount = 100;
  int kCompactionRatio = 3;
  if (log_version < kCurrentVersion) {
    needs_recompaction_ = true;
  } else if (total_entry_count > kMinCompactionEntryCount &&
             total_entry_count > unique_entry_count * kCompactionRatio) {
    needs_recompaction_ = true;
  }

  return LOAD_SUCCESS;
}
// Return the recorded entry for output |path|, or NULL if none was logged.
BuildLog::LogEntry* BuildLog::LookupByOutput(const string& path) {
  Entries::iterator it = entries_.find(path);
  return it == entries_.end() ? NULL : it->second;
}
// Serialize |entry| as one tab-separated line:
//   start_time \t end_time \t mtime \t output \t command_hash(hex)
// Returns false if fprintf reports an error.
bool BuildLog::WriteEntry(FILE* f, const LogEntry& entry) {
  return fprintf(f, "%d\t%d\t%" PRId64 "\t%s\t%" PRIx64 "\n",
          entry.start_time, entry.end_time, entry.mtime,
          entry.output.c_str(), entry.command_hash) > 0;
}
// Rewrite the log at |path|, dropping entries for outputs that |user|
// reports as no longer in the manifest.  Writes to a ".recompact" temp
// file first, then replaces the original atomically via rename().
bool BuildLog::Recompact(const string& path, const BuildLogUser& user,
                         string* err) {
  METRIC_RECORD(".ninja_log recompact");

  Close();
  string temp_path = path + ".recompact";
  FILE* f = fopen(temp_path.c_str(), "wb");
  if (!f) {
    *err = strerror(errno);
    return false;
  }

  if (fprintf(f, kFileSignature, kCurrentVersion) < 0) {
    *err = strerror(errno);
    fclose(f);
    return false;
  }

  vector<StringPiece> dead_outputs;
  for (Entries::iterator i = entries_.begin(); i != entries_.end(); ++i) {
    if (user.IsPathDead(i->first)) {
      // Defer erasing: mutating entries_ while iterating is not safe.
      dead_outputs.push_back(i->first);
      continue;
    }

    if (!WriteEntry(f, *i->second)) {
      *err = strerror(errno);
      fclose(f);
      return false;
    }
  }

  for (size_t i = 0; i < dead_outputs.size(); ++i)
    entries_.erase(dead_outputs[i]);

  fclose(f);
  if (unlink(path.c_str()) < 0) {
    *err = strerror(errno);
    return false;
  }

  if (rename(temp_path.c_str(), path.c_str()) < 0) {
    *err = strerror(errno);
    return false;
  }

  return true;
}
// Rewrite the log at |path| with refreshed on-disk mtimes.  When
// |output_count| > 0, only the listed outputs are re-stat'ed; otherwise all
// entries are.  Uses a ".restat" temp file replaced via rename().
bool BuildLog::Restat(const StringPiece path,
                      const DiskInterface& disk_interface,
                      const int output_count, char** outputs,
                      std::string* const err) {
  METRIC_RECORD(".ninja_log restat");

  Close();
  std::string temp_path = path.AsString() + ".restat";
  FILE* f = fopen(temp_path.c_str(), "wb");
  if (!f) {
    *err = strerror(errno);
    return false;
  }

  if (fprintf(f, kFileSignature, kCurrentVersion) < 0) {
    *err = strerror(errno);
    fclose(f);
    return false;
  }
  for (Entries::iterator i = entries_.begin(); i != entries_.end(); ++i) {
    // With an explicit output list, skip entries that are not in it.
    bool skip = output_count > 0;
    for (int j = 0; j < output_count; ++j) {
      if (i->second->output == outputs[j]) {
        skip = false;
        break;
      }
    }
    if (!skip) {
      const TimeStamp mtime = disk_interface.Stat(i->second->output, err);
      if (mtime == -1) {
        fclose(f);
        return false;
      }
      i->second->mtime = mtime;
    }

    if (!WriteEntry(f, *i->second)) {
      *err = strerror(errno);
      fclose(f);
      return false;
    }
  }

  fclose(f);
  if (unlink(path.str_) < 0) {
    *err = strerror(errno);
    return false;
  }

  if (rename(temp_path.c_str(), path.str_) < 0) {
    *err = strerror(errno);
    return false;
  }

  return true;
}

107
src/build_log.h Normal file
View File

@ -0,0 +1,107 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_BUILD_LOG_H_
#define NINJA_BUILD_LOG_H_
#include <string>
#include <stdio.h>
using namespace std;
#include "hash_map.h"
#include "load_status.h"
#include "timestamp.h"
#include "util.h" // uint64_t
struct DiskInterface;
struct Edge;
/// Can answer questions about the manifest for the BuildLog.
struct BuildLogUser {
  /// Return if a given output is no longer part of the build manifest.
  /// This is only called during recompaction and doesn't have to be fast.
  virtual bool IsPathDead(StringPiece s) const = 0;
};

/// Store a log of every command ran for every build.
/// It has a few uses:
///
/// 1) (hashes of) command lines for existing output files, so we know
///    when we need to rebuild due to the command changing
/// 2) timing information, perhaps for generating reports
/// 3) restat information
struct BuildLog {
  BuildLog();
  ~BuildLog();

  /// Prepares writing to the log file without actually opening it - that will
  /// happen when/if it's needed
  bool OpenForWrite(const string& path, const BuildLogUser& user, string* err);
  /// Append entries for each output of |edge|.
  bool RecordCommand(Edge* edge, int start_time, int end_time,
                     TimeStamp mtime = 0);
  void Close();

  /// Load the on-disk log.
  LoadStatus Load(const string& path, string* err);

  /// One record per output path: command fingerprint, timing, and mtime.
  struct LogEntry {
    string output;
    uint64_t command_hash;
    int start_time;
    int end_time;
    TimeStamp mtime;

    static uint64_t HashCommand(StringPiece command);

    // Used by tests.
    bool operator==(const LogEntry& o) {
      return output == o.output && command_hash == o.command_hash &&
          start_time == o.start_time && end_time == o.end_time &&
          mtime == o.mtime;
    }

    explicit LogEntry(const string& output);
    LogEntry(const string& output, uint64_t command_hash,
             int start_time, int end_time, TimeStamp restat_mtime);
  };

  /// Lookup a previously-run command by its output path.
  LogEntry* LookupByOutput(const string& path);

  /// Serialize an entry into a log file.
  bool WriteEntry(FILE* f, const LogEntry& entry);

  /// Rewrite the known log entries, throwing away old data.
  bool Recompact(const string& path, const BuildLogUser& user, string* err);

  /// Restat all outputs in the log
  bool Restat(StringPiece path, const DiskInterface& disk_interface,
              int output_count, char** outputs, std::string* err);

  typedef ExternalStringHashMap<LogEntry*>::Type Entries;
  const Entries& entries() const { return entries_; }

 private:
  /// Should be called before using log_file_. When false is returned, errno
  /// will be set.
  bool OpenForWriteIfNeeded();

  Entries entries_;
  FILE* log_file_;
  /// Pending path for the lazy open; cleared once the file is open.
  std::string log_file_path_;
  bool needs_recompaction_;
};
#endif // NINJA_BUILD_LOG_H_

149
src/build_log_perftest.cc Normal file
View File

@ -0,0 +1,149 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <stdlib.h>
#include "build_log.h"
#include "graph.h"
#include "manifest_parser.h"
#include "state.h"
#include "util.h"
#include "metrics.h"
#ifndef _WIN32
#include <unistd.h>
#endif
/// Scratch log file written into the current directory by the benchmark.
const char kTestFilename[] = "BuildLogPerfTest-tempfile";

/// BuildLogUser that declares every path live, so nothing is dropped
/// during recompaction.
struct NoDeadPaths : public BuildLogUser {
  virtual bool IsPathDead(StringPiece) const { return false; }
};
/// Generate a build log resembling a large real-world build: 30000 commands
/// of ~4 kB each, written to kTestFilename.
/// @param err receives a message on failure.
/// @return true on success.
bool WriteTestData(string* err) {
  BuildLog log;

  NoDeadPaths no_dead_paths;
  if (!log.OpenForWrite(kTestFilename, no_dead_paths, err))
    return false;

  /*
  A histogram of command lengths in chromium. For example, 407 builds,
  1.4% of all builds, had commands longer than 32 bytes but shorter than 64.
       32    407   1.4%
       64    183   0.6%
      128   1461   5.1%
      256    791   2.8%
      512   1314   4.6%
     1024   6114  21.3%
     2048  11759  41.0%
     4096   2056   7.2%
     8192   4567  15.9%
    16384     13   0.0%
    32768      4   0.0%
    65536      5   0.0%
  The average command length is 4.1 kB and there were 28674 commands in total,
  which makes for a total log size of ~120 MB (also counting output filenames).

  Based on this, write 30000 many 4 kB long command lines.
  */

  // ManifestParser is the only object allowed to create Rules.
  const size_t kRuleSize = 4000;
  string long_rule_command = "gcc ";
  for (int i = 0; long_rule_command.size() < kRuleSize; ++i) {
    char buf[80];
    // snprintf instead of sprintf: bounds the write to the buffer.
    snprintf(buf, sizeof(buf),
             "-I../../and/arbitrary/but/fairly/long/path/suffixed/%d ", i);
    long_rule_command += buf;
  }
  long_rule_command += "$in -o $out\n";

  State state;
  ManifestParser parser(&state, NULL);
  if (!parser.ParseTest("rule cxx\n  command = " + long_rule_command, err))
    return false;

  // Create build edges. Using ManifestParser is as fast as using the State api
  // for edge creation, so just use that.
  const int kNumCommands = 30000;
  string build_rules;
  for (int i = 0; i < kNumCommands; ++i) {
    char buf[80];
    snprintf(buf, sizeof(buf), "build input%d.o: cxx input%d.cc\n", i, i);
    build_rules += buf;
  }

  if (!parser.ParseTest(build_rules, err))
    return false;

  for (int i = 0; i < kNumCommands; ++i) {
    log.RecordCommand(state.edges_[i],
                      /*start_time=*/100 * i,
                      /*end_time=*/100 * i + 1,
                      /*mtime=*/0);
  }

  return true;
}
int main() {
vector<int> times;
string err;
if (!WriteTestData(&err)) {
fprintf(stderr, "Failed to write test data: %s\n", err.c_str());
return 1;
}
{
// Read once to warm up disk cache.
BuildLog log;
if (!log.Load(kTestFilename, &err)) {
fprintf(stderr, "Failed to read test data: %s\n", err.c_str());
return 1;
}
}
const int kNumRepetitions = 5;
for (int i = 0; i < kNumRepetitions; ++i) {
int64_t start = GetTimeMillis();
BuildLog log;
if (!log.Load(kTestFilename, &err)) {
fprintf(stderr, "Failed to read test data: %s\n", err.c_str());
return 1;
}
int delta = (int)(GetTimeMillis() - start);
printf("%dms\n", delta);
times.push_back(delta);
}
int min = times[0];
int max = times[0];
float total = 0;
for (size_t i = 0; i < times.size(); ++i) {
total += times[i];
if (times[i] < min)
min = times[i];
else if (times[i] > max)
max = times[i];
}
printf("min %dms max %dms avg %.1fms\n",
min, max, total / times.size());
unlink(kTestFilename);
return 0;
}

356
src/build_log_test.cc Normal file
View File

@ -0,0 +1,356 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "build_log.h"
#include "util.h"
#include "test.h"
#include <sys/stat.h>
#ifdef _WIN32
#include <fcntl.h>
#include <share.h>
#else
#include <sys/types.h>
#include <unistd.h>
#endif
#include <cassert>
namespace {
/// Scratch log file created and deleted by every test in this file.
const char kTestFilename[] = "BuildLogTest-tempfile";

/// Fixture: parses manifests via StateTestWithBuiltinRules and acts as the
/// BuildLogUser (no path is ever considered dead).
struct BuildLogTest : public StateTestWithBuiltinRules, public BuildLogUser {
  virtual void SetUp() {
    // In case a crashing test left a stale file behind.
    unlink(kTestFilename);
  }
  virtual void TearDown() {
    unlink(kTestFilename);
  }
  virtual bool IsPathDead(StringPiece s) const { return false; }
};
// Entries recorded through one BuildLog round-trip through the on-disk file
// and reload with equal contents.
TEST_F(BuildLogTest, WriteRead) {
  AssertParse(&state_,
              "build out: cat mid\n"
              "build mid: cat in\n");

  BuildLog log1;
  string err;
  EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err));
  ASSERT_EQ("", err);
  log1.RecordCommand(state_.edges_[0], 15, 18);
  log1.RecordCommand(state_.edges_[1], 20, 25);
  log1.Close();

  BuildLog log2;
  EXPECT_TRUE(log2.Load(kTestFilename, &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(2u, log1.entries().size());
  ASSERT_EQ(2u, log2.entries().size());

  // The reloaded entry must compare equal to the in-memory one.
  BuildLog::LogEntry* e1 = log1.LookupByOutput("out");
  ASSERT_TRUE(e1);
  BuildLog::LogEntry* e2 = log2.LookupByOutput("out");
  ASSERT_TRUE(e2);
  ASSERT_TRUE(*e1 == *e2);
  ASSERT_EQ(15, e1->start_time);
  ASSERT_EQ("out", e1->output);
}
// The version header is written exactly once, even when the log is opened a
// second time. The actual version digit is masked out with 'X' so the test
// doesn't need updating when the format version changes.
TEST_F(BuildLogTest, FirstWriteAddsSignature) {
  const char kExpectedVersion[] = "# ninja log vX\n";
  const size_t kVersionPos = strlen(kExpectedVersion) - 2;  // Points at 'X'.

  BuildLog log;
  string contents, err;

  EXPECT_TRUE(log.OpenForWrite(kTestFilename, *this, &err));
  ASSERT_EQ("", err);
  log.Close();

  ASSERT_EQ(0, ReadFile(kTestFilename, &contents, &err));
  ASSERT_EQ("", err);
  if (contents.size() >= kVersionPos)
    contents[kVersionPos] = 'X';
  EXPECT_EQ(kExpectedVersion, contents);

  // Opening the file anew shouldn't add a second version string.
  EXPECT_TRUE(log.OpenForWrite(kTestFilename, *this, &err));
  ASSERT_EQ("", err);
  log.Close();

  contents.clear();
  ASSERT_EQ(0, ReadFile(kTestFilename, &contents, &err));
  ASSERT_EQ("", err);
  if (contents.size() >= kVersionPos)
    contents[kVersionPos] = 'X';
  EXPECT_EQ(kExpectedVersion, contents);
}
// A later log line for the same output replaces the earlier one.
TEST_F(BuildLogTest, DoubleEntry) {
  FILE* f = fopen(kTestFilename, "wb");
  // fopen can fail (e.g. read-only cwd); fprintf through NULL is UB.
  ASSERT_TRUE(f);
  fprintf(f, "# ninja log v4\n");
  fprintf(f, "0\t1\t2\tout\tcommand abc\n");
  fprintf(f, "3\t4\t5\tout\tcommand def\n");
  fclose(f);

  string err;
  BuildLog log;
  EXPECT_TRUE(log.Load(kTestFilename, &err));
  ASSERT_EQ("", err);

  // Only the second command's hash should survive.
  BuildLog::LogEntry* e = log.LookupByOutput("out");
  ASSERT_TRUE(e);
  ASSERT_NO_FATAL_FAILURE(AssertHash("command def", e->command_hash));
}
// Parsing must not crash on a log file cut off at any byte position.
TEST_F(BuildLogTest, Truncate) {
  AssertParse(&state_,
              "build out: cat mid\n"
              "build mid: cat in\n");

  {
    // Write a reference two-entry log, then close it so its size is final.
    BuildLog log1;
    string err;
    EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err));
    ASSERT_EQ("", err);
    log1.RecordCommand(state_.edges_[0], 15, 18);
    log1.RecordCommand(state_.edges_[1], 20, 25);
    log1.Close();
  }

  struct stat statbuf;
  ASSERT_EQ(0, stat(kTestFilename, &statbuf));
  ASSERT_GT(statbuf.st_size, 0);

  // For all possible truncations of the input file, assert that we don't
  // crash when parsing.
  for (off_t size = statbuf.st_size; size > 0; --size) {
    // Rewrite the full log each iteration, then truncate it to `size` bytes.
    BuildLog log2;
    string err;
    EXPECT_TRUE(log2.OpenForWrite(kTestFilename, *this, &err));
    ASSERT_EQ("", err);
    log2.RecordCommand(state_.edges_[0], 15, 18);
    log2.RecordCommand(state_.edges_[1], 20, 25);
    log2.Close();

    ASSERT_TRUE(Truncate(kTestFilename, size, &err));

    // Loading must either succeed or fail with a message - never crash.
    BuildLog log3;
    err.clear();
    ASSERT_TRUE(log3.Load(kTestFilename, &err) == LOAD_SUCCESS || !err.empty());
  }
}
// Loading a pre-v4 log reports a version mismatch in `err`.
TEST_F(BuildLogTest, ObsoleteOldVersion) {
  FILE* f = fopen(kTestFilename, "wb");
  // Guard against a NULL stream before writing through it.
  ASSERT_TRUE(f);
  fprintf(f, "# ninja log v3\n");
  fprintf(f, "123 456 0 out command\n");
  fclose(f);

  string err;
  BuildLog log;
  EXPECT_TRUE(log.Load(kTestFilename, &err));
  ASSERT_NE(err.find("version"), string::npos);
}
// v4 fields are tab-separated, so output paths may contain spaces.
TEST_F(BuildLogTest, SpacesInOutputV4) {
  FILE* f = fopen(kTestFilename, "wb");
  // Guard against a NULL stream before writing through it.
  ASSERT_TRUE(f);
  fprintf(f, "# ninja log v4\n");
  fprintf(f, "123\t456\t456\tout with space\tcommand\n");
  fclose(f);

  string err;
  BuildLog log;
  EXPECT_TRUE(log.Load(kTestFilename, &err));
  ASSERT_EQ("", err);

  BuildLog::LogEntry* e = log.LookupByOutput("out with space");
  ASSERT_TRUE(e);
  ASSERT_EQ(123, e->start_time);
  ASSERT_EQ(456, e->end_time);
  ASSERT_EQ(456, e->mtime);
  ASSERT_NO_FATAL_FAILURE(AssertHash("command", e->command_hash));
}
TEST_F(BuildLogTest, DuplicateVersionHeader) {
  // Old versions of ninja accidentally wrote multiple version headers to the
  // build log on Windows. This shouldn't crash, and the second version header
  // should be ignored.
  FILE* f = fopen(kTestFilename, "wb");
  // Guard against a NULL stream before writing through it.
  ASSERT_TRUE(f);
  fprintf(f, "# ninja log v4\n");
  fprintf(f, "123\t456\t456\tout\tcommand\n");
  fprintf(f, "# ninja log v4\n");
  fprintf(f, "456\t789\t789\tout2\tcommand2\n");
  fclose(f);

  string err;
  BuildLog log;
  EXPECT_TRUE(log.Load(kTestFilename, &err));
  ASSERT_EQ("", err);

  // Entries on both sides of the duplicate header must be parsed.
  BuildLog::LogEntry* e = log.LookupByOutput("out");
  ASSERT_TRUE(e);
  ASSERT_EQ(123, e->start_time);
  ASSERT_EQ(456, e->end_time);
  ASSERT_EQ(456, e->mtime);
  ASSERT_NO_FATAL_FAILURE(AssertHash("command", e->command_hash));

  e = log.LookupByOutput("out2");
  ASSERT_TRUE(e);
  ASSERT_EQ(456, e->start_time);
  ASSERT_EQ(789, e->end_time);
  ASSERT_EQ(789, e->mtime);
  ASSERT_NO_FATAL_FAILURE(AssertHash("command2", e->command_hash));
}
/// DiskInterface stub for the Restat test: Stat() always reports mtime 4;
/// every other operation asserts, since Restat() must not touch the disk
/// in any other way.
struct TestDiskInterface : public DiskInterface {
  virtual TimeStamp Stat(const string& path, string* err) const {
    return 4;
  }
  virtual bool WriteFile(const string& path, const string& contents) {
    assert(false);
    return true;
  }
  virtual bool MakeDir(const string& path) {
    assert(false);
    return false;
  }
  virtual Status ReadFile(const string& path, string* contents, string* err) {
    assert(false);
    return NotFound;
  }
  virtual int RemoveFile(const string& path) {
    assert(false);
    return 0;
  }
};
// Restat() updates mtimes only for outputs matching the filter (or all
// outputs when the filter is empty).
TEST_F(BuildLogTest, Restat) {
  FILE* f = fopen(kTestFilename, "wb");
  // Guard against a NULL stream before writing through it.
  ASSERT_TRUE(f);
  fprintf(f, "# ninja log v4\n"
             "1\t2\t3\tout\tcommand\n");
  fclose(f);

  std::string err;
  BuildLog log;
  EXPECT_TRUE(log.Load(kTestFilename, &err));
  ASSERT_EQ("", err);
  BuildLog::LogEntry* e = log.LookupByOutput("out");
  // LookupByOutput can return NULL; check before dereferencing.
  ASSERT_TRUE(e);
  ASSERT_EQ(3, e->mtime);

  TestDiskInterface testDiskInterface;
  char out2[] = { 'o', 'u', 't', '2', 0 };
  char* filter2[] = { out2 };
  EXPECT_TRUE(log.Restat(kTestFilename, testDiskInterface, 1, filter2, &err));
  ASSERT_EQ("", err);
  e = log.LookupByOutput("out");
  ASSERT_TRUE(e);
  ASSERT_EQ(3, e->mtime);  // unchanged, since the filter doesn't match

  EXPECT_TRUE(log.Restat(kTestFilename, testDiskInterface, 0, NULL, &err));
  ASSERT_EQ("", err);
  e = log.LookupByOutput("out");
  ASSERT_TRUE(e);
  ASSERT_EQ(4, e->mtime);  // TestDiskInterface::Stat() reports 4.
}
TEST_F(BuildLogTest, VeryLongInputLine) {
  // Ninja's build log buffer is currently 256kB. Lines longer than that are
  // silently ignored, but don't affect parsing of other lines.
  FILE* f = fopen(kTestFilename, "wb");
  // Guard against a NULL stream before writing through it.
  ASSERT_TRUE(f);
  fprintf(f, "# ninja log v4\n");
  fprintf(f, "123\t456\t456\tout\tcommand start");
  // Pad the line well past the 256kB buffer limit.
  for (size_t i = 0; i < (512 << 10) / strlen(" more_command"); ++i)
    fputs(" more_command", f);
  fprintf(f, "\n");
  fprintf(f, "456\t789\t789\tout2\tcommand2\n");
  fclose(f);

  string err;
  BuildLog log;
  EXPECT_TRUE(log.Load(kTestFilename, &err));
  ASSERT_EQ("", err);

  // The oversized entry is dropped...
  BuildLog::LogEntry* e = log.LookupByOutput("out");
  ASSERT_EQ(NULL, e);

  // ...but the following well-formed entry is still parsed.
  e = log.LookupByOutput("out2");
  ASSERT_TRUE(e);
  ASSERT_EQ(456, e->start_time);
  ASSERT_EQ(789, e->end_time);
  ASSERT_EQ(789, e->mtime);
  ASSERT_NO_FATAL_FAILURE(AssertHash("command2", e->command_hash));
}
// Recording an edge with two outputs creates one entry per output, both
// carrying the same timing data.
TEST_F(BuildLogTest, MultiTargetEdge) {
  AssertParse(&state_,
              "build out out.d: cat\n");

  BuildLog log;
  log.RecordCommand(state_.edges_[0], 21, 22);

  ASSERT_EQ(2u, log.entries().size());
  BuildLog::LogEntry* e1 = log.LookupByOutput("out");
  ASSERT_TRUE(e1);
  BuildLog::LogEntry* e2 = log.LookupByOutput("out.d");
  ASSERT_TRUE(e2);
  ASSERT_EQ("out", e1->output);
  ASSERT_EQ("out.d", e2->output);
  ASSERT_EQ(21, e1->start_time);
  ASSERT_EQ(21, e2->start_time);
  // Fix: the original asserted e2->end_time twice and never checked e1's.
  ASSERT_EQ(22, e1->end_time);
  ASSERT_EQ(22, e2->end_time);
}
/// Fixture for recompaction tests: declares "out2" dead so recompaction
/// drops its entry.
struct BuildLogRecompactTest : public BuildLogTest {
  virtual bool IsPathDead(StringPiece s) const { return s == "out2"; }
};
// Recompaction rewrites the log, dropping entries for dead paths ("out2").
TEST_F(BuildLogRecompactTest, Recompact) {
  AssertParse(&state_,
              "build out: cat in\n"
              "build out2: cat in\n");

  BuildLog log1;
  string err;
  EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err));
  ASSERT_EQ("", err);
  // Record the same edge several times, to trigger recompaction
  // the next time the log is opened.
  for (int i = 0; i < 200; ++i)
    log1.RecordCommand(state_.edges_[0], 15, 18 + i);
  log1.RecordCommand(state_.edges_[1], 21, 22);
  log1.Close();

  // Load...
  BuildLog log2;
  EXPECT_TRUE(log2.Load(kTestFilename, &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(2u, log2.entries().size());
  ASSERT_TRUE(log2.LookupByOutput("out"));
  ASSERT_TRUE(log2.LookupByOutput("out2"));
  // ...and force a recompaction.
  EXPECT_TRUE(log2.OpenForWrite(kTestFilename, *this, &err));
  log2.Close();

  // "out2" is dead, it should've been removed.
  // Fix: the original declared log3 but kept querying the already-populated
  // log2, so the recompacted file was never actually verified.
  BuildLog log3;
  EXPECT_TRUE(log3.Load(kTestFilename, &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(1u, log3.entries().size());
  ASSERT_TRUE(log3.LookupByOutput("out"));
  ASSERT_FALSE(log3.LookupByOutput("out2"));
}
} // anonymous namespace

3302
src/build_test.cc Normal file

File diff suppressed because it is too large Load Diff

57
src/canon_perftest.cc Normal file
View File

@ -0,0 +1,57 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <string.h>
#include "util.h"
#include "metrics.h"
const char kPath[] =
"../../third_party/WebKit/Source/WebCore/"
"platform/leveldb/LevelDBWriteBatch.cpp";
int main() {
vector<int> times;
string err;
char buf[200];
size_t len = strlen(kPath);
strcpy(buf, kPath);
for (int j = 0; j < 5; ++j) {
const int kNumRepetitions = 2000000;
int64_t start = GetTimeMillis();
uint64_t slash_bits;
for (int i = 0; i < kNumRepetitions; ++i) {
CanonicalizePath(buf, &len, &slash_bits, &err);
}
int delta = (int)(GetTimeMillis() - start);
times.push_back(delta);
}
int min = times[0];
int max = times[0];
float total = 0;
for (size_t i = 0; i < times.size(); ++i) {
total += times[i];
if (times[i] < min)
min = times[i];
else if (times[i] > max)
max = times[i];
}
printf("min %dms max %dms avg %.1fms\n",
min, max, total / times.size());
}

293
src/clean.cc Normal file
View File

@ -0,0 +1,293 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "clean.h"
#include <assert.h>
#include <stdio.h>
#include "disk_interface.h"
#include "graph.h"
#include "state.h"
#include "util.h"
/// Build a cleaner over @a state; deletions go through @a disk_interface
/// (or are only reported when config.dry_run is set).
Cleaner::Cleaner(State* state,
                 const BuildConfig& config,
                 DiskInterface* disk_interface)
  : state_(state),
    config_(config),
    dyndep_loader_(state, disk_interface),
    removed_(),
    cleaned_(),
    cleaned_files_count_(0),
    disk_interface_(disk_interface),
    status_(0) {
}
/// Delete @a path via the disk interface.
/// @return the DiskInterface result (0 = removed).
int Cleaner::RemoveFile(const string& path) {
  return disk_interface_->RemoveFile(path);
}
bool Cleaner::FileExists(const string& path) {
string err;
TimeStamp mtime = disk_interface_->Stat(path, &err);
if (mtime == -1)
Error("%s", err.c_str());
return mtime > 0; // Treat Stat() errors as "file does not exist".
}
/// Count @a path as cleaned and, in verbose mode, print it.
void Cleaner::Report(const string& path) {
  ++cleaned_files_count_;
  if (!IsVerbose())
    return;
  printf("Remove %s\n", path.c_str());
}
/// Remove @a path (or just report it in dry-run mode), at most once per
/// cleaning pass.
void Cleaner::Remove(const string& path) {
  if (IsAlreadyRemoved(path))
    return;
  removed_.insert(path);

  if (config_.dry_run) {
    // Dry run: report what would have been deleted, touch nothing.
    if (FileExists(path))
      Report(path);
    return;
  }

  int rc = RemoveFile(path);
  if (rc == 0)
    Report(path);
  else if (rc == -1)
    status_ = 1;  // Deletion failed for a reason other than "already gone".
}
/// Whether @a path was already handled during this cleaning pass.
bool Cleaner::IsAlreadyRemoved(const string& path) {
  // set::count() is 0 or 1 - equivalent to find() != end().
  return removed_.count(path) != 0;
}
/// Remove the depfile and response file associated with @a edge, if any.
void Cleaner::RemoveEdgeFiles(Edge* edge) {
  const string depfile = edge->GetUnescapedDepfile();
  const string rspfile = edge->GetUnescapedRspfile();
  if (!depfile.empty())
    Remove(depfile);
  if (!rspfile.empty())
    Remove(rspfile);
}
/// Print the "Cleaning..." banner unless running quietly. Verbose mode lists
/// each file on its own line, so end the banner with a newline there.
void Cleaner::PrintHeader() {
  if (config_.verbosity == BuildConfig::QUIET)
    return;
  printf("Cleaning...");
  printf(IsVerbose() ? "\n" : " ");
  fflush(stdout);
}
/// Print the final cleaned-file count unless running quietly.
void Cleaner::PrintFooter() {
  if (config_.verbosity != BuildConfig::QUIET)
    printf("%d files.\n", cleaned_files_count_);
}
int Cleaner::CleanAll(bool generator) {
Reset();
PrintHeader();
LoadDyndeps();
for (vector<Edge*>::iterator e = state_->edges_.begin();
e != state_->edges_.end(); ++e) {
// Do not try to remove phony targets
if ((*e)->is_phony())
continue;
// Do not remove generator's files unless generator specified.
if (!generator && (*e)->GetBindingBool("generator"))
continue;
for (vector<Node*>::iterator out_node = (*e)->outputs_.begin();
out_node != (*e)->outputs_.end(); ++out_node) {
Remove((*out_node)->path());
}
RemoveEdgeFiles(*e);
}
PrintFooter();
return status_;
}
/// Remove build-log outputs that no edge in the current manifest produces.
/// @return non-zero if any removal failed.
int Cleaner::CleanDead(const BuildLog::Entries& entries) {
  Reset();
  PrintHeader();
  for (BuildLog::Entries::const_iterator it = entries.begin();
       it != entries.end(); ++it) {
    // An output is dead when the manifest no longer knows it, or knows it
    // only as a source (no producing edge).
    Node* node = state_->LookupNode(it->first);
    if (!node || !node->in_edge())
      Remove(it->first.AsString());
  }
  PrintFooter();
  return status_;
}
/// Recursively remove @a target and everything built to produce it.
/// Visited nodes are tracked in cleaned_ so shared inputs are walked once.
void Cleaner::DoCleanTarget(Node* target) {
  Edge* edge = target->in_edge();
  if (edge) {
    // Phony targets themselves produce no file to delete.
    if (!edge->is_phony()) {
      Remove(target->path());
      RemoveEdgeFiles(edge);
    }
    for (size_t i = 0; i < edge->inputs_.size(); ++i) {
      Node* input = edge->inputs_[i];
      // Recurse only into inputs we haven't visited yet.
      if (cleaned_.count(input) == 0)
        DoCleanTarget(input);
    }
  }
  // Mark this target as visited.
  cleaned_.insert(target);
}
/// Clean @a target and everything built for it.
/// @return non-zero if any removal failed.
int Cleaner::CleanTarget(Node* target) {
  assert(target);

  Reset();
  PrintHeader();
  LoadDyndeps();
  DoCleanTarget(target);
  PrintFooter();
  return status_;
}
/// Look up @a target by name and clean it; errors on unknown targets.
/// @return non-zero if the target is unknown or a removal failed.
int Cleaner::CleanTarget(const char* target) {
  assert(target);

  Reset();
  Node* node = state_->LookupNode(target);
  if (!node) {
    Error("unknown target '%s'", target);
    status_ = 1;
    return status_;
  }
  CleanTarget(node);
  return status_;
}
/// Clean each named target in @a targets after canonicalizing its path.
/// Unknown or non-canonicalizable names are reported and skipped.
/// @return non-zero if any lookup or removal failed.
int Cleaner::CleanTargets(int target_count, char* targets[]) {
  Reset();
  PrintHeader();
  LoadDyndeps();
  for (int i = 0; i < target_count; ++i) {
    string target_name = targets[i];
    uint64_t slash_bits;
    string err;
    if (!CanonicalizePath(&target_name, &slash_bits, &err)) {
      Error("failed to canonicalize '%s': %s", target_name.c_str(), err.c_str());
      status_ = 1;
      continue;
    }
    Node* target = state_->LookupNode(target_name);
    if (!target) {
      Error("unknown target '%s'", target_name.c_str());
      status_ = 1;
      continue;
    }
    if (IsVerbose())
      printf("Target %s\n", target_name.c_str());
    DoCleanTarget(target);
  }
  PrintFooter();
  return status_;
}
/// Remove the outputs (and dep/rsp files) of every edge using @a rule.
void Cleaner::DoCleanRule(const Rule* rule) {
  assert(rule);

  for (vector<Edge*>::iterator e = state_->edges_.begin();
       e != state_->edges_.end(); ++e) {
    if ((*e)->rule().name() == rule->name()) {
      for (vector<Node*>::iterator out_node = (*e)->outputs_.begin();
           out_node != (*e)->outputs_.end(); ++out_node) {
        Remove((*out_node)->path());
        // NOTE(review): RemoveEdgeFiles is invoked once per output, not once
        // per edge; harmless because Remove() deduplicates via removed_, but
        // hoisting it out of this loop would skip edges with no outputs.
        RemoveEdgeFiles(*e);
      }
    }
  }
}
/// Clean all files built with @a rule.
/// @return non-zero if any removal failed.
int Cleaner::CleanRule(const Rule* rule) {
  assert(rule);

  Reset();
  PrintHeader();
  LoadDyndeps();
  DoCleanRule(rule);
  PrintFooter();
  return status_;
}
/// Look up @a rule by name and clean its outputs; errors on unknown rules.
/// @return non-zero if the rule is unknown or a removal failed.
int Cleaner::CleanRule(const char* rule) {
  assert(rule);

  Reset();
  const Rule* r = state_->bindings_.LookupRule(rule);
  if (!r) {
    Error("unknown rule '%s'", rule);
    status_ = 1;
    return status_;
  }
  CleanRule(r);
  return status_;
}
/// Clean the outputs of every named rule in @a rules.
/// Unknown rule names are reported and skipped.
/// @return non-zero if any lookup or removal failed.
int Cleaner::CleanRules(int rule_count, char* rules[]) {
  assert(rules);

  Reset();
  PrintHeader();
  LoadDyndeps();
  for (int i = 0; i < rule_count; ++i) {
    const char* rule_name = rules[i];
    const Rule* rule = state_->bindings_.LookupRule(rule_name);
    if (!rule) {
      Error("unknown rule '%s'", rule_name);
      status_ = 1;
      continue;
    }
    if (IsVerbose())
      printf("Rule %s\n", rule_name);
    DoCleanRule(rule);
  }
  PrintFooter();
  return status_;
}
/// Forget all state from the previous cleaning pass.
void Cleaner::Reset() {
  removed_.clear();
  cleaned_.clear();
  cleaned_files_count_ = 0;
  status_ = 0;
}
/// Parse existing dyndep files before cleaning deletes them, so any outputs
/// they declare become part of the graph being cleaned.
void Cleaner::LoadDyndeps() {
  for (size_t i = 0; i < state_->edges_.size(); ++i) {
    Node* dyndep = state_->edges_[i]->dyndep_;
    if (!dyndep)
      continue;
    // Best effort: swallow load errors and clean as much as we know.
    std::string err;
    dyndep_loader_.LoadDyndeps(dyndep, &err);
  }
}

113
src/clean.h Normal file
View File

@ -0,0 +1,113 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_CLEAN_H_
#define NINJA_CLEAN_H_
#include <set>
#include <string>
#include "build.h"
#include "dyndep.h"
#include "build_log.h"
using namespace std;
struct State;
struct Node;
struct Rule;
struct DiskInterface;
struct Cleaner {
  /// Build a cleaner object with the given @a disk_interface
  Cleaner(State* state,
          const BuildConfig& config,
          DiskInterface* disk_interface);

  /// Clean the given @a target and all the files built for it.
  /// @return non-zero if an error occurs.
  int CleanTarget(Node* target);
  /// Clean the given target @a target.
  /// @return non-zero if an error occurs.
  int CleanTarget(const char* target);
  /// Clean the given targets @a targets.
  /// @return non-zero if an error occurs.
  int CleanTargets(int target_count, char* targets[]);

  /// Clean all built files, except for files created by generator rules.
  /// @param generator If set, also clean files created by generator rules.
  /// @return non-zero if an error occurs.
  int CleanAll(bool generator = false);

  /// Clean all the files built with the given rule @a rule.
  /// @return non-zero if an error occurs.
  int CleanRule(const Rule* rule);
  /// Clean the files produced by the given @a rule.
  /// @return non-zero if an error occurs.
  int CleanRule(const char* rule);
  /// Clean the files produced by the given @a rules.
  /// @return non-zero if an error occurs.
  int CleanRules(int rule_count, char* rules[]);
  /// Clean the files produced by previous builds that are no longer in the
  /// manifest.
  /// @return non-zero if an error occurs.
  int CleanDead(const BuildLog::Entries& entries);

  /// @return the number of files cleaned.
  int cleaned_files_count() const {
    return cleaned_files_count_;
  }

  /// @return whether the cleaner is in verbose mode.
  bool IsVerbose() const {
    return (config_.verbosity != BuildConfig::QUIET
            && (config_.verbosity == BuildConfig::VERBOSE || config_.dry_run));
  }

 private:
  /// Remove the file @a path.
  /// @return whether the file has been removed.
  int RemoveFile(const string& path);
  /// @returns whether the file @a path exists.
  bool FileExists(const string& path);
  /// Count @a path as cleaned and print it in verbose mode.
  void Report(const string& path);

  /// Remove the given @a path file only if it has not been already removed.
  void Remove(const string& path);
  /// @return whether the given @a path has already been removed.
  bool IsAlreadyRemoved(const string& path);
  /// Remove the depfile and rspfile for an Edge.
  void RemoveEdgeFiles(Edge* edge);

  /// Helper recursive method for CleanTarget().
  void DoCleanTarget(Node* target);
  void PrintHeader();
  void PrintFooter();
  void DoCleanRule(const Rule* rule);
  void Reset();

  /// Load dependencies from dyndep bindings.
  void LoadDyndeps();

  State* state_;
  const BuildConfig& config_;
  DyndepLoader dyndep_loader_;
  /// Paths already removed (or reported) in the current pass.
  set<string> removed_;
  /// Nodes already visited by DoCleanTarget() in the current pass.
  set<Node*> cleaned_;
  int cleaned_files_count_;
  DiskInterface* disk_interface_;
  /// 0 on success; 1 once any error has occurred.
  int status_;
};

#endif  // NINJA_CLEAN_H_
#endif // NINJA_CLEAN_H_

538
src/clean_test.cc Normal file
View File

@ -0,0 +1,538 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "clean.h"
#include "build.h"
#include "util.h"
#include "test.h"
#ifndef _WIN32
#include <unistd.h>
#endif
namespace {
// NOTE(review): kTestFilename appears unused within the visible tests;
// retained since later tests in this file may reference it.
const char kTestFilename[] = "CleanTest-tempfile";

/// Fixture: built-in rules plus a virtual file system; default verbosity is
/// QUIET so tests produce no output.
struct CleanTest : public StateTestWithBuiltinRules {
  VirtualFileSystem fs_;
  BuildConfig config_;
  virtual void SetUp() {
    config_.verbosity = BuildConfig::QUIET;
  }
};
// CleanAll removes every built file; a second pass finds nothing left.
TEST_F(CleanTest, CleanAll) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
      "build in1: cat src1\n"
      "build out1: cat in1\n"
      "build in2: cat src2\n"
      "build out2: cat in2\n"));
  fs_.Create("in1", "");
  fs_.Create("out1", "");
  fs_.Create("in2", "");
  fs_.Create("out2", "");

  Cleaner cleaner(&state_, config_, &fs_);

  ASSERT_EQ(0, cleaner.cleaned_files_count());
  EXPECT_EQ(0, cleaner.CleanAll());
  EXPECT_EQ(4, cleaner.cleaned_files_count());
  EXPECT_EQ(4u, fs_.files_removed_.size());

  // Check they are removed.
  string err;
  EXPECT_EQ(0, fs_.Stat("in1", &err));
  EXPECT_EQ(0, fs_.Stat("out1", &err));
  EXPECT_EQ(0, fs_.Stat("in2", &err));
  EXPECT_EQ(0, fs_.Stat("out2", &err));
  fs_.files_removed_.clear();

  // Cleaning again is a no-op.
  EXPECT_EQ(0, cleaner.CleanAll());
  EXPECT_EQ(0, cleaner.cleaned_files_count());
  EXPECT_EQ(0u, fs_.files_removed_.size());
}
// In dry-run mode CleanAll counts the files but deletes nothing, so a second
// pass reports the same count.
TEST_F(CleanTest, CleanAllDryRun) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
      "build in1: cat src1\n"
      "build out1: cat in1\n"
      "build in2: cat src2\n"
      "build out2: cat in2\n"));
  fs_.Create("in1", "");
  fs_.Create("out1", "");
  fs_.Create("in2", "");
  fs_.Create("out2", "");

  config_.dry_run = true;
  Cleaner cleaner(&state_, config_, &fs_);

  ASSERT_EQ(0, cleaner.cleaned_files_count());
  EXPECT_EQ(0, cleaner.CleanAll());
  EXPECT_EQ(4, cleaner.cleaned_files_count());
  EXPECT_EQ(0u, fs_.files_removed_.size());

  // Check they are not removed.
  string err;
  EXPECT_LT(0, fs_.Stat("in1", &err));
  EXPECT_LT(0, fs_.Stat("out1", &err));
  EXPECT_LT(0, fs_.Stat("in2", &err));
  EXPECT_LT(0, fs_.Stat("out2", &err));
  fs_.files_removed_.clear();

  EXPECT_EQ(0, cleaner.CleanAll());
  EXPECT_EQ(4, cleaner.cleaned_files_count());
  EXPECT_EQ(0u, fs_.files_removed_.size());
}
// CleanTarget removes only the target and its transitive intermediates
// (out1 and in1), leaving the unrelated chain untouched.
TEST_F(CleanTest, CleanTarget) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
      "build in1: cat src1\n"
      "build out1: cat in1\n"
      "build in2: cat src2\n"
      "build out2: cat in2\n"));
  fs_.Create("in1", "");
  fs_.Create("out1", "");
  fs_.Create("in2", "");
  fs_.Create("out2", "");

  Cleaner cleaner(&state_, config_, &fs_);

  ASSERT_EQ(0, cleaner.cleaned_files_count());
  ASSERT_EQ(0, cleaner.CleanTarget("out1"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(2u, fs_.files_removed_.size());

  // Check they are removed.
  string err;
  EXPECT_EQ(0, fs_.Stat("in1", &err));
  EXPECT_EQ(0, fs_.Stat("out1", &err));
  EXPECT_LT(0, fs_.Stat("in2", &err));
  EXPECT_LT(0, fs_.Stat("out2", &err));
  fs_.files_removed_.clear();

  // Cleaning the same target again is a no-op.
  ASSERT_EQ(0, cleaner.CleanTarget("out1"));
  EXPECT_EQ(0, cleaner.cleaned_files_count());
  EXPECT_EQ(0u, fs_.files_removed_.size());
}
// Dry-run CleanTarget counts the target's chain (out1, in1) without
// deleting anything.
TEST_F(CleanTest, CleanTargetDryRun) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
      "build in1: cat src1\n"
      "build out1: cat in1\n"
      "build in2: cat src2\n"
      "build out2: cat in2\n"));
  fs_.Create("in1", "");
  fs_.Create("out1", "");
  fs_.Create("in2", "");
  fs_.Create("out2", "");

  config_.dry_run = true;
  Cleaner cleaner(&state_, config_, &fs_);

  ASSERT_EQ(0, cleaner.cleaned_files_count());
  ASSERT_EQ(0, cleaner.CleanTarget("out1"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(0u, fs_.files_removed_.size());

  // Check they are not removed.
  string err;
  EXPECT_LT(0, fs_.Stat("in1", &err));
  EXPECT_LT(0, fs_.Stat("out1", &err));
  EXPECT_LT(0, fs_.Stat("in2", &err));
  EXPECT_LT(0, fs_.Stat("out2", &err));
  fs_.files_removed_.clear();

  ASSERT_EQ(0, cleaner.CleanTarget("out1"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(0u, fs_.files_removed_.size());
}
// CleanRule removes only the outputs of edges using the named rule
// (in1 and in2 come from cat_e; out1/out2 come from cat).
TEST_F(CleanTest, CleanRule) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
      "rule cat_e\n"
      "  command = cat -e $in > $out\n"
      "build in1: cat_e src1\n"
      "build out1: cat in1\n"
      "build in2: cat_e src2\n"
      "build out2: cat in2\n"));
  fs_.Create("in1", "");
  fs_.Create("out1", "");
  fs_.Create("in2", "");
  fs_.Create("out2", "");

  Cleaner cleaner(&state_, config_, &fs_);

  ASSERT_EQ(0, cleaner.cleaned_files_count());
  ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(2u, fs_.files_removed_.size());

  // Check they are removed.
  string err;
  EXPECT_EQ(0, fs_.Stat("in1", &err));
  EXPECT_LT(0, fs_.Stat("out1", &err));
  EXPECT_EQ(0, fs_.Stat("in2", &err));
  EXPECT_LT(0, fs_.Stat("out2", &err));
  fs_.files_removed_.clear();

  // Cleaning the same rule again is a no-op.
  ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
  EXPECT_EQ(0, cleaner.cleaned_files_count());
  EXPECT_EQ(0u, fs_.files_removed_.size());
}
// Dry-run CleanRule counts the rule's outputs without deleting anything.
TEST_F(CleanTest, CleanRuleDryRun) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
      "rule cat_e\n"
      "  command = cat -e $in > $out\n"
      "build in1: cat_e src1\n"
      "build out1: cat in1\n"
      "build in2: cat_e src2\n"
      "build out2: cat in2\n"));
  fs_.Create("in1", "");
  fs_.Create("out1", "");
  fs_.Create("in2", "");
  fs_.Create("out2", "");

  config_.dry_run = true;
  Cleaner cleaner(&state_, config_, &fs_);

  ASSERT_EQ(0, cleaner.cleaned_files_count());
  ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(0u, fs_.files_removed_.size());

  // Check they are not removed.
  string err;
  EXPECT_LT(0, fs_.Stat("in1", &err));
  EXPECT_LT(0, fs_.Stat("out1", &err));
  EXPECT_LT(0, fs_.Stat("in2", &err));
  EXPECT_LT(0, fs_.Stat("out2", &err));
  fs_.files_removed_.clear();

  ASSERT_EQ(0, cleaner.CleanRule("cat_e"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(0u, fs_.files_removed_.size());
}
// Generator-rule outputs survive a default CleanAll but are removed when
// CleanAll(generator=true) is requested.
TEST_F(CleanTest, CleanRuleGenerator) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
      "rule regen\n"
      "  command = cat $in > $out\n"
      "  generator = 1\n"
      "build out1: cat in1\n"
      "build out2: regen in2\n"));
  fs_.Create("out1", "");
  fs_.Create("out2", "");

  Cleaner cleaner(&state_, config_, &fs_);
  EXPECT_EQ(0, cleaner.CleanAll());
  EXPECT_EQ(1, cleaner.cleaned_files_count());
  EXPECT_EQ(1u, fs_.files_removed_.size());

  fs_.Create("out1", "");

  EXPECT_EQ(0, cleaner.CleanAll(/*generator=*/true));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(2u, fs_.files_removed_.size());
}
// CleanAll also removes the depfile declared by an edge.
TEST_F(CleanTest, CleanDepFile) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
      "rule cc\n"
      "  command = cc $in > $out\n"
      "  depfile = $out.d\n"
      "build out1: cc in1\n"));
  fs_.Create("out1", "");
  fs_.Create("out1.d", "");

  Cleaner cleaner(&state_, config_, &fs_);
  EXPECT_EQ(0, cleaner.CleanAll());
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(2u, fs_.files_removed_.size());
}
// CleanTarget also removes the depfile of the target's producing edge.
TEST_F(CleanTest, CleanDepFileOnCleanTarget) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
      "rule cc\n"
      "  command = cc $in > $out\n"
      "  depfile = $out.d\n"
      "build out1: cc in1\n"));
  fs_.Create("out1", "");
  fs_.Create("out1.d", "");

  Cleaner cleaner(&state_, config_, &fs_);
  EXPECT_EQ(0, cleaner.CleanTarget("out1"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(2u, fs_.files_removed_.size());
}
// CleanRule also removes depfiles of edges using that rule.
TEST_F(CleanTest, CleanDepFileOnCleanRule) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
      "rule cc\n"
      "  command = cc $in > $out\n"
      "  depfile = $out.d\n"
      "build out1: cc in1\n"));
  fs_.Create("out1", "");
  fs_.Create("out1.d", "");

  Cleaner cleaner(&state_, config_, &fs_);
  EXPECT_EQ(0, cleaner.CleanRule("cc"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(2u, fs_.files_removed_.size());
}
TEST_F(CleanTest, CleanDyndep) {
  // Verify that a dyndep file can be loaded to discover a new output
  // to be cleaned.
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
      "build out: cat in || dd\n"
      "  dyndep = dd\n"));
  fs_.Create("in", "");
  // The dyndep file declares an extra implicit output, out.imp.
  fs_.Create("dd",
      "ninja_dyndep_version = 1\n"
      "build out | out.imp: dyndep\n");
  fs_.Create("out", "");
  fs_.Create("out.imp", "");

  Cleaner cleaner(&state_, config_, &fs_);

  ASSERT_EQ(0, cleaner.cleaned_files_count());
  EXPECT_EQ(0, cleaner.CleanAll());
  // Both the declared output and the dyndep-discovered one are removed.
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(2u, fs_.files_removed_.size());

  string err;
  EXPECT_EQ(0, fs_.Stat("out", &err));
  EXPECT_EQ(0, fs_.Stat("out.imp", &err));
}
// A declared-but-absent dyndep file must not make cleaning fail;
// outputs only the dyndep would have declared simply survive.
TEST_F(CleanTest, CleanDyndepMissing) {
  // Verify that a missing dyndep file is tolerated.
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build out: cat in || dd\n"
" dyndep = dd\n"
));
  fs_.Create("in", "");
  fs_.Create("out", "");
  fs_.Create("out.imp", "");

  Cleaner cleaner(&state_, config_, &fs_);
  ASSERT_EQ(0, cleaner.cleaned_files_count());
  EXPECT_EQ(0, cleaner.CleanAll());
  // Only the known output "out" is removed.
  EXPECT_EQ(1, cleaner.cleaned_files_count());
  EXPECT_EQ(1u, fs_.files_removed_.size());

  string err;
  EXPECT_EQ(0, fs_.Stat("out", &err));
  // out.imp was never discovered, so it still exists.
  EXPECT_EQ(1, fs_.Stat("out.imp", &err));
}
// CleanAll() removes response files declared via the rspfile= binding.
TEST_F(CleanTest, CleanRspFile) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"rule cc\n"
" command = cc $in > $out\n"
" rspfile = $rspfile\n"
" rspfile_content=$in\n"
"build out1: cc in1\n"
" rspfile = cc1.rsp\n"));
  fs_.Create("out1", "");
  fs_.Create("cc1.rsp", "");

  Cleaner cleaner(&state_, config_, &fs_);
  EXPECT_EQ(0, cleaner.CleanAll());
  // Both the output and its response file are removed.
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_EQ(2u, fs_.files_removed_.size());
}
// Response files are cleaned alongside their targets whether cleaning
// by target or by rule, and each file is only counted once.
TEST_F(CleanTest, CleanRsp) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"rule cat_rsp \n"
" command = cat $rspfile > $out\n"
" rspfile = $rspfile\n"
" rspfile_content = $in\n"
"build in1: cat src1\n"
"build out1: cat in1\n"
"build in2: cat_rsp src2\n"
" rspfile=in2.rsp\n"
"build out2: cat_rsp in2\n"
" rspfile=out2.rsp\n"
));
  fs_.Create("in1", "");
  fs_.Create("out1", "");
  fs_.Create("in2.rsp", "");
  fs_.Create("out2.rsp", "");
  fs_.Create("in2", "");
  fs_.Create("out2", "");

  Cleaner cleaner(&state_, config_, &fs_);
  ASSERT_EQ(0, cleaner.cleaned_files_count());
  // CleanTarget("out1") removes out1 and its input in1.
  ASSERT_EQ(0, cleaner.CleanTarget("out1"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  // CleanTarget("in2") removes in2 and its response file.
  ASSERT_EQ(0, cleaner.CleanTarget("in2"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  // CleanRule removes out2 and its response file.
  ASSERT_EQ(0, cleaner.CleanRule("cat_rsp"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());

  // Six distinct files were removed in total.
  EXPECT_EQ(6u, fs_.files_removed_.size());

  // Check they are removed.
  string err;
  EXPECT_EQ(0, fs_.Stat("in1", &err));
  EXPECT_EQ(0, fs_.Stat("out1", &err));
  EXPECT_EQ(0, fs_.Stat("in2", &err));
  EXPECT_EQ(0, fs_.Stat("out2", &err));
  EXPECT_EQ(0, fs_.Stat("in2.rsp", &err));
  EXPECT_EQ(0, fs_.Stat("out2.rsp", &err));
}
// When an output cannot be removed, cleaning must report failure.
// NOTE(review): "dir" is created as a directory, presumably so that the
// file-removal call on it fails — confirm against Cleaner's RemoveFile.
TEST_F(CleanTest, CleanFailure) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build dir: cat src1\n"));
  fs_.MakeDir("dir");
  Cleaner cleaner(&state_, config_, &fs_);
  EXPECT_NE(0, cleaner.CleanAll());
}
// A file that happens to share its name with a phony target must never
// be deleted, by either CleanAll() or CleanTarget().
TEST_F(CleanTest, CleanPhony) {
  string err;
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build phony: phony t1 t2\n"
"build t1: cat\n"
"build t2: cat\n"));
  fs_.Create("phony", "");
  fs_.Create("t1", "");
  fs_.Create("t2", "");

  // Check that CleanAll does not remove "phony".
  Cleaner cleaner(&state_, config_, &fs_);
  EXPECT_EQ(0, cleaner.CleanAll());
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_LT(0, fs_.Stat("phony", &err));

  fs_.Create("t1", "");
  fs_.Create("t2", "");

  // Check that CleanTarget does not remove "phony".
  EXPECT_EQ(0, cleaner.CleanTarget("phony"));
  EXPECT_EQ(2, cleaner.cleaned_files_count());
  EXPECT_LT(0, fs_.Stat("phony", &err));
}
// Depfile and rspfile paths derived from outputs containing spaces
// ("$ " escapes in the manifest) must be resolved and removed correctly.
TEST_F(CleanTest, CleanDepFileAndRspFileWithSpaces) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"rule cc_dep\n"
" command = cc $in > $out\n"
" depfile = $out.d\n"
"rule cc_rsp\n"
" command = cc $in > $out\n"
" rspfile = $out.rsp\n"
" rspfile_content = $in\n"
"build out$ 1: cc_dep in$ 1\n"
"build out$ 2: cc_rsp in$ 1\n"
));
  fs_.Create("out 1", "");
  fs_.Create("out 2", "");
  fs_.Create("out 1.d", "");
  fs_.Create("out 2.rsp", "");

  Cleaner cleaner(&state_, config_, &fs_);
  EXPECT_EQ(0, cleaner.CleanAll());
  // Both outputs plus the depfile and response file are removed.
  EXPECT_EQ(4, cleaner.cleaned_files_count());
  EXPECT_EQ(4u, fs_.files_removed_.size());

  string err;
  EXPECT_EQ(0, fs_.Stat("out 1", &err));
  EXPECT_EQ(0, fs_.Stat("out 2", &err));
  EXPECT_EQ(0, fs_.Stat("out 1.d", &err));
  EXPECT_EQ(0, fs_.Stat("out 2.rsp", &err));
}
// Fixture for CleanDead tests: extends CleanTest with an on-disk build
// log (kTestFilename) and implements BuildLogUser so the log can be
// opened for writing.
struct CleanDeadTest : public CleanTest, public BuildLogUser{
  virtual void SetUp() {
    // In case a crashing test left a stale file behind.
    unlink(kTestFilename);
    CleanTest::SetUp();
  }
  virtual void TearDown() {
    unlink(kTestFilename);
  }
  // Treat every path as live so the build log never prunes entries.
  virtual bool IsPathDead(StringPiece) const { return false; }
};
// CleanDead() removes only outputs that appear in the build log but are
// no longer produced by the current manifest, and is a no-op otherwise.
TEST_F(CleanDeadTest, CleanDead) {
  // "state" builds both out1 and out2; the fixture's "state_" (below)
  // builds only out2, making out1 a dead output.
  State state;
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state,
"rule cat\n"
" command = cat $in > $out\n"
"build out1: cat in\n"
"build out2: cat in\n"
));
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build out2: cat in\n"
));
  fs_.Create("in", "");
  fs_.Create("out1", "");
  fs_.Create("out2", "");

  // Record both outputs in a build log, then reload it.
  BuildLog log1;
  string err;
  EXPECT_TRUE(log1.OpenForWrite(kTestFilename, *this, &err));
  ASSERT_EQ("", err);
  log1.RecordCommand(state.edges_[0], 15, 18);
  log1.RecordCommand(state.edges_[1], 20, 25);
  log1.Close();

  BuildLog log2;
  EXPECT_TRUE(log2.Load(kTestFilename, &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(2u, log2.entries().size());
  ASSERT_TRUE(log2.LookupByOutput("out1"));
  ASSERT_TRUE(log2.LookupByOutput("out2"));

  // First use the manifest that describes how to build out1.
  // Every logged output is still produced, so nothing is removed.
  Cleaner cleaner1(&state, config_, &fs_);
  EXPECT_EQ(0, cleaner1.CleanDead(log2.entries()));
  EXPECT_EQ(0, cleaner1.cleaned_files_count());
  EXPECT_EQ(0u, fs_.files_removed_.size());
  EXPECT_NE(0, fs_.Stat("in", &err));
  EXPECT_NE(0, fs_.Stat("out1", &err));
  EXPECT_NE(0, fs_.Stat("out2", &err));

  // Then use the manifest that does not build out1 anymore.
  Cleaner cleaner2(&state_, config_, &fs_);
  EXPECT_EQ(0, cleaner2.CleanDead(log2.entries()));
  EXPECT_EQ(1, cleaner2.cleaned_files_count());
  EXPECT_EQ(1u, fs_.files_removed_.size());
  EXPECT_EQ("out1", *(fs_.files_removed_.begin()));
  EXPECT_NE(0, fs_.Stat("in", &err));
  EXPECT_EQ(0, fs_.Stat("out1", &err));
  EXPECT_NE(0, fs_.Stat("out2", &err));

  // Nothing to do now.
  EXPECT_EQ(0, cleaner2.CleanDead(log2.entries()));
  EXPECT_EQ(0, cleaner2.cleaned_files_count());
  EXPECT_EQ(1u, fs_.files_removed_.size());
  EXPECT_EQ("out1", *(fs_.files_removed_.begin()));
  EXPECT_NE(0, fs_.Stat("in", &err));
  EXPECT_EQ(0, fs_.Stat("out1", &err));
  EXPECT_NE(0, fs_.Stat("out2", &err));
  log2.Close();
}
} // anonymous namespace

126
src/clparser.cc Normal file
View File

@ -0,0 +1,126 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "clparser.h"
#include <algorithm>
#include <assert.h>
#include <string.h>
#include "metrics.h"
#include "string_piece_util.h"
#ifdef _WIN32
#include "includes_normalize.h"
#include "string_piece.h"
#else
#include "util.h"
#endif
namespace {

/// Return true if \a input ends with \a needle.
/// Uses string::compare on the tail range instead of substr(), which
/// avoids allocating a temporary string just to test for equality.
bool EndsWith(const string& input, const string& needle) {
  return input.size() >= needle.size() &&
         input.compare(input.size() - needle.size(), needle.size(),
                       needle) == 0;
}

}  // anonymous namespace
// static
string CLParser::FilterShowIncludes(const string& line,
                                    const string& deps_prefix) {
  // cl.exe's English /showIncludes banner; localized toolchains supply
  // their own prefix via deps_prefix.
  const string kDepsPrefixEnglish = "Note: including file: ";
  const string& prefix = deps_prefix.empty() ? kDepsPrefixEnglish : deps_prefix;
  // The line must be strictly longer than the prefix and begin with it.
  if (line.size() > prefix.size() &&
      line.compare(0, prefix.size(), prefix) == 0) {
    // Skip the indentation spaces cl.exe inserts for nested includes.
    size_t path_start = prefix.size();
    while (path_start < line.size() && line[path_start] == ' ')
      ++path_start;
    return line.substr(path_start);
  }
  return "";
}
// static
bool CLParser::IsSystemInclude(string path) {
  // Lower-case our copy of the path so the matching is case-insensitive.
  transform(path.begin(), path.end(), path.begin(), ToLowerASCII);
  // TODO: this is a heuristic, perhaps there's a better way?
  const bool under_program_files = path.find("program files") != string::npos;
  const bool under_visual_studio =
      path.find("microsoft visual studio") != string::npos;
  return under_program_files || under_visual_studio;
}
// static
bool CLParser::FilterInputFilename(string line) {
  // Lower-case our copy so extension matching is case-insensitive.
  transform(line.begin(), line.end(), line.begin(), ToLowerASCII);
  // TODO: other extensions, like .asm?
  static const char* kSourceExtensions[] = { ".c", ".cc", ".cxx", ".cpp" };
  for (size_t i = 0;
       i < sizeof(kSourceExtensions) / sizeof(kSourceExtensions[0]); ++i) {
    if (EndsWith(line, kSourceExtensions[i]))
      return true;
  }
  return false;
}
// Scan the full cl.exe output line by line: collect include paths into
// includes_ (deduplicated by the set), drop echoed input filenames, and
// pass everything else through to filtered_output.
// NOTE(review): unlike the helpers above, Parse is a non-static member
// (it fills includes_); the "// static" comment that was here was wrong.
bool CLParser::Parse(const string& output, const string& deps_prefix,
                     string* filtered_output, string* err) {
  METRIC_RECORD("CLParser::Parse");
  // Loop over all lines in the output to process them.
  assert(&output != filtered_output);
  size_t start = 0;
#ifdef _WIN32
  IncludesNormalize normalizer(".");
#endif

  while (start < output.size()) {
    // A line ends at the first '\r' or '\n' (or end of input).
    size_t end = output.find_first_of("\r\n", start);
    if (end == string::npos)
      end = output.size();
    string line = output.substr(start, end - start);

    string include = FilterShowIncludes(line, deps_prefix);
    if (!include.empty()) {
      string normalized;
#ifdef _WIN32
      if (!normalizer.Normalize(include, &normalized, err))
        return false;
#else
      // TODO: should this make the path relative to cwd?
      normalized = include;
      uint64_t slash_bits;
      if (!CanonicalizePath(&normalized, &slash_bits, err))
        return false;
#endif
      // Keep only non-system includes; the set drops duplicates.
      if (!IsSystemInclude(normalized))
        includes_.insert(normalized);
    } else if (FilterInputFilename(line)) {
      // Drop it.
      // TODO: if we support compiling multiple output files in a single
      // cl.exe invocation, we should stash the filename.
    } else {
      filtered_output->append(line);
      filtered_output->append("\n");
    }

    // Advance past the terminator, treating "\r\n" as a single one.
    if (end < output.size() && output[end] == '\r')
      ++end;
    if (end < output.size() && output[end] == '\n')
      ++end;
    start = end;
  }

  return true;
}

52
src/clparser.h Normal file
View File

@ -0,0 +1,52 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_CLPARSER_H_
#define NINJA_CLPARSER_H_
#include <set>
#include <string>
using namespace std;
/// Visual Studio's cl.exe requires some massaging to work with Ninja;
/// for example, it emits include information on stderr in a funny
/// format when building with /showIncludes. This class parses this
/// output.
struct CLParser {
  /// Parse a line of cl.exe output and extract /showIncludes info.
  /// If a dependency is extracted, returns a nonempty string.
  /// Exposed for testing.
  static string FilterShowIncludes(const string& line,
                                   const string& deps_prefix);

  /// Return true if a mentioned include file is a system path.
  /// Filtering these out reduces dependency information considerably.
  static bool IsSystemInclude(string path);

  /// Parse a line of cl.exe output and return true if it looks like
  /// it's printing an input filename. This is a heuristic but it appears
  /// to be the best we can do.
  /// Exposed for testing.
  static bool FilterInputFilename(string line);

  /// Parse the full output of cl, filling filtered_output with the text that
  /// should be printed (if any). Returns true on success, or false with err
  /// filled. output must not be the same object as filtered_output.
  bool Parse(const string& output, const string& deps_prefix,
             string* filtered_output, string* err);

  /// Unique include paths accumulated by Parse(), with system includes
  /// and duplicates already filtered out.
  set<string> includes_;
};
#endif // NINJA_CLPARSER_H_

157
src/clparser_perftest.cc Normal file
View File

@ -0,0 +1,157 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <stdlib.h>
#include "clparser.h"
#include "metrics.h"
// Micro-benchmark for CLParser::Parse: repeatedly feeds it a captured
// /showIncludes transcript and reports the average time per parse.
int main(int argc, char* argv[]) {
  // Output of /showIncludes from #include <iostream>
  string perf_testdata =
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\iostream\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\istream\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ostream\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ios\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocnum\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\climits\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\yvals.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xkeycheck.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\crtdefs.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\sal.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ConcurrencySal.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vadefs.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\use_ansi.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\limits.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cmath\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\math.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xtgmath.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xtr1common\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstdlib\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\stdlib.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_malloc.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_search.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\stddef.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wstdlib.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstdio\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\stdio.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wstdio.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_stdio_config.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\streambuf\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xiosbase\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocale\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstring\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\string.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_memory.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_memcpy_s.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\errno.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_string.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wstring.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\stdexcept\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\exception\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\type_traits\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xstddef\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstddef\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\initializer_list\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\malloc.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_exception.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\eh.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_terminate.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xstring\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xmemory0\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cstdint\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\stdint.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\limits\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ymath.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cfloat\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\float.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cwchar\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\wchar.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wconio.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wctype.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wdirect.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wio.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_share.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wprocess.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\corecrt_wtime.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\sys/stat.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\sys/types.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\new\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_new.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xutility\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\utility\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\iosfwd\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\crtdbg.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_new_debug.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xatomic0.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\intrin.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\setjmp.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\immintrin.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\wmmintrin.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\nmmintrin.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\smmintrin.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\tmmintrin.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\pmmintrin.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\emmintrin.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xmmintrin.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\mmintrin.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\ammintrin.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\mm3dnow.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\typeinfo\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime_typeinfo.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\vcruntime.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocinfo\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xlocinfo.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\ctype.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\locale.h\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\xfacet\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\system_error\r\n"
      "Note: including file: C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\INCLUDE\\cerrno\r\n"
      "Note: including file: C:\\Program Files (x86)\\Windows Kits\\10\\include\\10.0.10240.0\\ucrt\\share.h\r\n";

  // Double the repetition count until one round takes over two seconds,
  // then report the average time per Parse() call for that round.
  for (int limit = 1 << 10; limit < (1<<20); limit *= 2) {
    int64_t start = GetTimeMillis();
    for (int rep = 0; rep < limit; ++rep) {
      string output;
      string err;

      CLParser parser;
      if (!parser.Parse(perf_testdata, "", &output, &err)) {
        printf("%s\n", err.c_str());
        return 1;
      }
    }
    int64_t end = GetTimeMillis();

    if (end - start > 2000) {
      int delta_ms = (int)(end - start);
      printf("Parse %d times in %dms avg %.1fus\n",
             limit, delta_ms, float(delta_ms * 1000) / limit);
      break;
    }
  }

  return 0;
}

117
src/clparser_test.cc Normal file
View File

@ -0,0 +1,117 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "clparser.h"
#include "test.h"
#include "util.h"
// FilterShowIncludes returns the bare path (prefix and indentation
// stripped), or "" for lines that are not /showIncludes output; a
// custom deps_prefix replaces the English default.
TEST(CLParserTest, ShowIncludes) {
  ASSERT_EQ("", CLParser::FilterShowIncludes("", ""));

  ASSERT_EQ("", CLParser::FilterShowIncludes("Sample compiler output", ""));
  ASSERT_EQ("c:\\Some Files\\foobar.h",
            CLParser::FilterShowIncludes("Note: including file: "
                                         "c:\\Some Files\\foobar.h", ""));
  ASSERT_EQ("c:\\initspaces.h",
            CLParser::FilterShowIncludes("Note: including file: "
                                         "c:\\initspaces.h", ""));
  ASSERT_EQ("c:\\initspaces.h",
            CLParser::FilterShowIncludes("Non-default prefix: inc file: "
                                         "c:\\initspaces.h",
                                         "Non-default prefix: inc file:"));
}
// FilterInputFilename matches bare source filenames (case-insensitively)
// but not diagnostic lines that merely mention a source file.
TEST(CLParserTest, FilterInputFilename) {
  ASSERT_TRUE(CLParser::FilterInputFilename("foobar.cc"));
  ASSERT_TRUE(CLParser::FilterInputFilename("foo bar.cc"));
  ASSERT_TRUE(CLParser::FilterInputFilename("baz.c"));
  ASSERT_TRUE(CLParser::FilterInputFilename("FOOBAR.CC"));

  ASSERT_FALSE(CLParser::FilterInputFilename(
      "src\\cl_helper.cc(166) : fatal error C1075: end "
      "of file found ..."));
}
// Parse with a custom prefix: include lines are captured into includes_
// while all other lines pass through to the filtered output.
TEST(CLParserTest, ParseSimple) {
  CLParser parser;
  string output, err;
  ASSERT_TRUE(parser.Parse(
      "foo\r\n"
      "Note: inc file prefix: foo.h\r\n"
      "bar\r\n",
      "Note: inc file prefix:", &output, &err));

  ASSERT_EQ("foo\nbar\n", output);
  ASSERT_EQ(1u, parser.includes_.size());
  ASSERT_EQ("foo.h", *parser.includes_.begin());
}
// Echoed input filenames are dropped from the filtered output while
// real compiler messages are kept.
TEST(CLParserTest, ParseFilenameFilter) {
  CLParser parser;
  string output, err;
  ASSERT_TRUE(parser.Parse(
      "foo.cc\r\n"
      "cl: warning\r\n",
      "", &output, &err));
  ASSERT_EQ("cl: warning\n", output);
}
// Includes under system-looking paths are silently discarded; only
// non-system includes end up in includes_.
TEST(CLParserTest, ParseSystemInclude) {
  CLParser parser;
  string output, err;
  ASSERT_TRUE(parser.Parse(
      "Note: including file: c:\\Program Files\\foo.h\r\n"
      "Note: including file: d:\\Microsoft Visual Studio\\bar.h\r\n"
      "Note: including file: path.h\r\n",
      "", &output, &err));

  // We should have dropped the first two includes because they look like
  // system headers.
  ASSERT_EQ("", output);
  ASSERT_EQ(1u, parser.includes_.size());
  ASSERT_EQ("path.h", *parser.includes_.begin());
}
// A header reported more than once is stored only once (includes_ is a
// set).
TEST(CLParserTest, DuplicatedHeader) {
  CLParser parser;
  string output, err;
  ASSERT_TRUE(parser.Parse(
      "Note: including file: foo.h\r\n"
      "Note: including file: bar.h\r\n"
      "Note: including file: foo.h\r\n",
      "", &output, &err));

  // We should have dropped one copy of foo.h.
  ASSERT_EQ("", output);
  ASSERT_EQ(2u, parser.includes_.size());
}
// Paths must be canonicalized before deduplication: "sub/./foo.h" and
// "sub\foo.h" (or "sub/foo.h" on POSIX) are the same header.
TEST(CLParserTest, DuplicatedHeaderPathConverted) {
  CLParser parser;
  string output, err;

  // This isn't inline in the Parse() call below because the #ifdef in
  // a macro expansion would confuse MSVC2013's preprocessor.
  const char kInput[] =
      "Note: including file: sub/./foo.h\r\n"
      "Note: including file: bar.h\r\n"
#ifdef _WIN32
      "Note: including file: sub\\foo.h\r\n";
#else
      "Note: including file: sub/foo.h\r\n";
#endif
  ASSERT_TRUE(parser.Parse(kInput, "", &output, &err));

  // We should have dropped one copy of foo.h.
  ASSERT_EQ("", output);
  ASSERT_EQ(2u, parser.includes_.size());
}

21
src/debug_flags.cc Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Definitions of the global debug flags declared in debug_flags.h.
// NOTE(review): presumably toggled from ninja's -d debug options —
// confirm against the command-line handling in the driver.
bool g_explaining = false;
bool g_keep_depfile = false;
bool g_keep_rsp = false;
bool g_experimental_statcache = true;

33
src/debug_flags.h Normal file
View File

@ -0,0 +1,33 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_EXPLAIN_H_
#define NINJA_EXPLAIN_H_

#include <stdio.h>

/// Print an "explain" diagnostic to stderr when g_explaining is set.
/// Wrapped in do { } while (0) so the macro behaves as a single
/// statement: the previous bare-brace form made
/// `if (x) EXPLAIN(...); else ...` a syntax error (stray semicolon
/// after the block) and risked dangling-else bugs.
#define EXPLAIN(fmt, ...) do {                                        \
  if (g_explaining)                                                   \
    fprintf(stderr, "ninja explain: " fmt "\n", __VA_ARGS__);         \
} while (0)

// Global debug flags; definitions live in debug_flags.cc.
// NOTE(review): presumably set from ninja's -d debug options — confirm
// against the driver's flag parsing.
extern bool g_explaining;
extern bool g_keep_depfile;
extern bool g_keep_rsp;
extern bool g_experimental_statcache;

#endif  // NINJA_EXPLAIN_H_

369
src/depfile_parser.cc Normal file
View File

@ -0,0 +1,369 @@
/* Generated by re2c 1.3 */
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "depfile_parser.h"
#include "util.h"
#include <algorithm>
// Construct a parser, storing the caller-supplied options for use
// during Parse().
DepfileParser::DepfileParser(DepfileParserOptions options)
  : options_(options)
{
}
// A note on backslashes in Makefiles, from reading the docs:
// Backslash-newline is the line continuation character.
// Backslash-# escapes a # (otherwise meaningful as a comment start).
// Backslash-% escapes a % (otherwise meaningful as a special).
// Finally, quoting the GNU manual, "Backslashes that are not in danger
// of quoting % characters go unmolested."
// How do you end a line with a backslash? The netbsd Make docs suggest
// reading the result of a shell command echoing a backslash!
//
// Rather than implement all of above, we follow what GCC/Clang produces:
// Backslashes escape a space or hash sign.
// When a space is preceded by 2N+1 backslashes, it is represents N backslashes
// followed by space.
// When a space is preceded by 2N backslashes, it represents 2N backslashes at
// the end of a filename.
// A hash sign is escaped by a single backslash. All other backslashes remain
// unchanged.
//
// If anyone actually has depfiles that rely on the more complicated
// behavior we can adjust this.
// Parse a Makefile-style depfile in-place.  De-escaping is done by writing
// through 'out', which trails 'in'; parsed names are recorded as StringPieces
// pointing into 'content'.  Returns false (with *err set) on malformed input.
//
// NOTE: the lexer below is GENERATED by re2c from depfile_parser.in.cc;
// hand edits to the state machine will be lost on regeneration.
bool DepfileParser::Parse(string* content, string* err) {
  // in: current parser input point.
  // end: end of input.
  // parsing_targets: whether we are parsing targets or dependencies.
  char* in = &(*content)[0];
  char* end = in + content->size();
  bool have_target = false;
  bool parsing_targets = true;
  bool poisoned_input = false;
  while (in < end) {
    bool have_newline = false;
    // out: current output point (typically same as in, but can fall behind
    // as we de-escape backslashes).
    char* out = in;
    // filename: start of the current parsed filename.
    char* filename = out;
    for (;;) {
      // start: beginning of the current parsed span.
      const char* start = in;
      char* yymarker = NULL;
      {
        unsigned char yych;
        // Character-class bitmap: bit 0x80 marks a plain filename character.
        static const unsigned char yybm[] = {
            0,   0,   0,   0,   0,   0,   0,   0,
            0,   0,   0,   0,   0,   0,   0,   0,
            0,   0,   0,   0,   0,   0,   0,   0,
            0,   0,   0,   0,   0,   0,   0,   0,
            0, 128,   0,   0,   0, 128,   0,   0,
          128, 128,   0, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128,   0,   0, 128,   0,   0,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128,   0, 128,   0, 128,
            0, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128,   0, 128, 128,   0,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
          128, 128, 128, 128, 128, 128, 128, 128,
        };
        yych = *in;
        if (yybm[0+yych] & 128) {
          goto yy9;
        }
        if (yych <= '\r') {
          if (yych <= '\t') {
            if (yych >= 0x01) goto yy4;
          } else {
            if (yych <= '\n') goto yy6;
            if (yych <= '\f') goto yy4;
            goto yy8;
          }
        } else {
          if (yych <= '$') {
            if (yych <= '#') goto yy4;
            goto yy12;
          } else {
            if (yych <= '?') goto yy4;
            if (yych <= '\\') goto yy13;
            goto yy4;
          }
        }
        ++in;
        {
          break;
        }
yy4:
        ++in;
yy5:
        {
          // For any other character (e.g. whitespace), swallow it here,
          // allowing the outer logic to loop around again.
          break;
        }
yy6:
        ++in;
        {
          // A newline ends the current file name and the current rule.
          have_newline = true;
          break;
        }
yy8:
        // '\r' seen: only '\r\n' counts as a newline.
        yych = *++in;
        if (yych == '\n') goto yy6;
        goto yy5;
yy9:
        yych = *++in;
        if (yybm[0+yych] & 128) {
          goto yy9;
        }
yy11:
        {
          // Got a span of plain text.
          int len = (int)(in - start);
          // Need to shift it over if we're overwriting backslashes.
          if (out < start)
            memmove(out, start, len);
          out += len;
          continue;
        }
yy12:
        yych = *++in;
        if (yych == '$') goto yy14;
        goto yy5;
yy13:
        // Backslash seen: dispatch on the following character.
        yych = *(yymarker = ++in);
        if (yych <= ' ') {
          if (yych <= '\n') {
            if (yych <= 0x00) goto yy5;
            if (yych <= '\t') goto yy16;
            goto yy17;
          } else {
            if (yych == '\r') goto yy19;
            if (yych <= 0x1F) goto yy16;
            goto yy21;
          }
        } else {
          if (yych <= '9') {
            if (yych == '#') goto yy23;
            goto yy16;
          } else {
            if (yych <= ':') goto yy25;
            if (yych == '\\') goto yy27;
            goto yy16;
          }
        }
yy14:
        ++in;
        {
          // De-escape dollar character.
          *out++ = '$';
          continue;
        }
yy16:
        ++in;
        goto yy11;
yy17:
        ++in;
        {
          // A line continuation ends the current file name.
          break;
        }
yy19:
        yych = *++in;
        if (yych == '\n') goto yy17;
        in = yymarker;
        goto yy5;
yy21:
        ++in;
        {
          // 2N+1 backslashes plus space -> N backslashes plus space.
          int len = (int)(in - start);
          int n = len / 2 - 1;
          if (out < start)
            memset(out, '\\', n);
          out += n;
          *out++ = ' ';
          continue;
        }
yy23:
        ++in;
        {
          // De-escape hash sign, but preserve other leading backslashes.
          int len = (int)(in - start);
          if (len > 2 && out < start)
            memset(out, '\\', len - 2);
          out += len - 2;
          *out++ = '#';
          continue;
        }
yy25:
        // Backslash(es) + ':' — peek ahead to decide escaped colon vs
        // rule-terminating colon.
        yych = *++in;
        if (yych <= '\f') {
          if (yych <= 0x00) goto yy28;
          if (yych <= 0x08) goto yy26;
          if (yych <= '\n') goto yy28;
        } else {
          if (yych <= '\r') goto yy28;
          if (yych == ' ') goto yy28;
        }
yy26:
        {
          // De-escape colon sign, but preserve other leading backslashes.
          // Regular expression uses lookahead to make sure that no whitespace
          // nor EOF follows. In that case it'd be the : at the end of a target
          int len = (int)(in - start);
          if (len > 2 && out < start)
            memset(out, '\\', len - 2);
          out += len - 2;
          *out++ = ':';
          continue;
        }
yy27:
        yych = *++in;
        if (yych <= ' ') {
          if (yych <= '\n') {
            if (yych <= 0x00) goto yy11;
            if (yych <= '\t') goto yy16;
            goto yy11;
          } else {
            if (yych == '\r') goto yy11;
            if (yych <= 0x1F) goto yy16;
            goto yy30;
          }
        } else {
          if (yych <= '9') {
            if (yych == '#') goto yy23;
            goto yy16;
          } else {
            if (yych <= ':') goto yy25;
            if (yych == '\\') goto yy32;
            goto yy16;
          }
        }
yy28:
        ++in;
        {
          // Backslash followed by : and whitespace.
          // It is therefore normal text and not an escaped colon
          int len = (int)(in - start - 1);
          // Need to shift it over if we're overwriting backslashes.
          if (out < start)
            memmove(out, start, len);
          out += len;
          if (*(in - 1) == '\n')
            have_newline = true;
          break;
        }
yy30:
        ++in;
        {
          // 2N backslashes plus space -> 2N backslashes, end of filename.
          int len = (int)(in - start);
          if (out < start)
            memset(out, '\\', len - 1);
          out += len - 1;
          break;
        }
yy32:
        yych = *++in;
        if (yych <= ' ') {
          if (yych <= '\n') {
            if (yych <= 0x00) goto yy11;
            if (yych <= '\t') goto yy16;
            goto yy11;
          } else {
            if (yych == '\r') goto yy11;
            if (yych <= 0x1F) goto yy16;
            goto yy21;
          }
        } else {
          if (yych <= '9') {
            if (yych == '#') goto yy23;
            goto yy16;
          } else {
            if (yych <= ':') goto yy25;
            if (yych == '\\') goto yy27;
            goto yy16;
          }
        }
      }
    }

    // One filename span has ended; record it as target or dependency.
    int len = (int)(out - filename);
    const bool is_dependency = !parsing_targets;
    if (len > 0 && filename[len - 1] == ':') {
      len--;  // Strip off trailing colon, if any.
      parsing_targets = false;
      have_target = true;
    }

    if (len > 0) {
      StringPiece piece = StringPiece(filename, len);
      // If we've seen this as an input before, skip it.
      std::vector<StringPiece>::iterator pos = std::find(ins_.begin(), ins_.end(), piece);
      if (pos == ins_.end()) {
        if (is_dependency) {
          if (poisoned_input) {
            *err = "inputs may not also have inputs";
            return false;
          }
          // New input.
          ins_.push_back(piece);
        } else {
          // Check for a new output.
          if (std::find(outs_.begin(), outs_.end(), piece) == outs_.end())
            outs_.push_back(piece);
        }
      } else if (!is_dependency) {
        // We've passed an input on the left side; reject new inputs.
        poisoned_input = true;
      }
    }

    if (have_newline) {
      // A newline ends a rule so the next filename will be a new target.
      parsing_targets = true;
      poisoned_input = false;
    }
  }
  if (!have_target) {
    *err = "expected ':' in depfile";
    return false;
  }
  return true;
}

43
src/depfile_parser.h Normal file
View File

@ -0,0 +1,43 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_DEPFILE_PARSER_H_
#define NINJA_DEPFILE_PARSER_H_
#include <string>
#include <vector>
using namespace std;
#include "string_piece.h"
/// Options for tweaking depfile parsing.  Currently empty, but kept as a
/// constructor parameter so new options can be added without changing the
/// DepfileParser interface.
struct DepfileParserOptions {
  DepfileParserOptions() {}
};
/// Parser for the dependency information emitted by gcc's -M flags.
struct DepfileParser {
  explicit DepfileParser(DepfileParserOptions options =
                         DepfileParserOptions());

  /// Parse an input file.  Input must be NUL-terminated.
  /// Warning: may mutate the content in-place and parsed StringPieces are
  /// pointers within it.
  bool Parse(std::string* content, std::string* err);

  /// Targets (left of ':'), in order of first appearance, de-duplicated.
  std::vector<StringPiece> outs_;
  /// Dependencies (right of ':'), in order of first appearance, de-duplicated.
  std::vector<StringPiece> ins_;

  DepfileParserOptions options_;
};
#endif // NINJA_DEPFILE_PARSER_H_

205
src/depfile_parser.in.cc Normal file
View File

@ -0,0 +1,205 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "depfile_parser.h"
#include "util.h"
#include <algorithm>
// Construct a parser, storing the (currently empty) option set for Parse().
DepfileParser::DepfileParser(DepfileParserOptions options)
    : options_(options)
{
}
// A note on backslashes in Makefiles, from reading the docs:
// Backslash-newline is the line continuation character.
// Backslash-# escapes a # (otherwise meaningful as a comment start).
// Backslash-% escapes a % (otherwise meaningful as a special).
// Finally, quoting the GNU manual, "Backslashes that are not in danger
// of quoting % characters go unmolested."
// How do you end a line with a backslash? The netbsd Make docs suggest
// reading the result of a shell command echoing a backslash!
//
// Rather than implement all of above, we follow what GCC/Clang produces:
// Backslashes escape a space or hash sign.
// When a space is preceded by 2N+1 backslashes, it is represents N backslashes
// followed by space.
// When a space is preceded by 2N backslashes, it represents 2N backslashes at
// the end of a filename.
// A hash sign is escaped by a single backslash. All other backslashes remain
// unchanged.
//
// If anyone actually has depfiles that rely on the more complicated
// behavior we can adjust this.
// Parse a Makefile-style depfile in-place.  De-escaping is done by writing
// through 'out', which trails 'in'; parsed names are recorded as StringPieces
// pointing into 'content'.  Returns false (with *err set) on malformed input.
// The /*!re2c ... */ block below is the source the re2c tool compiles into
// depfile_parser.cc; keep it in sync with the generated file.
bool DepfileParser::Parse(string* content, string* err) {
  // in: current parser input point.
  // end: end of input.
  // parsing_targets: whether we are parsing targets or dependencies.
  char* in = &(*content)[0];
  char* end = in + content->size();
  bool have_target = false;
  bool parsing_targets = true;
  bool poisoned_input = false;
  while (in < end) {
    bool have_newline = false;
    // out: current output point (typically same as in, but can fall behind
    // as we de-escape backslashes).
    char* out = in;
    // filename: start of the current parsed filename.
    char* filename = out;
    for (;;) {
      // start: beginning of the current parsed span.
      const char* start = in;
      char* yymarker = NULL;
      /*!re2c
      re2c:define:YYCTYPE = "unsigned char";
      re2c:define:YYCURSOR = in;
      re2c:define:YYLIMIT = end;
      re2c:define:YYMARKER = yymarker;
      re2c:yyfill:enable = 0;
      re2c:indent:top = 2;
      re2c:indent:string = " ";
      nul = "\000";
      newline = '\r'?'\n';

      '\\\\'* '\\ ' {
        // 2N+1 backslashes plus space -> N backslashes plus space.
        int len = (int)(in - start);
        int n = len / 2 - 1;
        if (out < start)
          memset(out, '\\', n);
        out += n;
        *out++ = ' ';
        continue;
      }
      '\\\\'+ ' ' {
        // 2N backslashes plus space -> 2N backslashes, end of filename.
        int len = (int)(in - start);
        if (out < start)
          memset(out, '\\', len - 1);
        out += len - 1;
        break;
      }
      '\\'+ '#' {
        // De-escape hash sign, but preserve other leading backslashes.
        int len = (int)(in - start);
        if (len > 2 && out < start)
          memset(out, '\\', len - 2);
        out += len - 2;
        *out++ = '#';
        continue;
      }
      '\\'+ ':' [\x00\x20\r\n\t] {
        // Backslash followed by : and whitespace.
        // It is therefore normal text and not an escaped colon
        int len = (int)(in - start - 1);
        // Need to shift it over if we're overwriting backslashes.
        if (out < start)
          memmove(out, start, len);
        out += len;
        if (*(in - 1) == '\n')
          have_newline = true;
        break;
      }
      '\\'+ ':' {
        // De-escape colon sign, but preserve other leading backslashes.
        // Regular expression uses lookahead to make sure that no whitespace
        // nor EOF follows. In that case it'd be the : at the end of a target
        int len = (int)(in - start);
        if (len > 2 && out < start)
          memset(out, '\\', len - 2);
        out += len - 2;
        *out++ = ':';
        continue;
      }
      '$$' {
        // De-escape dollar character.
        *out++ = '$';
        continue;
      }
      '\\'+ [^\000\r\n] | [a-zA-Z0-9+,/_:.~()}{%=@\x5B\x5D!\x80-\xFF-]+ {
        // Got a span of plain text.
        int len = (int)(in - start);
        // Need to shift it over if we're overwriting backslashes.
        if (out < start)
          memmove(out, start, len);
        out += len;
        continue;
      }
      nul {
        break;
      }
      '\\' newline {
        // A line continuation ends the current file name.
        break;
      }
      newline {
        // A newline ends the current file name and the current rule.
        have_newline = true;
        break;
      }
      [^] {
        // For any other character (e.g. whitespace), swallow it here,
        // allowing the outer logic to loop around again.
        break;
      }
      */
    }

    // One filename span has ended; record it as target or dependency.
    int len = (int)(out - filename);
    const bool is_dependency = !parsing_targets;
    if (len > 0 && filename[len - 1] == ':') {
      len--;  // Strip off trailing colon, if any.
      parsing_targets = false;
      have_target = true;
    }

    if (len > 0) {
      StringPiece piece = StringPiece(filename, len);
      // If we've seen this as an input before, skip it.
      std::vector<StringPiece>::iterator pos = std::find(ins_.begin(), ins_.end(), piece);
      if (pos == ins_.end()) {
        if (is_dependency) {
          if (poisoned_input) {
            *err = "inputs may not also have inputs";
            return false;
          }
          // New input.
          ins_.push_back(piece);
        } else {
          // Check for a new output.
          if (std::find(outs_.begin(), outs_.end(), piece) == outs_.end())
            outs_.push_back(piece);
        }
      } else if (!is_dependency) {
        // We've passed an input on the left side; reject new inputs.
        poisoned_input = true;
      }
    }

    if (have_newline) {
      // A newline ends a rule so the next filename will be a new target.
      parsing_targets = true;
      poisoned_input = false;
    }
  }
  if (!have_target) {
    *err = "expected ':' in depfile";
    return false;
  }
  return true;
}

View File

@ -0,0 +1,77 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stdio.h>
#include <stdlib.h>
#include "depfile_parser.h"
#include "util.h"
#include "metrics.h"
int main(int argc, char* argv[]) {
if (argc < 2) {
printf("usage: %s <file1> <file2...>\n", argv[0]);
return 1;
}
vector<float> times;
for (int i = 1; i < argc; ++i) {
const char* filename = argv[i];
for (int limit = 1 << 10; limit < (1<<20); limit *= 2) {
int64_t start = GetTimeMillis();
for (int rep = 0; rep < limit; ++rep) {
string buf;
string err;
if (ReadFile(filename, &buf, &err) < 0) {
printf("%s: %s\n", filename, err.c_str());
return 1;
}
DepfileParser parser;
if (!parser.Parse(&buf, &err)) {
printf("%s: %s\n", filename, err.c_str());
return 1;
}
}
int64_t end = GetTimeMillis();
if (end - start > 100) {
int delta = (int)(end - start);
float time = delta*1000 / (float)limit;
printf("%s: %.1fus\n", filename, time);
times.push_back(time);
break;
}
}
}
if (!times.empty()) {
float min = times[0];
float max = times[0];
float total = 0;
for (size_t i = 0; i < times.size(); ++i) {
total += times[i];
if (times[i] < min)
min = times[i];
else if (times[i] > max)
max = times[i];
}
printf("min %.1fus max %.1fus avg %.1fus\n",
min, max, total / times.size());
}
return 0;
}

378
src/depfile_parser_test.cc Normal file
View File

@ -0,0 +1,378 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "depfile_parser.h"
#include "test.h"
// Fixture: owns the parser and keeps the parsed input buffer alive, since
// the parser's StringPieces point into it.
struct DepfileParserTest : public testing::Test {
  bool Parse(const char* input, string* err);

  DepfileParser parser_;
  string input_;
};

// Copy 'input' into input_ (Parse mutates its argument in place) and parse it.
bool DepfileParserTest::Parse(const char* input, string* err) {
  input_ = input;
  return parser_.Parse(&input_, err);
}
// Smoke test: one rule with one target and four plain dependencies.
TEST_F(DepfileParserTest, Basic) {
  string err;
  EXPECT_TRUE(Parse(
"build/ninja.o: ninja.cc ninja.h eval_env.h manifest_parser.h\n",
      &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(1u, parser_.outs_.size());
  EXPECT_EQ("build/ninja.o", parser_.outs_[0].AsString());
  EXPECT_EQ(4u, parser_.ins_.size());
}

// Leading whitespace and a continuation before the rule are tolerated.
TEST_F(DepfileParserTest, EarlyNewlineAndWhitespace) {
  string err;
  EXPECT_TRUE(Parse(
" \\\n"
" out: in\n",
      &err));
  ASSERT_EQ("", err);
}

// Backslash-newline continues the dependency list onto the next line.
TEST_F(DepfileParserTest, Continuation) {
  string err;
  EXPECT_TRUE(Parse(
"foo.o: \\\n"
" bar.h baz.h\n",
      &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(1u, parser_.outs_.size());
  EXPECT_EQ("foo.o", parser_.outs_[0].AsString());
  EXPECT_EQ(2u, parser_.ins_.size());
}

// Same as Continuation, but with Windows-style \r\n line endings.
TEST_F(DepfileParserTest, CarriageReturnContinuation) {
  string err;
  EXPECT_TRUE(Parse(
"foo.o: \\\r\n"
" bar.h baz.h\r\n",
      &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(1u, parser_.outs_.size());
  EXPECT_EQ("foo.o", parser_.outs_[0].AsString());
  EXPECT_EQ(2u, parser_.ins_.size());
}
// Windows-style paths: lone backslashes inside names are kept verbatim.
TEST_F(DepfileParserTest, BackSlashes) {
  string err;
  EXPECT_TRUE(Parse(
"Project\\Dir\\Build\\Release8\\Foo\\Foo.res : \\\n"
" Dir\\Library\\Foo.rc \\\n"
" Dir\\Library\\Version\\Bar.h \\\n"
" Dir\\Library\\Foo.ico \\\n"
" Project\\Thing\\Bar.tlb \\\n",
      &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(1u, parser_.outs_.size());
  EXPECT_EQ("Project\\Dir\\Build\\Release8\\Foo\\Foo.res",
            parser_.outs_[0].AsString());
  EXPECT_EQ(4u, parser_.ins_.size());
}

// Backslash-space escapes a literal space inside a file name.
TEST_F(DepfileParserTest, Spaces) {
  string err;
  EXPECT_TRUE(Parse(
"a\\ bc\\ def: a\\ b c d",
      &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(1u, parser_.outs_.size());
  EXPECT_EQ("a bc def",
            parser_.outs_[0].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("a b",
            parser_.ins_[0].AsString());
  EXPECT_EQ("c",
            parser_.ins_[1].AsString());
  EXPECT_EQ("d",
            parser_.ins_[2].AsString());
}

TEST_F(DepfileParserTest, MultipleBackslashes) {
  // Successive 2N+1 backslashes followed by space (' ') are replaced by N >= 0
  // backslashes and the space. A single backslash before hash sign is removed.
  // Other backslashes remain untouched (including 2N backslashes followed by
  // space).
  string err;
  EXPECT_TRUE(Parse(
"a\\ b\\#c.h: \\\\\\\\\\ \\\\\\\\ \\\\share\\info\\\\#1",
      &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(1u, parser_.outs_.size());
  EXPECT_EQ("a b#c.h",
            parser_.outs_[0].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("\\\\ ",
            parser_.ins_[0].AsString());
  EXPECT_EQ("\\\\\\\\",
            parser_.ins_[1].AsString());
  EXPECT_EQ("\\\\share\\info\\#1",
            parser_.ins_[2].AsString());
}

TEST_F(DepfileParserTest, Escapes) {
  // Put backslashes before a variety of characters, see which ones make
  // it through.
  string err;
  EXPECT_TRUE(Parse(
"\\!\\@\\#$$\\%\\^\\&\\[\\]\\\\:",
      &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(1u, parser_.outs_.size());
  EXPECT_EQ("\\!\\@#$\\%\\^\\&\\[\\]\\\\",
            parser_.outs_[0].AsString());
  ASSERT_EQ(0u, parser_.ins_.size());
}
// A backslash-escaped drive colon ("c\:") de-escapes to a plain ':' when not
// followed by whitespace.
TEST_F(DepfileParserTest, EscapedColons)
{
  std::string err;
  // Tests for correct parsing of depfiles produced on Windows
  // by both Clang, GCC pre 10 and GCC 10
  EXPECT_TRUE(Parse(
"c\\:\\gcc\\x86_64-w64-mingw32\\include\\stddef.o: \\\n"
" c:\\gcc\\x86_64-w64-mingw32\\include\\stddef.h \n",
      &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(1u, parser_.outs_.size());
  EXPECT_EQ("c:\\gcc\\x86_64-w64-mingw32\\include\\stddef.o",
            parser_.outs_[0].AsString());
  ASSERT_EQ(1u, parser_.ins_.size());
  EXPECT_EQ("c:\\gcc\\x86_64-w64-mingw32\\include\\stddef.h",
            parser_.ins_[0].AsString());
}

// "foo1\:" followed by whitespace/EOF: the ':' is the rule separator, so the
// target keeps its trailing backslash.
TEST_F(DepfileParserTest, EscapedTargetColon)
{
  std::string err;
  EXPECT_TRUE(Parse(
"foo1\\: x\n"
"foo1\\:\n"
"foo1\\:\r\n"
"foo1\\:\t\n"
"foo1\\:",
      &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(1u, parser_.outs_.size());
  EXPECT_EQ("foo1\\", parser_.outs_[0].AsString());
  ASSERT_EQ(1u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
}

TEST_F(DepfileParserTest, SpecialChars) {
  // See filenames like istreambuf.iterator_op!= in
  // https://github.com/google/libcxx/tree/master/test/iterators/stream.iterators/istreambuf.iterator/
  string err;
  EXPECT_TRUE(Parse(
"C:/Program\\ Files\\ (x86)/Microsoft\\ crtdefs.h: \\\n"
" en@quot.header~ t+t-x!=1 \\\n"
" openldap/slapd.d/cn=config/cn=schema/cn={0}core.ldif\\\n"
" Fu\303\244ball\\\n"
" a[1]b@2%c",
      &err));
  ASSERT_EQ("", err);
  ASSERT_EQ(1u, parser_.outs_.size());
  EXPECT_EQ("C:/Program Files (x86)/Microsoft crtdefs.h",
            parser_.outs_[0].AsString());
  ASSERT_EQ(5u, parser_.ins_.size());
  EXPECT_EQ("en@quot.header~",
            parser_.ins_[0].AsString());
  EXPECT_EQ("t+t-x!=1",
            parser_.ins_[1].AsString());
  EXPECT_EQ("openldap/slapd.d/cn=config/cn=schema/cn={0}core.ldif",
            parser_.ins_[2].AsString());
  EXPECT_EQ("Fu\303\244ball",
            parser_.ins_[3].AsString());
  EXPECT_EQ("a[1]b@2%c",
            parser_.ins_[4].AsString());
}
TEST_F(DepfileParserTest, UnifyMultipleOutputs) {
  // check that multiple duplicate targets are properly unified
  string err;
  EXPECT_TRUE(Parse("foo foo: x y z", &err));
  ASSERT_EQ(1u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
  EXPECT_EQ("y", parser_.ins_[1].AsString());
  EXPECT_EQ("z", parser_.ins_[2].AsString());
}

TEST_F(DepfileParserTest, MultipleDifferentOutputs) {
  // check that multiple different outputs are accepted by the parser
  string err;
  EXPECT_TRUE(Parse("foo bar: x y z", &err));
  ASSERT_EQ(2u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ("bar", parser_.outs_[1].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
  EXPECT_EQ("y", parser_.ins_[1].AsString());
  EXPECT_EQ("z", parser_.ins_[2].AsString());
}

// Rules with an empty dependency list add neither inputs nor a duplicate
// target.
TEST_F(DepfileParserTest, MultipleEmptyRules) {
  string err;
  EXPECT_TRUE(Parse("foo: x\n"
"foo: \n"
"foo:\n", &err));
  ASSERT_EQ(1u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ(1u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
}

// Repeated rules for the same target merge their dependency lists (LF).
TEST_F(DepfileParserTest, UnifyMultipleRulesLF) {
  string err;
  EXPECT_TRUE(Parse("foo: x\n"
"foo: y\n"
"foo \\\n"
"foo: z\n", &err));
  ASSERT_EQ(1u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
  EXPECT_EQ("y", parser_.ins_[1].AsString());
  EXPECT_EQ("z", parser_.ins_[2].AsString());
}

// Same as UnifyMultipleRulesLF, but with CRLF line endings.
TEST_F(DepfileParserTest, UnifyMultipleRulesCRLF) {
  string err;
  EXPECT_TRUE(Parse("foo: x\r\n"
"foo: y\r\n"
"foo \\\r\n"
"foo: z\r\n", &err));
  ASSERT_EQ(1u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
  EXPECT_EQ("y", parser_.ins_[1].AsString());
  EXPECT_EQ("z", parser_.ins_[2].AsString());
}
// Continuations and repeated rules mixed together still unify (LF).
TEST_F(DepfileParserTest, UnifyMixedRulesLF) {
  string err;
  EXPECT_TRUE(Parse("foo: x\\\n"
" y\n"
"foo \\\n"
"foo: z\n", &err));
  ASSERT_EQ(1u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
  EXPECT_EQ("y", parser_.ins_[1].AsString());
  EXPECT_EQ("z", parser_.ins_[2].AsString());
}

// Same as UnifyMixedRulesLF, but with CRLF line endings.
TEST_F(DepfileParserTest, UnifyMixedRulesCRLF) {
  string err;
  EXPECT_TRUE(Parse("foo: x\\\r\n"
" y\r\n"
"foo \\\r\n"
"foo: z\r\n", &err));
  ASSERT_EQ(1u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
  EXPECT_EQ("y", parser_.ins_[1].AsString());
  EXPECT_EQ("z", parser_.ins_[2].AsString());
}

// Leading whitespace before each rule is ignored (LF).
TEST_F(DepfileParserTest, IndentedRulesLF) {
  string err;
  EXPECT_TRUE(Parse(" foo: x\n"
" foo: y\n"
" foo: z\n", &err));
  ASSERT_EQ(1u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
  EXPECT_EQ("y", parser_.ins_[1].AsString());
  EXPECT_EQ("z", parser_.ins_[2].AsString());
}

// Same as IndentedRulesLF, but with CRLF line endings.
TEST_F(DepfileParserTest, IndentedRulesCRLF) {
  string err;
  EXPECT_TRUE(Parse(" foo: x\r\n"
" foo: y\r\n"
" foo: z\r\n", &err));
  ASSERT_EQ(1u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
  EXPECT_EQ("y", parser_.ins_[1].AsString());
  EXPECT_EQ("z", parser_.ins_[2].AsString());
}

// gcc -MP emits phony empty rules for each header; they must be tolerated
// without becoming extra targets.
TEST_F(DepfileParserTest, TolerateMP) {
  string err;
  EXPECT_TRUE(Parse("foo: x y z\n"
"x:\n"
"y:\n"
"z:\n", &err));
  ASSERT_EQ(1u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
  EXPECT_EQ("y", parser_.ins_[1].AsString());
  EXPECT_EQ("z", parser_.ins_[2].AsString());
}

// -MP style phony rules interleaved with real rules are also tolerated.
TEST_F(DepfileParserTest, MultipleRulesTolerateMP) {
  string err;
  EXPECT_TRUE(Parse("foo: x\n"
"x:\n"
"foo: y\n"
"y:\n"
"foo: z\n"
"z:\n", &err));
  ASSERT_EQ(1u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
  EXPECT_EQ("y", parser_.ins_[1].AsString());
  EXPECT_EQ("z", parser_.ins_[2].AsString());
}

TEST_F(DepfileParserTest, MultipleRulesDifferentOutputs) {
  // check that multiple different outputs are accepted by the parser
  // when spread across multiple rules
  string err;
  EXPECT_TRUE(Parse("foo: x y\n"
"bar: y z\n", &err));
  ASSERT_EQ(2u, parser_.outs_.size());
  ASSERT_EQ("foo", parser_.outs_[0].AsString());
  ASSERT_EQ("bar", parser_.outs_[1].AsString());
  ASSERT_EQ(3u, parser_.ins_.size());
  EXPECT_EQ("x", parser_.ins_[0].AsString());
  EXPECT_EQ("y", parser_.ins_[1].AsString());
  EXPECT_EQ("z", parser_.ins_[2].AsString());
}

// A name that already appeared as an input may not later have inputs of its
// own: that rejects malformed -MP-like output.
TEST_F(DepfileParserTest, BuggyMP) {
  std::string err;
  EXPECT_FALSE(Parse("foo: x y z\n"
"x: alsoin\n"
"y:\n"
"z:\n", &err));
  ASSERT_EQ("inputs may not also have inputs", err);
}

434
src/deps_log.cc Normal file
View File

@ -0,0 +1,434 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "deps_log.h"
#include <assert.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#ifndef _WIN32
#include <unistd.h>
#elif defined(_MSC_VER) && (_MSC_VER < 1900)
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
#endif
#include "graph.h"
#include "metrics.h"
#include "state.h"
#include "util.h"
// The version is stored as 4 bytes after the signature and also serves as a
// byte order mark. Signature and version combined are 16 bytes long.
const char kFileSignature[] = "# ninjadeps\n";
const int kCurrentVersion = 4;

// Record size is currently limited to less than the full 32 bit, due to
// internal buffers having to have this size.
// (The size word's high bit is reserved to flag a deps record; see
// RecordDeps.)
const unsigned kMaxRecordSize = (1 << 19) - 1;
// Ensure the log file is closed (and created, even if nothing was recorded).
DepsLog::~DepsLog() {
  Close();
}
// Prepare the log for appending at 'path'.  Recompacts first if a prior load
// flagged too many dead records.  Returns false with *err set on failure.
bool DepsLog::OpenForWrite(const string& path, string* err) {
  if (needs_recompaction_ && !Recompact(path, err))
    return false;

  assert(!file_);
  // The actual fopen() is deferred until the first write attempt.
  file_path_ = path;
  return true;
}
// Convenience overload: forward a vector of dependency nodes to the
// array-based RecordDeps().  The cast strips the const the vector reference
// imposes on its Node* elements.
bool DepsLog::RecordDeps(Node* node, TimeStamp mtime,
                         const vector<Node*>& nodes) {
  return RecordDeps(node, mtime, nodes.size(),
                    nodes.empty() ? NULL : (Node**)&nodes.front());
}
// Record that 'node' depends on 'nodes' as of 'mtime', both on disk and in
// memory.  Skips the disk write entirely when an identical record already
// exists.  Returns false on write failure (errno is ERANGE when the record
// would exceed kMaxRecordSize).
bool DepsLog::RecordDeps(Node* node, TimeStamp mtime,
                         int node_count, Node** nodes) {
  // Track whether there's any new data to be recorded.
  bool made_change = false;

  // Assign ids to all nodes that are missing one.
  if (node->id() < 0) {
    if (!RecordId(node))
      return false;
    made_change = true;
  }
  for (int i = 0; i < node_count; ++i) {
    if (nodes[i]->id() < 0) {
      if (!RecordId(nodes[i]))
        return false;
      made_change = true;
    }
  }

  // See if the new data is different than the existing data, if any.
  if (!made_change) {
    Deps* deps = GetDeps(node);
    if (!deps ||
        deps->mtime != mtime ||
        deps->node_count != node_count) {
      made_change = true;
    } else {
      for (int i = 0; i < node_count; ++i) {
        if (deps->nodes[i] != nodes[i]) {
          made_change = true;
          break;
        }
      }
    }
  }

  // Don't write anything if there's no new info.
  if (!made_change)
    return true;

  // Update on-disk representation.
  // Record layout: size word, output id, mtime low word, mtime high word,
  // then one id per dependency.
  unsigned size = 4 * (1 + 2 + node_count);
  if (size > kMaxRecordSize) {
    errno = ERANGE;
    return false;
  }

  if (!OpenForWriteIfNeeded()) {
    return false;
  }
  size |= 0x80000000;  // Deps record: set high bit.
  if (fwrite(&size, 4, 1, file_) < 1)
    return false;
  int id = node->id();
  if (fwrite(&id, 4, 1, file_) < 1)
    return false;
  // The 64-bit mtime is split into two 32-bit words, low word first.
  uint32_t mtime_part = static_cast<uint32_t>(mtime & 0xffffffff);
  if (fwrite(&mtime_part, 4, 1, file_) < 1)
    return false;
  mtime_part = static_cast<uint32_t>((mtime >> 32) & 0xffffffff);
  if (fwrite(&mtime_part, 4, 1, file_) < 1)
    return false;
  for (int i = 0; i < node_count; ++i) {
    id = nodes[i]->id();
    if (fwrite(&id, 4, 1, file_) < 1)
      return false;
  }
  if (fflush(file_) != 0)
    return false;

  // Update in-memory representation.
  Deps* deps = new Deps(mtime, node_count);
  for (int i = 0; i < node_count; ++i)
    deps->nodes[i] = nodes[i];
  UpdateDeps(node->id(), deps);

  return true;
}
// Close the log file, first making sure it exists on disk even when no
// record was ever written.
void DepsLog::Close() {
  OpenForWriteIfNeeded();  // create the file even if nothing has been recorded
  if (file_)
    fclose(file_);
  file_ = NULL;
}
// Load the on-disk deps log at |path| into memory, registering path records
// as Nodes in |state|.  Returns LOAD_NOT_FOUND when the file is missing,
// LOAD_ERROR on hard failures, and LOAD_SUCCESS otherwise -- including
// recoverable cases (bad header, truncated tail) where the log is discarded
// or truncated and |err| carries a warning message.
LoadStatus DepsLog::Load(const string& path, State* state, string* err) {
  METRIC_RECORD(".ninja_deps load");
  char buf[kMaxRecordSize + 1];
  FILE* f = fopen(path.c_str(), "rb");
  if (!f) {
    if (errno == ENOENT)
      return LOAD_NOT_FOUND;
    *err = strerror(errno);
    return LOAD_ERROR;
  }
  // The header is a text signature line followed by a 4-byte version int.
  bool valid_header = true;
  int version = 0;
  if (!fgets(buf, sizeof(buf), f) || fread(&version, 4, 1, f) < 1)
    valid_header = false;
  // Note: For version differences, this should migrate to the new format.
  // But the v1 format could sometimes (rarely) end up with invalid data, so
  // don't migrate v1 to v3 to force a rebuild. (v2 only existed for a few days,
  // and there was no release with it, so pretend that it never happened.)
  if (!valid_header || strcmp(buf, kFileSignature) != 0 ||
      version != kCurrentVersion) {
    if (version == 1)
      *err = "deps log version change; rebuilding";
    else
      *err = "bad deps log signature or version; starting over";
    fclose(f);
    unlink(path.c_str());
    // Don't report this as a failure. An empty deps log will cause
    // us to rebuild the outputs anyway.
    return LOAD_SUCCESS;
  }
  long offset;  // Start of the record currently being read; truncation point.
  bool read_failed = false;
  int unique_dep_record_count = 0;
  int total_dep_record_count = 0;
  for (;;) {
    offset = ftell(f);
    unsigned size;
    if (fread(&size, 4, 1, f) < 1) {
      if (!feof(f))
        read_failed = true;
      break;
    }
    // The high bit of the size word distinguishes deps records from path
    // records; the low 31 bits are the payload size in bytes.
    bool is_deps = (size >> 31) != 0;
    size = size & 0x7FFFFFFF;
    if (size > kMaxRecordSize || fread(buf, size, 1, f) < 1) {
      read_failed = true;
      break;
    }
    if (is_deps) {
      // Deps record: [out id, mtime lo, mtime hi, input id...] of 4-byte ints.
      assert(size % 4 == 0);
      int* deps_data = reinterpret_cast<int*>(buf);
      int out_id = deps_data[0];
      TimeStamp mtime;
      mtime = (TimeStamp)(((uint64_t)(unsigned int)deps_data[2] << 32) |
                          (uint64_t)(unsigned int)deps_data[1]);
      deps_data += 3;
      int deps_count = (size / 4) - 3;
      Deps* deps = new Deps(mtime, deps_count);
      for (int i = 0; i < deps_count; ++i) {
        assert(deps_data[i] < (int)nodes_.size());
        assert(nodes_[deps_data[i]]);
        deps->nodes[i] = nodes_[deps_data[i]];
      }
      total_dep_record_count++;
      // UpdateDeps() returns true when it replaced an earlier record, so a
      // false return means this output id was seen for the first time.
      if (!UpdateDeps(out_id, deps))
        ++unique_dep_record_count;
    } else {
      // Path record: path text padded to a 4-byte boundary, then a checksum.
      int path_size = size - 4;
      assert(path_size > 0);  // CanonicalizePath() rejects empty paths.
      // There can be up to 3 bytes of padding.
      if (buf[path_size - 1] == '\0') --path_size;
      if (buf[path_size - 1] == '\0') --path_size;
      if (buf[path_size - 1] == '\0') --path_size;
      StringPiece subpath(buf, path_size);
      // It is not necessary to pass in a correct slash_bits here. It will
      // either be a Node that's in the manifest (in which case it will already
      // have a correct slash_bits that GetNode will look up), or it is an
      // implicit dependency from a .d which does not affect the build command
      // (and so need not have its slashes maintained).
      Node* node = state->GetNode(subpath, 0);
      // Check that the expected index matches the actual index. This can only
      // happen if two ninja processes write to the same deps log concurrently.
      // (This uses unary complement to make the checksum look less like a
      // dependency record entry.)
      unsigned checksum = *reinterpret_cast<unsigned*>(buf + size - 4);
      int expected_id = ~checksum;
      int id = nodes_.size();
      if (id != expected_id) {
        read_failed = true;
        break;
      }
      assert(node->id() < 0);
      node->set_id(id);
      nodes_.push_back(node);
    }
  }
  if (read_failed) {
    // An error occurred while loading; try to recover by truncating the
    // file to the last fully-read record.
    if (ferror(f)) {
      *err = strerror(ferror(f));
    } else {
      *err = "premature end of file";
    }
    fclose(f);
    if (!Truncate(path, offset, err))
      return LOAD_ERROR;
    // The truncate succeeded; we'll just report the load error as a
    // warning because the build can proceed.
    *err += "; recovering";
    return LOAD_SUCCESS;
  }
  fclose(f);
  // Rebuild the log if there are too many dead records.
  int kMinCompactionEntryCount = 1000;
  int kCompactionRatio = 3;
  if (total_dep_record_count > kMinCompactionEntryCount &&
      total_dep_record_count > unique_dep_record_count * kCompactionRatio) {
    needs_recompaction_ = true;
  }
  return LOAD_SUCCESS;
}
// Return the recorded deps for |node|, or NULL if none are known.
DepsLog::Deps* DepsLog::GetDeps(Node* node) {
  // A node that was never assigned an id (never referenced in the deps),
  // or whose id lies beyond the deps table, has no recorded dependencies.
  int id = node->id();
  if (id < 0 || id >= (int)deps_.size())
    return NULL;
  return deps_[id];
}
// Rewrite the log at |path|, dropping dead and superseded records.  Writes
// the surviving records to a temporary file, then replaces the original via
// rename; node ids are reassigned to match the new file's ordering.
bool DepsLog::Recompact(const string& path, string* err) {
  METRIC_RECORD(".ninja_deps recompact");
  Close();
  string temp_path = path + ".recompact";
  // OpenForWrite() opens for append.  Make sure it's not appending to a
  // left-over file from a previous recompaction attempt that crashed somehow.
  unlink(temp_path.c_str());
  DepsLog new_log;
  if (!new_log.OpenForWrite(temp_path, err))
    return false;
  // Clear all known ids so that new ones can be reassigned.  The new indices
  // will refer to the ordering in new_log, not in the current log.
  for (vector<Node*>::iterator i = nodes_.begin(); i != nodes_.end(); ++i)
    (*i)->set_id(-1);
  // Write out all deps again.
  for (int old_id = 0; old_id < (int)deps_.size(); ++old_id) {
    Deps* deps = deps_[old_id];
    if (!deps) continue;  // If nodes_[old_id] is a leaf, it has no deps.
    if (!IsDepsEntryLiveFor(nodes_[old_id]))
      continue;
    // RecordDeps() re-registers the nodes in new_log, assigning fresh ids.
    if (!new_log.RecordDeps(nodes_[old_id], deps->mtime,
                            deps->node_count, deps->nodes)) {
      new_log.Close();
      return false;
    }
  }
  new_log.Close();
  // All nodes now have ids that refer to new_log, so steal its data.
  deps_.swap(new_log.deps_);
  nodes_.swap(new_log.nodes_);
  if (unlink(path.c_str()) < 0) {
    *err = strerror(errno);
    return false;
  }
  if (rename(temp_path.c_str(), path.c_str()) < 0) {
    *err = strerror(errno);
    return false;
  }
  return true;
}
// Returns true if the deps entry for |node| is still reachable from the
// manifest; Recompact() drops entries for which this is false.
bool DepsLog::IsDepsEntryLiveFor(Node* node) {
  // Skip entries that don't have in-edges or whose edges don't have a
  // "deps" attribute. They were in the deps log from previous builds, but
  // the files they were for were removed from the build and their deps
  // entries are no longer needed.
  // (Without the check for "deps", a chain of two or more nodes that each
  // had deps wouldn't be collected in a single recompaction.)
  return node->in_edge() && !node->in_edge()->GetBinding("deps").empty();
}
bool DepsLog::UpdateDeps(int out_id, Deps* deps) {
if (out_id >= (int)deps_.size())
deps_.resize(out_id + 1);
bool delete_old = deps_[out_id] != NULL;
if (delete_old)
delete deps_[out_id];
deps_[out_id] = deps;
return delete_old;
}
// Append a path record for |node| to the log and assign it the next dense
// id.  Record layout: path bytes, up to 3 NUL padding bytes (to reach a
// 4-byte boundary), then the one's complement of the id as a checksum.
// Returns false on failure; errno indicates the cause.
bool DepsLog::RecordId(Node* node) {
  int path_size = node->path().size();
  int padding = (4 - path_size % 4) % 4;  // Pad path to 4 byte boundary.
  unsigned size = path_size + padding + 4;
  if (size > kMaxRecordSize) {
    errno = ERANGE;
    return false;
  }
  if (!OpenForWriteIfNeeded()) {
    return false;
  }
  if (fwrite(&size, 4, 1, file_) < 1)
    return false;
  if (fwrite(node->path().data(), path_size, 1, file_) < 1) {
    assert(!node->path().empty());
    return false;
  }
  // "\0\0" is a 3-byte array (its terminator included), enough for the
  // maximum padding of 3 bytes.
  if (padding && fwrite("\0\0", padding, 1, file_) < 1)
    return false;
  int id = nodes_.size();
  // One's-complement checksum lets Load() detect id drift caused by
  // concurrent writers.
  unsigned checksum = ~(unsigned)id;
  if (fwrite(&checksum, 4, 1, file_) < 1)
    return false;
  // Flush after every record so records are never left partially written.
  if (fflush(file_) != 0)
    return false;
  node->set_id(id);
  nodes_.push_back(node);
  return true;
}
// Lazily open the file recorded by OpenForWrite().  Returns true when file_
// is ready for appending (or the lazy open already happened); on false,
// errno is set.  Writes the signature/version header when the file is new.
bool DepsLog::OpenForWriteIfNeeded() {
  if (file_path_.empty()) {
    // Already opened, or OpenForWrite() was never called: nothing to do.
    return true;
  }
  file_ = fopen(file_path_.c_str(), "ab");
  if (!file_) {
    return false;
  }
  // Set the buffer size to this and flush the file buffer after every record
  // to make sure records aren't written partially.
  setvbuf(file_, NULL, _IOFBF, kMaxRecordSize + 1);
  SetCloseOnExec(fileno(file_));
  // Opening a file in append mode doesn't set the file pointer to the file's
  // end on Windows. Do that explicitly.
  fseek(file_, 0, SEEK_END);
  if (ftell(file_) == 0) {
    // Brand-new (empty) file: write the signature line plus version int.
    if (fwrite(kFileSignature, sizeof(kFileSignature) - 1, 1, file_) < 1) {
      return false;
    }
    if (fwrite(&kCurrentVersion, 4, 1, file_) < 1) {
      return false;
    }
  }
  if (fflush(file_) != 0) {
    return false;
  }
  // Clearing the pending path marks the lazy open as completed.
  file_path_.clear();
  return true;
}

129
src/deps_log.h Normal file
View File

@ -0,0 +1,129 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_DEPS_LOG_H_
#define NINJA_DEPS_LOG_H_
#include <string>
#include <vector>
using namespace std;
#include <stdio.h>
#include "load_status.h"
#include "timestamp.h"
struct Node;
struct State;
/// As build commands run they can output extra dependency information
/// (e.g. header dependencies for C source) dynamically. DepsLog collects
/// that information at build time and uses it for subsequent builds.
///
/// The on-disk format is based on two primary design constraints:
/// - it must be written to as a stream (during the build, which may be
/// interrupted);
/// - it can be read all at once on startup. (Alternative designs, where
/// it contains indexing information, were considered and discarded as
///   too complicated to implement; if the file is small then reading it
/// fully on startup is acceptable.)
/// Here are some stats from the Windows Chrome dependency files, to
/// help guide the design space. The total text in the files sums to
/// 90mb so some compression is warranted to keep load-time fast.
/// There's about 10k files worth of dependencies that reference about
/// 40k total paths totalling 2mb of unique strings.
///
/// Based on these stats, here's the current design.
/// The file is structured as version header followed by a sequence of records.
/// Each record is either a path string or a dependency list.
/// Numbering the path strings in file order gives them dense integer ids.
/// A dependency list maps an output id to a list of input ids.
///
/// Concretely, a record is:
/// four bytes record length, high bit indicates record type
/// (but max record sizes are capped at 512kB)
/// path records contain the string name of the path, followed by up to 3
/// padding bytes to align on 4 byte boundaries, followed by the
/// one's complement of the expected index of the record (to detect
/// concurrent writes of multiple ninja processes to the log).
/// dependency records are an array of 4-byte integers
/// [output path id,
/// output path mtime (lower 4 bytes), output path mtime (upper 4 bytes),
/// input path id, input path id...]
/// (The mtime is compared against the on-disk output path mtime
/// to verify the stored data is up-to-date.)
/// If two records reference the same output the latter one in the file
/// wins, allowing updates to just be appended to the file. A separate
/// repacking step can run occasionally to remove dead records.
struct DepsLog {
  DepsLog() : needs_recompaction_(false), file_(NULL) {}
  ~DepsLog();
  // Writing (build-time) interface.
  bool OpenForWrite(const string& path, string* err);
  bool RecordDeps(Node* node, TimeStamp mtime, const vector<Node*>& nodes);
  bool RecordDeps(Node* node, TimeStamp mtime, int node_count, Node** nodes);
  void Close();
  // Reading (startup-time) interface.
  /// An output's recorded mtime plus its list of input nodes.  Owns the
  /// nodes array (but not the Node objects it points at).
  struct Deps {
    Deps(int64_t mtime, int node_count)
        : mtime(mtime), node_count(node_count), nodes(new Node*[node_count]) {}
    ~Deps() { delete [] nodes; }
    TimeStamp mtime;
    int node_count;
    Node** nodes;
  };
  LoadStatus Load(const string& path, State* state, string* err);
  Deps* GetDeps(Node* node);
  /// Rewrite the known log entries, throwing away old data.
  bool Recompact(const string& path, string* err);
  /// Returns if the deps entry for a node is still reachable from the manifest.
  ///
  /// The deps log can contain deps entries for files that were built in the
  /// past but are no longer part of the manifest.  This function returns if
  /// this is the case for a given node.  This function is slow, don't call
  /// it from code that runs on every build.
  bool IsDepsEntryLiveFor(Node* node);
  /// Used for tests.
  const vector<Node*>& nodes() const { return nodes_; }
  const vector<Deps*>& deps() const { return deps_; }
 private:
  // Updates the in-memory representation. Takes ownership of |deps|.
  // Returns true if a prior deps record was deleted.
  bool UpdateDeps(int out_id, Deps* deps);
  // Write a node name record, assigning it an id.
  bool RecordId(Node* node);
  /// Should be called before using file_. When false is returned, errno will
  /// be set.
  bool OpenForWriteIfNeeded();
  bool needs_recompaction_;  // Set by Load() when too many records are dead.
  FILE* file_;               // NULL until OpenForWriteIfNeeded() runs.
  std::string file_path_;    // Pending path for the lazy open; cleared after.
  /// Maps id -> Node.
  vector<Node*> nodes_;
  /// Maps id -> deps of that id.
  vector<Deps*> deps_;
  friend struct DepsLogTest;
};
#endif // NINJA_DEPS_LOG_H_

479
src/deps_log_test.cc Normal file
View File

@ -0,0 +1,479 @@
// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "deps_log.h"
#include <sys/stat.h>
#ifndef _WIN32
#include <unistd.h>
#endif
#include "graph.h"
#include "util.h"
#include "test.h"
namespace {
const char kTestFilename[] = "DepsLogTest-tempfile";
// Fixture that guarantees each test starts and ends without a log file.
struct DepsLogTest : public testing::Test {
  virtual void SetUp() {
    // In case a crashing test left a stale file behind.
    unlink(kTestFilename);
  }
  virtual void TearDown() {
    // Clean up the temp file created by the test body.
    unlink(kTestFilename);
  }
};
// Round-trip: records written through one DepsLog must read back
// identically, with node ids preserved, through a second DepsLog.
TEST_F(DepsLogTest, WriteRead) {
  State state1;
  DepsLog log1;
  string err;
  EXPECT_TRUE(log1.OpenForWrite(kTestFilename, &err));
  ASSERT_EQ("", err);
  {
    vector<Node*> deps;
    deps.push_back(state1.GetNode("foo.h", 0));
    deps.push_back(state1.GetNode("bar.h", 0));
    log1.RecordDeps(state1.GetNode("out.o", 0), 1, deps);
    deps.clear();
    deps.push_back(state1.GetNode("foo.h", 0));
    deps.push_back(state1.GetNode("bar2.h", 0));
    log1.RecordDeps(state1.GetNode("out2.o", 0), 2, deps);
    // The in-memory view should reflect the first record immediately.
    DepsLog::Deps* log_deps = log1.GetDeps(state1.GetNode("out.o", 0));
    ASSERT_TRUE(log_deps);
    ASSERT_EQ(1, log_deps->mtime);
    ASSERT_EQ(2, log_deps->node_count);
    ASSERT_EQ("foo.h", log_deps->nodes[0]->path());
    ASSERT_EQ("bar.h", log_deps->nodes[1]->path());
  }
  log1.Close();
  State state2;
  DepsLog log2;
  EXPECT_TRUE(log2.Load(kTestFilename, &state2, &err));
  ASSERT_EQ("", err);
  // Ids must be dense and identical across the reload.
  ASSERT_EQ(log1.nodes().size(), log2.nodes().size());
  for (int i = 0; i < (int)log1.nodes().size(); ++i) {
    Node* node1 = log1.nodes()[i];
    Node* node2 = log2.nodes()[i];
    ASSERT_EQ(i, node1->id());
    ASSERT_EQ(node1->id(), node2->id());
  }
  // Spot-check the entries in log2.
  DepsLog::Deps* log_deps = log2.GetDeps(state2.GetNode("out2.o", 0));
  ASSERT_TRUE(log_deps);
  ASSERT_EQ(2, log_deps->mtime);
  ASSERT_EQ(2, log_deps->node_count);
  ASSERT_EQ("foo.h", log_deps->nodes[0]->path());
  ASSERT_EQ("bar2.h", log_deps->nodes[1]->path());
}
// A deps list longer than 64k entries must round-trip through the log
// (guards the 31-bit record-size encoding against 16-bit assumptions).
TEST_F(DepsLogTest, LotsOfDeps) {
  const int kNumDeps = 100000;  // More than 64k.
  State state1;
  DepsLog log1;
  string err;
  EXPECT_TRUE(log1.OpenForWrite(kTestFilename, &err));
  ASSERT_EQ("", err);
  {
    vector<Node*> deps;
    for (int i = 0; i < kNumDeps; ++i) {
      char buf[32];
      // Bounded formatting: sprintf() into the fixed buffer invites an
      // overflow if the filename pattern or kNumDeps ever grows.
      snprintf(buf, sizeof(buf), "file%d.h", i);
      deps.push_back(state1.GetNode(buf, 0));
    }
    log1.RecordDeps(state1.GetNode("out.o", 0), 1, deps);
    DepsLog::Deps* log_deps = log1.GetDeps(state1.GetNode("out.o", 0));
    ASSERT_EQ(kNumDeps, log_deps->node_count);
  }
  log1.Close();
  State state2;
  DepsLog log2;
  EXPECT_TRUE(log2.Load(kTestFilename, &state2, &err));
  ASSERT_EQ("", err);
  DepsLog::Deps* log_deps = log2.GetDeps(state2.GetNode("out.o", 0));
  ASSERT_EQ(kNumDeps, log_deps->node_count);
}
// Verify that adding the same deps twice doesn't grow the file.
TEST_F(DepsLogTest, DoubleEntry) {
  // Write some deps to the file and grab its size.
  int file_size;
  {
    State state;
    DepsLog log;
    string err;
    EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
    ASSERT_EQ("", err);
    vector<Node*> deps;
    deps.push_back(state.GetNode("foo.h", 0));
    deps.push_back(state.GetNode("bar.h", 0));
    log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
    log.Close();
    struct stat st;
    ASSERT_EQ(0, stat(kTestFilename, &st));
    file_size = (int)st.st_size;
    ASSERT_GT(file_size, 0);
  }
  // Now reload the file, and read the same deps.
  {
    State state;
    DepsLog log;
    string err;
    EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
    EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
    ASSERT_EQ("", err);
    vector<Node*> deps;
    deps.push_back(state.GetNode("foo.h", 0));
    deps.push_back(state.GetNode("bar.h", 0));
    log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
    log.Close();
    // Recording identical deps must not have appended a new record.
    struct stat st;
    ASSERT_EQ(0, stat(kTestFilename, &st));
    int file_size_2 = (int)st.st_size;
    ASSERT_EQ(file_size, file_size_2);
  }
}
// Verify that adding the new deps works and can be compacted away.
TEST_F(DepsLogTest, Recompact) {
  const char kManifest[] =
      "rule cc\n"
      "  command = cc\n"
      "  deps = gcc\n"
      "build out.o: cc\n"
      "build other_out.o: cc\n";
  // Write some deps to the file and grab its size.
  int file_size;
  {
    State state;
    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, kManifest));
    DepsLog log;
    string err;
    ASSERT_TRUE(log.OpenForWrite(kTestFilename, &err));
    ASSERT_EQ("", err);
    vector<Node*> deps;
    deps.push_back(state.GetNode("foo.h", 0));
    deps.push_back(state.GetNode("bar.h", 0));
    log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
    deps.clear();
    deps.push_back(state.GetNode("foo.h", 0));
    deps.push_back(state.GetNode("baz.h", 0));
    log.RecordDeps(state.GetNode("other_out.o", 0), 1, deps);
    log.Close();
    struct stat st;
    ASSERT_EQ(0, stat(kTestFilename, &st));
    file_size = (int)st.st_size;
    ASSERT_GT(file_size, 0);
  }
  // Now reload the file, and add slightly different deps.
  int file_size_2;
  {
    State state;
    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, kManifest));
    DepsLog log;
    string err;
    ASSERT_TRUE(log.Load(kTestFilename, &state, &err));
    ASSERT_TRUE(log.OpenForWrite(kTestFilename, &err));
    ASSERT_EQ("", err);
    vector<Node*> deps;
    deps.push_back(state.GetNode("foo.h", 0));
    log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
    log.Close();
    struct stat st;
    ASSERT_EQ(0, stat(kTestFilename, &st));
    file_size_2 = (int)st.st_size;
    // The file should grow to record the new deps.
    ASSERT_GT(file_size_2, file_size);
  }
  // Now reload the file, verify the new deps have replaced the old, then
  // recompact.
  int file_size_3;
  {
    State state;
    ASSERT_NO_FATAL_FAILURE(AssertParse(&state, kManifest));
    DepsLog log;
    string err;
    ASSERT_TRUE(log.Load(kTestFilename, &state, &err));
    // Later records win: out.o should now have the single-entry deps list.
    Node* out = state.GetNode("out.o", 0);
    DepsLog::Deps* deps = log.GetDeps(out);
    ASSERT_TRUE(deps);
    ASSERT_EQ(1, deps->mtime);
    ASSERT_EQ(1, deps->node_count);
    ASSERT_EQ("foo.h", deps->nodes[0]->path());
    Node* other_out = state.GetNode("other_out.o", 0);
    deps = log.GetDeps(other_out);
    ASSERT_TRUE(deps);
    ASSERT_EQ(1, deps->mtime);
    ASSERT_EQ(2, deps->node_count);
    ASSERT_EQ("foo.h", deps->nodes[0]->path());
    ASSERT_EQ("baz.h", deps->nodes[1]->path());
    ASSERT_TRUE(log.Recompact(kTestFilename, &err));
    // The in-memory deps graph should still be valid after recompaction.
    deps = log.GetDeps(out);
    ASSERT_TRUE(deps);
    ASSERT_EQ(1, deps->mtime);
    ASSERT_EQ(1, deps->node_count);
    ASSERT_EQ("foo.h", deps->nodes[0]->path());
    ASSERT_EQ(out, log.nodes()[out->id()]);
    deps = log.GetDeps(other_out);
    ASSERT_TRUE(deps);
    ASSERT_EQ(1, deps->mtime);
    ASSERT_EQ(2, deps->node_count);
    ASSERT_EQ("foo.h", deps->nodes[0]->path());
    ASSERT_EQ("baz.h", deps->nodes[1]->path());
    ASSERT_EQ(other_out, log.nodes()[other_out->id()]);
    // The file should have shrunk a bit for the smaller deps.
    struct stat st;
    ASSERT_EQ(0, stat(kTestFilename, &st));
    file_size_3 = (int)st.st_size;
    ASSERT_LT(file_size_3, file_size_2);
  }
  // Now reload the file and recompact with an empty manifest. The previous
  // entries should be removed.
  {
    State state;
    // Intentionally not parsing kManifest here.
    DepsLog log;
    string err;
    ASSERT_TRUE(log.Load(kTestFilename, &state, &err));
    Node* out = state.GetNode("out.o", 0);
    DepsLog::Deps* deps = log.GetDeps(out);
    ASSERT_TRUE(deps);
    ASSERT_EQ(1, deps->mtime);
    ASSERT_EQ(1, deps->node_count);
    ASSERT_EQ("foo.h", deps->nodes[0]->path());
    Node* other_out = state.GetNode("other_out.o", 0);
    deps = log.GetDeps(other_out);
    ASSERT_TRUE(deps);
    ASSERT_EQ(1, deps->mtime);
    ASSERT_EQ(2, deps->node_count);
    ASSERT_EQ("foo.h", deps->nodes[0]->path());
    ASSERT_EQ("baz.h", deps->nodes[1]->path());
    ASSERT_TRUE(log.Recompact(kTestFilename, &err));
    // The previous entries should have been removed.
    deps = log.GetDeps(out);
    ASSERT_FALSE(deps);
    deps = log.GetDeps(other_out);
    ASSERT_FALSE(deps);
    // The .h files pulled in via deps should no longer have ids either.
    ASSERT_EQ(-1, state.LookupNode("foo.h")->id());
    ASSERT_EQ(-1, state.LookupNode("baz.h")->id());
    // The file should have shrunk more.
    struct stat st;
    ASSERT_EQ(0, stat(kTestFilename, &st));
    int file_size_4 = (int)st.st_size;
    ASSERT_LT(file_size_4, file_size_3);
  }
}
// Verify that invalid file headers cause a new build.
TEST_F(DepsLogTest, InvalidHeader) {
  const char* kInvalidHeaders[] = {
    "",                              // Empty file.
    "# ninjad",                      // Truncated first line.
    "# ninjadeps\n",                 // No version int.
    "# ninjadeps\n\001\002",         // Truncated version int.
    "# ninjadeps\n\001\002\003\004"  // Invalid version int.
  };
  const size_t kHeaderCount =
      sizeof(kInvalidHeaders) / sizeof(kInvalidHeaders[0]);
  for (size_t i = 0; i < kHeaderCount; ++i) {
    const char* header = kInvalidHeaders[i];
    FILE* deps_log = fopen(kTestFilename, "wb");
    ASSERT_TRUE(deps_log != NULL);
    size_t header_len = strlen(header);
    ASSERT_EQ(header_len, fwrite(header, 1, header_len, deps_log));
    ASSERT_EQ(0, fclose(deps_log));
    string err;
    DepsLog log;
    State state;
    // A bad header is not a hard error: the log is discarded and rebuilt.
    ASSERT_TRUE(log.Load(kTestFilename, &state, &err));
    EXPECT_EQ("bad deps log signature or version; starting over", err);
  }
}
// Simulate what happens when loading a truncated log file.
TEST_F(DepsLogTest, Truncated) {
  // Create a file with some entries.
  {
    State state;
    DepsLog log;
    string err;
    EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
    ASSERT_EQ("", err);
    vector<Node*> deps;
    deps.push_back(state.GetNode("foo.h", 0));
    deps.push_back(state.GetNode("bar.h", 0));
    log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
    deps.clear();
    deps.push_back(state.GetNode("foo.h", 0));
    deps.push_back(state.GetNode("bar2.h", 0));
    log.RecordDeps(state.GetNode("out2.o", 0), 2, deps);
    log.Close();
  }
  // Get the file size.
  struct stat st;
  ASSERT_EQ(0, stat(kTestFilename, &st));
  // Try reloading at truncated sizes.
  // Track how many nodes/deps were found; they should decrease with
  // smaller sizes.
  int node_count = 5;
  int deps_count = 2;
  for (int size = (int)st.st_size; size > 0; --size) {
    string err;
    ASSERT_TRUE(Truncate(kTestFilename, size, &err));
    State state;
    DepsLog log;
    EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
    if (!err.empty()) {
      // At some point the log will be so short as to be unparseable.
      break;
    }
    // Truncation must never surface more nodes than the previous iteration.
    ASSERT_GE(node_count, (int)log.nodes().size());
    node_count = log.nodes().size();
    // Count how many non-NULL deps entries there are.
    int new_deps_count = 0;
    for (vector<DepsLog::Deps*>::const_iterator i = log.deps().begin();
         i != log.deps().end(); ++i) {
      if (*i)
        ++new_deps_count;
    }
    ASSERT_GE(deps_count, new_deps_count);
    deps_count = new_deps_count;
  }
}
// Run the truncation-recovery logic.
TEST_F(DepsLogTest, TruncatedRecovery) {
  // Create a file with some entries.
  {
    State state;
    DepsLog log;
    string err;
    EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
    ASSERT_EQ("", err);
    vector<Node*> deps;
    deps.push_back(state.GetNode("foo.h", 0));
    deps.push_back(state.GetNode("bar.h", 0));
    log.RecordDeps(state.GetNode("out.o", 0), 1, deps);
    deps.clear();
    deps.push_back(state.GetNode("foo.h", 0));
    deps.push_back(state.GetNode("bar2.h", 0));
    log.RecordDeps(state.GetNode("out2.o", 0), 2, deps);
    log.Close();
  }
  // Shorten the file, corrupting the last record.
  {
    struct stat st;
    ASSERT_EQ(0, stat(kTestFilename, &st));
    string err;
    ASSERT_TRUE(Truncate(kTestFilename, st.st_size - 2, &err));
  }
  // Load the file again, add an entry.
  {
    State state;
    DepsLog log;
    string err;
    // Load() should truncate away the corrupt tail and report a warning.
    EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
    ASSERT_EQ("premature end of file; recovering", err);
    err.clear();
    // The truncated entry should've been discarded.
    EXPECT_EQ(NULL, log.GetDeps(state.GetNode("out2.o", 0)));
    EXPECT_TRUE(log.OpenForWrite(kTestFilename, &err));
    ASSERT_EQ("", err);
    // Add a new entry.
    vector<Node*> deps;
    deps.push_back(state.GetNode("foo.h", 0));
    deps.push_back(state.GetNode("bar2.h", 0));
    log.RecordDeps(state.GetNode("out2.o", 0), 3, deps);
    log.Close();
  }
  // Load the file a third time to verify appending after a mangled
  // entry doesn't break things.
  {
    State state;
    DepsLog log;
    string err;
    EXPECT_TRUE(log.Load(kTestFilename, &state, &err));
    // The truncated entry should exist.
    DepsLog::Deps* deps = log.GetDeps(state.GetNode("out2.o", 0));
    ASSERT_TRUE(deps);
  }
}
} // anonymous namespace

285
src/disk_interface.cc Normal file
View File

@ -0,0 +1,285 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "disk_interface.h"
#include <algorithm>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#ifdef _WIN32
#include <sstream>
#include <windows.h>
#include <direct.h> // _mkdir
#else
#include <unistd.h>
#endif
#include "metrics.h"
#include "util.h"
namespace {
// Strip the final path component from |path|, also collapsing any run of
// separators immediately before it ("a//b" -> "a").  Returns "" when |path|
// has no directory part, which callers treat as "nothing to create".
string DirName(const string& path) {
#ifdef _WIN32
  static const char kPathSeparators[] = "\\/";
#else
  static const char kPathSeparators[] = "/";
#endif
  static const char* const kEnd = kPathSeparators + sizeof(kPathSeparators) - 1;
  string::size_type pos = path.find_last_of(kPathSeparators);
  if (pos == string::npos)
    return string();  // No separator at all: no directory component.
  // Back up over consecutive separators so "a//b" yields "a", not "a/".
  while (pos > 0 &&
         std::find(kPathSeparators, kEnd, path[pos - 1]) != kEnd) {
    --pos;
  }
  return path.substr(0, pos);
}
// Portable mkdir wrapper; returns 0 on success, -1 on failure (errno set).
int MakeDir(const string& path) {
#ifdef _WIN32
  return _mkdir(path.c_str());
#else
  return mkdir(path.c_str(), 0777);  // Effective mode is filtered by umask.
#endif
}
#ifdef _WIN32
// Convert a Windows FILETIME into ninja's TimeStamp representation.
TimeStamp TimeStampFromFileTime(const FILETIME& filetime) {
  // FILETIME is in 100-nanosecond increments since the Windows epoch.
  // We don't much care about epoch correctness but we do want the
  // resulting value to fit in a 64-bit integer.
  uint64_t mtime = ((uint64_t)filetime.dwHighDateTime << 32) |
                   ((uint64_t)filetime.dwLowDateTime);
  // 1600 epoch -> 2000 epoch (subtract 400 years).
  return (TimeStamp)mtime - 12622770400LL * (1000000000LL / 100);
}
// Stat one file via GetFileAttributesEx.  Returns 0 when the file does not
// exist, or -1 (with |err| filled) on any other failure.
TimeStamp StatSingleFile(const string& path, string* err) {
  WIN32_FILE_ATTRIBUTE_DATA attrs;
  if (!GetFileAttributesExA(path.c_str(), GetFileExInfoStandard, &attrs)) {
    DWORD win_err = GetLastError();
    // A missing file or directory component is "not found", not an error.
    if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND)
      return 0;
    *err = "GetFileAttributesEx(" + path + "): " + GetLastErrorString();
    return -1;
  }
  return TimeStampFromFileTime(attrs.ftLastWriteTime);
}
// True when the running OS is Windows 7 (version 6.1) or newer; used to
// gate the faster FindExInfoBasic query in StatAllFilesInDir().
bool IsWindows7OrLater() {
  OSVERSIONINFOEX version_info =
      { sizeof(OSVERSIONINFOEX), 6, 1, 0, 0, {0}, 0, 0, 0, 0, 0};
  DWORDLONG comparison = 0;
  VER_SET_CONDITION(comparison, VER_MAJORVERSION, VER_GREATER_EQUAL);
  VER_SET_CONDITION(comparison, VER_MINORVERSION, VER_GREATER_EQUAL);
  return VerifyVersionInfo(
      &version_info, VER_MAJORVERSION | VER_MINORVERSION, comparison);
}
// Stat every entry of |dir| with a single FindFirstFileExA scan, inserting
// lowercased-name -> mtime pairs into |stamps|.  A missing directory is not
// an error (the map is just left empty); returns false with |err| filled
// only on unexpected failures.
bool StatAllFilesInDir(const string& dir, map<string, TimeStamp>* stamps,
                       string* err) {
  // FindExInfoBasic is 30% faster than FindExInfoStandard.
  static bool can_use_basic_info = IsWindows7OrLater();
  // This is not in earlier SDKs.
  const FINDEX_INFO_LEVELS kFindExInfoBasic =
      static_cast<FINDEX_INFO_LEVELS>(1);
  FINDEX_INFO_LEVELS level =
      can_use_basic_info ? kFindExInfoBasic : FindExInfoStandard;
  WIN32_FIND_DATAA ffd;
  HANDLE find_handle = FindFirstFileExA((dir + "\\*").c_str(), level, &ffd,
                                        FindExSearchNameMatch, NULL, 0);
  if (find_handle == INVALID_HANDLE_VALUE) {
    DWORD win_err = GetLastError();
    if (win_err == ERROR_FILE_NOT_FOUND || win_err == ERROR_PATH_NOT_FOUND)
      return true;
    *err = "FindFirstFileExA(" + dir + "): " + GetLastErrorString();
    return false;
  }
  do {
    string lowername = ffd.cFileName;
    if (lowername == "..") {
      // Seems to just copy the timestamp for ".." from ".", which is wrong.
      // This is the case at least on NTFS under Windows 7.
      continue;
    }
    // Lowercase the key: NTFS name lookups are case-insensitive.
    transform(lowername.begin(), lowername.end(), lowername.begin(), ::tolower);
    stamps->insert(make_pair(lowername,
                             TimeStampFromFileTime(ffd.ftLastWriteTime)));
  } while (FindNextFileA(find_handle, &ffd));
  FindClose(find_handle);
  return true;
}
#endif // _WIN32
} // namespace
// DiskInterface ---------------------------------------------------------------
// Create every missing parent directory of |path| (like
// `mkdir -p $(dirname path)`), recursing toward the root until an existing
// directory is found.
bool DiskInterface::MakeDirs(const string& path) {
  string dir = DirName(path);
  if (dir.empty())
    return true;  // Reached root; assume it's there.
  string err;
  TimeStamp mtime = Stat(dir, &err);
  if (mtime < 0) {
    Error("%s", err.c_str());
    return false;
  }
  if (mtime > 0)
    return true;  // Exists already; we're done.
  // Directory doesn't exist.  Try creating its parent first.
  bool success = MakeDirs(dir);
  if (!success)
    return false;
  return MakeDir(dir);
}
// RealDiskInterface -----------------------------------------------------------
// Return |path|'s mtime in nanoseconds, 0 when the file does not exist, or
// -1 (with |err| set) on error.  On Windows an optional per-directory cache
// batches the stat of every file in a directory into one scan.
TimeStamp RealDiskInterface::Stat(const string& path, string* err) const {
  METRIC_RECORD("node stat");
#ifdef _WIN32
  // MSDN: "Naming Files, Paths, and Namespaces"
  // http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
  if (!path.empty() && path[0] != '\\' && path.size() > MAX_PATH) {
    ostringstream err_stream;
    err_stream << "Stat(" << path << "): Filename longer than " << MAX_PATH
               << " characters";
    *err = err_stream.str();
    return -1;
  }
  if (!use_cache_)
    return StatSingleFile(path, err);
  string dir = DirName(path);
  string base(path.substr(dir.size() ? dir.size() + 1 : 0));
  if (base == "..") {
    // StatAllFilesInDir does not report any information for base = "..".
    base = ".";
    dir = path;
  }
  // Cache keys are lowercased; NTFS lookups are case-insensitive.
  transform(dir.begin(), dir.end(), dir.begin(), ::tolower);
  transform(base.begin(), base.end(), base.begin(), ::tolower);
  Cache::iterator ci = cache_.find(dir);
  if (ci == cache_.end()) {
    // First stat inside this directory: scan it once and cache every entry.
    ci = cache_.insert(make_pair(dir, DirCache())).first;
    if (!StatAllFilesInDir(dir.empty() ? "." : dir, &ci->second, err)) {
      cache_.erase(ci);
      return -1;
    }
  }
  DirCache::iterator di = ci->second.find(base);
  return di != ci->second.end() ? di->second : 0;
#else
  struct stat st;
  if (stat(path.c_str(), &st) < 0) {
    if (errno == ENOENT || errno == ENOTDIR)
      return 0;
    *err = "stat(" + path + "): " + strerror(errno);
    return -1;
  }
  // Some users (Flatpak) set mtime to 0, this should be harmless
  // and avoids conflicting with our return value of 0 meaning
  // that it doesn't exist.
  if (st.st_mtime == 0)
    return 1;
  // Pick the highest-resolution mtime field this platform offers.
#if defined(_AIX)
  return (int64_t)st.st_mtime * 1000000000LL + st.st_mtime_n;
#elif defined(__APPLE__)
  return ((int64_t)st.st_mtimespec.tv_sec * 1000000000LL +
          st.st_mtimespec.tv_nsec);
#elif defined(st_mtime)  // A macro, so we're likely on modern POSIX.
  return (int64_t)st.st_mtim.tv_sec * 1000000000LL + st.st_mtim.tv_nsec;
#else
  return (int64_t)st.st_mtime * 1000000000LL + st.st_mtimensec;
#endif
#endif
}
// Write |contents| to |path|, truncating any existing file.  Logs via
// Error() and returns false on any failure (create, write, or close).
bool RealDiskInterface::WriteFile(const string& path, const string& contents) {
  FILE* fp = fopen(path.c_str(), "w");
  if (fp == NULL) {
    Error("WriteFile(%s): Unable to create file. %s",
          path.c_str(), strerror(errno));
    return false;
  }
  size_t written = fwrite(contents.data(), 1, contents.length(), fp);
  if (written < contents.length()) {
    Error("WriteFile(%s): Unable to write to the file. %s",
          path.c_str(), strerror(errno));
    fclose(fp);
    return false;
  }
  // fclose() flushes buffered data; a failure here means the write may be
  // incomplete on disk, so it must be reported too.
  if (fclose(fp) == EOF) {
    Error("WriteFile(%s): Unable to close the file. %s",
          path.c_str(), strerror(errno));
    return false;
  }
  return true;
}
// Create directory |path|.  An already-existing directory counts as
// success; any other failure is logged and reported as false.
bool RealDiskInterface::MakeDir(const string& path) {
  if (::MakeDir(path) == 0)
    return true;
  if (errno == EEXIST)
    return true;  // Already present: fine for mkdir -p style callers.
  Error("mkdir(%s): %s", path.c_str(), strerror(errno));
  return false;
}
// Read |path| into |contents|, mapping the util-level ::ReadFile()
// errno-style result onto the FileReader::Status enum.
FileReader::Status RealDiskInterface::ReadFile(const string& path,
                                               string* contents,
                                               string* err) {
  int result = ::ReadFile(path, contents, err);
  if (result == 0)
    return Okay;
  if (result == -ENOENT)
    return NotFound;
  return OtherError;
}
// Remove |path| with 'rm -f' semantics: 0 when removed, 1 when it did not
// exist, -1 (after logging) on any other error.
int RealDiskInterface::RemoveFile(const string& path) {
  if (remove(path.c_str()) == 0)
    return 0;
  if (errno == ENOENT)
    return 1;  // A missing file is not an error here.
  Error("remove(%s): %s", path.c_str(), strerror(errno));
  return -1;
}
// Enable or disable the per-directory stat cache.  The cache only exists on
// Windows (where individual stats are expensive); disabling it also drops
// any entries cached so far.
void RealDiskInterface::AllowStatCache(bool allow) {
#ifdef _WIN32
  use_cache_ = allow;
  if (!use_cache_)
    cache_.clear();
#endif
}

100
src/disk_interface.h Normal file
View File

@ -0,0 +1,100 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_DISK_INTERFACE_H_
#define NINJA_DISK_INTERFACE_H_
#include <map>
#include <string>
using namespace std;
#include "timestamp.h"
/// Interface for reading files from disk. See DiskInterface for details.
/// This base offers the minimum interface needed just to read files.
struct FileReader {
  virtual ~FileReader() {}

  /// Result of ReadFile.
  enum Status {
    Okay,        // read succeeded; |contents| is valid
    NotFound,    // file does not exist
    OtherError   // any other failure; see |err|
  };

  /// Read and store in given string. On success, return Okay.
  /// On error, return another Status and fill |err|.
  virtual Status ReadFile(const string& path, string* contents,
                          string* err) = 0;
};
/// Interface for accessing the disk.
///
/// Abstract so it can be mocked out for tests. The real implementation
/// is RealDiskInterface.
struct DiskInterface: public FileReader {
  /// stat() a file, returning the mtime, or 0 if missing and -1 on
  /// other errors.
  virtual TimeStamp Stat(const string& path, string* err) const = 0;

  /// Create a directory, returning false on failure.
  virtual bool MakeDir(const string& path) = 0;

  /// Create a file, with the specified name and contents
  /// Returns true on success, false on failure
  virtual bool WriteFile(const string& path, const string& contents) = 0;

  /// Remove the file named @a path. It behaves like 'rm -f path' so no errors
  /// are reported if it does not exist.
  /// @returns 0 if the file has been removed,
  ///          1 if the file does not exist, and
  ///          -1 if an error occurs.
  virtual int RemoveFile(const string& path) = 0;

  /// Create all the parent directories for path; like mkdir -p
  /// `basename path`.
  bool MakeDirs(const string& path);
};
/// Implementation of DiskInterface that actually hits the disk.
struct RealDiskInterface : public DiskInterface {
  RealDiskInterface()
#ifdef _WIN32
      : use_cache_(false)
#endif
  {}
  virtual ~RealDiskInterface() {}
  virtual TimeStamp Stat(const string& path, string* err) const;
  virtual bool MakeDir(const string& path);
  virtual bool WriteFile(const string& path, const string& contents);
  virtual Status ReadFile(const string& path, string* contents, string* err);
  virtual int RemoveFile(const string& path);

  /// Whether stat information can be cached.  Only has an effect on Windows.
  void AllowStatCache(bool allow);

 private:
#ifdef _WIN32
  /// Whether stat information can be cached.
  bool use_cache_;

  /// Per-directory map of entry name -> mtime.
  typedef map<string, TimeStamp> DirCache;
  // TODO: Neither a map nor a hashmap seems ideal here.  If the statcache
  // works out, come up with a better data structure.
  typedef map<string, DirCache> Cache;
  /// mutable so that the logically-const Stat() can populate it lazily.
  mutable Cache cache_;
#endif
};
#endif // NINJA_DISK_INTERFACE_H_

322
src/disk_interface_test.cc Normal file
View File

@ -0,0 +1,322 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <assert.h>
#include <stdio.h>
#ifdef _WIN32
#include <io.h>
#include <windows.h>
#endif
#include "disk_interface.h"
#include "graph.h"
#include "test.h"
namespace {
/// Fixture that runs each test inside a fresh temporary directory so the
/// real disk accesses below cannot clobber the source tree.
struct DiskInterfaceTest : public testing::Test {
  virtual void SetUp() {
    // These tests do real disk accesses, so create a temp dir.
    temp_dir_.CreateAndEnter("Ninja-DiskInterfaceTest");
  }

  virtual void TearDown() {
    temp_dir_.Cleanup();
  }

  // Create an empty file at |path|; false if it could not be created/closed.
  bool Touch(const char* path) {
    FILE *f = fopen(path, "w");
    if (!f)
      return false;
    return fclose(f) == 0;
  }

  ScopedTempDir temp_dir_;   // created in SetUp, removed in TearDown
  RealDiskInterface disk_;   // object under test
};
// Stat() of a missing file (or a file under a missing/invalid directory)
// must report mtime 0 with no error.
TEST_F(DiskInterfaceTest, StatMissingFile) {
  string err;
  EXPECT_EQ(0, disk_.Stat("nosuchfile", &err));
  EXPECT_EQ("", err);

  // On Windows, the errno for a file in a nonexistent directory
  // is different.
  EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err));
  EXPECT_EQ("", err);

  // On POSIX systems, the errno is different if a component of the
  // path prefix is not a directory.
  ASSERT_TRUE(Touch("notadir"));
  EXPECT_EQ(0, disk_.Stat("notadir/nosuchfile", &err));
  EXPECT_EQ("", err);
}

// A syntactically invalid path must yield -1 and a non-empty error.
TEST_F(DiskInterfaceTest, StatBadPath) {
  string err;
#ifdef _WIN32
  string bad_path("cc:\\foo");
  EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
  EXPECT_NE("", err);
#else
  string too_long_name(512, 'x');
  EXPECT_EQ(-1, disk_.Stat(too_long_name, &err));
  EXPECT_NE("", err);
#endif
}

// An existing file reports a real (positive) mtime.
TEST_F(DiskInterfaceTest, StatExistingFile) {
  string err;
  ASSERT_TRUE(Touch("file"));
  EXPECT_GT(disk_.Stat("file", &err), 1);
  EXPECT_EQ("", err);
}

// Directories stat like files, and "dir", "dir/." and "dir/sub/.." must
// all agree on the same mtime.
TEST_F(DiskInterfaceTest, StatExistingDir) {
  string err;
  ASSERT_TRUE(disk_.MakeDir("subdir"));
  ASSERT_TRUE(disk_.MakeDir("subdir/subsubdir"));
  EXPECT_GT(disk_.Stat("..", &err), 1);
  EXPECT_EQ("", err);
  EXPECT_GT(disk_.Stat(".", &err), 1);
  EXPECT_EQ("", err);
  EXPECT_GT(disk_.Stat("subdir", &err), 1);
  EXPECT_EQ("", err);
  EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1);
  EXPECT_EQ("", err);

  EXPECT_EQ(disk_.Stat("subdir", &err),
            disk_.Stat("subdir/.", &err));
  EXPECT_EQ(disk_.Stat("subdir", &err),
            disk_.Stat("subdir/subsubdir/..", &err));
  EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err),
            disk_.Stat("subdir/subsubdir/.", &err));
}
#ifdef _WIN32
// Exercise the Windows-only stat cache: cached lookups must be
// case-insensitive, must agree with uncached results, and must still
// report errors for bad/missing paths.
TEST_F(DiskInterfaceTest, StatCache) {
  string err;

  ASSERT_TRUE(Touch("file1"));
  ASSERT_TRUE(Touch("fiLE2"));
  ASSERT_TRUE(disk_.MakeDir("subdir"));
  ASSERT_TRUE(disk_.MakeDir("subdir/subsubdir"));
  ASSERT_TRUE(Touch("subdir\\subfile1"));
  ASSERT_TRUE(Touch("subdir\\SUBFILE2"));
  ASSERT_TRUE(Touch("subdir\\SUBFILE3"));

  // Take a reference value with the cache off, then turn it on.
  disk_.AllowStatCache(false);
  TimeStamp parent_stat_uncached = disk_.Stat("..", &err);
  disk_.AllowStatCache(true);

  // Windows filenames are case-insensitive, so mixed-case queries must hit.
  EXPECT_GT(disk_.Stat("FIle1", &err), 1);
  EXPECT_EQ("", err);
  EXPECT_GT(disk_.Stat("file1", &err), 1);
  EXPECT_EQ("", err);
  EXPECT_GT(disk_.Stat("subdir/subfile2", &err), 1);
  EXPECT_EQ("", err);
  EXPECT_GT(disk_.Stat("sUbdir\\suBFile1", &err), 1);
  EXPECT_EQ("", err);
  EXPECT_GT(disk_.Stat("..", &err), 1);
  EXPECT_EQ("", err);
  EXPECT_GT(disk_.Stat(".", &err), 1);
  EXPECT_EQ("", err);
  EXPECT_GT(disk_.Stat("subdir", &err), 1);
  EXPECT_EQ("", err);
  EXPECT_GT(disk_.Stat("subdir/subsubdir", &err), 1);
  EXPECT_EQ("", err);

#ifndef _MSC_VER // TODO: Investigate why. Also see https://github.com/ninja-build/ninja/pull/1423
  EXPECT_EQ(disk_.Stat("subdir", &err),
            disk_.Stat("subdir/.", &err));
  EXPECT_EQ("", err);
  EXPECT_EQ(disk_.Stat("subdir", &err),
            disk_.Stat("subdir/subsubdir/..", &err));
#endif
  EXPECT_EQ("", err);
  // Cached value must match the uncached reference taken above.
  EXPECT_EQ(disk_.Stat("..", &err), parent_stat_uncached);
  EXPECT_EQ("", err);
  EXPECT_EQ(disk_.Stat("subdir/subsubdir", &err),
            disk_.Stat("subdir/subsubdir/.", &err));
  EXPECT_EQ("", err);

  // Test error cases.
  string bad_path("cc:\\foo");
  EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
  EXPECT_NE("", err); err.clear();
  EXPECT_EQ(-1, disk_.Stat(bad_path, &err));
  EXPECT_NE("", err); err.clear();
  EXPECT_EQ(0, disk_.Stat("nosuchfile", &err));
  EXPECT_EQ("", err);
  EXPECT_EQ(0, disk_.Stat("nosuchdir/nosuchfile", &err));
  EXPECT_EQ("", err);
}
#endif
// ReadFile must distinguish a missing file (NotFound + error text) from a
// successful read that returns the exact file contents.
TEST_F(DiskInterfaceTest, ReadFile) {
  string err;
  std::string content;
  ASSERT_EQ(DiskInterface::NotFound,
            disk_.ReadFile("foobar", &content, &err));
  EXPECT_EQ("", content);
  EXPECT_NE("", err); // actual value is platform-specific
  err.clear();

  const char* kTestFile = "testfile";
  FILE* f = fopen(kTestFile, "wb");
  ASSERT_TRUE(f);
  const char* kTestContent = "test content\nok";
  fprintf(f, "%s", kTestContent);
  ASSERT_EQ(0, fclose(f));

  ASSERT_EQ(DiskInterface::Okay,
            disk_.ReadFile(kTestFile, &content, &err));
  EXPECT_EQ(kTestContent, content);
  EXPECT_EQ("", err);
}

// MakeDirs must create the whole directory chain, tolerating doubled
// separators (and backslashes on Windows).
TEST_F(DiskInterfaceTest, MakeDirs) {
  string path = "path/with/double//slash/";
  EXPECT_TRUE(disk_.MakeDirs(path));
  FILE* f = fopen((path + "a_file").c_str(), "w");
  EXPECT_TRUE(f);
  EXPECT_EQ(0, fclose(f));
#ifdef _WIN32
  string path2 = "another\\with\\back\\\\slashes\\";
  EXPECT_TRUE(disk_.MakeDirs(path2.c_str()));
  FILE* f2 = fopen((path2 + "a_file").c_str(), "w");
  EXPECT_TRUE(f2);
  EXPECT_EQ(0, fclose(f2));
#endif
}

// RemoveFile: 0 on success, 1 when the file is already gone ('rm -f').
TEST_F(DiskInterfaceTest, RemoveFile) {
  const char* kFileName = "file-to-remove";
  ASSERT_TRUE(Touch(kFileName));
  EXPECT_EQ(0, disk_.RemoveFile(kFileName));
  EXPECT_EQ(1, disk_.RemoveFile(kFileName));
  EXPECT_EQ(1, disk_.RemoveFile("does not exist"));
}
/// Fixture that tests DependencyScan's stat behavior by acting as its own
/// mock disk: mtimes are served from |mtimes_| and every query is logged
/// in |stats_|.  The other DiskInterface overrides must never be called.
struct StatTest : public StateTestWithBuiltinRules,
                  public DiskInterface {
  StatTest() : scan_(&state_, NULL, NULL, this, NULL) {}

  // DiskInterface implementation.
  virtual TimeStamp Stat(const string& path, string* err) const;
  virtual bool WriteFile(const string& path, const string& contents) {
    assert(false);  // not expected to be called in these tests
    return true;
  }
  virtual bool MakeDir(const string& path) {
    assert(false);  // not expected to be called in these tests
    return false;
  }
  virtual Status ReadFile(const string& path, string* contents, string* err) {
    assert(false);  // not expected to be called in these tests
    return NotFound;
  }
  virtual int RemoveFile(const string& path) {
    assert(false);  // not expected to be called in these tests
    return 0;
  }

  DependencyScan scan_;
  map<string, TimeStamp> mtimes_;       // fake mtimes; absent => missing file
  mutable vector<string> stats_;        // ordered log of Stat() queries
};
// Record the query so tests can assert on stat order, then serve the
// configured mtime (0, i.e. "missing", when the path has no entry).
TimeStamp StatTest::Stat(const string& path, string* err) const {
  stats_.push_back(path);
  map<string, TimeStamp>::const_iterator it = mtimes_.find(path);
  return it == mtimes_.end() ? 0 : it->second;
}
// A single edge: RecomputeDirty stats the output first, then its input.
TEST_F(StatTest, Simple) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build out: cat in\n"));

  Node* out = GetNode("out");
  string err;
  EXPECT_TRUE(out->Stat(this, &err));
  EXPECT_EQ("", err);
  ASSERT_EQ(1u, stats_.size());
  scan_.RecomputeDirty(out, NULL);
  ASSERT_EQ(2u, stats_.size());
  ASSERT_EQ("out", stats_[0]);
  ASSERT_EQ("in", stats_[1]);
}

// A two-edge chain is statted from the output down to the leaf input,
// and missing files dirty each node along the way.
TEST_F(StatTest, TwoStep) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build out: cat mid\n"
"build mid: cat in\n"));

  Node* out = GetNode("out");
  string err;
  EXPECT_TRUE(out->Stat(this, &err));
  EXPECT_EQ("", err);
  ASSERT_EQ(1u, stats_.size());
  scan_.RecomputeDirty(out, NULL);
  ASSERT_EQ(3u, stats_.size());
  ASSERT_EQ("out", stats_[0]);
  ASSERT_TRUE(GetNode("out")->dirty());
  ASSERT_EQ("mid", stats_[1]);
  ASSERT_TRUE(GetNode("mid")->dirty());
  ASSERT_EQ("in", stats_[2]);
}

// A tree of edges: all 6 dependency nodes are statted exactly once in
// addition to the root.
TEST_F(StatTest, Tree) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build out: cat mid1 mid2\n"
"build mid1: cat in11 in12\n"
"build mid2: cat in21 in22\n"));

  Node* out = GetNode("out");
  string err;
  EXPECT_TRUE(out->Stat(this, &err));
  EXPECT_EQ("", err);
  ASSERT_EQ(1u, stats_.size());
  scan_.RecomputeDirty(out, NULL);
  ASSERT_EQ(1u + 6u, stats_.size());
  ASSERT_EQ("mid1", stats_[1]);
  ASSERT_TRUE(GetNode("mid1")->dirty());
  ASSERT_EQ("in11", stats_[2]);
}

// A missing intermediate output dirties itself and everything downstream,
// but not the up-to-date leaf input.
TEST_F(StatTest, Middle) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(&state_,
"build out: cat mid\n"
"build mid: cat in\n"));

  mtimes_["in"] = 1;
  mtimes_["mid"] = 0;  // missing
  mtimes_["out"] = 1;

  Node* out = GetNode("out");
  string err;
  EXPECT_TRUE(out->Stat(this, &err));
  EXPECT_EQ("", err);
  ASSERT_EQ(1u, stats_.size());
  scan_.RecomputeDirty(out, NULL);
  ASSERT_FALSE(GetNode("in")->dirty());
  ASSERT_TRUE(GetNode("mid")->dirty());
  ASSERT_TRUE(GetNode("out")->dirty());
}
} // namespace

124
src/dyndep.cc Normal file
View File

@ -0,0 +1,124 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dyndep.h"
#include <assert.h>
#include <stdio.h>
#include "debug_flags.h"
#include "disk_interface.h"
#include "dyndep_parser.h"
#include "graph.h"
#include "state.h"
#include "util.h"
// Convenience overload: parse into a throwaway DyndepFile when the caller
// does not need to inspect the parsed contents.
bool DyndepLoader::LoadDyndeps(Node* node, std::string* err) const {
  DyndepFile dyndep_file;
  return LoadDyndeps(node, &dyndep_file, err);
}
// Parse the dyndep file referenced by |node| into |ddf| and apply the
// discovered dependency information to every edge that names |node| as its
// "dyndep" binding.  Fails if an edge bound to this file is not mentioned
// in it, or if the file mentions an edge that is not bound to it.
bool DyndepLoader::LoadDyndeps(Node* node, DyndepFile* ddf,
                               std::string* err) const {
  // We are loading the dyndep file now so it is no longer pending.
  node->set_dyndep_pending(false);

  // Load the dyndep information from the file.
  EXPLAIN("loading dyndep file '%s'", node->path().c_str());
  if (!LoadDyndepFile(node, ddf, err))
    return false;

  // Update each edge that specified this node as its dyndep binding.
  std::vector<Edge*> const& out_edges = node->out_edges();
  for (std::vector<Edge*>::const_iterator oe = out_edges.begin();
       oe != out_edges.end(); ++oe) {
    Edge* const edge = *oe;
    if (edge->dyndep_ != node)
      continue;

    // Every edge bound to this dyndep file must appear in the file.
    DyndepFile::iterator ddi = ddf->find(edge);
    if (ddi == ddf->end()) {
      *err = ("'" + edge->outputs_[0]->path() + "' "
              "not mentioned in its dyndep file "
              "'" + node->path() + "'");
      return false;
    }

    // Mark the entry used so the check below can spot unreferenced ones.
    ddi->second.used_ = true;
    Dyndeps const& dyndeps = ddi->second;
    if (!UpdateEdge(edge, &dyndeps, err)) {
      return false;
    }
  }

  // Reject extra outputs in dyndep file.
  for (DyndepFile::const_iterator oe = ddf->begin(); oe != ddf->end();
       ++oe) {
    if (!oe->second.used_) {
      Edge* const edge = oe->first;
      *err = ("dyndep file '" + node->path() + "' mentions output "
              "'" + edge->outputs_[0]->path() + "' whose build statement "
              "does not have a dyndep binding for the file");
      return false;
    }
  }

  return true;
}
// Splice the dyndep-discovered implicit inputs and outputs of |edge| into
// the build graph, and apply the optional "restat" binding.  Fails if a
// discovered output already has a generating edge.
bool DyndepLoader::UpdateEdge(Edge* edge, Dyndeps const* dyndeps,
                              std::string* err) const {
  // Add dyndep-discovered bindings to the edge.
  // We know the edge already has its own binding
  // scope because it has a "dyndep" binding.
  if (dyndeps->restat_)
    edge->env_->AddBinding("restat", "1");

  // Add the dyndep-discovered outputs to the edge.
  edge->outputs_.insert(edge->outputs_.end(),
                        dyndeps->implicit_outputs_.begin(),
                        dyndeps->implicit_outputs_.end());
  edge->implicit_outs_ += dyndeps->implicit_outputs_.size();

  // Add this edge as incoming to each new output.
  for (std::vector<Node*>::const_iterator i =
           dyndeps->implicit_outputs_.begin();
       i != dyndeps->implicit_outputs_.end(); ++i) {
    if ((*i)->in_edge() != NULL) {
      // A node may be generated by at most one edge.
      *err = "multiple rules generate " + (*i)->path();
      return false;
    }
    (*i)->set_in_edge(edge);
  }

  // Add the dyndep-discovered inputs to the edge.
  // New implicit inputs are inserted just before the order-only deps so the
  // inputs_ layout (explicit | implicit | order-only) is preserved.
  edge->inputs_.insert(edge->inputs_.end() - edge->order_only_deps_,
                       dyndeps->implicit_inputs_.begin(),
                       dyndeps->implicit_inputs_.end());
  edge->implicit_deps_ += dyndeps->implicit_inputs_.size();

  // Add this edge as outgoing from each new input.
  for (std::vector<Node*>::const_iterator i =
           dyndeps->implicit_inputs_.begin();
       i != dyndeps->implicit_inputs_.end(); ++i)
    (*i)->AddOutEdge(edge);

  return true;
}
// Parse the on-disk dyndep file for |file| into |ddf| via DyndepParser.
bool DyndepLoader::LoadDyndepFile(Node* file, DyndepFile* ddf,
                                  std::string* err) const {
  DyndepParser parser(state_, disk_interface_, ddf);
  return parser.Load(file->path(), err);
}

64
src/dyndep.h Normal file
View File

@ -0,0 +1,64 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_DYNDEP_LOADER_H_
#define NINJA_DYNDEP_LOADER_H_
#include <map>
#include <string>
#include <vector>
struct DiskInterface;
struct Edge;
struct Node;
struct State;
/// Store dynamically-discovered dependency information for one edge.
struct Dyndeps {
  Dyndeps() : used_(false), restat_(false) {}
  bool used_;    // set by DyndepLoader once an edge consumes this entry
  bool restat_;  // value of the optional "restat" binding in the dyndep file
  std::vector<Node*> implicit_inputs_;
  std::vector<Node*> implicit_outputs_;
};

/// Store data loaded from one dyndep file.  Map from an edge
/// to its dynamically-discovered dependency information.
/// This is a struct rather than a typedef so that we can
/// forward-declare it in other headers.
struct DyndepFile: public std::map<Edge*, Dyndeps> {};
/// DyndepLoader loads dynamically discovered dependencies, as
/// referenced via the "dyndep" attribute in build files.
struct DyndepLoader {
  DyndepLoader(State* state, DiskInterface* disk_interface)
      : state_(state), disk_interface_(disk_interface) {}

  /// Load a dyndep file from the given node's path and update the
  /// build graph with the new information.  One overload accepts
  /// a caller-owned 'DyndepFile' object in which to store the
  /// information loaded from the dyndep file.
  bool LoadDyndeps(Node* node, std::string* err) const;
  bool LoadDyndeps(Node* node, DyndepFile* ddf, std::string* err) const;

private:
  /// Parse one dyndep file into |ddf|.
  bool LoadDyndepFile(Node* file, DyndepFile* ddf, std::string* err) const;

  /// Apply one edge's Dyndeps to the build graph.
  bool UpdateEdge(Edge* edge, Dyndeps const* dyndeps, std::string* err) const;

  // Borrowed pointers; this class never deletes them.
  State* state_;
  DiskInterface* disk_interface_;
};
#endif // NINJA_DYNDEP_LOADER_H_

223
src/dyndep_parser.cc Normal file
View File

@ -0,0 +1,223 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dyndep_parser.h"
#include <vector>
#include "dyndep.h"
#include "graph.h"
#include "state.h"
#include "util.h"
#include "version.h"
// Construct a parser that fills |dyndep_file| with per-edge dependency
// information; |state| supplies node lookup, |file_reader| file contents.
DyndepParser::DyndepParser(State* state, FileReader* file_reader,
                           DyndepFile* dyndep_file)
    : Parser(state, file_reader)
    , dyndep_file_(dyndep_file) {
}
// Parse an entire dyndep file: a mandatory leading
// "ninja_dyndep_version = ..." binding followed by any number of
// "build ...: dyndep ..." statements.  Returns false and fills |err| on
// the first error.
bool DyndepParser::Parse(const string& filename, const string& input,
                         string* err) {
  lexer_.Start(filename, input);

  // Require a supported ninja_dyndep_version value immediately so
  // we can exit before encountering any syntactic surprises.
  bool haveDyndepVersion = false;
  for (;;) {
    Lexer::Token token = lexer_.ReadToken();
    switch (token) {
    case Lexer::BUILD: {
      if (!haveDyndepVersion)
        return lexer_.Error("expected 'ninja_dyndep_version = ...'", err);
      if (!ParseEdge(err))
        return false;
      break;
    }
    case Lexer::IDENT: {
      // The only top-level identifier allowed is the version binding,
      // and it may appear only once, before any build statements.
      lexer_.UnreadToken();
      if (haveDyndepVersion)
        return lexer_.Error(string("unexpected ") + Lexer::TokenName(token),
                            err);
      if (!ParseDyndepVersion(err))
        return false;
      haveDyndepVersion = true;
      break;
    }
    case Lexer::ERROR:
      return lexer_.Error(lexer_.DescribeLastError(), err);
    case Lexer::TEOF:
      if (!haveDyndepVersion)
        return lexer_.Error("expected 'ninja_dyndep_version = ...'", err);
      return true;
    case Lexer::NEWLINE:
      break;
    default:
      return lexer_.Error(string("unexpected ") + Lexer::TokenName(token),
                          err);
    }
  }
  return false;  // not reached
}
// Parse and validate the mandatory "ninja_dyndep_version = ..." binding.
// Only major version 1, minor version 0 is accepted (suffixes such as
// "1.0-extra" are tolerated by ParseVersion).  Returns false and fills
// |err| on error.
bool DyndepParser::ParseDyndepVersion(string* err) {
  string name;
  EvalString let_value;
  if (!ParseLet(&name, &let_value, err))
    return false;
  if (name != "ninja_dyndep_version") {
    return lexer_.Error("expected 'ninja_dyndep_version = ...'", err);
  }
  string version = let_value.Evaluate(&env_);
  // Defensive defaults in case the version string is entirely unparsable.
  int major = 0, minor = 0;
  ParseVersion(version, &major, &minor);
  if (major != 1 || minor != 0) {
    // Fixed: a dead `return false;` used to follow this return statement.
    return lexer_.Error(
        string("unsupported 'ninja_dyndep_version = ") + version + "'", err);
  }
  return true;
}
// Parse one "name = value" binding into |key| and |value|.  The lexer
// reports the first malformed element via |err|.
bool DyndepParser::ParseLet(string* key, EvalString* value, string* err) {
  if (!lexer_.ReadIdent(key))
    return lexer_.Error("expected variable name", err);
  // Short-circuit: stop at the first failing element, as the original did.
  return ExpectToken(Lexer::EQUALS, err) && lexer_.ReadVarValue(value, err);
}
// Parse one "build <out>: dyndep ..." statement and record the
// dynamically-discovered dependency information for the existing edge
// that produces <out>.  Only implicit inputs/outputs and an optional
// "restat" binding are permitted.  Returns false and fills |err| on error.
bool DyndepParser::ParseEdge(string* err) {
  // Parse one explicit output. We expect it to already have an edge.
  // We will record its dynamically-discovered dependency information.
  Dyndeps* dyndeps = NULL;
  {
    EvalString out0;
    if (!lexer_.ReadPath(&out0, err))
      return false;
    if (out0.empty())
      return lexer_.Error("expected path", err);

    string path = out0.Evaluate(&env_);
    string path_err;
    uint64_t slash_bits;
    if (!CanonicalizePath(&path, &slash_bits, &path_err))
      return lexer_.Error(path_err, err);
    Node* node = state_->LookupNode(path);
    if (!node || !node->in_edge())
      return lexer_.Error("no build statement exists for '" + path + "'", err);
    Edge* edge = node->in_edge();
    std::pair<DyndepFile::iterator, bool> res =
        dyndep_file_->insert(DyndepFile::value_type(edge, Dyndeps()));
    if (!res.second)
      return lexer_.Error("multiple statements for '" + path + "'", err);
    dyndeps = &res.first->second;
  }

  // Disallow explicit outputs.
  {
    EvalString out;
    if (!lexer_.ReadPath(&out, err))
      return false;
    if (!out.empty())
      return lexer_.Error("explicit outputs not supported", err);
  }

  // Parse implicit outputs, if any.
  vector<EvalString> outs;
  if (lexer_.PeekToken(Lexer::PIPE)) {
    for (;;) {
      EvalString out;
      if (!lexer_.ReadPath(&out, err))
        // Fixed: was `return err;`, which converted the non-null string
        // pointer to `true` and reported success despite the lexer error.
        return false;
      if (out.empty())
        break;
      outs.push_back(out);
    }
  }

  if (!ExpectToken(Lexer::COLON, err))
    return false;

  string rule_name;
  if (!lexer_.ReadIdent(&rule_name) || rule_name != "dyndep")
    return lexer_.Error("expected build command name 'dyndep'", err);

  // Disallow explicit inputs.
  {
    EvalString in;
    if (!lexer_.ReadPath(&in, err))
      return false;
    if (!in.empty())
      return lexer_.Error("explicit inputs not supported", err);
  }

  // Parse implicit inputs, if any.
  vector<EvalString> ins;
  if (lexer_.PeekToken(Lexer::PIPE)) {
    for (;;) {
      EvalString in;
      if (!lexer_.ReadPath(&in, err))
        // Fixed: was `return err;` (same pointer-to-bool bug as above).
        return false;
      if (in.empty())
        break;
      ins.push_back(in);
    }
  }

  // Disallow order-only inputs.
  if (lexer_.PeekToken(Lexer::PIPE2))
    return lexer_.Error("order-only inputs not supported", err);

  if (!ExpectToken(Lexer::NEWLINE, err))
    return false;

  // An optional indented block may carry exactly one "restat = ..." binding.
  if (lexer_.PeekToken(Lexer::INDENT)) {
    string key;
    EvalString val;
    if (!ParseLet(&key, &val, err))
      return false;
    if (key != "restat")
      return lexer_.Error("binding is not 'restat'", err);
    string value = val.Evaluate(&env_);
    dyndeps->restat_ = !value.empty();
  }

  // Canonicalize and intern the implicit inputs.
  dyndeps->implicit_inputs_.reserve(ins.size());
  for (vector<EvalString>::iterator i = ins.begin(); i != ins.end(); ++i) {
    string path = i->Evaluate(&env_);
    string path_err;
    uint64_t slash_bits;
    if (!CanonicalizePath(&path, &slash_bits, &path_err))
      return lexer_.Error(path_err, err);
    Node* n = state_->GetNode(path, slash_bits);
    dyndeps->implicit_inputs_.push_back(n);
  }

  // Canonicalize and intern the implicit outputs.
  dyndeps->implicit_outputs_.reserve(outs.size());
  for (vector<EvalString>::iterator i = outs.begin(); i != outs.end(); ++i) {
    string path = i->Evaluate(&env_);
    string path_err;
    uint64_t slash_bits;
    if (!CanonicalizePath(&path, &slash_bits, &path_err))
      return lexer_.Error(path_err, err);
    Node* n = state_->GetNode(path, slash_bits);
    dyndeps->implicit_outputs_.push_back(n);
  }

  return true;
}

46
src/dyndep_parser.h Normal file
View File

@ -0,0 +1,46 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_DYNDEP_PARSER_H_
#define NINJA_DYNDEP_PARSER_H_
#include "eval_env.h"
#include "parser.h"
struct DyndepFile;
struct EvalString;
/// Parses dyndep files.
struct DyndepParser: public Parser {
  DyndepParser(State* state, FileReader* file_reader,
               DyndepFile* dyndep_file);

  /// Parse a text string of input.  Used by tests.
  bool ParseTest(const string& input, string* err) {
    return Parse("input", input, err);
  }

private:
  /// Parse a file, given its contents as a string.
  bool Parse(const string& filename, const string& input, string* err);

  /// Parse the mandatory leading "ninja_dyndep_version = ..." binding.
  bool ParseDyndepVersion(string* err);
  /// Parse one "name = value" binding.
  bool ParseLet(string* key, EvalString* val, string* err);
  /// Parse one "build ...: dyndep ..." statement.
  bool ParseEdge(string* err);

  DyndepFile* dyndep_file_;  // destination for parsed results; not deleted here
  BindingEnv env_;           // scope used to evaluate binding values
};
#endif // NINJA_DYNDEP_PARSER_H_

512
src/dyndep_parser_test.cc Normal file
View File

@ -0,0 +1,512 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dyndep_parser.h"
#include <map>
#include <vector>
#include "dyndep.h"
#include "graph.h"
#include "state.h"
#include "test.h"
/// Fixture providing a State preloaded with a "touch" rule and one edge
/// producing "out" and "otherout", which the dyndep inputs under test
/// refer back to.
struct DyndepParserTest : public testing::Test {
  // Parse |input| and fail the test on any parse error.
  void AssertParse(const char* input) {
    DyndepParser parser(&state_, &fs_, &dyndep_file_);
    string err;
    EXPECT_TRUE(parser.ParseTest(input, &err));
    ASSERT_EQ("", err);
  }

  virtual void SetUp() {
    ::AssertParse(&state_,
"rule touch\n"
"  command = touch $out\n"
"build out otherout: touch\n");
  }

  State state_;
  VirtualFileSystem fs_;
  DyndepFile dyndep_file_;   // receives the parsed dyndep information
};
// --- Version-line handling ---
// An empty file is rejected: the version binding is mandatory.
TEST_F(DyndepParserTest, Empty) {
  const char kInput[] =
"";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:1: expected 'ninja_dyndep_version = ...'\n", err);
}

// All 1.0-compatible spellings of the version are accepted, including
// "-extra" suffixes, leading comments/blank lines, and CRLF endings.
TEST_F(DyndepParserTest, Version1) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
"ninja_dyndep_version = 1\n"));
}

TEST_F(DyndepParserTest, Version1Extra) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
"ninja_dyndep_version = 1-extra\n"));
}

TEST_F(DyndepParserTest, Version1_0) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
"ninja_dyndep_version = 1.0\n"));
}

TEST_F(DyndepParserTest, Version1_0Extra) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
"ninja_dyndep_version = 1.0-extra\n"));
}

TEST_F(DyndepParserTest, CommentVersion) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
"# comment\n"
"ninja_dyndep_version = 1\n"));
}

TEST_F(DyndepParserTest, BlankLineVersion) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
"\n"
"ninja_dyndep_version = 1\n"));
}

TEST_F(DyndepParserTest, VersionCRLF) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
"ninja_dyndep_version = 1\r\n"));
}

TEST_F(DyndepParserTest, CommentVersionCRLF) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
"# comment\r\n"
"ninja_dyndep_version = 1\r\n"));
}

TEST_F(DyndepParserTest, BlankLineVersionCRLF) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
"\r\n"
"ninja_dyndep_version = 1\r\n"));
}

// EOF in the middle of the version line is a parse error.
TEST_F(DyndepParserTest, VersionUnexpectedEOF) {
  const char kInput[] =
"ninja_dyndep_version = 1.0";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:1: unexpected EOF\n"
"ninja_dyndep_version = 1.0\n"
" ^ near here", err);
}
// Only version 1.0 (optionally suffixed) is supported.
TEST_F(DyndepParserTest, UnsupportedVersion0) {
  const char kInput[] =
"ninja_dyndep_version = 0\n";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:1: unsupported 'ninja_dyndep_version = 0'\n"
"ninja_dyndep_version = 0\n"
" ^ near here", err);
}

TEST_F(DyndepParserTest, UnsupportedVersion1_1) {
  const char kInput[] =
"ninja_dyndep_version = 1.1\n";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:1: unsupported 'ninja_dyndep_version = 1.1'\n"
"ninja_dyndep_version = 1.1\n"
" ^ near here", err);
}

// The version binding may appear only once...
TEST_F(DyndepParserTest, DuplicateVersion) {
  const char kInput[] =
"ninja_dyndep_version = 1\n"
"ninja_dyndep_version = 1\n";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:2: unexpected identifier\n", err);
}

// ...must be the only top-level binding...
TEST_F(DyndepParserTest, MissingVersionOtherVar) {
  const char kInput[] =
"not_ninja_dyndep_version = 1\n";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:1: expected 'ninja_dyndep_version = ...'\n"
"not_ninja_dyndep_version = 1\n"
" ^ near here", err);
}

// ...and must come before any build statement.
TEST_F(DyndepParserTest, MissingVersionBuild) {
  const char kInput[] =
"build out: dyndep\n";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:1: expected 'ninja_dyndep_version = ...'\n", err);
}

// Stray tokens at the top level are diagnosed.
TEST_F(DyndepParserTest, UnexpectedEqual) {
  const char kInput[] =
"= 1\n";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:1: unexpected '='\n", err);
}

TEST_F(DyndepParserTest, UnexpectedIndent) {
  const char kInput[] =
" = 1\n";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:1: unexpected indent\n", err);
}
// --- Output declarations ---
// Each edge may be described at most once, even when addressed via
// different outputs of the same edge.
TEST_F(DyndepParserTest, OutDuplicate) {
  const char kInput[] =
"ninja_dyndep_version = 1\n"
"build out: dyndep\n"
"build out: dyndep\n";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:3: multiple statements for 'out'\n"
"build out: dyndep\n"
" ^ near here", err);
}

TEST_F(DyndepParserTest, OutDuplicateThroughOther) {
  const char kInput[] =
"ninja_dyndep_version = 1\n"
"build out: dyndep\n"
"build otherout: dyndep\n";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:3: multiple statements for 'otherout'\n"
"build otherout: dyndep\n"
" ^ near here", err);
}

// Truncated or malformed build lines are diagnosed precisely.
TEST_F(DyndepParserTest, NoOutEOF) {
  const char kInput[] =
"ninja_dyndep_version = 1\n"
"build";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:2: unexpected EOF\n"
"build\n"
" ^ near here", err);
}

TEST_F(DyndepParserTest, NoOutColon) {
  const char kInput[] =
"ninja_dyndep_version = 1\n"
"build :\n";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:2: expected path\n"
"build :\n"
" ^ near here", err);
}

// The named output must already be produced by an edge in the State.
TEST_F(DyndepParserTest, OutNoStatement) {
  const char kInput[] =
"ninja_dyndep_version = 1\n"
"build missing: dyndep\n";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:2: no build statement exists for 'missing'\n"
"build missing: dyndep\n"
" ^ near here", err);
}

TEST_F(DyndepParserTest, OutEOF) {
  const char kInput[] =
"ninja_dyndep_version = 1\n"
"build out";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:2: unexpected EOF\n"
"build out\n"
" ^ near here", err);
}

// The rule name must be exactly "dyndep".
TEST_F(DyndepParserTest, OutNoRule) {
  const char kInput[] =
"ninja_dyndep_version = 1\n"
"build out:";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:2: expected build command name 'dyndep'\n"
"build out:\n"
" ^ near here", err);
}

TEST_F(DyndepParserTest, OutBadRule) {
  const char kInput[] =
"ninja_dyndep_version = 1\n"
"build out: touch";
  DyndepParser parser(&state_, &fs_, &dyndep_file_);
  string err;
  EXPECT_FALSE(parser.ParseTest(kInput, &err));
  EXPECT_EQ("input:2: expected build command name 'dyndep'\n"
"build out: touch\n"
" ^ near here", err);
}
TEST_F(DyndepParserTest, BuildEOF) {
const char kInput[] =
"ninja_dyndep_version = 1\n"
"build out: dyndep";
DyndepParser parser(&state_, &fs_, &dyndep_file_);
string err;
EXPECT_FALSE(parser.ParseTest(kInput, &err));
EXPECT_EQ("input:2: unexpected EOF\n"
"build out: dyndep\n"
" ^ near here", err);
}
TEST_F(DyndepParserTest, ExplicitOut) {
const char kInput[] =
"ninja_dyndep_version = 1\n"
"build out exp: dyndep\n";
DyndepParser parser(&state_, &fs_, &dyndep_file_);
string err;
EXPECT_FALSE(parser.ParseTest(kInput, &err));
EXPECT_EQ("input:2: explicit outputs not supported\n"
"build out exp: dyndep\n"
" ^ near here", err);
}
TEST_F(DyndepParserTest, ExplicitIn) {
const char kInput[] =
"ninja_dyndep_version = 1\n"
"build out: dyndep exp\n";
DyndepParser parser(&state_, &fs_, &dyndep_file_);
string err;
EXPECT_FALSE(parser.ParseTest(kInput, &err));
EXPECT_EQ("input:2: explicit inputs not supported\n"
"build out: dyndep exp\n"
" ^ near here", err);
}
TEST_F(DyndepParserTest, OrderOnlyIn) {
const char kInput[] =
"ninja_dyndep_version = 1\n"
"build out: dyndep ||\n";
DyndepParser parser(&state_, &fs_, &dyndep_file_);
string err;
EXPECT_FALSE(parser.ParseTest(kInput, &err));
EXPECT_EQ("input:2: order-only inputs not supported\n"
"build out: dyndep ||\n"
" ^ near here", err);
}
TEST_F(DyndepParserTest, BadBinding) {
const char kInput[] =
"ninja_dyndep_version = 1\n"
"build out: dyndep\n"
" not_restat = 1\n";
DyndepParser parser(&state_, &fs_, &dyndep_file_);
string err;
EXPECT_FALSE(parser.ParseTest(kInput, &err));
EXPECT_EQ("input:3: binding is not 'restat'\n"
" not_restat = 1\n"
" ^ near here", err);
}
TEST_F(DyndepParserTest, RestatTwice) {
const char kInput[] =
"ninja_dyndep_version = 1\n"
"build out: dyndep\n"
" restat = 1\n"
" restat = 1\n";
DyndepParser parser(&state_, &fs_, &dyndep_file_);
string err;
EXPECT_FALSE(parser.ParseTest(kInput, &err));
EXPECT_EQ("input:4: unexpected indent\n", err);
}
// The tests below parse well-formed dyndep files and inspect the resulting
// DyndepFile entry keyed by the edge (state_.edges_[0] comes from the
// fixture's AssertParse setup — see the test fixture).

// No '|' sections at all: entry exists, restat off, both lists empty.
TEST_F(DyndepParserTest, NoImplicit) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
    "ninja_dyndep_version = 1\n"
    "build out: dyndep\n"));

  EXPECT_EQ(1u, dyndep_file_.size());
  DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
  ASSERT_NE(i, dyndep_file_.end());
  EXPECT_EQ(false, i->second.restat_);
  EXPECT_EQ(0u, i->second.implicit_outputs_.size());
  EXPECT_EQ(0u, i->second.implicit_inputs_.size());
}

// Empty '|' sections on both sides parse and yield empty lists.
TEST_F(DyndepParserTest, EmptyImplicit) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
    "ninja_dyndep_version = 1\n"
    "build out | : dyndep |\n"));

  EXPECT_EQ(1u, dyndep_file_.size());
  DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
  ASSERT_NE(i, dyndep_file_.end());
  EXPECT_EQ(false, i->second.restat_);
  EXPECT_EQ(0u, i->second.implicit_outputs_.size());
  EXPECT_EQ(0u, i->second.implicit_inputs_.size());
}

// One implicit input is recorded.
TEST_F(DyndepParserTest, ImplicitIn) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
    "ninja_dyndep_version = 1\n"
    "build out: dyndep | impin\n"));

  EXPECT_EQ(1u, dyndep_file_.size());
  DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
  ASSERT_NE(i, dyndep_file_.end());
  EXPECT_EQ(false, i->second.restat_);
  EXPECT_EQ(0u, i->second.implicit_outputs_.size());
  ASSERT_EQ(1u, i->second.implicit_inputs_.size());
  EXPECT_EQ("impin", i->second.implicit_inputs_[0]->path());
}

// Multiple implicit inputs are recorded in declaration order.
TEST_F(DyndepParserTest, ImplicitIns) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
    "ninja_dyndep_version = 1\n"
    "build out: dyndep | impin1 impin2\n"));

  EXPECT_EQ(1u, dyndep_file_.size());
  DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
  ASSERT_NE(i, dyndep_file_.end());
  EXPECT_EQ(false, i->second.restat_);
  EXPECT_EQ(0u, i->second.implicit_outputs_.size());
  ASSERT_EQ(2u, i->second.implicit_inputs_.size());
  EXPECT_EQ("impin1", i->second.implicit_inputs_[0]->path());
  EXPECT_EQ("impin2", i->second.implicit_inputs_[1]->path());
}

// One implicit output is recorded.
TEST_F(DyndepParserTest, ImplicitOut) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
    "ninja_dyndep_version = 1\n"
    "build out | impout: dyndep\n"));

  EXPECT_EQ(1u, dyndep_file_.size());
  DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
  ASSERT_NE(i, dyndep_file_.end());
  EXPECT_EQ(false, i->second.restat_);
  ASSERT_EQ(1u, i->second.implicit_outputs_.size());
  EXPECT_EQ("impout", i->second.implicit_outputs_[0]->path());
  EXPECT_EQ(0u, i->second.implicit_inputs_.size());
}

// Multiple implicit outputs are recorded in declaration order.
TEST_F(DyndepParserTest, ImplicitOuts) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
    "ninja_dyndep_version = 1\n"
    "build out | impout1 impout2 : dyndep\n"));

  EXPECT_EQ(1u, dyndep_file_.size());
  DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
  ASSERT_NE(i, dyndep_file_.end());
  EXPECT_EQ(false, i->second.restat_);
  ASSERT_EQ(2u, i->second.implicit_outputs_.size());
  EXPECT_EQ("impout1", i->second.implicit_outputs_[0]->path());
  EXPECT_EQ("impout2", i->second.implicit_outputs_[1]->path());
  EXPECT_EQ(0u, i->second.implicit_inputs_.size());
}

// Implicit outputs and implicit inputs may both be present.
TEST_F(DyndepParserTest, ImplicitInsAndOuts) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
    "ninja_dyndep_version = 1\n"
    "build out | impout1 impout2: dyndep | impin1 impin2\n"));

  EXPECT_EQ(1u, dyndep_file_.size());
  DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
  ASSERT_NE(i, dyndep_file_.end());
  EXPECT_EQ(false, i->second.restat_);
  ASSERT_EQ(2u, i->second.implicit_outputs_.size());
  EXPECT_EQ("impout1", i->second.implicit_outputs_[0]->path());
  EXPECT_EQ("impout2", i->second.implicit_outputs_[1]->path());
  ASSERT_EQ(2u, i->second.implicit_inputs_.size());
  EXPECT_EQ("impin1", i->second.implicit_inputs_[0]->path());
  EXPECT_EQ("impin2", i->second.implicit_inputs_[1]->path());
}

// The 'restat = 1' binding turns on the entry's restat_ flag.
TEST_F(DyndepParserTest, Restat) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
    "ninja_dyndep_version = 1\n"
    "build out: dyndep\n"
    " restat = 1\n"));

  EXPECT_EQ(1u, dyndep_file_.size());
  DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
  ASSERT_NE(i, dyndep_file_.end());
  EXPECT_EQ(true, i->second.restat_);
  EXPECT_EQ(0u, i->second.implicit_outputs_.size());
  EXPECT_EQ(0u, i->second.implicit_inputs_.size());
}

// The dyndep statement may name an output other than 'out'
// (NOTE(review): presumably 'otherout' is declared on the same edge by
// the fixture — confirm against the test fixture setup).
TEST_F(DyndepParserTest, OtherOutput) {
  ASSERT_NO_FATAL_FAILURE(AssertParse(
    "ninja_dyndep_version = 1\n"
    "build otherout: dyndep\n"));

  EXPECT_EQ(1u, dyndep_file_.size());
  DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
  ASSERT_NE(i, dyndep_file_.end());
  EXPECT_EQ(false, i->second.restat_);
  EXPECT_EQ(0u, i->second.implicit_outputs_.size());
  EXPECT_EQ(0u, i->second.implicit_inputs_.size());
}
// A dyndep file may carry statements for several edges; each edge gets its
// own independent entry (here: restat differs between the two).
TEST_F(DyndepParserTest, MultipleEdges) {
  // Add a second edge ("out2") to the state before parsing the dyndep file.
  ::AssertParse(&state_,
    "build out2: touch\n");
  ASSERT_EQ(2u, state_.edges_.size());
  ASSERT_EQ(1u, state_.edges_[1]->outputs_.size());
  EXPECT_EQ("out2", state_.edges_[1]->outputs_[0]->path());
  EXPECT_EQ(0u, state_.edges_[0]->inputs_.size());

  ASSERT_NO_FATAL_FAILURE(AssertParse(
    "ninja_dyndep_version = 1\n"
    "build out: dyndep\n"
    "build out2: dyndep\n"
    " restat = 1\n"));

  EXPECT_EQ(2u, dyndep_file_.size());
  {
    // First edge: plain entry, restat off.
    DyndepFile::iterator i = dyndep_file_.find(state_.edges_[0]);
    ASSERT_NE(i, dyndep_file_.end());
    EXPECT_EQ(false, i->second.restat_);
    EXPECT_EQ(0u, i->second.implicit_outputs_.size());
    EXPECT_EQ(0u, i->second.implicit_inputs_.size());
  }
  {
    // Second edge: the restat binding applies only to this statement.
    DyndepFile::iterator i = dyndep_file_.find(state_.edges_[1]);
    ASSERT_NE(i, dyndep_file_.end());
    EXPECT_EQ(true, i->second.restat_);
    EXPECT_EQ(0u, i->second.implicit_outputs_.size());
    EXPECT_EQ(0u, i->second.implicit_inputs_.size());
  }
}

69
src/edit_distance.cc Normal file
View File

@@ -0,0 +1,69 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "edit_distance.h"
#include <algorithm>
#include <vector>
/// Compute the edit distance between \a s1 and \a s2 using the classic
/// single-row dynamic-programming Levenshtein algorithm:
///   http://en.wikipedia.org/wiki/Levenshtein_distance
/// Only one row of the (m+1) x (n+1) table is kept; the diagonal
/// ("above-left") value is carried in a scalar across the inner loop.
/// When \a allow_replacements is false, substitutions are not counted as a
/// single edit. When \a max_edit_distance is non-zero, the scan aborts and
/// returns max_edit_distance + 1 as soon as every entry of a row exceeds it.
int EditDistance(const StringPiece& s1,
                 const StringPiece& s2,
                 bool allow_replacements,
                 int max_edit_distance) {
  const int len1 = s1.len_;
  const int len2 = s2.len_;

  // row[x] holds the distance between the first y chars of s1 and the
  // first x chars of s2; initially (y == 0) that is simply x insertions.
  vector<int> row(len2 + 1);
  for (int col = 1; col <= len2; ++col)
    row[col] = col;

  for (int y = 1; y <= len1; ++y) {
    int above_left = row[0];  // diagonal value from the previous row
    row[0] = y;
    int row_minimum = y;
    for (int x = 1; x <= len2; ++x) {
      const int above = row[x];
      const bool same = (s1.str_[y - 1] == s2.str_[x - 1]);
      int value;
      if (allow_replacements) {
        value = min(above_left + (same ? 0 : 1), min(row[x - 1], above) + 1);
      } else if (same) {
        value = above_left;
      } else {
        value = min(row[x - 1], above) + 1;
      }
      row[x] = value;
      above_left = above;
      row_minimum = min(row_minimum, value);
    }
    // Early exit: no cell in this row (and hence no final answer) can be
    // within the requested bound any more.
    if (max_edit_distance && row_minimum > max_edit_distance)
      return max_edit_distance + 1;
  }
  return row[len2];
}

25
src/edit_distance.h Normal file
View File

@@ -0,0 +1,25 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_EDIT_DISTANCE_H_
#define NINJA_EDIT_DISTANCE_H_

#include "string_piece.h"

/// Compute the edit distance between \a s1 and \a s2.
/// With \a allow_replacements (the default) a character substitution counts
/// as one edit (Levenshtein distance); otherwise only insertions and
/// deletions do.  If \a max_edit_distance is non-zero, the computation may
/// stop early and return max_edit_distance + 1 once the result is known to
/// exceed the bound.
int EditDistance(const StringPiece& s1,
                 const StringPiece& s2,
                 bool allow_replacements = true,
                 int max_edit_distance = 0);

#endif  // NINJA_EDIT_DISTANCE_H_

48
src/edit_distance_test.cc Normal file
View File

@@ -0,0 +1,48 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "edit_distance.h"
#include "test.h"
TEST(EditDistanceTest, TestEmpty) {
EXPECT_EQ(5, EditDistance("", "ninja"));
EXPECT_EQ(5, EditDistance("ninja", ""));
EXPECT_EQ(0, EditDistance("", ""));
}
TEST(EditDistanceTest, TestMaxDistance) {
const bool allow_replacements = true;
for (int max_distance = 1; max_distance < 7; ++max_distance) {
EXPECT_EQ(max_distance + 1,
EditDistance("abcdefghijklmnop", "ponmlkjihgfedcba",
allow_replacements, max_distance));
}
}
TEST(EditDistanceTest, TestAllowReplacements) {
bool allow_replacements = true;
EXPECT_EQ(1, EditDistance("ninja", "njnja", allow_replacements));
EXPECT_EQ(1, EditDistance("njnja", "ninja", allow_replacements));
allow_replacements = false;
EXPECT_EQ(2, EditDistance("ninja", "njnja", allow_replacements));
EXPECT_EQ(2, EditDistance("njnja", "ninja", allow_replacements));
}
TEST(EditDistanceTest, TestBasics) {
EXPECT_EQ(0, EditDistance("browser_tests", "browser_tests"));
EXPECT_EQ(1, EditDistance("browser_test", "browser_tests"));
EXPECT_EQ(1, EditDistance("browser_tests", "browser_test"));
}

147
src/eval_env.cc Normal file
View File

@@ -0,0 +1,147 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <assert.h>
#include "eval_env.h"
// Resolve |var| in this scope; fall back to the enclosing scope chain,
// and finally to the empty string when no scope binds it.
string BindingEnv::LookupVariable(const string& var) {
  map<string, string>::iterator it = bindings_.find(var);
  if (it != bindings_.end())
    return it->second;
  return parent_ ? parent_->LookupVariable(var) : "";
}
// Set (or overwrite) a variable binding in this scope only.
void BindingEnv::AddBinding(const string& key, const string& val) {
  bindings_[key] = val;
}

// Register a rule in this scope.  Redeclaring a rule name already present
// in the current scope is a programming error (asserted); the map stores a
// raw pointer, so presumably the caller keeps the Rule alive — confirm
// ownership at call sites.
void BindingEnv::AddRule(const Rule* rule) {
  assert(LookupRuleCurrentScope(rule->name()) == NULL);
  rules_[rule->name()] = rule;
}

// Look up a rule in this scope only (no parent chaining).
// Returns NULL when the name is not declared here.
const Rule* BindingEnv::LookupRuleCurrentScope(const string& rule_name) {
  map<string, const Rule*>::iterator i = rules_.find(rule_name);
  if (i == rules_.end())
    return NULL;
  return i->second;
}

// Look up a rule here, then recursively through enclosing scopes.
// Returns NULL when no scope declares it.
const Rule* BindingEnv::LookupRule(const string& rule_name) {
  map<string, const Rule*>::iterator i = rules_.find(rule_name);
  if (i != rules_.end())
    return i->second;
  if (parent_)
    return parent_->LookupRule(rule_name);
  return NULL;
}

// Set (or overwrite) an unevaluated binding on the rule itself.
void Rule::AddBinding(const string& key, const EvalString& val) {
  bindings_[key] = val;
}

// Return the rule's unevaluated binding for |key|, or NULL if absent.
const EvalString* Rule::GetBinding(const string& key) const {
  Bindings::const_iterator i = bindings_.find(key);
  if (i == bindings_.end())
    return NULL;
  return &i->second;
}
// static
// True when |var| is one of the binding names the manifest language
// reserves on rules (command, depfile, deps, ...).
bool Rule::IsReservedBinding(const string& var) {
  static const char* const kReserved[] = {
    "command",
    "depfile",
    "dyndep",
    "description",
    "deps",
    "generator",
    "pool",
    "restat",
    "rspfile",
    "rspfile_content",
    "msvc_deps_prefix",
  };
  for (size_t i = 0; i < sizeof(kReserved) / sizeof(kReserved[0]); ++i) {
    if (var == kReserved[i])
      return true;
  }
  return false;
}
// Expose this scope's rule map (current scope only; no parent chaining).
const map<string, const Rule*>& BindingEnv::GetRules() const {
  return rules_;
}

// Resolve |var| with rule-fallback semantics:
//   1) a binding in this scope wins;
//   2) otherwise, if the rule supplies |eval|, evaluate it in |env|;
//   3) otherwise fall back to the parent scope chain (empty string if none).
string BindingEnv::LookupWithFallback(const string& var,
                                      const EvalString* eval,
                                      Env* env) {
  map<string, string>::iterator i = bindings_.find(var);
  if (i != bindings_.end())
    return i->second;

  if (eval)
    return eval->Evaluate(env);

  if (parent_)
    return parent_->LookupVariable(var);

  return "";
}
// Expand this tokenized string: RAW tokens are copied verbatim, SPECIAL
// tokens are replaced by the value |env| supplies for the variable name.
string EvalString::Evaluate(Env* env) const {
  string result;
  for (TokenList::const_iterator it = parsed_.begin(); it != parsed_.end();
       ++it) {
    result.append(it->second == RAW ? it->first
                                    : env->LookupVariable(it->first));
  }
  return result;
}
// Append literal text to the token list.
void EvalString::AddText(StringPiece text) {
  // Add it to the end of an existing RAW token if possible.
  if (!parsed_.empty() && parsed_.back().second == RAW) {
    parsed_.back().first.append(text.str_, text.len_);
  } else {
    parsed_.push_back(make_pair(text.AsString(), RAW));
  }
}

// Append a variable reference.  Unlike RAW text, SPECIAL tokens are never
// merged with their neighbors.
void EvalString::AddSpecial(StringPiece text) {
  parsed_.push_back(make_pair(text.AsString(), SPECIAL));
}
// Render the parsed token list for test inspection: every token is wrapped
// in brackets, with a leading '$' marking variable (SPECIAL) tokens.
string EvalString::Serialize() const {
  string result;
  for (TokenList::const_iterator it = parsed_.begin(); it != parsed_.end();
       ++it) {
    result += "[";
    if (it->second == SPECIAL)
      result += "$";
    result += it->first;
    result += "]";
  }
  return result;
}
// Reconstruct manifest-style text without expanding variables: SPECIAL
// tokens come back as "${name}", RAW tokens verbatim.
string EvalString::Unparse() const {
  string result;
  for (TokenList::const_iterator it = parsed_.begin(); it != parsed_.end();
       ++it) {
    const bool is_special = (it->second == SPECIAL);
    if (is_special)
      result += "${";
    result += it->first;
    if (is_special)
      result += "}";
  }
  return result;
}

110
src/eval_env.h Normal file
View File

@@ -0,0 +1,110 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_EVAL_ENV_H_
#define NINJA_EVAL_ENV_H_

#include <map>
#include <string>
#include <vector>
using namespace std;

#include "string_piece.h"

struct Rule;

/// An interface for a scope for variable (e.g. "$foo") lookups.
struct Env {
  virtual ~Env() {}
  virtual string LookupVariable(const string& var) = 0;
};

/// A tokenized string that contains variable references.
/// Can be evaluated relative to an Env.
struct EvalString {
  /// @return The evaluated string with variable expanded using value found in
  ///         environment @a env.
  string Evaluate(Env* env) const;

  /// @return The string with variables not expanded.
  string Unparse() const;

  void Clear() { parsed_.clear(); }
  bool empty() const { return parsed_.empty(); }

  /// Append literal text (merged into a trailing RAW token when possible).
  void AddText(StringPiece text);
  /// Append a variable reference (stored as a SPECIAL token).
  void AddSpecial(StringPiece text);

  /// Construct a human-readable representation of the parsed state,
  /// for use in tests.
  string Serialize() const;

 private:
  // Each token is either literal text (RAW) or a variable name (SPECIAL).
  enum TokenType { RAW, SPECIAL };
  typedef vector<pair<string, TokenType> > TokenList;
  TokenList parsed_;
};

/// An invokable build command and associated metadata (description, etc.).
struct Rule {
  explicit Rule(const string& name) : name_(name) {}

  const string& name() const { return name_; }

  void AddBinding(const string& key, const EvalString& val);

  static bool IsReservedBinding(const string& var);

  const EvalString* GetBinding(const string& key) const;

 private:
  // Allow the parsers to reach into this object and fill out its fields.
  friend struct ManifestParser;

  string name_;
  typedef map<string, EvalString> Bindings;
  Bindings bindings_;
};

/// An Env which contains a mapping of variables to values
/// as well as a pointer to a parent scope.
struct BindingEnv : public Env {
  BindingEnv() : parent_(NULL) {}
  explicit BindingEnv(BindingEnv* parent) : parent_(parent) {}

  virtual ~BindingEnv() {}
  virtual string LookupVariable(const string& var);

  void AddRule(const Rule* rule);
  const Rule* LookupRule(const string& rule_name);
  const Rule* LookupRuleCurrentScope(const string& rule_name);
  const map<string, const Rule*>& GetRules() const;

  void AddBinding(const string& key, const string& val);

  /// This is tricky.  Edges want lookup scope to go in this order:
  /// 1) value set on edge itself (edge_->env_)
  /// 2) value set on rule, with expansion in the edge's scope
  /// 3) value set on enclosing scope of edge (edge_->env_->parent_)
  /// This function takes as parameters the necessary info to do (2).
  string LookupWithFallback(const string& var, const EvalString* eval,
                            Env* env);

 private:
  map<string, string> bindings_;
  map<string, const Rule*> rules_;
  BindingEnv* parent_;  // NULL for the outermost scope
};

#endif  // NINJA_EVAL_ENV_H_

24
src/exit_status.h Normal file
View File

@@ -0,0 +1,24 @@
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef NINJA_EXIT_STATUS_H_
#define NINJA_EXIT_STATUS_H_

/// Coarse classification of how a run finished.
enum ExitStatus {
  ExitSuccess,      ///< completed successfully
  ExitFailure,      ///< ran but failed
  ExitInterrupted   ///< stopped before completion (e.g. interrupted)
};

#endif  // NINJA_EXIT_STATUS_H_

92
src/gen_doxygen_mainpage.sh Executable file
View File

@@ -0,0 +1,92 @@
#!/bin/sh
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Emit a Doxygen \mainpage comment block that embeds each input file
# verbatim, one \section per file, on stdout.
set -o errexit
set -o nounset

# Non-zero once any input file could not be read; used as the exit status.
STATUS=0

# Print each of its arguments on stderr (one per line) prefixed by the
# basename of this script.
stderr()
{
  local me=$(basename "$0")
  local i
  for i
  do
    echo >&2 "$me: $i"
  done
}

# Print each of its arguments on stderr (one per line) prefixed by the
# basename of this script and 'error'.
error()
{
  local i
  for i
  do
    stderr "error: $i"
  done
  STATUS=1
}

# Emit the opening of the Doxygen comment block.
generate_header()
{
  cat <<EOF
/**
* \\mainpage
EOF
}

# Emit the closing of the Doxygen comment block.
generate_footer()
{
  cat <<EOF
*/
EOF
}

# Emit one input file as a \section wrapped in \verbatim so Doxygen renders
# it literally.  An unreadable file is reported and skipped; the failure is
# remembered in STATUS rather than aborting the whole run.
include_file()
{
  local file="$1"
  if ! [ -r "$file" ]
  then
    error "'$file' is not readable."
    return
  fi
  cat <<EOF
* \\section $file
* \\verbatim
EOF
  cat < "$file"
  cat <<EOF
\\endverbatim
EOF
}

if [ $# -eq 0 ]
then
  echo >&2 "usage: $0 inputs..."
  exit 1
fi

generate_header
for i in "$@"
do
  include_file "$i"
done
generate_footer

exit $STATUS

410
src/getopt.c Normal file
View File

@@ -0,0 +1,410 @@
/****************************************************************************
getopt.c - Read command line options
AUTHOR: Gregory Pietsch
CREATED Fri Jan 10 21:13:05 1997
DESCRIPTION:
The getopt() function parses the command line arguments. Its arguments argc
and argv are the argument count and array as passed to the main() function
on program invocation. The argument optstring is a list of available option
characters. If such a character is followed by a colon (`:'), the option
takes an argument, which is placed in optarg. If such a character is
followed by two colons, the option takes an optional argument, which is
placed in optarg. If the option does not take an argument, optarg is NULL.
The external variable optind is the index of the next array element of argv
to be processed; it communicates from one call to the next which element to
process.
The getopt_long() function works like getopt() except that it also accepts
long options started by two dashes `--'. If these take values, it is either
in the form
--arg=value
or
--arg value
It takes the additional arguments longopts which is a pointer to the first
element of an array of type GETOPT_LONG_OPTION_T. The last element of the
array has to be filled with NULL for the name field.
The longind pointer points to the index of the current long option relative
to longopts if it is non-NULL.
The getopt() function returns the option character if the option was found
successfully, `:' if there was a missing parameter for one of the options,
`?' for an unknown option character, and EOF for the end of the option list.
The getopt_long() function's return value is described in the header file.
The function getopt_long_only() is identical to getopt_long(), except that a
plus sign `+' can introduce long options as well as `--'.
The following describes how to deal with options that follow non-option
argv-elements.
If the caller did not specify anything, the default is REQUIRE_ORDER if the
environment variable POSIXLY_CORRECT is defined, PERMUTE otherwise.
REQUIRE_ORDER means don't recognize them as options; stop option processing
when the first non-option is seen. This is what Unix does. This mode of
operation is selected by either setting the environment variable
POSIXLY_CORRECT, or using `+' as the first character of the optstring
parameter.
PERMUTE is the default. We permute the contents of ARGV as we scan, so that
eventually all the non-options are at the end. This allows options to be
given in any order, even with programs that were not written to expect this.
RETURN_IN_ORDER is an option available to programs that were written to
expect options and other argv-elements in any order and that care about the
ordering of the two. We describe each non-option argv-element as if it were
the argument of an option with character code 1. Using `-' as the first
character of the optstring parameter selects this mode of operation.
The special argument `--' forces an end of option-scanning regardless of the
value of ordering. In the case of RETURN_IN_ORDER, only `--' can cause
getopt() and friends to return EOF with optind != argc.
COPYRIGHT NOTICE AND DISCLAIMER:
Copyright (C) 1997 Gregory Pietsch
This file and the accompanying getopt.h header file are hereby placed in the
public domain without restrictions. Just give the author credit, don't
claim you wrote it or prevent anyone else from using it.
Gregory Pietsch's current e-mail address:
gpietsch@comcast.net
****************************************************************************/
/* include files */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef GETOPT_H
#include "getopt.h"
#endif
/* macros */

/* types */
/* Argument-ordering policy; see the file header for the meaning of
   PERMUTE / RETURN_IN_ORDER / REQUIRE_ORDER. */
typedef enum GETOPT_ORDERING_T
{
  PERMUTE,
  RETURN_IN_ORDER,
  REQUIRE_ORDER
} GETOPT_ORDERING_T;

/* globally-defined variables */
char *optarg = NULL;   /* argument of the last option parsed, or NULL */
int optind = 0;        /* index of the next argv element to process */
int opterr = 1;        /* non-zero: print diagnostics on stderr */
int optopt = '?';      /* last option character examined */

/* functions */
/* reverse_argv_elements: reverses num elements starting at argv */
static void
reverse_argv_elements (char **argv, int num)
{
  int lo = 0;
  int hi = num - 1;

  /* Swap symmetric pairs working inward from both ends. */
  while (lo < hi)
    {
      char *saved = argv[lo];
      argv[lo] = argv[hi];
      argv[hi] = saved;
      lo++;
      hi--;
    }
}
/* permute: swap two blocks of argv-elements given their lengths */
/* Classic block-swap by triple reversal: reversing the first block, then
   the whole region, then the (now-leading) second block exchanges the two
   blocks while preserving the order inside each. */
static void
permute (char **argv, int len1, int len2)
{
  reverse_argv_elements (argv, len1);
  reverse_argv_elements (argv, len1 + len2);
  reverse_argv_elements (argv, len2);
}
/* is_option: is this argv-element an option or the end of the option list? */
/* NULL counts as "option" so scanning loops stop at the end of argv.
   With `only` set (getopt_long_only mode), a leading '+' also counts. */
static int
is_option (char *argv_element, int only)
{
  if (argv_element == NULL)
    return 1;
  if (argv_element[0] == '-')
    return 1;
  return only && argv_element[0] == '+';
}
/* getopt_internal: the function that does all the dirty work */
/* Core engine shared by getopt(), getopt_long() and getopt_long_only().
   Returns the option character / long-option value, ':' for a missing
   required argument, '?' for an unknown or ambiguous option, and EOF when
   the option list is exhausted.  Communicates through the globals optarg,
   optind, opterr and optopt, as described in the file header. */
static int
getopt_internal (int argc, char **argv, char *shortopts,
                 GETOPT_LONG_OPTION_T * longopts, int *longind, int only)
{
  GETOPT_ORDERING_T ordering = PERMUTE;
  /* optwhere is static: it remembers the position inside a bundled
     short-option element (e.g. "-abc") across successive calls.
     1 means "at the start of a fresh argv element". */
  static size_t optwhere = 0;
  size_t permute_from = 0;
  int num_nonopts = 0;
  int optindex = 0;
  size_t match_chars = 0;
  char *possible_arg = NULL;
  int longopt_match = -1;   /* index into longopts of the match, if any */
  int has_arg = -1;         /* argument policy of the matched option */
  char *cp = NULL;          /* position of short option in shortopts */
  int arg_next = 0;         /* non-zero: argument is the next argv element */

  /* first, deal with silly parameters and easy stuff */
  if (argc == 0 || argv == NULL || (shortopts == NULL && longopts == NULL))
    return (optopt = '?');
  if (optind >= argc || argv[optind] == NULL)
    return EOF;
  if (strcmp (argv[optind], "--") == 0)
    {
      optind++;
      return EOF;
    }

  /* if this is our first time through */
  if (optind == 0)
    optind = optwhere = 1;

  /* define ordering */
  if (shortopts != NULL && (*shortopts == '-' || *shortopts == '+'))
    {
      /* a leading '-' or '+' in optstring overrides the environment */
      ordering = (*shortopts == '-') ? RETURN_IN_ORDER : REQUIRE_ORDER;
      shortopts++;
    }
  else
    ordering = (getenv ("POSIXLY_CORRECT") != NULL) ? REQUIRE_ORDER : PERMUTE;

  /*
   * based on ordering, find our next option, if we're at the beginning of
   * one
   */
  if (optwhere == 1)
    {
      switch (ordering)
        {
        case PERMUTE:
          /* skip over non-options; they are moved behind the option later */
          permute_from = optind;
          num_nonopts = 0;
          while (!is_option (argv[optind], only))
            {
              optind++;
              num_nonopts++;
            }
          if (argv[optind] == NULL)
            {
              /* no more options */
              optind = permute_from;
              return EOF;
            }
          else if (strcmp (argv[optind], "--") == 0)
            {
              /* no more options, but have to get `--' out of the way */
              permute (argv + permute_from, num_nonopts, 1);
              optind = permute_from + 1;
              return EOF;
            }
          break;
        case RETURN_IN_ORDER:
          if (!is_option (argv[optind], only))
            {
              /* report the non-option as the argument of option code 1 */
              optarg = argv[optind++];
              return (optopt = 1);
            }
          break;
        case REQUIRE_ORDER:
          if (!is_option (argv[optind], only))
            return EOF;
          break;
        }
    }

  /* we've got an option, so parse it */

  /* first, is it a long option? */
  if (longopts != NULL
      && (memcmp (argv[optind], "--", 2) == 0
          || (only && argv[optind][0] == '+')) && optwhere == 1)
    {
      /* handle long options */
      if (memcmp (argv[optind], "--", 2) == 0)
        optwhere = 2;
      longopt_match = -1;
      possible_arg = strchr (argv[optind] + optwhere, '=');
      if (possible_arg == NULL)
        {
          /* no =, so next argv might be arg */
          match_chars = strlen (argv[optind]);
          possible_arg = argv[optind] + match_chars;
          match_chars = match_chars - optwhere;
        }
      else
        match_chars = (possible_arg - argv[optind]) - optwhere;
      /* accept a unique prefix of a long option name; an exact match
         always wins, two distinct prefix matches are ambiguous */
      for (optindex = 0; longopts[optindex].name != NULL; optindex++)
        {
          if (memcmp (argv[optind] + optwhere,
                      longopts[optindex].name, match_chars) == 0)
            {
              /* do we have an exact match? */
              if (match_chars == strlen (longopts[optindex].name))
                {
                  longopt_match = optindex;
                  break;
                }
              /* do any characters match? */
              else
                {
                  if (longopt_match < 0)
                    longopt_match = optindex;
                  else
                    {
                      /* we have ambiguous options */
                      if (opterr)
                        fprintf (stderr, "%s: option `%s' is ambiguous "
                                 "(could be `--%s' or `--%s')\n",
                                 argv[0],
                                 argv[optind],
                                 longopts[longopt_match].name,
                                 longopts[optindex].name);
                      return (optopt = '?');
                    }
                }
            }
        }
      if (longopt_match >= 0)
        has_arg = longopts[longopt_match].has_arg;
    }

  /* if we didn't find a long option, is it a short option? */
  if (longopt_match < 0 && shortopts != NULL)
    {
      cp = strchr (shortopts, argv[optind][optwhere]);
      if (cp == NULL)
        {
          /* couldn't find option in shortopts */
          if (opterr)
            fprintf (stderr,
                     "%s: invalid option -- `-%c'\n",
                     argv[0], argv[optind][optwhere]);
          optwhere++;
          if (argv[optind][optwhere] == '\0')
            {
              optind++;
              optwhere = 1;
            }
          return (optopt = '?');
        }
      /* ':' after the option means required argument, '::' optional */
      has_arg = ((cp[1] == ':')
                 ? ((cp[2] == ':') ? OPTIONAL_ARG : required_argument) : no_argument);
      possible_arg = argv[optind] + optwhere + 1;
      optopt = *cp;
    }

  /* get argument and reset optwhere */
  arg_next = 0;
  switch (has_arg)
    {
    case OPTIONAL_ARG:
      if (*possible_arg == '=')
        possible_arg++;
      if (*possible_arg != '\0')
        {
          optarg = possible_arg;
          optwhere = 1;
        }
      else
        optarg = NULL;
      break;
    case required_argument:
      if (*possible_arg == '=')
        possible_arg++;
      if (*possible_arg != '\0')
        {
          /* argument attached to the same argv element */
          optarg = possible_arg;
          optwhere = 1;
        }
      else if (optind + 1 >= argc)
        {
          /* argument required but argv is exhausted */
          if (opterr)
            {
              fprintf (stderr, "%s: argument required for option `", argv[0]);
              if (longopt_match >= 0)
                fprintf (stderr, "--%s'\n", longopts[longopt_match].name);
              else
                fprintf (stderr, "-%c'\n", *cp);
            }
          optind++;
          return (optopt = ':');
        }
      else
        {
          /* argument is the following argv element */
          optarg = argv[optind + 1];
          arg_next = 1;
          optwhere = 1;
        }
      break;
    case no_argument:
      if (longopt_match < 0)
        {
          /* advance within a possibly bundled short-option element */
          optwhere++;
          if (argv[optind][optwhere] == '\0')
            optwhere = 1;
        }
      else
        optwhere = 1;
      optarg = NULL;
      break;
    }

  /* do we have to permute or otherwise modify optind? */
  if (ordering == PERMUTE && optwhere == 1 && num_nonopts != 0)
    {
      /* move the skipped non-options behind this option (and its arg) */
      permute (argv + permute_from, num_nonopts, 1 + arg_next);
      optind = permute_from + 1 + arg_next;
    }
  else if (optwhere == 1)
    optind = optind + 1 + arg_next;

  /* finally return */
  if (longopt_match >= 0)
    {
      if (longind != NULL)
        *longind = longopt_match;
      if (longopts[longopt_match].flag != NULL)
        {
          /* flag-style long option: store val and report 0 */
          *(longopts[longopt_match].flag) = longopts[longopt_match].val;
          return 0;
        }
      else
        return longopts[longopt_match].val;
    }
  else
    return optopt;
}
/* Public entry points: thin wrappers selecting the getopt_internal mode.
   (getopt is omitted on AIX, where the system provides one.) */
#ifndef _AIX
int
getopt (int argc, char **argv, char *optstring)
{
  /* short options only */
  return getopt_internal (argc, argv, optstring, NULL, NULL, 0);
}
#endif

int
getopt_long (int argc, char **argv, const char *shortopts,
             const GETOPT_LONG_OPTION_T * longopts, int *longind)
{
  /* long options introduced by "--" only */
  return getopt_internal (argc, argv, (char*)shortopts, (GETOPT_LONG_OPTION_T*)longopts, longind, 0);
}

int
getopt_long_only (int argc, char **argv, const char *shortopts,
                  const GETOPT_LONG_OPTION_T * longopts, int *longind)
{
  /* like getopt_long, but '+' may also introduce long options */
  return getopt_internal (argc, argv, (char*)shortopts, (GETOPT_LONG_OPTION_T*)longopts, longind, 1);
}

/* end of file GETOPT.C */

57
src/getopt.h Normal file
View File

@@ -0,0 +1,57 @@
#ifndef GETOPT_H
#define GETOPT_H

/* Public interface for the bundled public-domain getopt implementation
   (see getopt.c for full behavior notes). */

/* include files needed by this include file */

/* macros defined by this include file */
#define no_argument 0        /* option takes no argument */
#define required_argument 1  /* option requires an argument */
#define OPTIONAL_ARG 2       /* option takes an optional argument */

/* types defined by this include file */

/* GETOPT_LONG_OPTION_T: The type of long option */
typedef struct GETOPT_LONG_OPTION_T
{
  const char *name;  /* the name of the long option */
  int has_arg;       /* one of the above macros */
  int *flag;         /* determines if getopt_long() returns a
                      * value for a long option; if it is
                      * non-NULL, 0 is returned as a function
                      * value and the value of val is stored in
                      * the area pointed to by flag.  Otherwise,
                      * val is returned. */
  int val;           /* determines the value to return if flag is
                      * NULL. */
} GETOPT_LONG_OPTION_T;

typedef GETOPT_LONG_OPTION_T option;

#ifdef __cplusplus
extern "C"
{
#endif

  /* externally-defined variables */
  extern char *optarg;
  extern int optind;
  extern int opterr;
  extern int optopt;

  /* function prototypes */
#ifndef _AIX
  int getopt (int argc, char **argv, char *optstring);
#endif
  int getopt_long (int argc, char **argv, const char *shortopts,
                   const GETOPT_LONG_OPTION_T * longopts, int *longind);
  int getopt_long_only (int argc, char **argv, const char *shortopts,
                        const GETOPT_LONG_OPTION_T * longopts, int *longind);

#ifdef __cplusplus
};
#endif

#endif /* GETOPT_H */

/* END OF FILE getopt.h */

Some files were not shown because too many files have changed in this diff Show More