Mirror of https://github.com/reactos/ninja.git, synced 2024-11-23 11:49:45 +00:00
add script for measuring build performance
commit eaf1ff1904
parent 9cf5918cc5
HACKING (10 changed lines)
@@ -13,10 +13,12 @@ Testing performance impact of changes:
 If you have a Chrome build handy, it's a good test case.
 Otherwise, https://github.com/martine/ninja/downloads has a copy of
 the Chrome build files (and depfiles). You can untar that, then run
-"ninja chrome". I often do something like:
-    (for i in `seq 5`; do time -p ninja chrome) 2>&1 | grep real > old
-    (for i in `seq 5`; do time -p ninja-new chrome) 2>&1 | grep real > new
-and then compare those two lists of timings either by eye or with R.
+    path/to/my/ninja chrome
+and compare that against a baseline Ninja.
+
+There's a script at misc/measure.py that repeatedly runs a command like
+the above (to address variance) and summarizes its runtime. E.g.
+    path/to/misc/measure.py path/to/my/ninja chrome
 
 For changing the depfile parser, you can also build 'parser_perftest'
 and run that directly on some representative input files.
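For illustration only (this sketch is not part of the commit), the comparison described in the HACKING text above could be scripted along the same lines: time each binary several times, keep the fastest run of each, and look at the difference. The binary paths, the "chrome" target, and the repeat count are placeholder assumptions.

#!/usr/bin/env python
# Hypothetical comparison helper, not part of this commit: time a baseline
# ninja and a modified ninja on the same target and report the fastest run
# of each.  Paths, target, and repeat count are placeholder assumptions.
import os
import subprocess
import time

devnull = open(os.devnull, 'w')

def best_ms(cmd, repeat=5):
    # Fastest wall-clock time over `repeat` runs, in milliseconds.
    samples = []
    for _ in range(repeat):
        start = time.time()
        subprocess.call(cmd, stdout=devnull, stderr=devnull)
        samples.append((time.time() - start) * 1000)
    return min(samples)

baseline = best_ms(['ninja', 'chrome'])            # baseline binary, assumed on PATH
modified = best_ms(['path/to/my/ninja', 'chrome']) # the binary under test
print('baseline: %dms  modified: %dms  delta: %+dms'
      % (baseline, modified, modified - baseline))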
misc/measure.py (new executable file, 54 lines)
@@ -0,0 +1,54 @@
#!/usr/bin/env python

# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""measure the runtime of a command by repeatedly running it.
"""

import time
import subprocess
import sys

devnull = open('/dev/null', 'w')

def run(cmd, repeat=10):
    print 'sampling:',
    sys.stdout.flush()

    samples = []
    for _ in range(repeat):
        start = time.time()
        subprocess.call(cmd, stdout=devnull, stderr=devnull)
        end = time.time()
        dt = (end - start) * 1000
        print '%dms' % int(dt),
        sys.stdout.flush()
        samples.append(dt)
    print

    # We're interested in the 'pure' runtime of the code, which is
    # conceptually the smallest time we'd see if we ran it enough times
    # such that it got the perfect time slices / disk cache hits.
    best = min(samples)
    # Also print how varied the outputs were in an attempt to make it
    # more obvious if something has gone terribly wrong.
    err = sum(s - best for s in samples) / float(len(samples))
    print 'estimate: %dms (mean err %.1fms)' % (best, err)

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print 'usage: measure.py command args...'
        sys.exit(1)
    run(cmd=sys.argv[1:])
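To make the script's summary line concrete, here is a small worked example (the sample times are invented) of how the estimate and the mean error fall out of a list of samples: the estimate is simply the fastest run, and the mean error is the average amount by which the other runs exceeded it.

# Illustrative only; the sample times below are invented.
samples = [1520.0, 1482.0, 1499.0, 1510.0, 1485.0]
best = min(samples)                                         # 1482ms, the 'pure' runtime estimate
err = sum(s - best for s in samples) / float(len(samples))  # (38 + 0 + 17 + 28 + 3) / 5 = 17.2ms
print 'estimate: %dms (mean err %.1fms)' % (best, err)      # -> estimate: 1482ms (mean err 17.2ms)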