llvm-capstone/lld/utils/benchmark.py
Rafael Espindola 6dc954a0d9 Add a script to run various benchmarks and send the result to lnt.
Lnt is both a server and a set of scripts for benchmarking llvm.

I don't think it makes sense to use the scripts for lld since our
benchmarks are quite different.

The server, on the other hand, is very general and seems to work well
for tracking arbitrary quantities.

This patch adds a script to lld that can be used to run various
benchmarks and send the result to lnt.

The benchmarks are each assumed to be a response file in a
subdirectory. Each subdirectory can contain multiple response files;
that can be used to have a plain response.txt and a response-icf.txt,
for example. The name of each benchmark is the combination of the
directory name and the "flavor": firefox-gc, chromium-icf, etc.
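
As an illustration (the directory and flavor names are made up; only
the */response*.txt layout matters), a benchmark directory could look
like:

    firefox/response.txt
    firefox/response-gc.txt
    chromium/response.txt
    chromium/response-icf.txt

which the script reports as the benchmarks firefox, firefox-gc,
chromium and chromium-icf.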

For the first version the script uses perf and collects all the
metrics that a plain "perf stat" prints.
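
To sketch what is collected (illustrative output; the exact counters
and numbers depend on the perf version and hardware), "perf stat"
prints counter lines such as:

    2,000,000,000      instructions     #  1.50  insn per cycle
          123,456      page-faults      #  0.050 M/sec
      1.234567890 seconds time elapsed

and the script records, for every run, each "<value> <name>" pair
found before the '#' comment plus the elapsed seconds.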

This script can then be used by a developer to test a patch or by a
bot to keep track of lld's performance.
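
A typical invocation could look like this (paths and values are
hypothetical; --machine and --revision are the only required flags,
the rest have defaults):

    lld/utils/benchmark.py --machine my-machine --revision r318158 \
        /path/to/benchmark/dir

The benchmark directory is expected to contain the ld.lld binary to
test next to the per-benchmark subdirectories; --url can point the
submission at a non-default lnt instance.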

llvm-svn: 318158
2017-11-14 16:40:30 +00:00

#!/usr/bin/env python
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#

import os
import glob
import re
import subprocess
import json
import datetime
import argparse
import urllib
import urllib2

parser = argparse.ArgumentParser()
parser.add_argument('benchmark_directory')
parser.add_argument('--runs', type=int, default=10)
parser.add_argument('--wrapper', default='')
parser.add_argument('--machine', required=True)
parser.add_argument('--revision', required=True)
parser.add_argument('--threads', action='store_true')
parser.add_argument('--url', help='The lnt server url to send the results to',
                    default='http://localhost:8000/db_default/v4/link/submitRun')
args = parser.parse_args()
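# Note: unless --threads is passed, each link below is run with lld's
# --no-threads flag (see runBench).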

class Bench:
    def __init__(self, directory, variant):
        self.directory = directory
        self.variant = variant
    def __str__(self):
        if not self.variant:
            return self.directory
        return '%s-%s' % (self.directory, self.variant)

def getBenchmarks():
    ret = []
    for i in glob.glob('*/response*.txt'):
        m = re.match(r'response-(.*)\.txt', os.path.basename(i))
        variant = m.groups()[0] if m else None
        ret.append(Bench(os.path.dirname(i), variant))
    return ret

def parsePerfNum(num):
    num = num.replace(b',', b'')
    try:
        return int(num)
    except ValueError:
        return float(num)

def parsePerfLine(line):
    ret = {}
    line = line.split(b'#')[0].strip()
    if len(line) != 0:
        p = line.split()
        ret[p[1].strip().decode('ascii')] = parsePerfNum(p[0])
    return ret

def parsePerf(output):
    ret = {}
    lines = [x.strip() for x in output.split(b'\n')]

    seconds = [x for x in lines if b'seconds time elapsed' in x][0]
    seconds = seconds.strip().split()[0].strip()
    ret['seconds-elapsed'] = parsePerfNum(seconds)

    measurement_lines = [x for x in lines if b'#' in x]
    for l in measurement_lines:
        ret.update(parsePerfLine(l))
    return ret

def run(cmd):
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e.output)
        raise e

def combinePerfRun(acc, d):
    for k, v in d.items():
        a = acc.get(k, [])
        a.append(v)
        acc[k] = a
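
# --wrapper is split on commas and prepended to the perf command line,
# e.g. (hypothetical) --wrapper=taskset,-c,0 to pin the runs to one core.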
def perf(cmd):
    # Discard the first run to warm up any system cache.
    run(cmd)

    ret = {}
    wrapper_args = [x for x in args.wrapper.split(',') if x]
    for i in range(args.runs):
        os.unlink('t')
        out = run(wrapper_args + ['perf', 'stat'] + cmd)
        r = parsePerf(out)
        combinePerfRun(ret, r)
    os.unlink('t')
    return ret

def runBench(bench):
    thread_arg = [] if args.threads else ['--no-threads']
    os.chdir(bench.directory)
    suffix = '-%s' % bench.variant if bench.variant else ''
    response = 'response' + suffix + '.txt'
    ret = perf(['../ld.lld', '@' + response, '-o', 't'] + thread_arg)
    ret['name'] = str(bench)
    os.chdir('..')
    return ret

def buildLntJson(benchmarks):
    start = datetime.datetime.utcnow().isoformat()
    tests = [runBench(b) for b in benchmarks]
    end = datetime.datetime.utcnow().isoformat()
    ret = {
        'format_version' : 2,
        'machine' : { 'name' : args.machine },
        'run' : {
            'start_time' : start,
            'end_time' : end,
            'llvm_project_revision': args.revision
        },
        'tests' : tests
    }
    return json.dumps(ret, sort_keys=True, indent=4)

def submitToServer(data):
    data2 = urllib.urlencode({ 'input_data' : data }).encode('ascii')
    urllib2.urlopen(urllib2.Request(args.url, data2))

os.chdir(args.benchmark_directory)
data = buildLntJson(getBenchmarks())
submitToServer(data)