From 52ceb3152e31c561b720192c2581b18a802b0b24 Mon Sep 17 00:00:00 2001 From: Mark Charney Date: Thu, 15 Dec 2016 14:25:06 -0500 Subject: [PATCH] initial commit Change-Id: I6fc3fd7babab231f4389689f9166e04ffba70136 --- .gitignore | 4 + LICENSE.txt | 178 +++ MANIFEST.in | 2 + README.md | 13 + build-doc | 10 + do-dist | 7 + mbuild/__init__.py | 139 +++ mbuild/arar.py | 80 ++ mbuild/base.py | 191 ++++ mbuild/build_env.py | 450 ++++++++ mbuild/dag.py | 1136 +++++++++++++++++++ mbuild/dfs.py | 161 +++ mbuild/doxygen.py | 357 ++++++ mbuild/env.py | 2088 +++++++++++++++++++++++++++++++++++ mbuild/header_tag.py | 175 +++ mbuild/msvs.py | 903 +++++++++++++++ mbuild/plan.py | 93 ++ mbuild/scanner.py | 104 ++ mbuild/util.py | 1148 +++++++++++++++++++ mbuild/work_queue.py | 1020 +++++++++++++++++ setup.py | 21 + templates/find_mbuild.py | 36 + tests/1.py | 45 + tests/2.py | 46 + tests/3.py | 47 + tests/VersionInfo.rc | 35 + tests/a.py | 43 + tests/b.py | 49 + tests/c.py | 41 + tests/circular-dep/a.h | 8 + tests/circular-dep/b.h | 5 + tests/circular-dep/c.h | 5 + tests/circular-dep/find.py | 25 + tests/circular-dep/m.h | 4 + tests/circular-dep/main.c | 5 + tests/circular-dep/mfile.py | 31 + tests/circular-dep/z.h | 7 + tests/delay.c | 25 + tests/find.py | 42 + tests/hello.c | 23 + tests/negtime.py | 22 + tests/nodag.py | 55 + tests/nondep/README.txt | 2 + tests/nondep/__mbuild | 67 ++ tests/nondep/hello.c | 5 + tests/sleep.py | 44 + tests/spew | 4 + tests/stdin.py | 40 + tests/timed3.py | 31 + tests/timed4.py | 60 + tests/use-icl-win.py | 51 + 51 files changed, 9183 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE.txt create mode 100644 MANIFEST.in create mode 100644 README.md create mode 100755 build-doc create mode 100755 do-dist create mode 100755 mbuild/__init__.py create mode 100755 mbuild/arar.py create mode 100755 mbuild/base.py create mode 100755 mbuild/build_env.py create mode 100755 mbuild/dag.py create mode 100755 mbuild/dfs.py create mode 100755 
mbuild/doxygen.py create mode 100755 mbuild/env.py create mode 100755 mbuild/header_tag.py create mode 100644 mbuild/msvs.py create mode 100755 mbuild/plan.py create mode 100755 mbuild/scanner.py create mode 100755 mbuild/util.py create mode 100755 mbuild/work_queue.py create mode 100644 setup.py create mode 100644 templates/find_mbuild.py create mode 100755 tests/1.py create mode 100755 tests/2.py create mode 100755 tests/3.py create mode 100644 tests/VersionInfo.rc create mode 100755 tests/a.py create mode 100755 tests/b.py create mode 100755 tests/c.py create mode 100644 tests/circular-dep/a.h create mode 100644 tests/circular-dep/b.h create mode 100644 tests/circular-dep/c.h create mode 100755 tests/circular-dep/find.py create mode 100644 tests/circular-dep/m.h create mode 100644 tests/circular-dep/main.c create mode 100755 tests/circular-dep/mfile.py create mode 100644 tests/circular-dep/z.h create mode 100644 tests/delay.c create mode 100755 tests/find.py create mode 100644 tests/hello.c create mode 100755 tests/negtime.py create mode 100755 tests/nodag.py create mode 100644 tests/nondep/README.txt create mode 100755 tests/nondep/__mbuild create mode 100644 tests/nondep/hello.c create mode 100755 tests/sleep.py create mode 100755 tests/spew create mode 100755 tests/stdin.py create mode 100755 tests/timed3.py create mode 100755 tests/timed4.py create mode 100644 tests/use-icl-win.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..7db35bd --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +*.pyc +*~ +*# +.#* diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..7b1fcae --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,178 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..77eebaf --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,2 @@ +include m*.py +include __init__.py diff --git a/README.md b/README.md new file mode 100644 index 0000000..3c24d68 --- /dev/null +++ b/README.md @@ -0,0 +1,13 @@ +MBUILD USER INFO +----------------- + See: + + https://intelxed.github.io/ +and + https://github.com/intelxed/mbuild + +Documentation generation manual: + http://epydoc.sourceforge.net/epydoc.html + +To generate documentation from a check'ed out source tree, see "build-doc" + diff --git a/build-doc b/build-doc new file mode 100755 index 0000000..5db9159 --- /dev/null +++ b/build-doc @@ -0,0 +1,10 @@ +#!/bin/csh + +seet webdir=/var/www/html/mjcharne/mbuild +epydoc -v --simple-term --no-private --html -o epydoc-out --name mbuild --url 'http://mjc.intel.com/mjcharne/mbuild' mbuild/__init__.py +rm -rf $webdir/epydoc.old +mv $webdir/epydoc $webdir/epydoc.old +mkdir -p $webdir/epydoc +cp epydoc-out/* $webdir/epydoc +make-web-accessible $webdir/epydoc + diff --git a/do-dist b/do-dist new file mode 100755 index 0000000..840d1de --- /dev/null +++ b/do-dist @@ -0,0 +1,7 @@ +#!/bin/csh -f +# +# edit setup.py, set the version number +python setup.py sdist --formats=gztar,zip +cp dist/* /home/mjcharne/www/mbuild/download +make-web-accessible /home/mjcharne/www/mbuild/download + diff --git a/mbuild/__init__.py b/mbuild/__init__.py new file mode 100755 index 0000000..b75fc9a --- /dev/null +++ b/mbuild/__init__.py @@ -0,0 +1,139 @@ +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL +# __init__.py +# Mark Charney +"""This is mbuild: a simple portable dependence-based build-system +written in python. + +mbuild is a python-based build system very similar to scons with some +philosophical features of make. mbuild exposes the scan and build phases +allowing them to be repeated as necessary. Multiple DAGs can be +built, one during each scan phase. + +Conceptually there are 3 major components to mbuild: + - The environment L{env_t} + - The directed acyclic graph L{dag_t} + - The work queue L{work_queue_t} + +Using the environment L{env_t} you customize your build configuration +and construct names for your source files, object files, executables, +etc. The environment contains builder methods that create L{plan_t} +objects. There are builders for C, C++, static and dynamic libraries, +assembly files and linking programs. The environment and builders +support string substitution. + +The L{plan_t} objects are passed to the L{dag_t} which stores the +dependences that order execution. The L{plan_t} objects describe work +that needs to be done. Plans typically contain a command line strings +(with all substitutions done), but can also be python functions that +will be executed during the build. + +Using the L{plan_t} objects, the L{dag_t} creates L{command_t} +objects that are passed to the L{work_queue_t} to ultimately build the +target or targets. + +Your build file can have multiple environments, DAGS and work queues. 
+ + +Using the environment dictionary +================================ + +You can bind or augmenting environment variables from the command +line. For example, one can say C{build_cpu=ia32} on an x86-64 system +to change the default compilation behavior. Similarly, one can say +C{CXXFLAGS+=-g} to add the C{-g} flag to the existing C{CXXFLAGS} +variable. + +Dynamic substitution is also used. Patterns of the form %(I{string})s +will substitute I{string} dynamically before it is used. The +expansion can happen directly from the environment and is +recursive. The expansion can also use dictionaries that are variables +in the environment. A dictionary in the environment is really a tuple +of the key-variable and the dictionary itself. + +For example:: + + env['opt_flag'] = ( 'opt', {'noopt':'', + '0':'%(OPTOPT)s0', + '1':'%(OPTOPT)s1', + '2':'%(OPTOPT)s2', + '3':'%(OPTOPT)s3', + '4':'%(OPTOPT)s4'} ) + + env['OPTOPT'] = ( 'compiler', { 'gnu':'-O', + 'ms':'/O'}) + + + env['CXXFLAGS'] += ' %(opt_flag)s' + +The C{OPTOPT} variable depends on C{env['compiler']}. +If C{env['compiler']='gnu'} then C{env['OPTOPT']} expands to C{-O}. +If C{env['compiler']='ms'} then C{env['OPTOPT']} expands to C{/O}. + +If the C{opt} variable is set "C{opt=3}" on the command line, or equivalently +if C{env['opt']='3'} is +set in the script, +then if the C{env['compiler']='gnu'} in the environment at the time of expansion, +then the flag in the +C{CXXFLAGS} will be C{-O3}. If C{env['compiler']='ms'} at the time of expansion, +then the optimiation +flag would be C{/O3}. If C{opt=noopt} (on the command line) then there will be no +optimization flag in the C{CXXFLAGS}. + + +Introspection +============= + +The L{command_t} that are executed during the build have their output +(stdout/stderr) stored in the L{dag_t}. After a build it is possible +to collect the commands using the L{dag_t.results} function and analyze the +output. This is very handy for test and validation suites. 
+""" + +from base import * +from dag import * +from work_queue import * +from env import * +from util import * +from plan import * +from arar import * +from doxygen import doxygen_run, doxygen_args, doxygen_env +from header_tag import * + +__all__ = [ 'base', + 'dag', + 'work_queue', + 'env', + 'util', + 'plan', + 'msvs', + 'arar', + 'doxygen', + 'dfs', + 'header_tag' ] + + +import time +def mbuild_exit(): + """mbuild's exit function""" + #print "SLEEPING" + #time.sleep(0.5) + #print "EXITING" + +import atexit +atexit.register(mbuild_exit) diff --git a/mbuild/arar.py b/mbuild/arar.py new file mode 100755 index 0000000..7450666 --- /dev/null +++ b/mbuild/arar.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python +# -*- python -*- +# Repackage a bunch of static libs as one big static library. +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL +import os +import sys +import shutil +import re + +class arar_error(Exception): + def __init__(self, value): + self.value = value + def _str__(self): + return repr(self.value) + +def repack(files, ar='ar', target='liball.a', verbose=False): + """For linux only. Repackage the list of files using ar as the + archiver program. The input files list can contain .a or .o + files. The output library name is supplied by the target keyword + argument. 
This will raise an exception arar_error in the event of + a problem, setting the exception value field with an explanation.""" + import glob + pid= os.getpid() + #error=os.system(ar + " --version") + tdir = 'tmp.arar.%d' % (pid) + if os.path.exists(tdir): + raise arar_error('Conflict with existing temporary directory: %s' % \ + (tdir)) + os.mkdir(tdir) + # work in a temporary subdirectory + os.chdir(tdir) + doto = [] + for arg in files: + if re.search(r'[.]o$', arg): + if arg[0] == '/': + doto.append(arg) + else: + doto.append(os.path.join('..',arg)) + continue + if arg[0] == '/': + cmd = "%s x %s" % (ar,arg) + else: + cmd = "%s x ../%s" % (ar,arg) + if verbose: + print "EXTRACTING %s" % (cmd) + error= os.system(cmd) + if error: + raise arar_error('Extract failed for command %s' % (cmd)) + files = glob.glob('*.o') + doto + local_target = os.path.basename(target) + cmd = "%s rcv %s %s" % (ar, local_target, " ".join(files)) + if verbose: + print "RECOMBINING %s" % (cmd) + error=os.system(cmd) + if error: + raise arar_error('Recombine failed') + + os.chdir('..') + os.rename(os.path.join(tdir,local_target), target) + if verbose: + print "CREATED %s" % (target) + shutil.rmtree(tdir) + + diff --git a/mbuild/base.py b/mbuild/base.py new file mode 100755 index 0000000..32fff28 --- /dev/null +++ b/mbuild/base.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python +# -*- python -*- +# Mark Charney +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL +"""Base functionality: messages, verbosity, python version checking""" + +import os +import sys +import traceback + +_mbuild_verbose_level = 1 +def verbose(level=0): + """Return True if the configured message level supplied is >= the + level arguement + @param level: int + @param level: the verbosity level at which this function should return True + + @rtype: bool + @return: True iff the level argument is >= current verbosity level + """ + global _mbuild_verbose_level + if _mbuild_verbose_level >= level: + return True + return False +def set_verbosity(v): + """Set the global verbosity level. 0=quiet, 99=very very noisy""" + global _mbuild_verbose_level + _mbuild_verbose_level = v + +def get_verbosity(): + """Return the global verbosity level. 0=quiet, 99=very very noisy""" + global _mbuild_verbose_level + return _mbuild_verbose_level + +def bracket(s,m=''): + """add a bracket around s and append m. + @rtype: string + @return: a bracketed string s and a suffixed message m + """ + return '[%s] %s' % (s,str(m)) + +def error_msg(s,t): + """Emit '[s] t' to stderr with a newline""" + sys.stderr.write(bracket(s,t) + "\n") + +def msg(s, pad=''): + """Emit s to stdout with a newline""" + sys.stdout.write(pad) + sys.stdout.write(s) + sys.stdout.write("\n") + +def msgn(s, pad=''): + """Emit s to stdout without a newline""" + sys.stdout.write(pad) + sys.stdout.write(s) + +def msgb(s,t='',pad=''): + """a bracketed string s sent to stdout, followed by a string t""" + msg(bracket(s,t), pad=pad) + +def vmsgb(v,s,t='',pad=''): + """If verbosity v is sufficient, emit a bracketed string s sent to + stdout, followed by a string t""" + if verbose(v): + msg(bracket(s,t),pad=pad) + +def cond_die(v, cmd, msg): + """Conditionally die, if v is not zero. Print the msg and the cmd. 
+ @type v: int + @param v: we die if v is not 0 + + @type cmd: string + @param cmd: a command to print + + @type msg: string + @param msg: a message to print before the command + """ + if v != 0: + s = msg + "\n [CMD] " + cmd + die(s) + +def die(m,s=''): + """Emit an error message m (and optionally s) and exit with a return value 1""" + msgb("MBUILD ERROR", "%s %s\n\n" % (m,s) ) + traceback.print_exc(file=sys.stdout) + sys.exit(1) +def warn(m): + """Emit an warning message""" + msgb("MBUILD WARNING", m) + +def get_python_version(): + """Return the python version as an integer + @rtype: int + @return: major * 100000 + minor + 1000 + fixlevel + """ + tuple = sys.version_info + major = int(tuple[0]) + minor = int(tuple[1]) + fix = int(tuple[2]) + vnum = major *100000 + minor * 1000 + fix + return vnum + +def get_python_version_tuple(): + """Return the python version as a tuple (major,minor,fixlevel) + @rtype: tuple + @return: (major,minor,fixlevel) + """ + + tuple = sys.version_info + major = int(tuple[0]) + minor = int(tuple[1]) + fix = int(tuple[2]) + return (major,minor,fix) + +def check_python_version(maj,minor,fix=0): + """Return true if the current python version at least the one + specified by the arguments. 
+ @rtype: bool + @return: True/False + """ + t = get_python_version_tuple() + if t[0] > maj: + return True + if t[0] == maj and t[1] > minor: + return True + if t[0] == maj and t[1] == minor and t[2] >= fix: + return True + return False + + + +try: + if check_python_version(2,4) == False: + die("MBUILD error: Need Python version 2.4 or later.") +except: + die("MBUILD error: Need Python version 2.4 or later.") + +import platform # requires python 2.3 +_on_mac = False +_on_native_windows = False +_on_windows = False # cygwin or native windows +_on_cygwin = False +_on_linux = False +_on_freebsd = False +_operating_system_name = platform.system() +if _operating_system_name.find('CYGWIN') != -1: + _on_cygwin = True + _on_windows = True +elif _operating_system_name == 'Microsoft' or _operating_system_name == 'Windows': + _on_native_windows = True + _on_windows = True +elif _operating_system_name == 'Linux': + _on_linux = True +elif _operating_system_name == 'FreeBSD': + _on_freebsd = True +elif _operating_system_name == 'Darwin': + _on_mac = True +else: + die("Could not detect operating system type: " + _operating_system_name) + +def on_native_windows(): + """ + @rtype: bool + @return: True iff on native windows win32/win64 + """ + global _on_native_windows + return _on_native_windows + +def on_windows(): + """ + @rtype: bool + @return: True iff on windows cygwin/win32/win64 + """ + global _on_windows + return _on_windows diff --git a/mbuild/build_env.py b/mbuild/build_env.py new file mode 100755 index 0000000..b59acab --- /dev/null +++ b/mbuild/build_env.py @@ -0,0 +1,450 @@ +#!/usr/bin/env python +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +"""Setup functions for the ms/gnu compiler environment""" + +import os +import sys +import platform +from base import * +from util import * +from env import * +import msvs + + +def set_compiler_env_common(env): + """Set up some common stuff that depends heavily on the compiler setting""" + + # This whole section was really an experiment in how dynamically I + # could do substitutions. + + env['debug_flag'] = ( 'debug', { True: '%(DEBUGFLAG)s', + False:''}) + env['debug_flag_link'] = ( 'debug', { True: '%(DEBUGFLAG_LINK)s', + False:''}) + + win_shared_compile_dict = ( 'compiler', { 'ms': '/MD', + 'icl': '/MD', + 'otherwise': '', + }) + + shared_compile_dict = ( 'host_os', { 'android': '-fPIC', + 'lin': '-fPIC', + 'win': win_shared_compile_dict, + 'bsd': '-fPIC', + 'otherwise': '', + }) + + env['shared_compile_flag'] = ( 'shared', { True: shared_compile_dict, + False:''}) + + shared_link_dict = ('compiler', { 'ms':'/dll', + 'icl':'/dll', + 'icc':'-shared', + 'gnu':'-shared'}) + + env['shared_link'] = ( 'shared', { True: shared_link_dict, + False:''}) + + env['OPTOPT'] = ( 'compiler', { 'gnu':'-O', + 'clang':'-O', + 'iclang':'-O', + 'icc':'-O', + 'icl':'/O', + 'ms':'/O'}) + + env['nologo'] = ( 'compiler', { 'gnu':'', + 'clang':'', + 'iclang':'', + 'icc':'', + 'icl':'/nologo', + 'ms':'/nologo'}) + flags = '' + flags += ' %(debug_flag)s' + flags += ' %(nologo)s' + flags += ' %(opt_flag)s' + flags += ' %(shared_compile_flag)s' + env['CCFLAGS'] = flags + env['CXXFLAGS'] = flags + env['LINKFLAGS'] += ' %(debug_flag_link)s' + +def 
add_gnu_arch_flags(d): + """Accept a dictionary, return a string""" + if d['compiler'] in ['gnu','clang'] and d['gcc_version'] != '2.96': # FIXME: iclang? + if d['host_cpu'] == 'x86-64': + return '-m64' + elif d['host_cpu'] == 'ia32': + return '-m32' + return '' + + +def set_env_gnu(env): + """Example of setting up the GNU GCC environment for compilation""" + set_compiler_env_common(env) + + env['opt_flag'] = ( 'opt', {'noopt':'', + 's':'%(OPTOPT)ss', + '0':'%(OPTOPT)s0', + '1':'%(OPTOPT)s1', + '2':'%(OPTOPT)s2', + '3':'%(OPTOPT)s3', + '4':'%(OPTOPT)s4'} ) + + # lazy toolchain and other env var (f) expansion + mktool = lambda(f): "%(toolchain)s%(" + f + ")s" + + if env['CXX_COMPILER'] == '': + env['CXX_COMPILER'] = ( 'compiler', { 'gnu':'g++', + 'icc':'icpc', + 'iclang':'icl++', + 'clang':'clang++'}) + if env['CC_COMPILER'] == '': + env['CC_COMPILER'] = ( 'compiler', { 'gnu':'gcc', + 'icc':'icc', + 'iclang':'icl', + 'clang':'clang' }) + if env['ASSEMBLER'] == '': + env['ASSEMBLER'] = ( 'compiler', { 'gnu':'gcc', + 'icc':'icc', + 'iclang':'icl', + 'clang':'yasm' }) + + if env['LINKER'] == '': + env['LINKER'] = '%(CXX_COMPILER)s' # FIXME C++ or C? + if env['ARCHIVER'] == '': + env['ARCHIVER'] = ( 'compiler', { 'gnu': 'ar', # or GAR?? + 'icc' : 'xiar', + 'iclang' : 'xiar', + 'clang':'ar' }) + if env['RANLIB_CMD'] == '': + env['RANLIB_CMD'] = 'ranlib' + + if env['CC'] == '': + env['CC'] = mktool('CC_COMPILER') + if env['CXX'] == '': + env['CXX'] = mktool('CXX_COMPILER') + if env['AS'] == '': + env['AS'] = mktool('ASSEMBLER') + if env['LINK'] == '': + env['LINK'] = mktool('LINKER') + if env['AR'] == '': + env['AR'] = mktool('ARCHIVER') + if env['RANLIB'] == '': + env['RANLIB'] = mktool('RANLIB_CMD') + + # if using gcc to compile include -c. 
If using gas, omit the -c + env['ASFLAGS'] = ' -c' + + env['ARFLAGS'] = "rcv" + env['STATIC'] = ( 'static', { True : "-static", + False : "" } ) + env['LINKFLAGS'] += " %(STATIC)s" + + env['GNU64'] = add_gnu_arch_flags # dynamically called function during variable expansion! + s = ' %(GNU64)s' + env['CCFLAGS'] += s + env['CXXFLAGS'] += s + env['LINKFLAGS'] += s + # if using gcc to compile use -m64, otherwise if gas is used, omit the -m64. + env['ASFLAGS'] += s + + env['DEBUGFLAG'] = '-g' + env['DEBUGFLAG_LINK'] = '-g' + env['COPT'] = '-c' + env['DOPT'] = '-D' + env['ASDOPT'] = '-D' + env['IOPT'] = '-I' + env['ISYSOPT'] = '-isystem ' # trailing space required + env['LOPT'] = '-L' + + env['COUT'] = '-o ' + env['ASMOUT'] = '-o ' + env['LIBOUT'] = ' ' # nothing when using gar/ar + env['LINKOUT'] = '-o ' + env['EXEOUT'] = '-o ' + if env.on_mac(): + env['DLLOPT'] = '-shared' # '-dynamiclib' + else: + env['DLLOPT'] = '-shared -Wl,-soname,%(SOLIBNAME)s' + + env['OBJEXT'] = '.o' + if env.on_windows(): + env['EXEEXT'] = '.exe' + env['DLLEXT'] = '.dll' + env['LIBEXT'] = '.lib' + env['PDBEXT'] = '.pdb' + elif env.on_mac(): + env['EXEEXT'] = '' + env['DLLEXT'] = '.dylib' + env['LIBEXT'] = '.a' + env['PDBEXT'] = '' + else: + env['EXEEXT'] = '' + env['DLLEXT'] = '.so' + env['LIBEXT'] = '.a' + env['PDBEXT'] = '' + + + +def find_ms_toolchain(env): + if env['msvs_version']: + env['setup_msvc']=True + + if env['vc_dir'] == '' and not env['setup_msvc']: + if 'MSVCDir' in os.environ: + vs_dir = os.environ['MSVCDir'] + if os.path.exists(vs_dir): + env['vc_dir'] = vs_dir + elif 'VCINSTALLDIR' in os.environ: + vc_dir = os.environ['VCINSTALLDIR'] + if os.path.exists(vc_dir): + env['vc_dir'] = vc_dir + msvs7 = os.path.join(env['vc_dir'],"Vc7") + if os.path.exists(msvs7): + env['vc_dir'] = msvs7 + elif 'VSINSTALLDIR' in os.environ: + vs_dir = os.environ['VSINSTALLDIR'] + if os.path.exists(vs_dir): + env['vc_dir'] = os.path.join(vs_dir, 'VC') + elif 'MSVCDIR' in os.environ: + vs_dir = 
os.environ['MSVCDIR'] + if os.path.exists(vs_dir): + env['vc_dir'] = vs_dir + + if env['vc_dir'] == '' or env['setup_msvc']: + env['vc_dir'] = msvs.set_msvs_env(env) + + # toolchain is the bin directory of the compiler with a trailing slash + if env['toolchain'] == '' and env['vc_dir'] and env['compiler']=='ms': + toolchain = '' + if env['build_cpu'] == 'ia32' and env['host_cpu'] == 'ia32': + toolchain = os.path.join(env['vc_dir'], 'bin', '') + elif env['build_cpu'] == 'ia32' and env['host_cpu'] == 'x86-64': + toolchain = os.path.join(env['vc_dir'], 'bin', 'x86_amd64', '') + elif env['build_cpu'] == 'x86-64' and env['host_cpu'] == 'x86-64': + toolchain = os.path.join(env['vc_dir'], 'bin', 'amd64', '') + elif env['build_cpu'] == 'x86-64' and env['host_cpu'] == 'ia32': + toolchain = os.path.join(env['vc_dir'], 'bin', '') + elif env['compiler'] == 'ms': + die("Unknown build/target combination. build cpu=%s, " + + "host_cpu=%s" % ( env['build_cpu'], env['host_cpu'])) + + env['toolchain'] = toolchain # default toolchain that we discover + + + +def set_env_ms(env): + """Example of setting up the MSVS environment for compilation""" + set_compiler_env_common(env) + + # FIXME: allow combinations of options + env['opt_flag'] = ( 'opt', {'noopt':'', + '0':'%(OPTOPT)sd', + '1':'%(OPTOPT)s1', + '2':'%(OPTOPT)s2', + '3':'%(OPTOPT)s2', # map O3 and O4 to O2 + '4':'%(OPTOPT)s2', # map O3 and O4 to O2 + 'b':'%(OPTOPT)sb', + 'i':'%(OPTOPT)si', + 's':'%(OPTOPT)ss', + 'x':'%(OPTOPT)sx', + 'd':'%(OPTOPT)sd', + 'g':'%(OPTOPT)sg'} ) + + env['ASFLAGS'] = '/c /nologo ' + env['LINKFLAGS'] += ' /nologo' + env['ARFLAGS'] = '/nologo' + + env['link_prefix'] = ('use_compiler_to_link', { True:'/link', + False:'' }) + if env['host_cpu'] == 'ia32': + env['LINKFLAGS'] += ' %(link_prefix)s /MACHINE:X86' + env['ARFLAGS'] += ' /MACHINE:X86' + elif env['host_cpu'] == 'x86-64': + env['LINKFLAGS'] += ' %(link_prefix)s /MACHINE:X64' + env['ARFLAGS'] += ' /MACHINE:X64' + + env['favor'] = ( 'compiler', { 
'ms' : ' /favor:EM64T', + 'otherwise' : '' }) + env['CXXFLAGS'] += ' %(favor)s' + env['CCFLAGS'] += ' %(favor)s' + + elif env['host_cpu'] == 'ipf': + env['LINKFLAGS'] += ' %(link_prefix)s /MACHINE:IA64' + env['ARFLAGS'] += ' /MACHINE:IA64' + + env['COPT'] = '/c' + env['DOPT'] = '/D' + env['ASDOPT'] = '/D' + + # I use '-I' instead of '/I' because it simplifies use of YASM + # which requires -I for includes. + env['IOPT'] = '-I' # -I or /I works with MSVS8. + env['ISYSOPT'] = '-I' # MSVS has not -isystem so we use -I + env['LOPT'] = '%(link_prefix)s /LIBPATH:' + + + # Some options differ when using the compiler to link programs. + # Note: /Zi has parallel-build synchronization bugs + env['DEBUGFLAG'] = '/Z7' + env['DEBUGFLAG_LINK'] = ('use_compiler_to_link', { True:'/Z7', # of /Zi + False:'/debug'}) + env['COUT'] = '/Fo' + env['ASMOUT'] = '/Fo' + env['LIBOUT'] = '/out:' + env['EXEOUT'] = '/Fe' + env['LINKOUT'] = ('use_compiler_to_link',{ True:'/Fo', + False:'/OUT:'}) + env['DLLOPT'] = '/dll' + env['OBJEXT'] = '.obj' + env['LIBEXT'] = '.lib' + env['DLLEXT'] = '.dll' + env['EXEEXT'] = '.exe' + env['PDBEXT'] = '.pdb' + env['PDBEXT'] = '.pdb' + env['RCEXT'] = '.rc' + env['RESEXT'] = '.res' + + + + find_ms_toolchain(env) + + if env['ASSEMBLER'] == '': + if env['host_cpu'] == 'ia32': + env['ASSEMBLER'] = 'ml.exe' + elif env['host_cpu'] == 'x86-64': + env['ASSEMBLER'] = 'ml64.exe' + + if env['CXX_COMPILER'] == '': + env['CXX_COMPILER'] = ( 'compiler', { 'ms':'cl.exe', + 'icl':'icl.exe' }) + if env['CC_COMPILER'] == '': + env['CC_COMPILER'] = ( 'compiler', { 'ms':'cl.exe', + 'icl':'icl.exe' }) + if env['LINKER'] == '': + env['LINKER'] = ( 'compiler', { 'ms': 'link.exe', + 'icl' : 'xilink.exe'}) + + # old versions of RC do not accept the /nologo switch + env['rcnologo'] = ( 'msvs_version', { 'otherwise':' /nologo', + '6':'', + '7':'', + '8':'', + '9':'' }) + env['RCFLAGS'] = " %(rcnologo)s" + + # Finding the rc executable is a bit of a nightmare. 
+ # + # In MSVS2005(VC8): + # C:/Program Files (x86)/Microsoft Visual Studio 8/VC + # bin/rc.exe + # or + # PlatformSDK/Bin/win64/AMD64/rc.exe + # which is $VCINSTALLDIR/bin or + # $VCINSTALLDIR/PlatformSDK/bin/win64/AMD64 + # We do not bother attempting to find that version of rc. + # Put it on your path or set env['RC_CMD'] if you need it. + # + # In MSVS2008(VC9), MSVS2010 (VC10) and MSVS2012 (VC11): + # have rc.exe in the SDK directory, though the location varies + # a little for the 32b version. + + if env['RC_CMD'] == '': + if 'WindowsSdkDir' in env: + sdk = env['WindowsSdkDir'] + elif 'WindowsSdkDir' in os.environ: + sdk = os.environ['WindowsSdkDir'] + else: + sdk = 'unknown-sdk-dir' + # and hope the user puts it on their PATH + env['RC_CMD'] = 'rc' + + # if we have no valid sdk dir, we won't die trying to + # find the rc.exe. + if sdk != 'unknown-sdk-dir': + if env['host_cpu'] == 'x86-64': + env['RC_CMD'] = os.path.join(sdk,'bin','x64','rc.exe') + if not os.path.exists(env.expand('%(RC_CMD)s')): + die("Could not find 64b RC command in SDK directory") + else: + env['RC_CMD'] = os.path.join(sdk,'bin','x86','rc.exe') + if not os.path.exists(env.expand('%(RC_CMD)s')): + env['RC_CMD'] = os.path.join(sdk,'bin','rc.exe') + if not os.path.exists(env.expand('%(RC_CMD)s')): + die("Could not find 32b RC command in SDK directory") + + # RC lives in the SDK. Counting on the msvs.py setup to + # put it on the PATH. 
FIXME + if env['RC'] == '': + env['RC'] = quote('%(RC_CMD)s') + + if env['ARCHIVER'] == '': + env['ARCHIVER'] =( 'compiler', { 'ms': 'lib.exe', + 'icl' : 'xilib.exe'}) + # lazy toolchain and other env var (f) expansion + mktool = lambda(f): "%(toolchain)s%(" + f + ")s" + + if env['CXX'] == '': + env['CXX'] = quote(mktool('CXX_COMPILER')) + if env['CC'] == '': + env['CC'] = quote(mktool('CC_COMPILER')) + if env['AS'] == '': + env['AS'] = quote(mktool('ASSEMBLER')) + if env['LINK'] == '': + env['LINK'] = quote(mktool('LINKER')) + if env['AR'] == '': + env['AR'] = quote(mktool('ARCHIVER')) + + + +def yasm_support(env): + """Initialize the YASM support based on the env's host_os and host_cpu""" + # FIXME: android??? + yasm_formats={} + yasm_formats['win'] = { 'ia32': 'win32', 'x86-64': 'win64'} + yasm_formats['lin'] = { 'ia32': 'elf32', 'x86-64': 'elf64'} + yasm_formats['bsd'] = { 'ia32': 'elf32', 'x86-64': 'elf64'} + yasm_formats['mac'] = { 'ia32': 'macho32', 'x86-64': 'macho64'} + env['ASDOPT']='-D' + try: + env['ASFLAGS'] = ' -f' + yasm_formats[env['host_os']][env['host_cpu']] + env['ASMOUT'] = '-o ' + env['AS'] = 'yasm' + except: + die("YASM does not know what format to use for build O/S: %s and target CPU: %s" % + (env['host_os'], env['host_cpu'])) + + + +def set_env_clang(env): + set_env_gnu(env) + + +def set_env_icc(env): + """Example of setting up the Intel ICC environment for compilation""" + set_env_gnu(env) + +def set_env_iclang(env): + """Example of setting up the Intel iclang (aka mac icl) environment for compilation""" + set_env_gnu(env) + +def set_env_icl(env): + """Example of setting up the Intel ICL (windows) environment for compilation""" + set_env_ms(env) diff --git a/mbuild/dag.py b/mbuild/dag.py new file mode 100755 index 0000000..1ce6e80 --- /dev/null +++ b/mbuild/dag.py @@ -0,0 +1,1136 @@ +#!/usr/bin/env python +# -*- python -*- +# Mark Charney +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +"""dependence tracking using a directed acyclic graph (DAG)""" + +# Originally, we decided that if we could not find a header then it +# was an error. And there was an ignored file list for headers that +# were conditionally included on some platforms but not others. The +# idea was that you'd list the files that were ignorable on your +# platform and they would not trigger a rebuild. Any other missing +# file would trigger a rebuild though!! That's problematic though as +# users must maintain lists of ignorable files. +# +# Another school of thought is that if the scanner cannot find the +# file and all the include paths were specified properly, then the +# compilation will fail if the header is required. Missing headers +# files in this regime will not trigger downstream rebuilds. +# +# This precludes users from manually specifying -I flags and +# skipping the mbuild's add_include_dir() API. They'll get okay +# build but incomplete dependence checks. So don't bypass +# add_include_dir()! +# +# Even so, there is a problem with ignoring missing files: What about +# dependences on generated header files that have not been generated +# yet? That is THE problem that motivates this design. 
If we ignore +# missing headers, then the "dependent" file will either be marked as: +# +# (a) "ready to compile" (assuming the other headers are found but one +# or more might have changed) +# or +# (b) "does not need compilation" (if none of the found headers +# have changed). +# +# In the former case (a), the compilation will fail +# nondeterministically depending on whether or not the header file is +# created at the compilation time of the "including" file. Or in the +# latter case (b), we won't rebuild things that need +# rebuilding. Either way, the idea of ignoring missing header files is +# very broken. +# +# A third option is to ignore most header missing files but specify +# that certain generated missing header files cannot be ignored. Since +# there are way fewer generated header files, this is a much more +# convenient option. +# +# NOTE: If there is a cmd in the dag that produces the missing header +# file, we must run it to produce the missing header. +# + + +import os +import sys +import platform +import types +import collections +try: + import cPickle as apickle +except: + import pickle as apickle + + +from base import * +from work_queue import * +from env import * +from util import * +from plan import * +import scanner +import dfs +import util + +class _mbuild_dep_record_t(object): + """This stores the basic dependence structure for the + build. Creators are listed for files that are generated. The + signature is the last signature we saw for this.""" + def __init__(self, file_name, creator=None): + self.file_name = file_name + + self.old_signature = None + self.signature = None + self.scanned_header = False + + # If this file has a creator, we check the signature of the + # thing that created it to see if it is the same as it was the + # last time we made this target. + self.old_command_signature = None + self.command_signature = None + + # did we do the header scan for this file yet? 
+ self.scanned = False + + # When a changed input reaches this node, it sets needs_to_run + # to True. + self.needs_to_run = False + self.visited = False + self.added = False + + # before building, we mark all the nodes that are required for + # the build to True (by search for ancestors from targets) so + # that we know which commands to enable for execution. + self.required = False + + self.changed = None + + self.creator = creator # command_t + self.files_that_are_inputs = [] + self.files_that_depend_on_this = [] + + self.part_of_loop = False + self.index = 0 + self.lowlink = 0 + + self.hash_file() + + def hash_file(self): + #msgb("HASHING", str(self.file_name)) + if os.path.exists(self.file_name): + try: + lines = file(self.file_name).readlines() + except: + die("COULD NOT READ: %s" %(str(self.file_name))) + self.signature = hash_list(lines) + if verbose(99): + msgb("HASHFILE", "%s -> %s" % (self.signature, + self.file_name)) + else: + if verbose(99): + msgb("COULD NOT HASH MISSING FILE", self.file_name) + + def hash_if_needed(self): + if self.signature == None: + self.hash_file() + + + def missing(self): + if not os.path.exists(self.file_name): + return True + return False + + + def _check_required(self, required_files): + if self.file_name in required_files: + return True + if os.path.basename(self.file_name) in required_files: + return True + return False + + + def _compute_changed_bit(self, required_files): + """Return True if there is no old signature or the old + signature does not equal the current signature, or the file + does not exist""" + if self.missing(): + # A missing required file during the scan implies either + # the build is going to fail or something upstream better + # create it. And if it is created we are going to have to + # assume it is changing since we don't have one now. 
+ if verbose(10): + msgb("MISSING FILE", self.file_name) + if self._check_required(required_files): + if verbose(10): + msgb("MISSING REQUIRED FILE->CHANGED") + return True + # we let scanned headers slide if they don't exist + if self.scanned_header: + if verbose(10): + msgb("MISSING SCANNED HEADER FILE->UNCHANGED") + return False + if verbose(10): + msgb("MISSING FILE->ASSUME CHANGED") + return True + else: + if self.old_signature: + self.hash_if_needed() + if self.old_signature == self.signature: + return False + elif verbose(10): + msgb("SIG MISMATCH for %s" % self.file_name) + msgb("OLD SIG %s" % str(self.old_signature)) + msgb("NEW SIG %s" % str(self.signature)) + elif verbose(10): + msgb("NO OLD SIG for %s" % self.file_name) + return True + + def change_bit(self, required_files): + """Compute changed bit if it has not been computed yet. Return + the value.""" + if self.changed == None: + self.changed = self._compute_changed_bit(required_files) + if verbose(10): + msgb("COMPUTE CHANGE BIT", "%s for %s" % + ( str(self.changed), self.file_name)) + return self.changed + + + def format_string(self,s): + o = "\n\t".join(s) + return o + + def dump_str(self): + + s = "\tANCESTORS: %s\nTARGET: %s\n\tDESCENDENTS: %s\n" % \ + (self.format_string(self.files_that_are_inputs), + self.file_name, + self.format_string(self.files_that_depend_on_this)) + + if self.creator: + s += "\tCREATOR: %s\n" % self.creator.dump() + if self.visited: + s += "\tVISITED\n" + else: + s += "\tNOT-VISITED\n" + + if self.part_of_loop: + s += "\tIN-LOOP\n" + else: + s += "\tNOT-IN-LOOP\n" + + if self.required: + s += "\tREQUIRED\n" + else: + s += "\tNOT-REQUIRED\n" + + if self.changed: + s += "\tCHANGED\n" + else: + s += "\tCHANGED\n" + return s + + def dump(self): + """print a string representing this node of the DAG. 
The + string comes from the __str__ function""" + print self.dump_str() + def __str__(self): + return self.dump_str() + +class _mbuild_storage_object_t(object): + def __init__(self, signature): + self.signature = signature + + +class dag_t(object): + """ + This object builds a DAG of files an sequences their submission to + the parallel work queue of type L{work_queue_t}. + + This takes L{plan_t} objects representing command + strings or python functions, and creates L{command_t} + objects suitable for use in the L{work_queue_t}. + + As the L{work_queue_t} executes, it queries this DAG for more + ready commands or functions to execute. + """ + + + def __init__(self, name='default', env=None): + self.name = name + self.recs = {} # _mbuild_dep_record_t's + + # dictionary of _mbuild_storage_object_t's by file name. + self.old_signatures = {} + + # if you car about changes to the python functions, then + # include the python sources in the list of inputs. This + # feature _python_commands_changed is deprecated. + self._python_commands_changed = False + + self.signature_file_name = ".mbuild.hash." + self.name + if env: + self.signature_file_name = env.build_dir_join( + self.signature_file_name) + # some users change directories during the build and we do not + # want relative paths to mess us up when we go to write the + # signature file at the end of the build. + self.signature_file_name = os.path.abspath(self.signature_file_name) + + if os.path.exists(self.signature_file_name): + self._read_signatures(self.signature_file_name) + + if env and 'required' in env: + self.required_set = \ + set(self._canonize_if_exists_fn(env['required'])) + else: + self.required_set = set() + + + def cycle_check(self): + """Check the DAG for illegal cycles in the include structure. + @rtype: bool + @return: True if the DAG contains cycles (and thus is not a DAG). 
+ """ + node_dict = {} + # build the graph for the DFS + for k,v in self.recs.iteritems(): + if k in node_dict: + node = node_dict[k] + else: + node = dfs.node_t(k) + node_dict[k] = node + for p in v.files_that_are_inputs: + if p in node_dict: + pnode = node_dict[p] + else: + pnode = dfs.node_t(p) + node_dict[p] = pnode + node.add_successor(pnode) + # Traverse the graph + cycle = dfs.dfs(node_dict.values()) + if cycle: + msgb("CYCLE DETECTED IN DAG") + return cycle + + def __del__(self): + self.dag_write_signatures() + + def dump(self): + """print a string representing the DAG. """ + print "DAG DUMP" + for v in self.recs.itervalues(): + v.dump() + + def _hash_mixed_list(l): + + if isinstance(l, types.ListType): + il = l + else: + il = [l] + s = [] + for i in il: + if i.is_command_line(): + s.append(i.command) + t = " - ".join(s) + h = hash_string(t) + return h + + def dag_write_signatures(self): + """Write a dictionary of _mbuild_storage_object_t's to the + given file name""" + if verbose(10): + msgb("WRITING SIGNATURES", self.signature_file_name) + d = {} + for (k,v) in self.recs.iteritems(): + # get the new hash values for anything that had a command + # execute for it. 
+ if v.creator: + if v.creator.is_command_line() and v.creator.completed: + # store the command line hashes in the same + # dictionary with a prefix + command_hash = v.creator.hash() + full_key = v.creator.dagkey() + d[full_key]= _mbuild_storage_object_t(command_hash) + if verbose(99): + msgb("SIGWRITE", "%s -> %s" % (str(command_hash), + full_key)) + if v.creator.completed and v.creator.exit_status == 0: + v.hash_file() + if v.creator and v.creator.failed(): + if verbose(99): + msgb("NULLIFY SIG", k) + v.signature = None + if not v.signature: + if verbose(99): + msgb("FIXING NULL SIGNATURE", k) + v.hash_file() + + if verbose(99): + msgb("SIGWRITE", "%s -> %s" % (str(v.signature),k)) + d[k] = _mbuild_storage_object_t(v.signature) + + # FIXME: binary protocol 2, binary file write DOES NOT WORK ON + # win32/win64 + f = open(self.signature_file_name,"wb") + apickle.dump(d,f) + f.close() + + def _check_command_signature(self, co): + """Return True if the signature matches the command object.""" + + # if the command is not a list of strings, we just assume that + # is has changed. 
+ if co.has_python_subcommand(): + if self._python_commands_changed: + return False + else: + return True # assume the command has not changed + + full_key = co.dagkey() + try: + old_hash = self.old_signatures[full_key].signature + if verbose(99): + msgb('COMMAND HASH', full_key) + msgb('COMMAND HASH', old_hash) + new_hash = co.hash() + if old_hash == new_hash: + if verbose(99): + msgb('COMMAND HASH','\tMATCH') + + return True + except: + if verbose(99): + msgb('COMMAND HASH','\tNO OLD HASH') + + if verbose(99): + msgb('COMMAND HASH','\tDOES NOT MATCH') + return False + + + def _read_signatures(self, file_name): + """Read a dictionary of _mbuild_storage_object_t's from the + given file name.""" + if verbose(10): + msgb("READING SIGNATURES", file_name) + try: + f = open(file_name,"rb") + self.old_signatures = apickle.load(f) + f.close() + except: + warn("READING SIGNATURES FAILED FOR "+ file_name) + return + if verbose(99): + for k, v in self.old_signatures.iteritems(): + msgb("SIGREAD", "%s -> %s" % (str(v.signature),k)) + + # Add old signatures to any existing files + for k, v in self.recs.iteritems(): + if k in self.old_signatures: + v.old_signature = self.old_signatures[k].signature + + def _check_required_file(self,fn): + if fn in self.required_set: + return True + if os.path.basename(fn) in self.required_set: + return True + return False + + + def _compute_all_parents_visited(self, n): + """Returns (all_parents_visited, some_parents_changed)""" + all_parents_visited = True + some_parents_changed = False + for ancestor_fn in n.files_that_are_inputs: + try: + ancestor_rec = self.recs[ancestor_fn] + if ancestor_rec.visited: + if ancestor_rec.changed: + some_parents_changed = True + else: + all_parents_visited = False + except: + if self._check_required_file(ancestor_fn): + warn("[1] node %s: did not find ancestor node: %s" % + (n.file_name, ancestor_fn)) + + return (all_parents_visited, some_parents_changed) + + def _just_compute_parent_changed(self, n): + 
"""Returns True if some parent changed""" + for ancestor_fn in n.files_that_are_inputs: + try: + ancestor_rec = self.recs[ancestor_fn] + if ancestor_rec.visited: + if ancestor_rec.changed: + return True + except: + if self._check_required_file(ancestor_fn): + warn("[2] node %s: did not find ancestor node: %s" % + (n.file_name, ancestor_fn)) + return False + + + def _just_compute_all_parents_visited(self, n): + """Returns True if all parents were visited or parents are part of a loop""" + for ancestor_fn in n.files_that_are_inputs: + try: + ancestor_rec = self.recs[ancestor_fn] + if not ancestor_rec.visited: + if verbose(10): + msgb("PARENT UNVISITED", "%s <- %s" % + (n.file_name, ancestor_fn)) + if n.part_of_loop: + warn("Circularity involving %s" % (n.file_name)) + return True # FIXME HACK + return False + except: + if self._check_required_file(ancestor_fn): + warn("[3] node %s: did not find ancestor node: %s" % + (n.file_name, ancestor_fn)) + return True + + def _just_compute_all_parents_completed(self, n): + """Returns True if all parents that have to execute have completed""" + for ancestor_fn in n.files_that_are_inputs: + try: + ancestor_rec = self.recs[ancestor_fn] + if ancestor_rec.creator: + if not ancestor_rec.creator.completed: + return False + except: + if self._check_required_file(ancestor_fn): + warn("[4] node %s: did not find ancestor node: %s" % + (n.file_name, ancestor_fn)) + return True + + def _set_ancestors_to_required(self, lof): + """Set all the ancestors of the files in the list of files lof + argument to be required nodes.""" + nodes = collections.deque() # work list + for f in lof: + nodes.append(f) + + while len(nodes) != 0: + f = nodes.popleft() + r = self.recs[f] + if not r.required: + if verbose(10): + msgb("MARKING-ANCESTORS AS REQUIRED", r.file_name) + + r.required = True + for g in r.files_that_are_inputs: + nodes.append(g) + + def _find_required_nodes(self, targets): + """Look at the targets list and mark the ancestors as + required 
for the build. Internal function""" + if verbose(10): + msgb("INPUT TARGETS", str(targets)) + for v in self.recs.itervalues(): + v.required = False + + target_dictionary = dict.fromkeys(targets, True) + if verbose(10): + msgb("TARGETS", str(target_dictionary)) + for v in self.recs.itervalues(): + if v.creator: + if v.file_name in target_dictionary: + if not v.required: + if verbose(10): + msgb("MARK AS REQUIRED", v.file_name) + v.required = True + self._set_ancestors_to_required(v.files_that_are_inputs) + + def check_for_skipped(self): + """Return a list of things that did not build but were tagged + as required for the build. This list could be nonempty because + (1)there was an error in the build or (2) there is a + circularity in the dependence structure.""" + did_not_build = [] + for v in self.recs.itervalues(): + if v.required and not v.visited: + did_not_build.append(v.file_name) + return did_not_build + + def _find_loops(self, root_nodes): + #print "FIND LOOPS" + + def _mark_loop(level,n,stack,all_sccs): + # Tarjan's algorithm for strongly connected components + n.index = level + n.lowlink = level + level = level + 1 + stack.append(n) + + for cfn in n.files_that_depend_on_this: + child = self.recs[cfn] + if child.index == 0: + _mark_loop(level,child,stack,all_sccs) + n.lowlink = min(n.lowlink, child.lowlink) + elif child in stack: + n.lowlink = min(n.lowlink, child.index) + + if n.lowlink == n.index: + # collect each strongly connected component + scc = [] + + while 1: + child = stack.pop() + scc.append(child) + if child == n: + break + all_sccs.append(scc) + + stack = collections.deque() + all_sccs = [] # list of lists of nodes + level = 1 + + for v in root_nodes: + #print "MARKING", v.file_name + _mark_loop(level,v,stack,all_sccs) + + # mark nodes that are part of include-loops (and print them out) + for scc in all_sccs: + if len(scc) > 1: + msg("===================================") + msg("CYCLE INVOLVING THESE FILES (will assume all ready):") + for n in 
scc: + msg("\t" + n.file_name) + n.part_of_loop = True + msg("===================================") + + def _leaves_with_changes(self, targets=None): + """Return a list of mbuild_dep_records_t for things with no + ancestors but with associated commands. targets is an optional + list of things to build. (called from work_queue.py) + """ + nodes = collections.deque() # work list + + if targets: + if not isinstance(targets, types.ListType): # make it a list + targets = [ targets ] + self._find_required_nodes(targets) + else: + # mark all nodes required since no targets are specified + for v in self.recs.itervalues(): + v.required = True + + self._find_loops(self.recs.itervalues()) + + # build a list of roots -- files that have nothing they depend on. + # store that list in the nodes list + for v in self.recs.itervalues(): + v.visited = False # initialize all to false + v.added = False # initialize all to false + if (v.part_of_loop or len(v.files_that_are_inputs) == 0) and v.required: + v.needs_to_run = v.change_bit(self.required_set) + v.added = True + nodes.append(v) + + if verbose(9): + if v.needs_to_run: + s = ": CHANGED" + else: + s = '' + msgb("ROOTSEARCH", v.file_name + s) + else: + v.needs_to_run = False # clear all the other nodes + + ready = self._ready_scan(nodes) + del nodes + return ready + + def _enable_successors(self,cmd): + """When a command completes, it must notify things that + depend on its stated target files. 
Return a list of ready + commands (called from work_queue.py) + """ + if verbose(10): + msgb('ENABLE SUCCESSORS', str(cmd)) + nodes = collections.deque() # work list + for tgt in cmd.targets: + rtgt = os.path.realpath(tgt) + if verbose(11): + msgb('SUCCESSOR', tgt + " --> " + rtgt) + n = self.recs[ rtgt ] + self._scan_successors(nodes,n) + ready = self._ready_scan(nodes) + if verbose(10): + msgb("NEW READY VALUES", str(ready)) + del nodes + return ready + + def _scan_successors(self, nodes,n): + """Add ready successors of n to nodes list""" + if verbose(10): + msgb('SCAN SUCCESSORS', n.file_name + " -> " + + str(n.files_that_depend_on_this)) + for successor_fn in n.files_that_depend_on_this: + try: + successor_rec = self.recs[successor_fn] + if successor_rec.required and not successor_rec.needs_to_run: + if self._just_compute_all_parents_visited(successor_rec): + if self._just_compute_all_parents_completed(successor_rec): + if verbose(10): + msgb("LEAFSEARCH", "\tADDING: " + + successor_rec.file_name) + # Make sure we are not scanning things + # multiple times. + if successor_rec.added: + warn("Already added: " + successor_rec.file_name) + else: + successor_rec.added = True + successor_rec.needs_to_run = True + nodes.append(successor_rec) + else: + if verbose(10): + msgb("NOT ALL PARENTS COMPLETED", successor_fn) + else: + if verbose(10): + msgb("NOT ALL PARENTS VISITED", successor_fn) + else: + if verbose(10): + msgb("NOT REQUIRED/NOT NEEDED TO RUN", successor_fn) + + except: + warn("node %s: did not find child node: %s" % + (n.file_name, successor_fn)) + if verbose(10): + msgb('SCAN SUCCESSORS DONE') + + def _cmd_all_outputs_visited_and_unchanged(self, cmd): + """Return True if all the outputs of the command are visited + and unchanged. 
If any are not visited or any are changed, + return False.""" + if not cmd.targets: + return True + for fn in cmd.targets: + rfn = os.path.realpath(fn) + vmsgb(20,"TESTING CMD TARGET:", rfn, pad = 4*' ') + if rfn in self.recs: + d = self.recs[rfn] + if d.visited == False: + vmsgb(20,"CMD TARGET NOT VISITED YET:", fn, pad=8*' ') + return False + if d.changed: + vmsgb(20,"CMD TARGET CHANGED:", fn, pad=8*' ') + return False + else: + vmsgb(20,"CMD TARGET NOT FOUND IN DAG:", fn, pad=8*' ') + vmsgb(20,"CMD TARGETS ALL VISITED AND UNCHANGED:", fn) + return True + + def _ready_scan(self,nodes): + """Process the nodes list and return a list of ready commands""" + vmsgb(20,'READY SCAN', '%d' % (len(nodes))) + readyd = dict() # ready dictionary for fast searching + vmsgb(20,"READYD0", str(readyd)) + # Pop a node off the nodes list. If that node has a creator, + # put it in the ready list. If the node has no creator put then its + # children on the nodes list. + iters = 0 + while len(nodes) != 0: + n = nodes.popleft() + iters+=1 + # see if all parents have been visited yet + parents_changed = self._just_compute_parent_changed(n) + vmsgb(20,"VISITING", n.file_name) + n.visited = True + if n.change_bit(self.required_set): + vmsgb(20,"LEAFSEARCH", "%d \tthis node %s CHANGED." % + (iters,n.file_name)) + propagate_changed = True + n.needs_to_run = True + elif parents_changed: + vmsgb(20,"LEAFSEARCH", "%d \tsome parent of %s CHANGED." % + (iters,n.file_name)) + n.changed = True # we changed because our parents changed + propagate_changed = True + n.needs_to_run = True + elif n.creator and \ + not self._check_command_signature(n.creator): + vmsgb(20,"LEAFSEARCH", "%d\tthis node's command changed: %s." % + (iters,n.file_name)) + n.changed = True # we changed because our command line changed + propagate_changed = True + n.needs_to_run = True + else: + vmsgb(20,"LEAFSEARCH", "%d\tUNCHANGED: %s." 
% + (iters,n.file_name)) + propagate_changed = False + + if n.creator: + # if the inputs have not changed and the signtures of + # the outputs match, then do not build the thing. Just + # mark it complete so it won't run. + + # we only mark a creator completed if all the + # command_t targets are visited unchanged. + + if not propagate_changed: + vmsgb(20,"LEAFSEARCH", "\tTESTING CMD SUCCESSORS: " + + n.file_name) + if self._cmd_all_outputs_visited_and_unchanged(n.creator): + n.creator._complete() + vmsgb(20,"LEAFSEARCH", "\tMARK CREATOR CMD COMPLETED: " + + n.file_name) + else: + vmsgb(20,"LEAFSEARCH", "\tCMD OUTPUTS NOT FULLY SCANNED YET: " + + n.file_name) + + else: + if n.creator._ready(): + vmsgb(20,"LEAFSEARCH", "\tCMD READY: " + n.file_name) + if n.file_name not in readyd: + vmsgb(20,"LEAFSEARCH", + "\tADDING CREATOR TO READYD: " + + n.file_name) + readyd[n.file_name] = n + else: + vmsgb(20,"LEAFSEARCH", + "\tCREATOR ALREADY IN READYD: " + + n.file_name) + + self._scan_successors(nodes,n) + vmsgb(20,"READYD", str(readyd)) + ready = readyd.values() + return ready + + def _find_rec_for_missing_file(self, fn, assumed_directory): + vmsgb(20,"LOOKING FOR MISSING FILE", "%s assuming %s" % + (fn, assumed_directory)) + + if fn in self.recs: + vmsgb(20,"FOUND DEP REC FOR MISSING FILE", fn) + return self.recs[fn] + if assumed_directory: + nfn = util.join(assumed_directory, fn) + if nfn in self.recs: + vmsgb(20,"FOUND DEP REC FOR MISSING FILE(2)", nfn) + return self.recs[nfn] + nfn = os.path.realpath(nfn) + if nfn in self.recs: + vmsgb(20,"FOUND DEP REC FOR MISSING FILE(3)", nfn) + return self.recs[nfn] + vmsgb(20,"NO DEP REC FOR MISSING FILE", fn) + return None + + def _make_list(self, x): # private + """Make a list from a single object if the thing is not + already a list. 
If it is a list, just return the list""" + if isinstance(x,types.ListType): + return x + return [ x ] + + def _scan_headers(self, xinput, header_paths, assumed_directory=None): + """Scan xinput for headers. Add those headers to the list of + files that are inputs.""" + to_scan = collections.deque() + to_scan.append(xinput) + #msgb("HDRSCAN1", xinput) + # loop scanning headers of headers... + while len(to_scan) != 0: + fn = to_scan.popleft() + #msgb("HDRSCAN2", "\t" + fn) + # r is the record of the thing we are scanning + r = self._check_add_dep_rec(fn) + + # sometimes we add stuff to the work list twice. Catch the + # dups here + if r.scanned: + continue + #msgb("HDRSCAN3", fn) + # headers is all the files that fn includes directly. One + # level scan + headers = scanner.mbuild_scan(fn, header_paths) + if verbose(2): + for hr in headers: + if hr.system: + sys="System " + else: + sys="NotSystem" + if hr.found: + fnd="Found " + else: + fnd="Missing" + msgb('HDR',"%s| %s| %s" % + ( sys, fnd, hr.file_name) ) + + r.scanned = True + + for hr in headers: + # we ignore system include files and process normal files + + if not hr.system: + scanned_header = True + if not hr.found: + # check if we have a dep record for this + # header. It might be a generated header that + # we are expecting to build. + ah = self._find_rec_for_missing_file(hr.file_name, assumed_directory) + if ah: + if verbose(2): + msgb("FOUND DEP REC FOR MISSING HEADER. WE WILL BUILD IT") + hr.file_name = ah.file_name + scanned_header = False + elif not self._check_required_file(hr.file_name): + if verbose(2): + msgb("MISSING HEADER NOT REQUIRED") + continue + elif assumed_directory: + ofn = hr.file_name + hr.file_name = util.join(assumed_directory, ofn) + if verbose(2): + msgb("ASSUMING", + "%s is in %s" % (ofn, assumed_directory)) + + + # make the hdr file name canonical. + hr.file_name = os.path.realpath(hr.file_name) + + # Make the forward & backwards links. 
+ r.files_that_are_inputs.append(hr.file_name) + hdr_node = self._check_add_dep_rec(hr.file_name) + hdr_node.scanned_header = scanned_header + hdr_node.files_that_depend_on_this.append(fn) + + if not hdr_node.scanned: + to_scan.append(hr.file_name) + + + def _make_dep_record(self, file_name, creator=None): + if verbose(10): + msgb("MKDEP", file_name) + r = _mbuild_dep_record_t(file_name, creator) + if file_name in self.old_signatures: + r.old_signature = self.old_signatures[file_name].signature + return r + + def _check_add_dep_rec(self, fn, creator=None): + """Look to see if the file exists in our list of dependence + records. If not, add it. Return the found or created + record.""" + + rfn = os.path.realpath(fn) + + if rfn not in self.recs: + r = self._make_dep_record(rfn, creator) + self.recs[rfn] = r + else: + r = self.recs[rfn] + return r + + def _add_one_input(self, xinput, consumer_cmd): + r = self._check_add_dep_rec(xinput) + r.files_that_depend_on_this.extend(consumer_cmd.targets) + + def _add_one_output(self, output, creator=None): + r = self._check_add_dep_rec(output) + self.required_set.add(r.file_name) + if creator != None: + if r.creator: + die("Two commands create " + output) + r.creator = creator + r.files_that_are_inputs.extend(creator.inputs) + + def _make_command_object(self,d): + """Produce a command_t to add to the workqueue or for + connecting to other commands by dependence chains""" + if d.env: + # FIXME: assumes args is present + c = command_t( d.command, d.args, d.env ) + elif d.args: + c = command_t( d.command, d.args) + else: + c = command_t( d.command ) + if d.input: + c.inputs = self._make_list( d.input) + if d.output: + c.targets = self._make_list( d.output) + + if hasattr(d,'name'): + c.name = d.name + return c + + def _make_commands_depend_on_each_other(self,c): + """We just added a new command c. Now we must make sure that + the commands that create this command's inputs come before + this command. 
Also the commands that use this command's output + output files as inputs come after it. Not all the commands may + be known yet, but by working symmetrically here, we'll get + them all eventually.""" + + # Look at the inputs and see if any have commands we can make + # preceded this one. + for xinput in c.inputs: + try: + t = self.recs[xinput] + if t.creator: + if verbose(10): + msgb("CMD IDEP", xinput + " -> " + str(c.targets)) + t.creator.add_after_me(c) + except: + pass + + # Look at the outputs and see if the files that depend on + # these outputs have creator commands that should be after + # this one. + for output in c.targets: + # We just added this so it better be there. + if output not in self.recs: + die("Missing command for target " + output) + t = self.recs[output] + for f in t.files_that_depend_on_this: + if f in self.recs: + u = self.recs[f] + if u.creator: + if verbose(10): + msgb("ODEP", output + ' -> ' + + str(u.creator.targets)) + u.creator.add_before_me(c) + + + def results(self): + """Return a list of L{command_t}'s that were executed for + analysis of the build. If a command was not executed, it is + not returned. + + @rtype: list + @return: A list of L{command_t} objects. + """ + executed_commands = [] + for r in self.recs.itervalues(): + if r.creator: + if r.creator.completed: + executed_commands.append(r.creator) + return executed_commands + + + def add(self,env,d): + """Create a command based on the input dictionary or + L{plan_t} object. It may have inputs and + outputs. Things may have no input or output files. Return the + created L{command_t}. The command object dependence + tracking mechanism will control their execution. + + @type env: L{env_t} + @param env: the environment + @type d: dict or L{plan_t} + @param d: a dictionary or L{plan_t} + from a builder describing the command. 
+ @rtype: L{command_t} + @return: A command object for the dependence DAG + """ + if verbose(12): + msgb("DAG ADDING", str(d)) + if isinstance(d,types.DictType): + q = self._convert_to_dagfood(d) + c = self._add_dagfood(env,q) + elif isinstance(d,plan_t): + c = self._add_dagfood(env,d) + else: + die("Unhandled type: " + str(type(d))) + if verbose(12): + msgb("DAG ADDING", 'DONE') + + return c + + + def _canonize_one_fn(self,fn): + nfn = strip_quotes(fn) + r = os.path.realpath(nfn) + if verbose(12): + msgb("REALPATH", "%s -> %s" %(nfn, r), pad=' ') + return r + + def _canonize_fn(self,x): + x = self._make_list(x) + n = [] + for fn in x: + r = self._canonize_one_fn(fn) + n.append( r ) + return n + + def _canonize_if_exists_fn(self,x): + x = self._make_list(x) + n = [] + for fn in x: + if os.path.exists(fn): + r = self._canonize_one_fn(fn) + n.append( r ) + else: + n.append(fn) + return n + + def _add_dagfood(self,env,d): + # make sure all the command line substition has been done + if d.input: + d.input = env.expand_string(d.input) + if d.output: + d.output = env.expand_string(d.output) + + c = self._make_command_object(d) + + if verbose(12): + msgb("CANONIZE INPUTS", pad=' ') + c.inputs = self._canonize_fn(c.inputs) + if verbose(12): + msgb("CANONIZE TARGETS", pad=' ') + c.targets = self._canonize_fn(c.targets) + + for s in c.inputs: + if verbose(10): + msgb("ADD-INPUT", s, pad=' ') + self._add_one_input(s,c) + + for t in c.targets: + if verbose(10): + msgb("ADD-OUTPUT", t, pad=' ') + self._add_one_output(t,c) + + header_paths = env['CPPPATH'] + for s in c.inputs: + self._scan_headers(s, header_paths, env['gen_dir']) + return c + + def _convert_to_dagfood(self,d): + """Convert a dictionary to a plan_t""" + q = plan_t(d['command']) + try: + q.args = d['args'] + except: + pass + try: + q.input = d['input'] + except: + pass + try: + q.output = d['output'] + except: + pass + try: + q.env = d['env'] + except: + pass + return q + + + + + diff --git a/mbuild/dfs.py 
b/mbuild/dfs.py new file mode 100755 index 0000000..5c74f84 --- /dev/null +++ b/mbuild/dfs.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python +# FILE: dfs.py +# AUTHOR: Mark Charney +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +"""This file provides a node_t type and a dfs() routine that prints out +cycles found in a graph represented as a list of node_t objects. +""" + +_dfs_verbose = False + +class node_t(object): + def __init__(self,name='no-name-for-node'): + self.name = name + self.afters = [] + self.befores = [] + self.zero() + + # The colors are: + # 0 = white (unvisited), + # 1=grey (discovered, visiting), + # 2=black (finalized) + self.color = 0 + + self.discover = 0 + self.finalize = 0 + self.predecessor = None + def zero(self): + self.color = 0 + def add_successor(self, s): + self.afters.append(s) + s.befores.append(self) + def add_ancestor(self, s): + self.befores.append(s) + s.afters.append(self) + def __str__(self): + s = [] + s.append("TARGET: %s\n\t" % self.name) + s.append("discovered %d finalized %d\n\t" % (self.discover, self.finalize)) + s.extend(map(lambda(x): "\t\n%s" % x.name, self.afters)) + return ''.join(s) + + + +def _print_cycle(last_visit, grey_loop_closer): + pad = '' + p = last_visit + while 1: + print pad, p.name + if p == grey_loop_closer: + break + p = p.predecessor + pad += ' ' + +def _visit(n): + global _dfs_time + n.color = 1 + n.discover = _dfs_time + if 
_dfs_verbose: + print "visiting %s" % str(n) + _dfs_time += 1 + retval = False + for a in n.afters: + if a.color == 0: + a.predecessor = n + retval |= _visit(a) + elif a.color == 1: + # a back-edge + print "cycle" + _print_cycle(n,a) + retval = True + n.color = 2 + n.finalize = _dfs_time + _dfs_time += 1 + return retval + +def dfs(nodes): + """Depth first search a list of node_t objects. Print out cycles. + @rtype: bool + @return: True if cycles were detected. + """ + global _dfs_time + _dfs_time = 0 + for t in nodes: + t.zero() + cycle = False + for n in nodes: + if n.color == 0: + cycle |= _visit(n) + return cycle + +####################################################### + +# stuff for a strongly connected components algorithm -- currently +# unused. + +def _node_cmp(aa,bb): + return aa.finalize.__cmp__(bb.finalize) + +def _visit_transpose(n): + global _dfs_time + n.color = 1 + if _dfs_verbose: + print "visiting %s" % str(n) + for a in n.befores: + if a.color == 0: + _visit_transpose(a) + n.color = 2 + + +def dfs_transpose(nodes): + global _dfs_time + _dfs_time = 0 + for t in nodes: + t.zero() + nodes.sort(cmp=_node_cmp) + for n in nodes: + if n.color == 0: + _visit_transpose(n) + if _dfs_verbose: + print "====" + +#################################################### + +def _test_dfs(): + node1 = node_t('1') + node2 = node_t('2') + node3 = node_t('3') + node4 = node_t('4') + node1.add_successor(node2) + node1.add_successor(node3) + node3.add_successor(node4) + node4.add_successor(node1) + + nodes = [ node1, node2, node3, node4 ] + cycle = dfs(nodes) + if cycle: + print "CYCLE DETECTED" + #print "VISIT TRANSPOSE" + #dfs_transpose(nodes) + + # print "NODES\n", "\n".join(map(str,nodes)) + +if __name__ == '__main__': + _test_dfs() diff --git a/mbuild/doxygen.py b/mbuild/doxygen.py new file mode 100755 index 0000000..87511c9 --- /dev/null +++ b/mbuild/doxygen.py @@ -0,0 +1,357 @@ +#!/usr/bin/env python +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel 
Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + + +############################################################################ +## START OF IMPORTS SETUP +############################################################################ + +import sys +import os +import re +import copy +import glob +import types + +try: + import mbuild +except: + s = "\nXED ERROR: mfile.py could not find mbuild." + \ + " Should be a sibling of the xed2 directory.\n\n" + sys.stderr.write(s) + sys.exit(1) + + +########################################################################### +## DOXYGEN SUPPORT +########################################################################### + +def _doxygen_version_okay(s, want_major, want_minor, want_fix): + values = s.split('.') + + maj =int(values[0]) + minor = int(values[1]) + fix = 0 + if len(values) > 2: + # remove everything after the dash for things like: 'Doxygen + # 1.5.1-p1' + values[2] = re.sub(r'-.*$','',values[2]) + try: + fix = int(values[2]) + except ValueError: + pass + if (maj > 1) or \ + (maj == want_major and minor > want_minor) or \ + (maj == want_major and minor == want_minor and fix >= want_fix): + return True + return False + +def _find_doxygen(env): + """Find the right version of doxygen. 
Return a tuple of the + command name and a boolean indicating whether or not the version + checked out.""" + + if env['doxygen_cmd'] == '': + doxygen_cmd_intel = "/usr/intel/bin/doxygen" + doxygen_cmd_cygwin = "C:/cygwin/bin/doxygen" + doxygen_cmd_mac = \ + "/Applications/Doxygen.app/Contents/Resources/doxygen" + doxygen_cmd = "doxygen" + + if env['build_os'] == 'win': + if os.path.exists(doxygen_cmd_cygwin): + doxygen_cmd = doxygen_cmd_cygwin + else: + mbuild.msgb('DOXYGEN',"Could not find cygwin's doxygen," + + "trying doxygen from PATH") + elif env['build_os'] == 'lin': + if mbuild.verbose(1): + mbuild.msgb("CHECKING FOR", doxygen_cmd_intel) + if os.path.exists(doxygen_cmd_intel): + doxygen_cmd = doxygen_cmd_intel + elif env['build_os'] == 'mac': + if mbuild.verbose(1): + mbuild.msgb("CHECKING FOR", doxygen_cmd_mac) + if os.path.exists(doxygen_cmd_mac): + doxygen_cmd = doxygen_cmd_mac + else: + doxygen_cmd = env['doxygen_cmd'] + + doxygen_cmd = env.escape_string(doxygen_cmd) + doxygen_okay = False + if mbuild.verbose(1): + mbuild.msgb('Checking doxygen version','...') + if mbuild.check_python_version(2,4): + try: + (retval, output, error_output) = \ + mbuild.run_command(doxygen_cmd + " --version") + if retval==0: + if len(output) > 0: + first_line = output[0].strip() + if mbuild.verbose(1): + mbuild.msgb("Doxygen version", first_line) + doxygen_okay = _doxygen_version_okay(first_line, 1,4,6) + else: + for o in output: + mbuild.msgb("Doxygen-version-check STDOUT", o) + if error_output: + for line in error_output: + mbuild.msgb("STDERR ",line.rstrip()) + except: + mbuild.die("Doxygen required by the command line options " + + "but no doxygen found") + + return (doxygen_cmd, doxygen_okay) + + +def _replace_match(istring, mtch, newstring, group_name): + """This is a lame way of avoiding regular expression backslashing + issues""" + x1= mtch.start(group_name) + x2= mtch.end(group_name) + ostring = istring[0:x1] + newstring + istring[x2:] + return ostring + + +def 
_customize_doxygen_file(env, subs): + + """Change the $(*) strings to the proper value in the config file. + Returns True on success""" + + # doxygen wants quotes around paths with spaces + for k,s in subs.iteritems(): + if re.search(' ',s): + if not re.search('^".*"$',s): + mbuild.die("Doxygen requires quotes around strings with spaces: [%s]->[%s]" % + ( k,s)) + return False + + # input and output files + try: + lines = file(env['doxygen_config']).readlines() + except: + mbuild.msgb("Could not open input file: " + env['doxygen_config']) + return False + + env['doxygen_config_customized'] = \ + env.build_dir_join(os.path.basename(env['doxygen_config']) + '.customized') + try: + ofile = open(env['doxygen_config_customized'],'w') + except: + mbuild.msgb("Could not open output file: " + env['doxygen_config_customized']) + return False + + # compile the patterns + rsubs = {} + for k,v in subs.iteritems(): + rsubs[k]=re.compile(r'(?P[$][(]' + k + '[)])') + + olines = [] + for line in lines: + oline = line + for k,p in rsubs.iteritems(): + #print 'searching for', k, 'to replace it with', subs[k] + m = p.search(oline) + while m: + #print 'replacing', k, 'with', subs[k] + oline = _replace_match(oline, m, subs[k], 'tag') + m = p.search(oline) + olines.append(oline) + + + try: + for line in olines: + ofile.write(line) + except: + ofile.close() + mbuild.msgb("Could not write output file: " + env['doxygen_config_customized']) + return False + + ofile.close() + return True + +def _build_doxygen_main(args, env): + """Customize the doxygen input file. 
Run the doxygen command, copy + in any images, and put the output in the right place.""" + + if type(args) is types.ListType: + if len(args) < 2: + mbuild.die("Need subs dictionary and dummy file arg for the doxygen command " + + "to indicate its processing") + else: + mbuild.die("Need a list for _build_doxygen_main with the subs " + + "dictionary and the dummy file name") + + (subs,dummy_file) = args + + (doxygen_cmd, doxygen_okay) = _find_doxygen(env) + if not doxygen_okay: + msg = 'No good doxygen available on this system; ' + \ + 'Your command line arguments\n\trequire it to be present. ' + \ + 'Consider dropping the "doc" and "doc-build" options\n\t or ' + \ + 'specify a path to doxygen with the --doxygen knob.\n\n\n' + return (1, [msg]) # failure + else: + env['DOXYGEN'] = doxygen_cmd + + try: + okay = _customize_doxygen_file(env, subs) + except: + mbuild.die("CUSTOMIZE DOXYGEN INPUT FILE FAILED") + if not okay: + return (1, ['Doxygen customization failed']) + + cmd = env['DOXYGEN'] + ' ' + \ + env.escape_string(env['doxygen_config_customized']) + if mbuild.verbose(1): + mbuild.msgb("RUN DOXYGEN", cmd) + (retval, output, error_output) = mbuild.run_command(cmd) + + for line in output: + mbuild.msgb("DOX",line.rstrip()) + if error_output: + for line in error_output: + mbuild.msgb("DOX-ERROR",line.rstrip()) + if retval != 0: + mbuild.msgb("DOXYGEN FAILED") + mbuild.die("Doxygen run failed. 
Retval=", str(retval)) + mbuild.touch(dummy_file) + mbuild.msgb("DOXYGEN","succeeded") + return (0, []) # success + + +########################################################################### +# Doxygen build +########################################################################### +def _empty_dir(d): + """return True if the directory d does not exist or if it contains no + files/subdirectories.""" + if not os.path.exists(d): + return True + for (root, subdirs, subfiles) in os.walk(d): + if len(subfiles) or len(subdirs): + return False + return True + +def _make_doxygen_reference_manual(env, doxygen_inputs, subs, work_queue, + hash_file_name='dox'): + """Install the doxygen reference manual the doyxgen_output_dir + directory. doxygen_inputs is a list of files """ + + dox_dag = mbuild.dag_t(hash_file_name,env=env) + + # so that the scanner can find them + dirs = {} + for f in doxygen_inputs: + dirs[os.path.dirname(f)]=True + for d in dirs.iterkeys(): + env.add_include_dir(d) + + # make sure the config and top file are in the inptus list + doxygen_inputs.append(env['doxygen_config']) + doxygen_inputs.append(env['doxygen_top_src']) + + dummy = env.build_dir_join('dummy-doxygen-' + hash_file_name) + + # Run it via the builder to make it dependence driven + run_always = False + if _empty_dir(env['doxygen_install']): + run_always = True + + if run_always: + _build_doxygen_main([subs,dummy], env) + else: + c1 = mbuild.plan_t(command=_build_doxygen_main, + args= [subs,dummy], + env= env, + input= doxygen_inputs, + output= dummy) + dox1 = dox_dag.add(env,c1) + + okay = work_queue.build(dag=dox_dag) + phase = "DOXYGEN" + if not okay: + mbuild.die("[%s] failed. dying..." 
% phase) + if mbuild.verbose(1): + mbuild.msgb(phase, "build succeeded") + + +############################################################ + +def doxygen_env(env): + """Add the doxygen variables to the environment""" + doxygen_defaults = dict( doxygen_config='', + doxygen_top_src='', + doxygen_install='', + doxygen_cmd='' ) + env.update_dict(doxygen_defaults) + +def doxygen_args(env): + """Add the knobs to the command line knobs parser""" + + env.parser.add_option("--doxygen-install", + dest="doxygen_install", + action="store", + default='', + help="Doxygen installation directory") + + env.parser.add_option("--doxygen-config", + dest="doxygen_config", + action="store", + default='', + help="Doxygen config file") + + env.parser.add_option("--doxygen-top-src", + dest="doxygen_top_src", + action="store", + default='', + help="Doxygen top source file") + + env.parser.add_option("--doxygen-cmd", + dest="doxygen_cmd", + action="store", + default='', + help="Doxygen command name") + + +def doxygen_run(env, inputs, subs, work_queue, hash_file_name='dox'): + """Run doxygen assuming certain values are in the environment env. 
+ + @type env: env_t + @param env: the environment + + @type inputs: list + @param inputs: list of input files to scan for dependences + + @type subs: dictionary + @param subs: replacements in the config file + + @type work_queue: work_queue_t + @param work_queue: a work queue for the build + + @type hash_file_name: string + @param hash_file_name: used for the dummy file and mbuild hash suffix + """ + _make_doxygen_reference_manual(env, inputs, subs, work_queue, hash_file_name) + + + + + diff --git a/mbuild/env.py b/mbuild/env.py new file mode 100755 index 0000000..85baa37 --- /dev/null +++ b/mbuild/env.py @@ -0,0 +1,2088 @@ +#!/usr/bin/env python +# -*- python -*- +# Mark Charney +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +"""Environment support""" +import os +import sys +import re +import platform +import types +import optparse +import time +import copy + +from base import * +import util +import build_env +import plan +import msvs +import mbuild + +def _remove_libname(args,env): + #lib = env.expand('%(LIBNAME)s') + lib = args[0] + msgb("REMOVING", lib) + util.remove_file(lib) + return (0,['REMOVED %s\n' % ( lib )]) + +# 2014-04-02: Intel has 2 compilers for mac: icc and icl. Intel now +# calls their mac llvm-based comiler "icl". This confuses thing with +# the name of the windows intel compiler which is also called "icl". 
+# In mbuild, we call the Intel llvm-based compiler "iclang" to +# disambiguate the conflict. + +class env_t(object): + """The is the environment for compilation. The environment + includes a dictionary for holding everything custom about this + environment. The default environment includes: + + - command line options. These are also in the environment dictionary. + - build_dir defaultE{:} obj + - src_dir defaultE{:} . or path to the mfile + - gen_dir defaultE{:} None (default path for generated files, if set) + - shared defaultE{:} False (default: no shared libraries) + - static defaultE{:} False (default: not to statically link) + - opt defaultE{:} 'noopt' (could be 'noopt, 0,1,2,3) + - debug defaultE{:} False + - separate_pdb_files defaultE{:} False + - targets defaultE{:} [] targets to build + - verbose defaultE{:} 1 + - compiler defaultE{:} 'gnu', 'ms', 'clang', 'icc', 'icl', 'iclang' + - extra_defines defaultE{:} '' + - extra_flags defaultE{:} '' (for both CXXFLAGS & CCFLAGS) + - extra_cxxflags defaultE{:} '' + - extra_ccflags defaultE{:} '' + - extra_linkflags defaultE{:} '' + - extra_libs defaultE{:} '' + - use_yasm defaultE{:} False + + - CPPPATH defaultE{:} [] The list of include paths + - SYSTEMINCLUDE defaultE{:} [] The list of system include paths (Not + supported by MSVS). + - DEFINES defaultE{:} {} The dictionary of defines + + + - short names for the primary compiler toolsE{:} + - CXX_COMPILER cl or g++ + - CC_COMPILER cl or gcc + - ASSEMBLER ml/ml64 or gcc/gas (gcc is the default for gnu) + - LINKER link or g++/gcc (g++ is the default for gnu) + - ARCHIVER ar + - RANLIB_CMD ranlib + + - toolchain path to the compiler tools (default is ''). If toolchain is + set, it should end with a trailing slash. + - vc_dir path to the compiler VC directory for MSVS (default is '')n + - icc_version 7, 8, 9, 10, ... + - gcc_version 2.96, 3.x.y, 4.x.y, ... + - msvs_version 6 (VC98), 7 (.NET 2003), 8 (Pro 2005), ... 
+ + - primary compilation toolsE{:} + - CC cl or gcc (with toolchain path) + - CXX cl or g++ (with toolchain path) + - AS ml,ml64 or gcc/gas (with toolchain path) + - LINK link or gcc/g++ (with toolchain path) + - AR lib or ar (with toolchain path) + - RANLIB ranlib (with toolchain path) + - flags for primary toolsE{:} + - CCFLAGS + - CXXFLAGS + - ASFLAGS + - ARFLAGS + - LINKFLAGS + - LIBS (libraries for the end of the link statement) + + - preprocessor flags + - DOPT /D or -D + - ASDOPT /D or -D + - IOPT /I or -I + - OPTPOPT /O or -O + - DEBUGOPT /Zi or -g + + - options to control compilation outputE{:} + - COPT /c or -c + - COUT /Fo or -o + - ASMOUT /Fo or -o + - LIBOUT /outE{:} or -o + - LINKOUT /OUTE{:} or -o + - DLLOPT -shared + + - Override-buildersE{:} set these to a function pointer if you want + to replace the default builder function. + + - ASSEMBLE_BUILDER if not set default is to use assemble_default() + - CXX_COMPILE_BUILDER if not set default is to use cxx_default() + - CC_COMPILE_BUILDER if not set default is to use cc_default() + - LINK_BUILDER if not set default is to use link_default() + - STATIC_LIBRARY_BUILDER if not set default is to use static_lib_default() + - DYNAMIC_LIBRARY_BUILDER if not set default is to use dynamic_lib_default() + + - default extensionsE{:} + - OBJEXT .obj or .o + - LIBEXT .lib or .a + - DLLEXT .dll, .so, or .dylib + - EXEEXT .exe or '' + + - System valuesE{:} + - uname standard python tuple of values from uname. 
+ - system standard valuesE{:} 'Linux', 'Windows', 'Darwin', 'Microsoft', 'FreeBSD' + - hostname + - build_os standard valuesE{:} 'lin', 'win', 'mac', 'bsd' + - host_os standard valuesE{:} 'lin', 'win', 'mac', 'bsd' + - build_cpu standard valuesE{:} 'ia32', 'x86-64', 'ipf' + - host_cpu standard valuesE{:} 'ia32', 'x86-64', 'ipf' + + """ + + obj_pattern = re.compile(r'.obj$') + objext_pattern = re.compile(r'[%][(]OBJEXT[)]s$') + + mbuild_subs_pattern = re.compile('%[(][^)]+[)]') + #FIXME: no backslashes in patterns! + assignment_pattern = re.compile(r'(?P[-A-Za-z0-9_]+)[=](?P.+)') + supplement_pattern = re.compile(r'(?P[-A-Za-z0-9_]+)[+][=](?P.+)') + + def version(self): + """Emit the version string. + @rtype: string + @return: The version string + """ + # FIXME: could put an Id in each sub-module and look at the + # doc strings for each one. + msgb("VERSION", "$Id: mbuild_env.py 44 2007-03-16 15:54:44Z mjcharne $") + def __setitem__(self,k,value): + """Write a value to the environment dictionary""" + if isinstance(value,types.StringType): + self.env[k] = util.posix_slashes(value) + else: + self.env[k] = value + def __contains__(self,k): + if k in self.env: + return True + return False + + def __getitem__(self,k): + """Read the environment dictionary. Not doing any + substitutions.""" + #return self.env[k] + try: + return self.env[k] + except: + die("env key not found: %s" % (k)) + + def expand(self, command_string, newenv=None): + """Alias for expand_string()""" + return self.expand_string(command_string, newenv) + + def expand_string(self, command_string, newenv=None): + """Read the environment dictionary, doing recursive + substitutions from the environment. If no environment is + supplied, then the default environment is used. + + @type command_string: string or list of strings + @param command_string: A string with %(...)s variables in it + @type newenv: L{env_t} + @param newenv: An environment within which to do the expansion. 
If + null, the default environment is used. + @rtype: string + """ + if newenv == None: + newenv = self.env + if isinstance(command_string, types.StringType): + return self._iterative_substitute(command_string, newenv) + if isinstance(command_string, types.ListType): + return map(lambda(x): self._iterative_substitute(x, newenv), command_string) + die("expand_string only handles substitution in strings or lists of strings") + + def expand_key(self,k, newenv=None): + """Read the the value of k from the environment dictionary, + doing recursive substitutions from the environment. If no + environment is supplied, then the default environment is used. + + @type k: string or list of strings + @param k: A string (or strings) containing a single key name(s) + @type newenv: L{env_t} + @param newenv: An environment within which to do the expansion. If + null, the default environment is used. + @rtype: string + """ + if newenv == None: + newenv = self.env + if k not in newenv: + die("Could not find %s in the environment" % k) + + + if isinstance(newenv[k],types.ListType): + # We must process each string in the list and do + # substitutions on them. For example, CPPPATH + return map(lambda(x): self._iterative_substitute(x,newenv), newenv[k]) + if isinstance(newenv[k], types.StringType): + return self._iterative_substitute("%(" + k + ")s", newenv) + # non strings (scalars) + return newenv[k] + + def _mysub(self,input, keyname, newval): + """Replace %(keyname)s in input with newval""" + # only handling %(...)s replacement. Nothing fancy. + s = '%('+keyname+')s' + # simple string replacement, not regexp replacement + output = input.replace(s,newval) + return output + + + def _iterative_substitute(self,s,dct1,debug=False): + """Replace all the %(...)s with values in s from the + dictionary dct1. Note, the dictionary can contain tuples of + the form (key, dict). In this case, this code uses the lookup + result of dct1[key] to query yet the dictionary dict. 
That + lookup can result in a string or another such tuple.""" + #error_msg("iterative_substitute", str(s)) + subs_pattern = re.compile('%[(](?P[^)]+)[)]s') + t = s + m = subs_pattern.search(t) + while m: + name = m.group('name') + if name not in dct1: + die("Bad substitution for " + name) + #print "SUBSTITUTING %s" % name + v = dct1[name] + # repeatedly expand any tuples that show up. + while not isinstance(v,types.StringType): + if isinstance(v,types.TupleType): + (key, dct) = v + + # look up key in the main dictionary to create a + # subkey for use in the 2nd level dictionary + + try: + subkey = dct1[key] + except: + die("nested dictionary lookup error during iterative string " + + " expansion. key=%s" % (str(key))) + + try: + v = dct[ subkey ] + except: + try: + v = dct['otherwise'] + except: + die("nested dictionary lookup error during iterative string " + + " expansion. key=%s subkey=%s" % (str(key),str(subkey))) + elif isinstance(v,types.FunctionType): + try: + v = v(dct1) + except: + die("Bad function invokation during iterative string expansion") + else: + die("Bad environment value: " + str(v) + + " when searching: " + s) + t = self._mysub(t,name,v) + m = subs_pattern.search(t) + if debug: + print t + return t + + def _dosub_old(self,s,d): + """Repeatedly substitute values from the dictionary d into the + string s while '%(...)' substrings remain in the thing we want + to return. If the input s is a list, then we recursively + expand each element of that list""" + + if isinstance(s,types.ListType): + return map(lambda(x): self.dosub(x,d), s) + + # The common case: Just expanding a simple string. 
+ t = s + while env_t.mbuild_subs_pattern.search(t): + t = t % d + return t + + def __str__(self): + """Print out the environment""" + s = [] + s.append("BUILD_CPU:") + s.append(self.env['build_cpu']) + s.append("HOST_CPU:") + s.append(self.env['host_cpu']) + s.append("\nBUILD_OS: ") + s.append(self.env['build_os']) + s.append("\nHOST_OS: ") + s.append(self.env['host_os']) + s.append("\nUNAME: ") + s.append(str(self.env['uname'])) + s.append("\nHOSTNAME: ") + s.append(self.env['hostname']) + s.append("\nSYSTEM: ") + s.append(self.env['system']) + s.append("\nDICTIONARY:\n") + for k,v in self.env.iteritems(): + s.append("\t") + s.append(k) + s.append("->") + s.append(str(v)) + s.append("\n") + return ''.join(s) + + def verbose_startup(self): + if self._emitted_startup_msg: + return + self._emitted_startup_msg = True + if verbose(1): + msgb("INVOKED", " ".join(sys.argv)) + msgb("START TIME", self.env['start_time_str']) + msgb("CURRENT DIRECTORY", os.getcwd()) + + msgb('UNAME', str(self.env['uname']).replace(':','_')) + msgb('SYSTEM', self.env['system']) + msgb('HOSTNAME', self.env['hostname']) + msgb("BUILD_OS", self.env['build_os']) + msgb("BUILD_CPU", self.env['build_cpu']) + msgb("HOST_OS", self.env['host_os']) + msgb("HOST_CPU", self.env['host_cpu']) + + def _check_registry_environment(self,env_var): + s = 'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment' + try: + import _winreg + key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, s) + (val, typ) = _winreg.QueryValueEx(key, env_var) + return val + except: + mbuild.die(("Could not read windows registry for variable %s.\n" % \ + (env_var)) + + "Use win32 python and install pywin32") + + def _check_processor_identifier_windows(self): + + return self._check_registry_environment('PROCESSOR_IDENTIFIER') + + + def _check_number_of_processors_windows(self): + return self._check_registry_environment('NUMBER_OF_PROCESSORS') + + + def __init__(self, init_verbose=1, default_knobs=True): + """Build up the 
environment for compilation. + """ + set_verbosity(int(init_verbose)) + self.env = {} + self.parsed_args = False + self.added_common_knobs=False + self.added_default_knobs=False + self.env['python'] = sys.executable + self.env['CPPPATH'] = [] + self.env['SYSTEMINCLUDE'] = [] + self.env['DEFINES'] = {} + + self.env['LINKPATH'] = [] + self.env['LINKDIRS'] = '' + self.env['LINKFLAGS'] = ' %(LINKDIRS)s ' + + self.env['targets'] = [] + + # defaults for the build dir and src dir + self.env['build_dir'] = 'obj' + self.env['src_dir'] = '' # we set this accordingly + self.env['gen_dir'] = None # location of generated files that do not exist + self.env['shared'] = False + self.env['static'] = False + self.env['debug'] = False + self.env['separate_pdb_files'] = False + self.env['opt'] = 'noopt' + + self.env['LIBS'] = '' # default link libraries + self.env['CXX_COMPILER'] = '' + self.env['CC_COMPILER'] = '' + self.env['ASSEMBLER'] = '' + self.env['LINKER'] = '' + + + # windows rc tool for dll resource files. + self.env['RC'] = '' + self.env['RC_CMD'] = '' + self.env['RCFLAGS'] = '' + + # use_compiler_to_link = True if using the compiler to link. 
+ # use_compiler_to_link = False if using the linker to link + self.env['use_compiler_to_link'] = False + self.env['ARCHIVER'] = '' + self.env['RANLIB_CMD'] = '' + + self.env['CXX'] = '' + self.env['CC'] = '' + self.env['LINK'] = '' + self.env['AR'] = '' + self.env['AS'] = '' + self.env['RANLIB'] = '' + + self.env['uname'] = platform.uname() + self.env['hostname'] = platform.node() + self.env['system'] = platform.system() # sort of like build_os + # distro is the empty string on mac and windows + if util.check_python_version(2,6): + (distro, distro_ver, distro_id) = platform.linux_distribution() + else: + distro = '' + distro_ver = '' + self.env['distro'] = distro.strip() + self.env['distro_version'] = distro_ver + + + if 'HOME' in os.environ: + self.env['home'] = os.environ['HOME'] + else: + self.env['home'] = 'unknown' + + # the colons in the time string are replaced by underscores. + # The colons confused xemacs compilation mode error + # parsing. (emacs was fine) + + self.env['start_time_str'] = re.sub(":","_",util.get_time_str()) + self.start_time = util.get_time() + + #Old versions of mbuild used target_cpu erroneously instead of + #host_cpu. We do a little magic later to try to make those old + #uses continue to work. 
+ self.env['target_cpu']=None + + if self.env['system'] in [ 'Linux', 'FreeBSD']: + uname = platform.uname() + self.env['build_os'] = self._normalize_os_name(uname[0]) + + self.env['build_cpu'] = \ + self._normalize_cpu_name(uname[4]) + + elif self.env['system'] in [ 'Darwin']: + uname = platform.uname() + self.env['build_os'] = self._normalize_os_name(uname[0]) + x = uname[4] + if self._check_mac_64b(): + x = 'x86_64' + self.env['build_cpu'] = \ + self._normalize_cpu_name(x) + elif self.on_windows(): + self.env['build_os'] = self._normalize_os_name(os.environ['OS']) + if 'PROCESSOR_IDENTIFIER' in os.environ: + p = os.environ['PROCESSOR_IDENTIFIER'] + else: + p = self._check_processor_identifier_windows() + self.env['build_cpu'] = \ + self._normalize_cpu_name(p) + + else: + die("Unknown platform") + + # where the compiled thing runs, not where it is built + # but that is the starting default. + self.env['host_cpu'] = self.env['build_cpu'] + self.env['host_os'] = self.env['build_os'] + + self._add_compilation_support() + + + self._emitted_startup_msg = False + + mbuild_env_defaults = dict( + args = [], + mbuild_version=False, + jobs='4', + build_dir='obj', + src_dir='', + gen_dir=None, + verbose= -1, + arg_host_cpu=None, + arg_host_os=None, + compiler=self.default_compiler(), + debug=False, + shared=False, + static=False, + opt='noopt', + silent=False, + extra_defines=[], + extra_flags=[], + extra_cxxflags=[], + extra_ccflags=[], + extra_linkflags=[], + extra_libs=[], + toolchain='', + ignorable_files=[], # deprecated, unused 2011-10-20 + required_files=[], + vc_dir='', + msvs_version='', + setup_msvc=False, + icc_version='', + gcc_version='', + cc='', + cxx='', + linker='', + ar='', + + use_yasm=False, + cygwin_limit_jobs=True + ) + + # as is a keyword so must set it separately + mbuild_env_defaults['as']='' + + # store the default if we ever need them + self.env_defaults = mbuild_env_defaults + # put them in the initial environment + 
self.update_dict(mbuild_env_defaults) + + self.parser = optparse.OptionParser() + # set the defaults in the command line option parser + self.parser.set_defaults(**mbuild_env_defaults) + + if default_knobs: + self.add_common_knobs() + self.add_default_knobs() + + + def add_common_knobs(self): + if self.added_common_knobs: + return + self.added_common_knobs=True + self.parser.add_option( + "-j", "--jobs", + dest="jobs", + action="store", + help="Number of concurrent worker threads to use.") + + def add_default_knobs(self): + if self.added_default_knobs: + return + self.added_default_knobs=True + self.parser.add_option( + "--mbuild-version", + dest="mbuild_version", + action="store_true", + help="Emit the version information") + self.parser.add_option( + "--build-dir", + dest="build_dir", + action="store", + help="Build directory, default is 'obj'") + self.parser.add_option( + "--src-dir", + action="store", + dest="src_dir", + help="The directory where the sources are located.") + self.parser.add_option( + "--gen-dir", + action="store", + dest="gen_dir", + help="The directory where generated sources are assumed" + + " to be located.") + self.parser.add_option( + "-v", + "--verbose", + action="store", + dest="verbose", + help="Verbosity level. Defaults to value passed to env_t()") + self.parser.add_option( + "--compiler", + dest="compiler", + action="store", + help="Compiler (ms,gnu,clang,icc,icl,iclang)." + + " Default is gnu on linux and" + + " ms on windows. 
Default is: %s" % (self.default_compiler())) + self.parser.add_option( + "--debug", + dest="debug", + action="store_true", + help="Debug build") + self.parser.add_option( + "--shared", + dest="shared", + action="store_true", + help="Shared DLL build") + self.parser.add_option( + "--static", + dest="static", + action="store_true", + help="Statically link executables") + self.parser.add_option( + "--opt", + dest="opt", + action="store", + help="Optimization level noopt, 0, 1, 2, 3") + self.parser.add_option( + "-s", + "--silent", + dest="silent", + action="store_true", + help="Silence all but the most important messages") + self.parser.add_option( + "--extra-defines", + dest="extra_defines", + action="append", + help="Extra preprocessor defines") + self.parser.add_option( + "--extra-flags", + dest="extra_flags", + action="append", + help="Extra values for CXXFLAGS and CCFLAGS") + self.parser.add_option( + "--extra-cxxflags", + dest="extra_cxxflags", + action="append", + help="Extra values for CXXFLAGS") + self.parser.add_option( + "--extra-ccflags", + dest="extra_ccflags", + action="append", + help="Extra values for CCFLAGS") + self.parser.add_option( + "--extra-linkflags", + dest="extra_linkflags", + action="append", + help="Extra values for LINKFLAGS") + self.parser.add_option( + "--extra-libs", + dest="extra_libs", + action="append", + help="Extra values for LIBS") + self.parser.add_option( + "--toolchain", + dest="toolchain", + action="store", + help="Compiler toolchain") + self.parser.add_option( + "--vc-dir", + dest="vc_dir", + action="store", + help="MSVS Compiler VC directory. 
For finding libraries " + + " and setting the toolchain") + self.parser.add_option( + '--msvs-version', + '--msvc-version', + '--msvsversion', + '--msvcversion', + dest='msvs_version', + action='store', + help="MSVS version 6=VC98, 7=VS .Net 2003, 8=VS 2005," + + " 9=VS 2008, 10=VS 2010/DEV10, 11=VS2012/DEV11" + + "This sets certain flags and idioms for quirks in some compilers.") + self.parser.add_option( + '--setup-msvc', + '--setup-msvs', + '--msvs-setup', + '--msvc-setup', + dest='setup_msvc', + action='store_true', + help="Use the value of the --msvc-version to initialize" + + " the MSVC configuration.") + self.parser.add_option( + '--icc-version', + '--iccver', + '--icc-ver', + dest='icc_version', + action='store', + help="ICC/ICL version 7, 8, 9, 10, 11") + self.parser.add_option( + '--gcc-version', + '--gccversion', + '--gcc-ver', + dest='gcc_version', + action='store', + help="GCC version, with dots as in 2.96, 3.4.3, 4.2.0, etc. ") + + self.parser.add_option( + "--cc", + dest="cc", + action="store", + help="full path to C compiler") + self.parser.add_option( + "--cxx", + dest="cxx", + action="store", + help="full path to C++ compiler") + self.parser.add_option( + "--linker", + dest="linker", + action="store", + help="full path to linker") + self.parser.add_option( + "--ar", + dest="ar", + action="store", + help="full path to archiver (lib/ar)") + self.parser.add_option( + "--as", + dest="as", + action="store", + help="full path to assembler (gas/as/ml/ml64)") + + self.parser.add_option( + "--yasm", + dest="use_yasm", + action="store_true", + help="Use yasm") + self.parser.add_option( + "--no-cygwin-limit", + dest="cygwin_limit_jobs", + action="store_false", + help="Do not limit cygwin to one job at a time. 
" + + " Default is to limit cygwin to one job.") + + self.parser.add_option( + "--host-cpu", + dest="arg_host_cpu", + action="store", + help="Host CPU, typically ia32, intel64 or x86-64") + + self.parser.add_option( + "--host-os", + dest="arg_host_os", + action="store", + help="Host OS (where the binary runs)") + + def _implied_compiler(self,dct): + """If one of the icc_version, gcc_version_ or msvs_version + variables are set, deduce the compiler variable setting.""" + + # windows default is ms so no need to set that. + if dct['icc_version'] != '': + if self.on_windows(): + dct['compiler'] = 'icl' + else: + dct['compiler'] = 'icc' + if dct['gcc_version'] != '': + dct['compiler'] = 'gnu' + + def _check_mac_ncpu(self): + """How many CPUs on a mac""" + + cmd = "/usr/sbin/sysctl hw.ncpu" + (retval,output, error_output) = util.run_command(cmd) + if retval == 0 and len(output)>0: + if re.match('hw.ncpu', output[0]): + n = int(re.sub('hw.ncpu: ','',output[0])) + return n + return 0 + + def number_of_cpus(self): + """Return the number of CPUs or 0 if we don't know anything for sure""" + n = 0 + if self.on_mac(): + n = self._check_mac_ncpu() + elif self.on_windows(): + ns = "NUMBER_OF_PROCESSORS" + if ns in os.environ: + nsv = os.environ[ns] + else: + nsv = self._check_number_of_processors_windows() + n = int(nsv) + elif self.on_freebsd(): + getconf = "/usr/bin/getconf" + if os.path.exists(getconf): + cmd = "%s NPROCESSORS_ONLN" % (getconf) # or NPROCESSORS_CONF + (retval, output, error_output) = util.run_command(cmd) + if retval == 0 and len(output)>0: + n = int(output[0]) + else: + f = '/proc/cpuinfo' + proc_pat= re.compile(r'proces') + if os.path.exists(f): + for line in file(f).readlines(): + if proc_pat.search(line): + n += 1 + return n + + def update_dict(self, dct): + """Update the environment dictionary with another dictionary.""" + self.env.update(dct) + + def copy_settings(self, incoming_env, kwds, replace=False): + + """Update the environment dictionary with 
elements of kwds + from the dictionary in the incoming_env. Lists are extended with the + incoming elements and other types of elements are assigned directly. + + @type incoming_env: env_t + @param incoming_env: the source environment + + @type kwds: list of strings + @param kwds: elements to copy from the source enviornment + + @type replace: bool + @param replace: if True, replace lists in the source environment + """ + for k in kwds: + if k in incoming_env: + t = incoming_env[k] + if isinstance(t,types.ListType) and replace==False: + self.env[k].extend(t) + else: + self.env[k] = t + else: + die("copy_settings() could not read key %s from incoming environment" % k) + + def update(self, targets=None): + """Post process the current environment, setting targets and bindings""" + + # if the dct['args'] exists, supplement the targets list with + # that. This is how non-command-line invocations of mbuild + # pass the "other stuff" + if targets == None: + targets = [] + + if not isinstance(targets,types.ListType): + die("The 'targets' environment option must be a list") + + if 'args' in self.env: + args = self.env['args'] + if isinstance(args,types.ListType): + targets.extend(args) + else: + die("The 'args' environment option must be a list") + + # split up the targets list so we can extract the command line + # variable bindings + just_targets = [] + bindings = [] + for t in targets: + ap = env_t.assignment_pattern.match(t) + if ap: + msgb("BINDING", "%s --> [%s]" % + (ap.group('name'), ap.group('value'))) + bindings.append( (ap.group('name'), + ap.group('value'), 'equals' )) + continue + sp = env_t.supplement_pattern.match(t) + if ap: + msgb("BINDING", "%s --> [%s]" % + (ap.group('name'), ap.group('value'))) + bindings.append( (ap.group('name'), + ap.group('value'), 'plusequals') ) + continue + just_targets.append(t) + + # add command line variable bindings to the environment + for (var,value, how) in bindings: + if how == 'equals': + self.env[var] = value + + # early 
versions of mbuild used target_cpu instead of + # host_cpu. This next override compensates for that, + # compatibility with older clients. + if var == 'target_cpu': + self.env['host_cpu'] = value + + elif how == 'plusequals': + self.add_to_var(var,value) + + # give precidence to the knob for --host-cpu and --host-os + # over the default binding. + if self.env['arg_host_cpu']: + self.env['host_cpu'] = self.env['arg_host_cpu'] + if self.env['arg_host_os']: + self.env['host_os'] = self.env['arg_host_os'] + + # make sure we understand what host cpu we are dealing + # with. If someone puts in an Intel64 it'll come out as + # x86-64. + + self.env['host_cpu'] = self._normalize_cpu_name(self.env['host_cpu']) + self.env['host_os'] = self._normalize_os_name(self.env['host_os']) + self.add_to_var('targets',just_targets) + + # old versions of mbuild used target_cpu. To allow them to + # continue to work, we copy target_cpu to host_cpu if + # target_cpu is non null and differs from the setting for the + # host_cpu and the host_cpu is the same as the build_cpu. If + # the host_cpu and build_cpu differ, someone must have set + # host_cpu so leave it alone in that case. + + if self.env['target_cpu']: + if self.env['target_cpu'] != self.env['host_cpu']: + # build_cpu and host_cpu start out the same, so only + # change host_cpu if it has the original value. 
+ if self.env['build_cpu'] == self.env['host_cpu']: + self.env['host_cpu'] = self.env['target_cpu'] + + + def process_user_settings(self): + """Set the initial derived environment settings""" + + self.update() + + if self.env['mbuild_version']: + self.version() + sys.exit(0) + + self._implied_compiler(self.env) + + if self.env['silent']: + set_verbosity(0) + else: + arg_verbosity = int(self.env['verbose']) + if arg_verbosity >= 0: + set_verbosity( arg_verbosity ) + self.verbose_startup() + + # convert several of the lists to strings + for f in ['extra_cxxflags', 'extra_ccflags', 'extra_linkflags', + 'extra_libs', 'extra_flags']: + self._flatten_list_to_string(f,self.env) + # distribute the "extra" flags. + if self.env['extra_flags']: + self.env['extra_cxxflags'] += ' ' + self.env['extra_flags'] + self.env['extra_ccflags'] += ' ' + self.env['extra_flags'] + + # This starts the compilation environment off CLEAN + self.set_compiler_env() + + # if the user did not use --src-dir, then we check the path to + # the mbuild script. If it there is no path, we assume we are + # in the right directory. If there is a path, we assume that + # is where the sources are, and change the option before anyone + # can see it. + if self.env['src_dir'] == '': + (path_to_src, this_file) = os.path.split(sys.argv[0]) + if path_to_src == '': + path_to_src = '.' + self.env['src_dir'] = util.posix_slashes(path_to_src) + + # This works around a longstanding python-specific bug in + # cygwin with running multiple threads. + if self.on_windows(): + try: + import win32api # we don't use it. We just test for it. + except: + if self.env['cygwin_limit_jobs'] and self.on_cygwin(): + msgb('NOTE', + 'Using just one worker thread to avoid' + \ + ' a cygwin threading problem.') + self.env['jobs'] = "1" + + # if 0 jobs were specified, try to use 2x the number of cpus. 
+ if self.env['jobs'] == '0': + n = self.number_of_cpus() + if n: + self.env['jobs'] = str(2*n) + msgb('NOTE', + 'Setting jobs to %d, 2x the detected number of CPUs (%d)' % + (2*n,n)) + else: + self.env['jobs'] = "1" + msgb('NOTE', + 'Setting jobs to 1 because we could not detect' + + ' the number of CPUs') + + if verbose(1): + # print host_cpu here because it may be overridden for + # cross compilations + msgb("HOST_CPU", self.env['host_cpu']) + + + + + def _flatten_list_to_string(self, field, dct): + """See if options has a field named field. If it does and its + value is a list, flatten the list, joining the substrings with + spaces.""" + if field in dct: + v = dct[field] + if isinstance(v,types.ListType): + vflat = ' '.join(v) + dct[field]= vflat + + def set_defaults(self, dct): + + """Take the dictionary of defaults and apply to the + environment. Any extra bindings and targets should be listed + in the 'args' list option of the dictionary""" + + self.parser.set_defaults(**dct) + self.update_dict(dct) + + + + def parse_args(self, user_default_options=None): + """Call this to re-initialize the environment from the command + line arguments. This calls update() with the results of + command line processing. + @type user_default_options: dict + @param user_default_options: dictionary of default options + """ + + # make parse_args() runnable only once per environment. + # ("append"-mode arguments get messed up if args parsed + # more than once.) 
+ if self.parsed_args: + return + self.parsed_args=True + + if user_default_options: + # pass a dictionary where keyword args are expected using + # "**" SEE: + # http://docs.python.org/tut/node6.html#SECTION006740000000000000000 + self.parser.set_defaults(**user_default_options) + + (options, args) = self.parser.parse_args() + dct = vars(options) + dct['args'].extend(args) + self.update_dict(dct) + + self.process_user_settings() + + + def on_ipf(self): + """@rtype: bool + @return: True iff on IA64""" + if self.env['build_cpu'] == 'ipf': + return True + return False + + def on_ia32(self): + """@rtype: bool + @return: True iff on IA32""" + if self.env['build_cpu'] == 'ia32': + return True + return False + + def on_intel64(self): + """@rtype: bool + @return: True iff on Intel64""" + if self.env['build_cpu'] == 'x86-64': + return True + return False + + def on_mac(self): + """@rtype: bool + @return: True iff on Mac OSX Darwin""" + if self.env['system'] == 'Darwin': + return True + return False + + def mac_ver(self): + if self.on_mac(): + ver = platform.mac_ver()[0] + (maj,min,rev) = ver.split('.') + return (int(maj),int(min),int(rev)) + return None + + def check_mac_ver(self, x,y,z): + """@rtype: bool + @return: True iff on a mac and the version is later than x.y.z""" + if self.on_mac(): + (maj,min,rev) = self.mac_ver() + if x > maj: + return False + if x == maj and y > min: + return False + if x == maj and y == min and z > rev: + return False + return True + return False + + def on_tiger(self): + """@rtype: bool + @return: True iff on Mac running OS X Tiger 10.4.x""" + if self.check_mac_ver(10,4,0): + return True + return False + def on_leopard(self): + """@rtype: bool + @return: True iff on Mac running OS X Leopard 10.5.x""" + if self.check_mac_ver(10,5,0): + return True + return False + + def on_freebsd(self): + """@rtype: bool + @return: True iff on freebsd""" + if self.env['system'] == 'FreeBSD': + return True + return False + + def on_linux(self): + """@rtype: 
bool + @return: True iff on linux""" + if self.env['system'] == 'Linux': + return True + return False + + def on_cygwin(self): + """@rtype: bool + @return: True iff on cygwin""" + if len(self.env['system']) >= 6 and self.env['system'][0:6] == 'CYGWIN': + return True + return False + + def windows_native(self): + """@rtype: bool + @return: True iff on windows native -- not using cygwin""" + if self.env['system'] == 'Windows' or self.env['system'] == 'Microsoft': + return True + return False + + def on_windows(self): + """@rtype: bool + @return: True iff on windows""" + if self.windows_native(): + return True + return self.on_cygwin() + + def supports_avx(self): + """Return True if system supports AVX1. Does not work + on windows""" + if self.on_linux(): + lines = file('/proc/cpuinfo').readlines() + for l in lines: + if 'avx' in l: + return True + elif self.on_mac(): + cmd = "/usr/sbin/sysctl hw.optional.avx1_0" + (retval, output, error_output) = util.run_command(cmd) + if retval == 0 and len(output)>0: + if re.match('hw.optional.avx1_0: 1', output[0]): + return True + + # FIXME: find some way of doing this on windows + return False + + def _check_mac_64b(self): + """Check to see if a mac is 64b""" + + cmd = "/usr/sbin/sysctl hw.optional.x86_64" + (retval,output, error_output) = util.run_command(cmd) + if retval == 0 and len(output)>0: + if re.match('hw.optional.x86_64: 1', output[0]): + return True + return False + + def _normalize_cpu_name(self, name): + """Internal function. 
Standardize various CPU identifiers""" + if name in ['ia32', 'i386', 'i686','x86']: + return 'ia32' + elif name in ['ia32e', 'x86_64', 'amd64', + 'x86-64', 'Intel64','intel64']: + return 'x86-64' + elif name == 'ia64': + return 'ipf' + elif name[0:5] == 'EM64T': + return 'x86-64' + elif name[0:7] == 'Intel64': + return 'x86-64' + elif name == 'intel64': + return 'x86-64' + elif name[0:3] == 'x86': + return 'ia32' + else: + die("Unknown cpu " + name) + + def _normalize_os_name(self,name): + """Internal function. Standardize various O/S identifiers""" + if name in ['android']: + return 'android' + elif name in ['lin', 'Linux']: + return 'lin' + elif name in ['mac', 'Darwin']: + return 'mac' + elif name in ['bsd', 'FreeBSD']: + return 'bsd' + elif name[0:6] == 'CYGWIN': + return 'win' + elif name in ['win', 'Windows_NT']: + return 'win' + else: + die("Unknown os " + name) + + def default_compiler(self): + """Default to ms on windows and gnu everywhere else. + @rtype: string + @returns: "ms" on windows, "clang" on mac, otherwise "gnu" + """ + if self.on_windows(): + return "ms" + if self.on_mac(): + return "clang" + return "gnu" + + def set_compiler_env(self, compiler_family=None): + """Initialize the build environment based on the compiler + environment variable setting. + + Adds in the "extra" flags from the environment. + + @type compiler_family: string + @param compiler_family: an override for the default + compiler family (gnu, ms, clang, icl, icc, iclang) + """ + + + # copy the command line version of the tool overrides to the + # real ones that we use. 
+ + if self.env['cxx'] != '': + self.env['CXX'] = self.env['cxx'] + if self.env['cc'] != '': + self.env['CC'] = self.env['cc'] + if self.env['linker'] != '': + self.env['LINK'] = self.env['linker'] + if self.env['ar'] != '': + self.env['AR'] = self.env['ar'] + if self.env['as'] != '': + self.env['AS'] = self.env['as'] + + if compiler_family == None: + if 'compiler' in self.env: + self.env['compiler'] = self.env['compiler'].lower() + compiler_family = self.env['compiler'] + else: + die("Compiler family not specified in the environment or as an argument") + + if compiler_family == 'gnu': + build_env.set_env_gnu(self) + elif compiler_family == 'clang': + build_env.set_env_clang(self) + elif compiler_family == 'ms': + build_env.set_env_ms(self) + elif compiler_family == 'icc': + build_env.set_env_icc(self) + elif compiler_family == 'iclang': + build_env.set_env_iclang(self) + elif compiler_family == 'icl': + build_env.set_env_icl(self) + else: + die("Compiler family not recognized. Need gnu or ms") + + if self.env['use_yasm']: + if verbose(1): + msgb("USE YASM") + build_env.yasm_support(self) + + self.add_to_var('CXXFLAGS', self.env['extra_cxxflags']) + self.add_to_var('CCFLAGS', self.env['extra_ccflags'] ) + self.add_to_var('LINKFLAGS', self.env['extra_linkflags'] ) + self.add_to_var('LIBS', self.env['extra_libs'] ) + for d in self.env['extra_defines']: + self.add_define(d) + + def resuffix(self, fn, newext): + """Replace the suffix of single fn (or list of files) with + newext. newext should supply its own dot if you want one. + @type fn: string (or list of strings) + @param fn: a filename + @type newext: string + @param newext: a new extension starting with a '.' 
+ @rtype: string + @return: fn with a new suffix specified by newext + """ + if isinstance(fn,types.ListType): + return map(lambda(x): self.resuffix(x,newext), fn) + else: + (root,ext) = os.path.splitext(fn) + return root + newext + + def osenv_add_to_front(self,evar,newstring,osenv=None): + """Add newstring to front of the environment variable osenv if given + if not given add to os.environ """ + environ = os.environ + if osenv: + environ = osenv + + if self.on_windows(): + sep = ';' + else: + sep = ':' + if evar in environ: + # The environment variable already exists + environ[evar]= newstring + sep + environ[evar] + else: + # Support creation of a new environment variable + environ[evar]= newstring + + def path_search(self,exe): + path = os.environ['PATH'] + if self.on_freebsd() or self.on_linux() or self.on_cygwin(): + sep = ':' + else: + sep = ';' + for p in path.split(sep): + t = util.prefix_files(p,exe) + if os.path.exists(t): + return t + return None + + + def make_obj(self,flist): + """Take file or list of files and return a file or list of + files with the OBJEXT extension from the environment. + @type flist: string or list of strings + @param flist: a filename (or list of filenames) + @rtype: string + @return: fn with a suffix specified %(OBJEXT)s + """ + return self.resuffix(flist,"%(OBJEXT)s") + + + def build_dir_join(self,files): + """Make the file (or list of files) with the build + directory name. + + @type files: string or list of strings + @param files: filename(s) + + @rtype: string or list of strings + @return: filenames prepended with the current build_dir + """ + + # FIXME: could do this lazily... and just prepend %(build_dir)s + try: + objdir = self.env['build_dir'] + except: + die("build_dir not defined in build_dir_join") + if objdir == '': + return files + return util.prefix_files(objdir, files) + + def src_dir_join(self,files): + """Prefix file (or list of files) with the src directory name. 
+ @type files: string or list of strings + @param files: filename(s) + + @rtype: string or list of strings + @return: filenames prepended with the current src_dir + """ + # FIXME: could do this lazily... and just prepend %(src_dir)s + try: + srcdir = self.env['src_dir'] + except: + die("src_dir not defined in src_dir_join") + if srcdir == '': + return files + return util.prefix_files(srcdir, files) + + def add_define(self,newdef): + """Add a define or list defines to the CXXFLAGS and CCFLAGS + @type newdef: string or list of strings + @param newdef: string to add to the CXXFLAGS and CCFLAGS + environment variables. + """ + self.add_cc_define(newdef) + self.add_cxx_define(newdef) + self.add_as_define(newdef) + + def _collect_defines(self, dlist): + for d in dlist: + if d not in self.env['DEFINES']: + self.env['DEFINES'][d]=True + + def add_as_define(self,newdef): + """Add a define or list defines to the ASFLAGS + @type newdef: string or list of strings + @param newdef: string to add to the ASFLAGS + environment variable. + """ + if isinstance(newdef,types.ListType): + deflist = newdef + else: + deflist = [ newdef ] + self._collect_defines(deflist) + for d in deflist: + self.add_to_var('ASFLAGS', "%(ASDOPT)s" + d ) + + def add_cc_define(self,newdef): + """Add a define or list defines to the CCFLAGS + @type newdef: string or list of strings + @param newdef: string to add to the CCFLAGS + environment variable. + """ + if isinstance(newdef,types.ListType): + deflist = newdef + else: + deflist = [ newdef ] + self._collect_defines(deflist) + + for d in deflist: + self.add_to_var('CCFLAGS', "%(DOPT)s" + d ) + + def add_cxx_define(self,newdef): + """Add a define or list defines to the CXXFLAGS + @type newdef: string or list of strings + @param newdef: string to add to the CXXFLAGS + environment variable. 
+ """ + if isinstance(newdef,types.ListType): + deflist = newdef + else: + deflist = [ newdef ] + self._collect_defines(deflist) + for d in deflist: + self.add_to_var('CXXFLAGS', "%(DOPT)s" + d ) + + + def add_include_dir(self,include_dir): + """Add a directory or list of directories to the CPPPATH. Just + a short cut for adding things to the list of files in the + env['CPPPATH'] + @type include_dir: string or list of strings + @param include_dir: string to add to the CPPPATH environment variable + """ + if isinstance(include_dir,types.ListType): + lst = include_dir + else: + lst = [ include_dir ] + for d in lst: + p = util.posix_slashes(d) + if p not in self.env['CPPPATH']: + self.env['CPPPATH'].append(p) + + def add_system_include_dir(self,sys_include_dir): + """Add a directory or list of directories to the SYSTEMINCLUDE. Just + a short cut for adding things to the list of files in the + env['SYSTEMINCLUDE'] + @type sys_include_dir: string or list of strings + @param sys_include_dir: string to add to the SYSTEMINCLUDE environment variable + """ + if isinstance(sys_include_dir,types.ListType): + lst = sys_include_dir + else: + lst = [ sys_include_dir ] + for d in lst: + p = util.posix_slashes(d) + if p not in self.env['SYSTEMINCLUDE']: + self.env['SYSTEMINCLUDE'].append(p) + + def add_link_dir(self,link_dir): + """Add a directory or list of directories to the LINKPATH. These + get included in the LINKFLAGS + + @type link_dir: string or list of strings + @param link_dir: string to add to the LINKPATH variable + """ + if isinstance(link_dir,types.ListType): + for d in link_dir: + self.env['LINKPATH'].append(util.posix_slashes(d)) + else: + self.env['LINKPATH'].append(util.posix_slashes(link_dir)) + + + def remove_from_var(self, var, value): + """Remove a substring (or list entry) from env[var]. Opposite + of add_to_var(). 
+ + @type var: string + @param var: name of a dictionary key + @type value: string + @param value: the value to remove + """ + if var in self.env: + if isinstance(self.env[var], types.ListType): + try: + self.env[var].remove(value) + except: + pass + else: + self.env[var] = re.sub(value,'',self.env[var]) + + + def add_to_var(self, var, value): + """Add or append value to the environment variable var. If the + variable is not in the environment, then it is added as + is. Otherwise if the variable is in the environment and is a + list then value is appended. Otherwise, the value is appended + as a string with a leading space. This will *NOT* do variable + substitution when adding to a variable. + + @type var: string + @param var: name of a dictionary key + @type value: string + @param value: the value to add or append + + """ + if var not in self.env: + self.env[var] = value + elif isinstance(self.env[var],types.ListType): + if isinstance(value, types.ListType): + self.env[var].extend(value) + else: + self.env[var].append(value) + else: + self.env[var] += ' ' + value # This would do variable expansion when calling __getitem__ + + # These strings should be % env expanded. + + # COUT should be "-o " on linux. Note the trailing space + # COPT should be "-c" on linux + # OBJNAME and SRCNAME should be fully qualified suffix-wise + # OBJNAMES is used for the link and lib statements + # EXENAME is used for link statements + # LIBNAME is used for lib statements + # SOLIBNAME is used for shared objects "soname" embedded names + # LIBOUT, LINKOUT should be set appropriately. Trailing spaces needed on linux + # DLLOPT is needed for dynamic libraries + + # Example: + # a = '%(lang)s has %(c)03d quote types.' 
% dict(lang='Python', c=2) + + def _add_c_compile(self): + s = "%(CC)s %(CPPINCLUDES)s %(SYSINCLUDES)s %(CCFLAGS)s %(COPT)s %(COUT)s%(OBJNAME)s %(SRCNAME)s" + return s + + def _add_assemble(self): + s = "%(AS)s %(CPPINCLUDES)s %(SYSINCLUDES)s %(ASFLAGS)s %(ASMOUT)s%(OBJNAME)s %(SRCNAME)s" + return s + + def _add_cxx_compile(self): + s = "%(CXX)s %(CPPINCLUDES)s %(SYSINCLUDES)s %(CXXFLAGS)s %(COPT)s %(COUT)s%(OBJNAME)s %(SRCNAME)s" + return s + + def _add_link(self): + s = "%(LINK)s %(LINKFLAGS)s %(LINKOUT)s%(EXENAME)s %(OBJNAMES)s %(LIBS)s" + return s + + def _add_static_lib(self): + s = [ _remove_libname, + "%(AR)s %(ARFLAGS)s %(LIBOUT)s%(LIBNAME)s %(OBJNAMES)s" ] + return s + + def _add_dynamic_lib(self): + s = "%(LINK)s %(LINKFLAGS)s %(DLLOPT)s %(LIBOUT)s%(LIBNAME)s %(OBJNAMES)s %(LIBS)s" + return s + + def _add_cxx_shared_lib(self): + s = "%(CXX)s %(LINKFLAGS)s %(DLLOPT)s %(COUT)s%(LIBNAME)s %(OBJNAMES)s %(LIBS)s" + return s + + def _add_res_file_cmd(self): + s = "%(RC)s %(RCFLAGS)s /fo%(RESNAME)s %(RCNAME)s" + return s + + def _add_default_builders(self): + """Private. Part of initialization for the environment. Sets + the default builders""" + + # Instead use default function if these are not set. + self.env['ASSEMBLE_BUILDER'] = None + self.env['CXX_COMPILE_BUILDER'] = None + self.env['CC_COMPILE_BUILDER'] = None + self.env['LINK_BUILDER'] = None + self.env['STATIC_LIBRARY_BUILDER'] = None + self.env['DYNAMIC_LIBRARY_BUILDER'] = None + self.env['RES_FILE_BUILDER'] = None + + def _add_default_builder_templates(self): + """Private. Part of initialization for the environment. 
Sets + the default templates used by the default builders""" + self.env['CC_COMPILE_COMMAND'] = self._add_c_compile() + self.env['CXX_COMPILE_COMMAND'] = self._add_cxx_compile() + self.env['ASSEMBLE_COMMAND'] = self._add_assemble() + self.env['LINK_COMMAND'] = self._add_link() + self.env['STATIC_LIB_COMMAND'] = self._add_static_lib() + self.env['DYNAMIC_LIB_COMMAND'] = self._add_dynamic_lib() + self.env['CXX_SHARED_LIB_COMMAND'] = self._add_cxx_shared_lib() + self.env['RES_FILE_COMMAND'] = self._add_res_file_cmd() + + def _add_compilation_support(self): + """Private. Part of initialization for the environment. Sets + the default builders and templates.""" + self._add_default_builders() + self._add_default_builder_templates() + + def escape_string(self,s): + if self.on_windows(): + return util.cond_add_quotes(s) + else: + t = s.replace(' ','\ ') + return t + def _escape_list_of_strings(self,sl): + n = [] + for s in sl: + n.append(self.escape_string(s)) + return n + + def _make_cpp_include(self): + s = [] + + iopt = self.env['IOPT'] + + for p in self.env['CPPPATH']: + s.extend([iopt, self.escape_string(p), ' ']) + return ''.join(s) + + def _make_system_include(self): + s = [] + iopt = self.env['ISYSOPT'] + for p in self.env['SYSTEMINCLUDE']: + s.extend([iopt, self.escape_string(p), ' ']) + return ''.join(s) + + def _make_link_dirs(self): + s = [] + lopt = self.env['LOPT'] + for p in self.env['LINKPATH']: + s.extend([lopt, self.escape_string(p), ' ']) + return ''.join(s) + + def _make_cpp_flags(self): + self.env['CPPINCLUDES'] = self._make_cpp_include() + def _make_sys_include_flags(self): + self.env['SYSINCLUDES'] = self._make_system_include() + def _make_link_flags(self): + self.env['LINKDIRS'] = self._make_link_dirs() + + def make_derived_flags(self): + """Put together any derived flags. This is required to be + called by builder functions before they do their expansion. 
+ """ + + self._make_cpp_flags() + self._make_sys_include_flags() + self._make_link_flags() + + + + def assemble(self, source, obj=None): + """Indirection function. Reads builder function from the + environment variable ASSEMBLER_BUILDER. Assemble a source file + to the obj file. If no obj file name is given one will be + created in the build directory. + @type source: string + @param source: filename to assemble + + @type obj: string + @param obj: output filename. + + @rtype: L{plan_t} + @return: an input for the DAG + """ + # FIXME abspath breaks windows compilation under cygwin python + new_source = os.path.abspath(source) + + f= self.env['ASSEMBLE_BUILDER'] + if f: + return f(new_source,obj) + return self._assemble_default(new_source,obj) + + def cxx_compile(self, source, obj=None): + """Indirection function. Reads builder function from the + environment variable CXX_COMPILE_BUILDER. C++-compile a source + file to a file called obj. If no obj file name is given one + will be created in the build directory. + @type source: string + @param source: filename to compile + + @type obj: string + @param obj: output filename. + + @rtype: L{plan_t} + @return: an input for the DAG + """ + # FIXME abspath breaks windows compilation under cygwin python + new_source = os.path.abspath(source) + + f = self.env['CXX_COMPILE_BUILDER'] + if f: + return f(new_source,obj) + return self._cxx_compile_default(new_source,obj) + + def cc_compile(self, source, obj=None): + """Indirection function. Reads builder function from the + environment variable CC_COMPILE_BUILDER. C-compile a source + file to a file named obj. If no obj file name is given one + will be created in the build directory. + @type source: string + @param source: filename to compile + + @type obj: string + @param obj: output filename. 
+ + @rtype: L{plan_t} + @return: an input for the DAG + """ + + # FIXME abspath breaks windows compilation under cygwin python + new_source = os.path.abspath(source) + + f = self.env['CC_COMPILE_BUILDER'] + if f: + return f(new_source,obj) + return self._cc_compile_default(new_source,obj) + + def link(self, objs, exename, relocate=False): + """Indirection function. Reads builder function from the + environment variable LINK_BUILDER. Link an executable from + objs. If relocate is True, then prefix exename with the build + directory name. + @type objs: list of strings + @param objs: filenames to link + + @type exename: string + @param exename: output filename. + + @type relocate: bool + @param relocate: If true, relocate the exename to the build directory. + + @rtype: L{plan_t} + @return: an input for the DAG + + """ + f = self.env['LINK_BUILDER'] + if f: + return f(objs,exename, relocate) + return self._link_default(objs,exename,relocate) + + def static_lib(self, objs, libname, relocate=False): + """Indirection function. Reads builder function from the + environment variable STATIC_LIBRARY_BUILDER. Make a static + library libname from objs. If relocate is True, then prefix + libname with the build directory name + + @type objs: list of strings + @param objs: filenames to link + + @type libname: string + @param libname: output filename. + + @type relocate: bool + @param relocate: If true, relocate the library to the build directory. + + @rtype: L{plan_t} + @return: an input for the DAG + + + """ + f = self.env['STATIC_LIBRARY_BUILDER'] + if f: + return f(objs,libname, relocate) + return self._static_lib_default(objs,libname,relocate) + + def compile_and_static_lib(self, dag, sources, libname): + """Build all the sources by adding them to the dag. Use the + suffixes to figure out how to handle the files. The dag can be + passed to a work queue. See the build function. 
""" + + # Compile + objs = self.compile(dag, sources) + + # Link the lib + dag.add(self, self.static_lib(objs, libname, relocate=True)) + + def dynamic_lib_name(self, base): + return self.shared_lib_name(base) + + def shared_lib_name(self, base): + if self.on_windows(): + s = '{}%(DLLEXT)s'.format(base) + else: + s = 'lib{}%(DLLEXT)s'.format(base) + return s + def static_lib_name(self, base): + if self.on_windows(): + s = '{}%(LIBEXT)s'.format(base) + else: + s = 'lib{}%(LIBEXT)s'.format(base) + return s + + def dynamic_lib(self, objs, libname, relocate=False): + """Indirection function. Reads builder function from the + environment variable DYNAMIC_LIBRARY_BUILDER. Make a dynamic + library libname from objs. If relocate is True, then prefix + libname with the build directory name + + @type objs: list of strings + @param objs: filenames to link + + @type libname: string + @param libname: output filename. + + @type relocate: bool + @param relocate: If true, relocate the library to the build directory. + + @rtype: L{plan_t} + @return: an input for the DAG + + """ + f = self.env['DYNAMIC_LIBRARY_BUILDER'] + if f: + return f(objs,libname, relocate) + return self._dynamic_lib_default(objs,libname,relocate) + + + def rc_file(self, rc_file, res_file=None): + """Indirection function. For making RES files + from RC files on windows. + + @type rc_file: string + @param rc_file: filename for RC file + + @type res_file: string + @param res_file: filename for RES file + + """ + f = self.env['RES_FILE_BUILDER'] + if f: + return f(rc_file, res_file) + return self._res_file_builder_default(rc_file, res_file) + + def _escape_dict(self, d): + file_name_keys = ['SRCNAME','OBJNAME', 'LIBNAME', + 'SOLIBNAME', 'EXENAME', + 'RCNAME', 'RESNAME' ] + for k in file_name_keys: + if k in d: + d[k] = self.escape_string(d[k]) + + def _assemble_default(self, source, obj=None): + """Assemble a source file to the obj file. 
If no obj file name + is given one will be created in the build directory.""" + cmd = self.env['ASSEMBLE_COMMAND'] + d = copy.copy(self) + self.make_derived_flags() + d['SRCNAME'] = source + if obj == None: + (filepath,fullfilename) = os.path.split(source) + (filename,ext) = os.path.splitext(fullfilename) + obj = filename + self.env['OBJEXT'] + obj = self.build_dir_join(obj) + d['OBJNAME'] = obj + self._escape_dict(d) + s = self.expand_string(cmd, d) + return plan.plan_t(command=s, output=obj, input=source) + + def _make_pdb_file(self,obj): + """If obj obj file ends in '.obj$' or '%(OBJEXT)s' replace it + so it looks like: '%(PDBEXT)s'""" + + if env_t.obj_pattern.search(obj): + pdbfile = env_t.obj_pattern.sub('%(PDBEXT)s',obj) + elif env_t.objext_pattern.search(obj): + pdbfile = env_t.objext_pattern.sub('%(PDBEXT)s',obj) + else: + die("Could not make PDB file from OBJ file: %s" % obj) + return pdbfile + + def _cxx_compile_default(self, source, obj=None): + """C++-compile a source file to a file called obj. If no obj file + name is given one will be created in the build directory.""" + cmd = self.env['CXX_COMPILE_COMMAND'] + d = copy.copy(self) + self.make_derived_flags() + d['SRCNAME'] = source + if obj == None: + (filepath,fullfilename) = os.path.split(source) + (filename,ext) = os.path.splitext(fullfilename) + obj = filename + self.env['OBJEXT'] + obj = self.build_dir_join(obj) + if d['separate_pdb_files'] and d['compiler'] == 'ms' and d['debug'] == 1: + pdbfile = self._make_pdb_file(obj) + d['CXXFLAGS'] += ' /Fd%s ' % pdbfile + + d['OBJNAME'] = obj + self._escape_dict(d) + s = self.expand_string(cmd, d) + return plan.plan_t(command=s, output=obj, input=source) + + + def _cc_compile_default(self, source, obj=None): + """C-compile a source file to a file named obj. 
If no obj file + name is given one will be created in the build directory.""" + + cmd = self.env['CC_COMPILE_COMMAND'] + d = copy.copy(self) + self.make_derived_flags() + d['SRCNAME'] = source + if obj == None: + (filepath,fullfilename) = os.path.split(source) + (filename,ext) = os.path.splitext(fullfilename) + obj = filename + self.env['OBJEXT'] + obj = self.build_dir_join(obj) + if d['separate_pdb_files'] and d['compiler'] == 'ms' and d['debug'] == 1: + pdbfile = self._make_pdb_file(obj) + d['CCFLAGS'] += ' /Fd%s ' % pdbfile + + d['OBJNAME'] = obj + self._escape_dict(d) + s = self.expand_string(cmd, d) + return plan.plan_t(command=s, output=obj, input=source) + + def _find_libs(self): + libs = [] + for lib in self.expand_string('%(LIBS)s').split(): + if lib: + # ignore libraries that start with "-" as in -lc -lm. I + # would not know what suffix to put on them anyway + # (LIBEXT,DLLEXT) without trying them all. + if lib[0]=='-': + continue + if os.path.exists(lib): + #msgb("ADDING DEPENDENCE ON LIBRARY", lib) + libs.append(lib) + else: + for dir in self.env['LINKPATH']: + t = mbuild.join(dir,lib) + if os.path.exists(t): + #msgb("ADDING DERIVED DEPENDENCE ON LIBRARY", t) + libs.append(t) + return libs + + + def _link_default(self, objs, exename, relocate=False): + """Link an executable from objs. If relocate is True, + then prefix exename with the build directory name.""" + cmd = self.env['LINK_COMMAND'] + d = copy.copy(self) + self.make_derived_flags() + if relocate: + exename = self.build_dir_join(exename) + d['EXENAME'] = exename + + if not isinstance(objs, types.ListType): + objs = [ objs ] + objs = self._escape_list_of_strings(objs) + obj = " ".join(objs) + d['OBJNAMES'] = obj + self._escape_dict(d) + s = self.expand_string(cmd, d) + return plan.plan_t(command=s, output=exename, input=objs + self._find_libs()) + + + def _static_lib_default(self, objs, libname, relocate=False): + """Make a static library libname from objs. 
If relocate is True, + then prefix libname with the build directory name""" + d = copy.copy(self) + self.make_derived_flags() + if relocate: + libname = self.build_dir_join(libname) + d['LIBNAME'] = libname + if not isinstance(objs,types.ListType): + objs = [ objs ] + objs = self._escape_list_of_strings(objs) + obj = " ".join(objs) + + d['OBJNAMES'] = obj + self._escape_dict(d) + n = [] + scmd = self.env['STATIC_LIB_COMMAND'] + if not isinstance(scmd,types.ListType): + scmd = [ scmd ] + for cmd in scmd: + if isinstance(cmd,types.StringType): + n.append(self.expand_string(cmd, d)) + else: + n.append(cmd) + # we pass args to the python scripts... Must expand now or + # else suffer concurrency bugs at build time. + args = [ self.expand_string('%(LIBNAME)s') ] + return plan.plan_t(command=n, output=libname, + args=args, + input=objs, env=self) + + + def _dynamic_lib_default(self, objs, libname, relocate=False): + """Make a dynamic library libname from objs. If relocate is True, + then prefix libname with the build directory name""" + if self.env['compiler'] in [ 'gnu','icc','clang','iclang']: + cmd = self.env['CXX_SHARED_LIB_COMMAND'] + else: + cmd = self.env['DYNAMIC_LIB_COMMAND'] + d = copy.copy(self) + self.make_derived_flags() + if relocate: + libname = self.build_dir_join(libname) + d['LIBNAME'] = libname + d['SOLIBNAME'] = os.path.basename(libname) + if not isinstance(objs,types.ListType): + objs = [ objs ] + objs = self._escape_list_of_strings(objs) + obj = " ".join(objs) + d['OBJNAMES'] = obj + self._escape_dict(d) + s = self.expand_string(cmd, d) + return plan.plan_t(command=s, output=libname, + input=objs + self._find_libs()) + + + + def _res_file_builder_default(self, rc_file,res_file=None): + """Make a res file from an rc file. 
Windows only.""" + cmd = self.env['RES_FILE_COMMAND'] + d = copy.copy(self) + if not res_file: + res_file = self.build_dir_join(self.resuffix(rc_file,'%(RESEXT)s')) + d['RESNAME'] = res_file + d['RCNAME'] = rc_file + self._escape_dict(d) + s = self.expand_string(cmd, d) + return plan.plan_t(command=s, + output=res_file, + input=rc_file) + + def compile(self, dag, sources): + """Build all the sources by adding them to the dag. Use the + suffixes to figure out how to handle the files. The dag can be + passed to a work queue. See the build function. """ + + objs = [] + for s in sources: + b = os.path.basename(s) # filename component of path/filename + (base,ext) = os.path.splitext(b) + if ext in ['.rc' ]: + obj = self.build_dir_join(self.resuffix(b,'%(RESEXT)s')) + else: + obj = self.build_dir_join(self.make_obj(b)) + + if ext in ['.asm', '.s' ]: + c = self.assemble( s, obj ) + elif ext in ['.c']: + c = self.cc_compile( s, obj ) + elif ext in ['.cpp', '.C' ]: + c = self.cxx_compile( s, obj ) + elif ext in ['.rc' ]: + c = self.rc_file( s, obj ) # obj is a res file in this case + else: + die("Unsupported file type %s" % (s)) + cmd = dag.add(self,c) + objs.append(self.expand_string(obj)) + return objs + + + def compile_and_link(self, dag, sources, exe, shared_object=False, libs=[]): + """Build all the sources by adding them to the dag. Use the + suffixes to figure out how to handle the files. The dag can be + passed to a work queue. See the build function. """ + + objs = self.compile(dag, sources) + + if shared_object: + cmd2 = dag.add(self, + self.dynamic_lib(objs + libs, exe, relocate=True)) + else: + cmd2 = dag.add(self, + self.link(objs + libs , exe,relocate=True)) + return cmd2 + + + def build(self, work_queue, dag, phase='BUILD',terminate_on_errors=False): + """Build everything in the work queue""" + okay = work_queue.build(dag=dag, die_on_errors=False) + if not okay: + if terminate_on_errors: + die("[%s] failed." 
% phase) + else: + msgb(phase,"failed.") + return False + msgb(phase, "succeeded") + return True diff --git a/mbuild/header_tag.py b/mbuild/header_tag.py new file mode 100755 index 0000000..7cab904 --- /dev/null +++ b/mbuild/header_tag.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +import sys +import os +import re +from stat import * + +def _get_mode(fn): + "get the mode of the file named fn, suitable for os.chmod() or open() calls" + mode = os.stat(fn)[ST_MODE] + cmode = S_IMODE(mode) + return cmode + +def _replace_original_with_new_file(file,newfile): + "Replace file with newfile" + # os.system(" mv -f %s %s" % ( newfile, file)) + os.unlink(file) + os.rename(newfile,file) + +def _remove_existing_header(contents,prefix="#"): + "remove existing legal header, if any" + retval = [] + skipping = False + start_pattern = re.compile(r"^(/[*]BEGIN_LEGAL)|(" + prefix + "BEGIN_LEGAL)") + stop_pattern = re.compile(r"^[ ]*(END_LEGAL[ ]?[*]/)|(" + prefix + "[ ]*END_LEGAL)") + for line in contents: + if start_pattern.match(line): + skipping = True + if skipping == False: + retval.append(line) + if stop_pattern.match(line): + skipping = False + return retval + +def _prepend_script_comment(header,prefix="#"): + "Apply script comment marker to each line" + retval = [] + for line in header: + retval.append( prefix + line ) + return retval + +def 
apply_header_to_source_file(header, file): + "apply header to file using C++ comment style" + f = open(file,"r") + mode = _get_mode(file) + contents = f.readlines() + f.close() + trimmed_contents = _remove_existing_header(contents) + newfile = file + ".new" + o = open(newfile,"w") + o.write("/*BEGIN_LEGAL \n") + o.writelines(header) + o.write("END_LEGAL */\n") + o.writelines(trimmed_contents) + o.close() + os.chmod(newfile,mode) + _replace_original_with_new_file(file,newfile) + +# FIXME: this will flag files that have multiline C-style comments +# with -*- in them even though the splitter will not look for the +# comment properly + +def _shell_script(lines): + """return true if the lines are the start of shell script or + something that needs a mode comment at the top""" + + first = "" + second = "" + if len(lines) > 0: + first = lines[0]; + if len(lines) > 1: + second = lines[1]; + + if re.match("#!",first): + #print "\t\t First script test true" + return True + if re.search("-\*-",first) or re.search("-\*-",second): + #print "\t\t Second script test true" + return True + return False + +def _split_script(lines): + "Return a tuple of (header, body) for shell scripts, based on an input line list" + header = [] + body = [] + + f = lines.pop(0) + while re.match("#",f) or re.search("-\*-",f): + header.append(f) + f = lines.pop(0) + + # tack on the first non matching line from the above loop + body.append(f); + body.extend(lines); + return (header,body) + +def _write_script_header(o,lines,prefix="#"): + "Write the file header for a script" + o.write(prefix+"BEGIN_LEGAL\n") + o.writelines(lines) + o.write(prefix+"END_LEGAL\n") + +def apply_header_to_data_file(header, file, prefix="#"): + "apply header to file using script comment style" + f = open(file,"r") + mode = _get_mode(file) + #print "file: " + file + " mode: " + "%o" % mode + contents = f.readlines() + f.close() + trimmed_contents = _remove_existing_header(contents, prefix) + newfile = file + ".new" + o = 
open(newfile,"w") + augmented_header = _prepend_script_comment(header,prefix) + if _shell_script(trimmed_contents): + (script_header, script_body) = _split_script(trimmed_contents) + o.writelines(script_header) + _write_script_header(o, augmented_header, prefix) + o.writelines(script_body) + else: + _write_script_header(o,augmented_header,prefix) + o.writelines(trimmed_contents) + o.close() + os.chmod(newfile,mode) + _replace_original_with_new_file(file,newfile) + +#################################################################### +### MAIN +#################################################################### +if __name__ == '__main__': + if len(sys.argv) < 4: + print "Usage " + sys.argv[0] + " [-s|-t] legal-header file-name [file-name...]\n" + sys.exit(1) + + type = sys.argv[1] + header_file = sys.argv[2] + if not os.path.exists(header_file): + print "Could not find header file: [%s]\n" % (header_file) + sys.exit(1) + + files_to_tag = sys.argv[3:] + f = open(header_file,"r") + header = f.readlines() + f.close() + + sources = files_to_tag + + if type == "-s": + for file in sources: + if re.search(".svn",file) == None and re.search(".new$",file) == None: + apply_header_to_source_file(header, file.strip()) + elif type == "-t": + for file in sources: + if re.search(".svn",file) == None and re.search(".new$",file) == None: + apply_header_to_data_file(header, file.strip()) + else: + print "2nd argument must be -s or -t\n" + sys.exit(1) diff --git a/mbuild/msvs.py b/mbuild/msvs.py new file mode 100644 index 0000000..754f43e --- /dev/null +++ b/mbuild/msvs.py @@ -0,0 +1,903 @@ +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
#END_LEGAL

# TESTING MATRIX
# ('e' is for express)
#
#        32    32/64   64
#  6     ok     ?      N/A
#  7     ok     ok     N/A
#  8     ?      ok     ok
#  8e    ?      ?      ?
#  9     ?      ok     ok
#  9e    ok     ?      ?
#  10    ?      ?      ?
#

"""Environment setup for Microsoft Visual Studio. Set INCLUDE,
LIBPATH, LIB, PATH, VCINSTALLDIR, VS80COMNTOOLS, VSINSTALLDIR, etc.
"""

import os
import sys
import platform
from base import *
from util import *
from env import *

########################################################################
def set_env(v, s):
    """Add v=s to the shell environment, exiting with a diagnostic if
    the assignment fails (e.g. environment too long on windows)."""
    if v in os.environ:
        orig = os.environ[v]
    else:
        orig = ''

    # We have had issues on windows were we attempt to make the
    # environment too long. This catches the error and prints a nice
    # error msg.
    try:
        os.environ[v] = s
    except Exception as e:  # "except Exception, e" is python-2-only syntax
        sys.stderr.write(str(e) + '\n')
        sys.stderr.write("Env Variable [%s]\n" % (v))
        sys.stderr.write("Original was [%s]\n" % (orig))
        sys.stderr.write("New value was [%s]\n" % (s))
        sys.exit(1)

def set_env_list(v, slist):
    """Set shell environment variable v to the semicolon-joined slist."""
    set_env(v, ";".join(slist))

def add_to_front(v, s):
    """Add v=s+old_v to the shell environment"""
    set_env(v, s + ';' + os.environ[v])

def add_to_front_list(v, s):
    """Prepend the strings in list s to shell environment variable v."""
    add_to_front(v, ';'.join(s))

def add_env(v, s):
    """Insert string s at the front of list v. (Despite the old
    docstring, this mutates the python list v, not the shell
    environment.)"""
    v.insert(0, s)
########################################################################

def _find_dir_list(lst):
    """Return the first directory in lst that exists, or None."""
    for dir in lst:
        if os.path.exists(dir):
            return dir
    return None


def _set_msvs_dev6(env, x64_host, x64_target):  # VC 98
    """Set up the shell environment for MSVS 6 (VC98). Returns the VC
    install directory."""
    vc_prefixes = ["C:/VC98",
                   "C:/Program Files (x86)/Microsoft Visual Studio",
                   "C:/Program Files/Microsoft Visual Studio"]

    msdev_prefixes = [
        "C:/Program Files/Microsoft Visual Studio/Common"]
    vc_prefix = _find_dir_list(vc_prefixes)
    msdev_prefix = _find_dir_list(msdev_prefixes)
    if not vc_prefix:
        die("Could not find VC98")
    if not msdev_prefix:
        die("Could not find VC98 MSDEV")

    i = []
    add_env(i, vc_prefix + "/VC98/ATL/INCLUDE")
    add_env(i, vc_prefix + "/VC98/INCLUDE")
    # BUG FIX: was "/VC98/MFC/INCUDE" (typo), matching the LIB entries below
    add_env(i, vc_prefix + "/VC98/MFC/INCLUDE")
    set_env_list("INCLUDE", i)

    lib = []
    add_env(lib, vc_prefix + "/VC98/LIB")
    add_env(lib, vc_prefix + "/VC98/MFC/LIB")
    set_env_list("LIB", lib)

    path = []
    add_env(path, msdev_prefix + "/msdev98/Bin")
    add_env(path, vc_prefix + "/VC98/Bin")
    add_env(path, msdev_prefix + "/TOOLS/WINNT")
    add_env(path, msdev_prefix + "/TOOLS")
    add_to_front_list('PATH', path)

    set_env("MSDevDir", msdev_prefix + "/msdev98")
    set_env("MSVCDir", vc_prefix + "/VC98")

    return vc_prefix + "/VC98"

def _set_msvs_dev7(env, x64_host, x64_target):  # .NET 2003
    """Set up the shell environment for MSVS 7 (.NET 2003). Returns the
    VC install directory."""
    prefixes = ["c:/Program Files/Microsoft Visual Studio .NET 2003",
                "c:/Program Files (x86)/Microsoft Visual Studio .NET 2003"]
    prefix = _find_dir_list(prefixes)
    if not prefix:
        die("Could not find MSVS7 .NET 2003")

    inc = []
    add_env(inc, prefix + '/VC7/ATLMFC/INCLUDE')
    add_env(inc, prefix + '/VC7/include')
    add_env(inc, prefix + '/VC7/PlatformSDK/include/prerelease')
    add_env(inc, prefix + '/VC7/PlatformSDK/include')
    # NOTE(review): the same SDK include dir is added twice (with and
    # without trailing slash) — presumably harmless; confirm intent.
    add_env(inc, prefix + '/SDK/v1.1/include')
    add_env(inc, prefix + '/SDK/v1.1/include/')
    set_env_list("INCLUDE", inc)

    lib = []
    add_env(lib, prefix + '/VC7/ATLMFC/LIB')
    add_env(lib, prefix + '/VC7/LIB')
    add_env(lib, prefix + '/VC7/PlatformSDK/lib/prerelease')
    add_env(lib, prefix + '/VC7/PlatformSDK/lib')
    add_env(lib, prefix + '/SDK/v1.1/lib')
    add_env(lib, prefix + '/SDK/v1.1/Lib/')
    set_env_list("LIB", lib)

    path = []
    add_env(path, prefix + "/Common7/IDE")
    add_env(path, prefix + "/VC7/bin")
    add_env(path, prefix + "/Common7/Tools")
    add_env(path, prefix + "/Common7/Tools/bin/prerelease")
    add_env(path, prefix + "/Common7/Tools/bin")
    add_env(path, prefix + "/SDK/v1.1/bin")
    add_to_front_list('PATH', path)

    set_env("VCINSTALLDIR", prefix)
    set_env("VC71COMNTOOLS", prefix + "/Common7/Tools/")
    set_env("VSINSTALLDIR", prefix + '/Common7/IDE')
    set_env("MSVCDir", prefix + '/VC7')
    set_env("FrameworkVersion", "v1.1.4322")
    set_env("FrameworkSDKDir", prefix + "/SDK/v1.1")
    set_env("FrameworkDir", "C:/WINDOWS/Microsoft.NET/Framework")
    # DevEnvDir has a trailing slash
    set_env("DevEnvDir", prefix + "/Common7/IDE/")

    return prefix + "/VC7"
add_env(lib, prefix + '/VC7/LIB') + add_env(lib, prefix + '/VC7/PlatformSDK/lib/prerelease') + add_env(lib, prefix + '/VC7/PlatformSDK/lib') + add_env(lib, prefix + '/SDK/v1.1/lib') + add_env(lib, prefix + '/SDK/v1.1/Lib/') + set_env_list("LIB",lib) + + path = [] + add_env(path, prefix + "/Common7/IDE") + add_env(path, prefix + "/VC7/bin") + add_env(path, prefix + "/Common7/Tools") + add_env(path, prefix + "/Common7/Tools/bin/prerelease") + add_env(path, prefix + "/Common7/Tools/bin") + add_env(path, prefix + "/SDK/v1.1/bin") + add_to_front_list('PATH', path) + + set_env("VCINSTALLDIR", prefix) + set_env("VC71COMNTOOLS", prefix + "/Common7/Tools/") + set_env("VSINSTALLDIR", prefix + '/Common7/IDE') + set_env("MSVCDir", prefix + '/VC7') + set_env("FrameworkVersion","v1.1.4322") + set_env("FrameworkSDKDir", prefix + "/SDK/v1.1") + set_env("FrameworkDir", "C:/WINDOWS/Microsoft.NET/Framework") + # DevEnvDir has a trailing slash + set_env("DevEnvDir", prefix + "/Common7/IDE/") + + return prefix + "/VC7" +def _set_msvs_dev8(env, x64_host, x64_target, regv=None): # VS 2005 + if regv: + prefix = regv + else: + prefixes = ["c:/Program Files (x86)/Microsoft Visual Studio 8", + "c:/Program Files/Microsoft Visual Studio 8"] + prefix = _find_dir_list(prefixes) + if not os.path.exists(prefix): + die("Could not find MSVC8 (2005)") + + set_env('VCINSTALLDIR', prefix + '/VC') + set_env('VS80COMNTOOLS', prefix + "/Common7/Tools") + set_env('VSINSTALLDIR', prefix) + + i =[] + add_env(i, prefix + "/VC/ATLMFC/INCLUDE") + add_env(i, prefix + "/VC/INCLUDE") + add_env(i, prefix + "/VC/PlatformSDK/include") + add_env(i, prefix + "/SDK/v2.0/include") + set_env_list('INCLUDE', i) + + set_env('FrameworkDir','C:/WINDOWS/Microsoft.NET/Framework') + set_env('FrameworkVersion', 'v2.0.50727') + set_env('FrameworkSDKDir', prefix +'/SDK/v2.0') + + # DevEnvDir has a trailing slash + set_env("DevEnvDir", prefix +'/Common7/IDE/') + + lp = [] + path=[] + lib=[] + if x64_host and x64_target: + 
add_env(lp, prefix + '/VC/ATLMFC/LIB/amd64') + + add_env(lib, prefix + "/VC/ATLMFC/LIB/amd64") + add_env(lib, prefix + "/VC/LIB/amd64") + add_env(lib, prefix + "/VC/PlatformSDK/lib/amd64") + add_env(lib, prefix + "/SDK/v2.0/LIBAMD64") + + add_env(path, prefix + "/VC/bin/amd64") + add_env(path, prefix + "/VC/PlatformSDK/bin/win64/amd64") + add_env(path, prefix + "/VC/PlatformSDK/bin") + add_env(path, prefix + "/VC/VCPackages") + add_env(path, prefix + "/Common7/IDE") + add_env(path, prefix + "/Common7/Tools") + add_env(path, prefix + "/Common7/Tools/bin") + add_env(path, prefix + "/SDK/v2.0/bin") + add_env(path, prefix + "C:/WINDOWS/Microsoft.NET/Framework64/v2.0.50727") + + elif not x64_target: + + add_env(path, prefix + '/Common7/IDE') + add_env(path, prefix + '/VC/BIN') + add_env(path, prefix + '/Common7/Tools') + add_env(path, prefix + '/Common7/Tools/bin') + add_env(path, prefix + '/VC/PlatformSDK/bin') + add_env(path, prefix + '/SDK/v2.0/bin') + add_env(path, prefix + '/VC/VCPackages') + add_env(path, 'C:/WINDOWS/Microsoft.NET/Framework/v2.0.50727') + + add_env(lib, prefix + '/VC/ATLMFC/LIB') + add_env(lib, prefix + '/VC/LIB') + add_env(lib, prefix + '/VC/PlatformSDK/lib') + add_env(lib, prefix + '/SDK/v2.0/lib') + + add_env(lp, prefix + '/VC/ATLMFC/LIB') + add_env(lp, 'C:/WINDOWS/Microsoft.NET/Framework/v2.0.50727') + + add_to_front_list('PATH', path) + set_env_list('LIB',lib) + set_env_list('LIBPATH', lp) + + return prefix + "/VC" + +def _set_msvs_dev9(env, x64_host, x64_target, regv=None): # VS 2008 + if regv: + prefix = regv + else: + prefixes = ['C:/Program Files (x86)/Microsoft Visual Studio 9.0', + 'C:/Program Files/Microsoft Visual Studio 9.0'] + prefix = _find_dir_list(prefixes) + + set_env('VSINSTALLDIR', prefix) + set_env('VS90COMNTOOLS', prefix + '/Common7/Tools') + set_env('VCINSTALLDIR', prefix +'/VC') + set_env('FrameworkDir', 'C:/WINDOWS/Microsoft.NET/Framework') + set_env('Framework35Version','v3.5') + set_env('FrameworkVersion','v2.0.50727') 
+ set_env('FrameworkSDKDir', prefix +'/SDK/v3.5') + set_env('WindowsSdkDir','C:/Program Files/Microsoft SDKs/Windows/v6.0A') + + # DevEnvDir has a trailing slash + set_env('DevEnvDir', prefix + '/Common7/IDE/') + inc = [] + add_env(inc, prefix + 'VC/ATLMFC/INCLUDE') + add_env(inc, prefix + '/VC/INCLUDE') + add_env(inc, 'C:/Program Files/Microsoft SDKs/Windows/v6.0A/include') + set_env_list('INCLUDE',inc) + + path = [] + lib = [] + libpath = [] + + if x64_target: # FIXME! 64b!!!! + add_env(path, prefix + '/Common7/IDE') + add_env(path, prefix + '/VC/BIN') + add_env(path, prefix + '/Common7/Tools') + add_env(path, prefix + '/VC/VCPackages') + add_env(path, 'C:/Program Files/Microsoft SDKs/Windows/v6.0A/bin') + add_env(path, 'C:/WINDOWS/Microsoft.NET/Framework/v3.5') + add_env(path, 'C:/WINDOWS/Microsoft.NET/Framework/v2.0.50727') + + add_env(lib, prefix +'/VC/ATLMFC/LIB/amdt64') + add_env(lib, prefix +'/VC/LIB/amd64') + add_env(lib, 'C:/Program Files/Microsoft SDKs/Windows/v6.0A/lib/x64') + + add_env(libpath, 'C:/WINDOWS/Microsoft.NET/Framework64/v2.0.50727') + add_env(libpath, 'C:/WINDOWS/Microsoft.NET/Framework64/v3.5') + add_env(libpath, 'C:/WINDOWS/Microsoft.NET/Framework64/v2.0.50727') + add_env(libpath, 'C:/WINDOWS/Microsoft.NET/Framework64/v2.0.50727') + add_env(libpath, prefix + '/VC/ATLMFC/LIB/amd64') + add_env(libpath, prefix + '/VC/LIB/amd64') + else: + add_env(path, prefix + '/Common7/IDE') + add_env(path, prefix + '/VC/BIN') + add_env(path, prefix + '/Common7/Tools') + add_env(path, prefix + '/VC/VCPackages') + add_env(path, 'C:/Program Files/Microsoft SDKs/Windows/v6.0A/bin') + add_env(path, 'C:/WINDOWS/Microsoft.NET/Framework/v3.5') + add_env(path, 'C:/WINDOWS/Microsoft.NET/Framework/v2.0.50727') + + add_env(lib, prefix +'/VC/LIB') + add_env(lib, prefix +'/VC/ATLMFC/LIB') + add_env(lib, 'C:/Program Files/Microsoft SDKs/Windows/v6.0A/lib') + + add_env(libpath, 'C:/WINDOWS/Microsoft.NET/Framework/v3.5') + add_env(libpath, 
'C:/WINDOWS/Microsoft.NET/Framework/v2.0.50727') + add_env(libpath, prefix + '/VC/ATLMFC/LIB') + add_env(libpath, prefix + '/VC/LIB') + + set_env_list('LIBPATH',libpath) + set_env_list('LIB',lib) + add_to_front_list('PATH',path) + + return prefix + "/VC" + + +def _set_msvs_dev10(env, x64_host, x64_target, regv=None): # VS 2010 + if regv: + prefix = regv + else: + prefix = 'C:/Program Files (x86)/Microsoft Visual Studio 10.0' + + path = [] + lib = [] + libpath = [] + + inc = [] + add_env(inc, prefix + '/VC/INCLUDE') + add_env(inc, prefix + '/VC/ATLMFC/INCLUDE') + add_env(inc, 'c:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/include') + set_env_list('INCLUDE',inc) + + set_env('Framework35Version','v3.5') + set_env('FrameworkVersion', 'v4.0.20728') + set_env('FrameworkVersion32', 'v4.0.20728') + + set_env('VCINSTALLDIR', prefix + '/VC') + set_env('VS100COMNTOOLS', prefix + '/Common7/Tools') + set_env('VSINSTALLDIR' , prefix) + set_env('WindowsSdkDir', 'c:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A') + + # DevEnvDir has a trailing slash + set_env('DevEnvDir', prefix + '/Common7/IDE/') + + if x64_target: + set_env('FrameworkDir','c:/WINDOWS/Microsoft.NET/Framework64') + set_env('FrameworkDIR64','c:/WINDOWS/Microsoft.NET/Framework64') + set_env('FrameworkVersion64', 'v4.0.20728') + + set_env('Platform','X64') + add_env(lib, prefix + '/VC/LIB/amd64') + add_env(lib, prefix + '/VC/ATLMFC/LIB/amd64') + add_env(lib, 'c:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/lib/x64') + + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework64/v4.0.20728') + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework64/v3.5') + add_env(libpath, prefix + '/VC/LIB/amd64') + add_env(libpath, prefix + '/VC/ATLMFC/LIB/amd64') + + add_env(path, prefix + '/VC/BIN/amd64') + add_env(path, 'c:/WINDOWS/Microsoft.NET/Framework64/v4.0.20728') + add_env(path, 'C:/WINDOWS/Microsoft.NET/Framework64/v3.5') + add_env(path, prefix + '/VC/VCPackages') + add_env(path, prefix + '/Common7/IDE') + 
add_env(path, prefix + '/Common7/Tools') + add_env(path, 'C:/Program Files (x86)/HTML Help Workshop') + add_env(path, 'C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/' + + 'bin/NETFX 4.0 Tools/x64') + add_env(path, 'C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/bin/x64') + add_env(path, 'C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/bin') + else: + set_env('FrameworkDir', 'c:/WINDOWS/Microsoft.NET/Framework') + set_env('FrameworkDIR32', 'c:/WINDOWS/Microsoft.NET/Framework') + + add_env(lib, prefix + '/VC/LIB') + add_env(lib, prefix + '/VC/ATLMFC/LIB') + add_env(lib, 'c:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/lib') + + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework/v4.0.20728') + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework/v3.5') + add_env(libpath, prefix + '/VC/LIB') + add_env(libpath, prefix + '/VC/ATLMFC/LIB') + + add_env(path, prefix + '/Common7/IDE/') + add_env(path, prefix + '/VC/BIN') + add_env(path, prefix +'/Common7/Tools') + add_env(path, 'C:/WINDOWS/Microsoft.NET/Framework/v4.0.20728') + add_env(path, 'C:/WINDOWS/Microsoft.NET/Framework/v3.5') + add_env(path, prefix + '/VC/VCPackages') + add_env(path, 'C:/Program Files (x86)/HTML Help Workshop') + add_env(path, prefix + '/Team Tools/Performance Tools') + add_env(path, 'C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/' + + 'bin/NETFX 4.0 Tools') + add_env(path, 'C:/Program Files (x86)/Microsoft SDKs/Windows/v7.0A/bin') + + set_env_list('LIBPATH',libpath) + set_env_list('LIB',lib) + add_to_front_list('PATH',path) + + return prefix + "/VC" + + +def _set_msvs_dev11(env, x64_host, x64_target, regv=None): # msvs2012 + progfi = 'C:/Program Files (x86)' + if regv: + prefix = regv + else: + prefix = progfi + '/Microsoft Visual Studio 11.0' + + sdkdir = progfi + '/Microsoft SDKs/Windows/v8.0' + sdk8 = progfi + '/Microsoft SDKs/Windows/v8.0A' + sdk7 = progfi + '/Microsoft SDKs/Windows/v7.0A' + winkit = progfi + '/Windows Kits/8.0' + + path = [] + lib = [] + libpath = [] 
+ + inc = [] + add_env(inc, prefix + '/VC/INCLUDE') + add_env(inc, prefix + '/VC/ATLMFC/INCLUDE') + add_env(inc, winkit + '/include') + add_env(inc, winkit + '/include/um') + add_env(inc, winkit + '/include/shared') + add_env(inc, winkit + '/include/winrt') + set_env_list('INCLUDE',inc) + + set_env('Framework35Version','v3.5') + set_env('FrameworkVersion', 'v4.0.30319') + set_env('FrameworkVersion32', 'v4.0.30319') + + set_env('VCINSTALLDIR', prefix + '/VC/') + set_env('VS110COMNTOOLS', prefix + '/Common7/Tools') + set_env('VSINSTALLDIR' , prefix) + set_env('WindowsSdkDir', winkit) + + + if x64_target: + set_env('FrameworkDir','c:/WINDOWS/Microsoft.NET/Framework64') + set_env('FrameworkDIR64','c:/WINDOWS/Microsoft.NET/Framework64') + set_env('FrameworkVersion64', 'v4.0.30319') + + set_env('Platform','X64') + + add_env(lib, prefix + '/VC/LIB/amd64') + add_env(lib, prefix + '/VC/ATLMFC/LIB/amd64') + add_env(lib, winkit + '/lib/win8/um/x64') + + + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework64/v4.0.30319') + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework64/v3.5') + add_env(libpath, prefix + '/VC/LIB/amd64') + add_env(libpath, prefix + '/VC/ATLMFC/LIB/amd64') + add_env(libpath, winkit + '/References/CommonConfiguration/Neutral') + add_env(libpath, sdkdir + '/ExtensionSDKs/Microsoft.VCLibs/11.0/' + + 'References/CommonConfiguration/neutral') + + add_env(path, prefix + '/VC/BIN/amd64') + add_env(path, 'c:/WINDOWS/Microsoft.NET/Framework64/v4.0.30319') + add_env(path, 'C:/WINDOWS/Microsoft.NET/Framework64/v3.5') + + add_env(path, prefix + '/Common7/IDE/CommonExtensions/Microsoft/TestWindow') + add_env(path, prefix + '/VC/VCPackages') + add_env(path, prefix + '/Common7/IDE') + add_env(path, prefix + '/Common7/Tools') + add_env(path, 'C:/Program Files (x86)/HTML Help Workshop') + add_env(path, prefix + '/Team Tools/Performance Tools/x64') + add_env(path, prefix + '/Team Tools/Performance Tools') + add_env(path, winkit + '/bin/x64') + add_env(path, sdk8 + 
'/bin/NETFX 4.0 Tools/x64') + add_env(path, sdk7 + '/Bin/x64') + add_env(path, sdk8 + '/bin/NETFX 4.0 Tools') + add_env(path, sdk7 + '/Bin') + add_env(path, winkit + '/Windows Performance Toolkit') + add_env(path, 'C:/Program Files/Microsoft SQL Server/110/Tools/Binn') + + else: + set_env('FrameworkDir', 'c:/WINDOWS/Microsoft.NET/Framework') + set_env('FrameworkDIR32', 'c:/WINDOWS/Microsoft.NET/Framework') + + add_env(lib, prefix + '/VC/LIB') + add_env(lib, prefix + '/VC/ATLMFC/LIB') + add_env(lib, winkit + '/lib/win8/um/x86') + + + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework/v4.0.30319') + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework/v3.5') + add_env(libpath, prefix + '/VC/LIB') + add_env(libpath, prefix + '/VC/ATLMFC/LIB') + add_env(libpath, winkit + '/References/CommonConfiguration/Neutral') + add_env(libpath, sdkdir + '/ExtensionSDKs/Microsoft.VCLibs/11.0/' + + 'References/CommonConfiguration/neutral') + + + add_env(path, prefix + '/Common7/IDE/CommonExtensions/Microsoft/TestWindow') + add_env(path, 'C:/Program Files (x86)/Microsoft SDKs/F#/3.0/Framework/v4.0') + add_env(path, prefix + '/Common7/IDE') + add_env(path, prefix + '/VC/BIN') + add_env(path, prefix + '/Common7/Tools') + add_env(path, 'C:/Windows/Microsoft.NET/Framework/v4.0.30319') + add_env(path, 'C:/Windows/Microsoft.NET/Framework/v3.5') + add_env(path, prefix + '/VC/VCPackages') + add_env(path, 'C:/Program Files (x86)/HTML Help Workshop') + add_env(path, prefix + '/Team Tools/Performance Tools') + add_env(path, winkit + '/bin/x86') + add_env(path, sdk8 + '/bin/NETFX 4.0 Tools') + add_env(path, sdk7 + '/Bin') + add_env(path, winkit + '/Windows Performance Toolkit') + add_env(path, 'C:/Program Files/Microsoft SQL Server/110/Tools/Binn') + + + + set_env_list('LIBPATH',libpath) + set_env_list('LIB',lib) + add_to_front_list('PATH',path) + + return prefix + "/VC" + + + +def _set_msvs_dev12(env, x64_host, x64_target, regv=None): # msvs2013 + progfi = 'C:/Program Files (x86)' + if regv: + 
prefix = regv + else: + prefix = progfi + '/Microsoft Visual Studio 12.0' + + sdk81a = progfi + '/Microsoft SDKs/Windows/v8.1A' + sdk81 = progfi + '/Microsoft SDKs/Windows/v8.1' + winkit = progfi + '/Windows Kits/8.1' + + + path = [] + lib = [] + libpath = [] + + inc = [] + add_env(inc, prefix + '/VC/INCLUDE') + add_env(inc, prefix + '/VC/ATLMFC/INCLUDE') + add_env(inc, winkit + '/include') # not used in msvs12 + add_env(inc, winkit + '/include/um') + add_env(inc, winkit + '/include/shared') + add_env(inc, winkit + '/include/winrt') + set_env_list('INCLUDE',inc) + + set_env('Framework40Version','v4.0') + set_env('FrameworkVersion', 'v4.0.30319') + set_env('ExtensionSdkDir', + sdk81 + '/ExtensionSDKs') + + set_env('VCINSTALLDIR', prefix + '/VC/') + set_env('VS120COMNTOOLS', prefix + '/Common7/Tools') + set_env('VSINSTALLDIR' , prefix) + set_env('WindowsSdkDir', winkit) + set_env('VisualStudioVersion','12.0') + + set_env('WindowsSDK_ExecutablePath_x86', + sdk81a + '/bin/NETFX 4.5.1 Tools/') + + if x64_target: + set_env('WindowsSDK_ExecutablePath_x64', + sdk81a +'/bin/NETFX 4.5.1 Tools/x64/') + + set_env('FrameworkDir','c:/WINDOWS/Microsoft.NET/Framework64') + set_env('FrameworkDIR64','c:/WINDOWS/Microsoft.NET/Framework64') + set_env('FrameworkVersion64', 'v4.0.30319') + + set_env('Platform','X64') + + add_env(lib, prefix + '/VC/LIB/amd64') + add_env(lib, prefix + '/VC/ATLMFC/LIB/amd64') + add_env(lib, winkit + '/lib/winv6.3/um/x64') + + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework64/v4.0.30319') + add_env(libpath, prefix + '/VC/LIB/amd64') + add_env(libpath, prefix + '/VC/ATLMFC/LIB/amd64') + add_env(libpath, winkit + '/References/CommonConfiguration/Neutral') + add_env(libpath, sdk81 + '/ExtensionSDKs/Microsoft.VCLibs/12.0/' + + 'References/CommonConfiguration/neutral') + + add_env(path, prefix + '/Common7/IDE/CommonExtensions/Microsoft/TestWindow') + add_env(path, prefix + '/VC/BIN/amd64') + add_env(path, 'c:/WINDOWS/Microsoft.NET/Framework64/v4.0.30319') + 
+ add_env(path, prefix + '/VC/VCPackages') + add_env(path, prefix + '/Common7/IDE') + add_env(path, prefix + '/Common7/Tools') + add_env(path, 'C:/Program Files (x86)/HTML Help Workshop') + add_env(path, prefix + '/Team Tools/Performance Tools/x64') + add_env(path, prefix + '/Team Tools/Performance Tools') + add_env(path, winkit + '/bin/x64') + add_env(path, winkit + '/bin/x86') + add_env(path, sdk81a + '/bin/NETFX 4.5.1 Tools/x64') + add_env(path, winkit + '/Windows Performance Toolkit') + + + else: + set_env('FrameworkDir', 'c:/WINDOWS/Microsoft.NET/Framework') + set_env('FrameworkDIR32', 'c:/WINDOWS/Microsoft.NET/Framework') + set_env('FrameworkVersion32','v4.0.30319') + + add_env(lib, prefix + '/VC/LIB') + add_env(lib, prefix + '/VC/ATLMFC/LIB') + add_env(lib, winkit + '/lib/winv6.3/um/x86') + + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework/v4.0.30319') + add_env(libpath, prefix + '/VC/LIB') + add_env(libpath, prefix + '/VC/ATLMFC/LIB') + add_env(libpath, winkit + '/References/CommonConfiguration/Neutral') + add_env(libpath, sdk81 + '/ExtensionSDKs/Microsoft.VCLibs/12.0/' + + 'References/CommonConfiguration/neutral') + + + add_env(path, prefix + '/Common7/IDE/CommonExtensions/Microsoft/TestWindow') + add_env(path, progfi + '/Microsoft SDKs/F#/3.1/Framework/v4.0') + add_env(path, progfi + '/MSBuild/12.0/bin') + add_env(path, prefix + '/Common7/IDE') + add_env(path, prefix + '/VC/BIN') + add_env(path, prefix + '/Common7/Tools') + add_env(path, 'C:/Windows/Microsoft.NET/Framework/v4.0.30319') + add_env(path, prefix + '/VC/VCPackages') + add_env(path, progfi + '/HTML Help Workshop') + add_env(path, prefix + '/Team Tools/Performance Tools') + add_env(path, winkit + '/bin/x86') + add_env(path, sdk81a + '/bin/NETFX 4.5.1 Tools') + add_env(path, winkit + '/Windows Performance Toolkit') + + + set_env_list('LIBPATH',libpath) + set_env_list('LIB',lib) + add_to_front_list('PATH',path) + + return prefix + "/VC" + + + + +def _set_msvs_dev14(env, x64_host, 
x64_target, regv=None): # msvs 2015 + progfi = 'C:/Program Files (x86)' + if regv: + prefix = regv + else: + prefix = progfi + '/Microsoft Visual Studio 14.0' + + sdk81a = progfi + '/Microsoft SDKs/Windows/v8.1A' + sdk81 = progfi + '/Microsoft SDKs/Windows/v8.1' + winkit = progfi + '/Windows Kits/8.1' + winkit10 = progfi + '/Windows Kits/10' + + # Find the UCRT Version. Could not locate a registry entry with + # the information. Preview version of msvs2015/dev14 did not set + # the env var. Poke around in the directory system as a last + # resort. Could make this configrable + winkit10version = None + if 'UCRTVersion' in os.environ: + winkit10version = os.environ['UCRTVersion'] + if not winkit10version: + # use glob and find youngest directory + ctime = 0 + for g in glob(winkit10 + '/include/*'): + if os.path.exists('{}/ucrt'.format(g)): + gtime = os.path.getctime(g) + if gtime > ctime: + winkit10version = os.path.basename(g) + ctime = gtime + if not winkit10version: + die("Did not find winkit 10 version") + msgb("UCRT Version", winkit10version) + + path = [] + lib = [] + libpath = [] + + inc = [] + add_env(inc, prefix + '/VC/INCLUDE') + add_env(inc, prefix + '/VC/ATLMFC/INCLUDE') + add_env(inc, winkit + '/include') # not used in msvs12 + + add_env(inc, winkit10 + '/include/{}/ucrt'.format(winkit10version)) + add_env(inc, winkit + '/include/shared') + add_env(inc, winkit + '/include/um') + add_env(inc, winkit + '/include/winrt') + set_env_list('INCLUDE',inc) + + set_env('Framework40Version','v4.0') + set_env('FrameworkVersion', 'v4.0.30319') + set_env('ExtensionSdkDir', + sdk81 + '/ExtensionSDKs') + + set_env('VCINSTALLDIR', prefix + '/VC/') + set_env('VS140COMNTOOLS', prefix + '/Common7/Tools') + set_env('VSINSTALLDIR' , prefix) + set_env('WindowsSdkDir', winkit) + set_env('VisualStudioVersion','14.0') + + set_env('WindowsSDK_ExecutablePath_x86', + sdk81a + '/bin/NETFX 4.5.1 Tools/') + + if x64_target: + set_env('WindowsSDK_ExecutablePath_x64', + sdk81a 
+'/bin/NETFX 4.5.1 Tools/x64/') + + set_env('FrameworkDir','c:/WINDOWS/Microsoft.NET/Framework64') + set_env('FrameworkDIR64','c:/WINDOWS/Microsoft.NET/Framework64') + set_env('FrameworkVersion64', 'v4.0.30319') + + set_env('Platform','X64') + + add_env(lib, prefix + '/VC/LIB/amd64') + add_env(lib, prefix + '/VC/ATLMFC/LIB/amd64') + add_env(lib, winkit10 + '/lib/{}/ucrt/x64'.format(winkit10version)) + add_env(lib, winkit + '/lib/winv6.3/um/x64') + + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework64/v4.0.30319') + add_env(libpath, prefix + '/VC/LIB/amd64') + add_env(libpath, prefix + '/VC/ATLMFC/LIB/amd64') + add_env(libpath, winkit + '/References/CommonConfiguration/Neutral') + add_env(libpath, sdk81 + '/ExtensionSDKs/Microsoft.VCLibs/14.0/' + + 'References/CommonConfiguration/neutral') + + add_env(path, prefix + '/Common7/IDE/CommonExtensions/Microsoft/TestWindow') + add_env(path, prefix + '/VC/BIN/amd64') + add_env(path, 'c:/WINDOWS/Microsoft.NET/Framework64/v4.0.30319') + + add_env(path, prefix + '/VC/VCPackages') + add_env(path, prefix + '/Common7/IDE') + add_env(path, prefix + '/Common7/Tools') + add_env(path, 'C:/Program Files (x86)/HTML Help Workshop') + add_env(path, prefix + '/Team Tools/Performance Tools/x64') + add_env(path, prefix + '/Team Tools/Performance Tools') + add_env(path, winkit + '/bin/x64') + add_env(path, winkit + '/bin/x86') + add_env(path, sdk81a + '/bin/NETFX 4.5.1 Tools/x64') + add_env(path, winkit + '/Windows Performance Toolkit') + + + else: + set_env('FrameworkDir', 'c:/WINDOWS/Microsoft.NET/Framework') + set_env('FrameworkDIR32', 'c:/WINDOWS/Microsoft.NET/Framework') + set_env('FrameworkVersion32','v4.0.30319') + + add_env(lib, prefix + '/VC/LIB') + add_env(lib, prefix + '/VC/ATLMFC/LIB') + add_env(lib, winkit10 + '/lib/{}/ucrt/x86'.format(winkit10version)) + add_env(lib, winkit + '/lib/winv6.3/um/x86') + + add_env(libpath, 'c:/WINDOWS/Microsoft.NET/Framework/v4.0.30319') + add_env(libpath, prefix + '/VC/LIB') + 
add_env(libpath, prefix + '/VC/ATLMFC/LIB') + add_env(libpath, winkit + '/References/CommonConfiguration/Neutral') + add_env(libpath, sdk81 + '/ExtensionSDKs/Microsoft.VCLibs/14.0/' + + 'References/CommonConfiguration/neutral') + + + add_env(path, prefix + '/Common7/IDE/CommonExtensions/Microsoft/TestWindow') + add_env(path, progfi + '/Microsoft SDKs/F#/3.1/Framework/v4.0') + add_env(path, progfi + '/MSBuild/14.0/bin') + add_env(path, prefix + '/Common7/IDE') + add_env(path, prefix + '/VC/BIN') + add_env(path, prefix + '/Common7/Tools') + add_env(path, 'C:/Windows/Microsoft.NET/Framework/v4.0.30319') + add_env(path, prefix + '/VC/VCPackages') + add_env(path, progfi + '/HTML Help Workshop') + add_env(path, prefix + '/Team Tools/Performance Tools') + add_env(path, winkit + '/bin/x86') + add_env(path, sdk81a + '/bin/NETFX 4.5.1 Tools') + add_env(path, winkit + '/Windows Performance Toolkit') + + + set_env_list('LIBPATH',libpath) + set_env_list('LIB',lib) + add_to_front_list('PATH',path) + + return prefix + "/VC" + + +def _try_to_figure_out_msvs_version(env): + prefixes = [ + (14,'C:/Program Files (x86)/Microsoft Visual Studio 14.0'), + (14,'C:/Program Files/Microsoft Visual Studio 14.0'), + + (12,'C:/Program Files (x86)/Microsoft Visual Studio 12.0'), + (12,'C:/Program Files/Microsoft Visual Studio 12.0'), + + (11,'C:/Program Files (x86)/Microsoft Visual Studio 11.0'), + (11,'C:/Program Files/Microsoft Visual Studio 11.0'), + + (10,'C:/Program Files (x86)/Microsoft Visual Studio 10.0'), + (10,'C:/Program Files/Microsoft Visual Studio 10.0'), + + (9,'C:/Program Files (x86)/Microsoft Visual Studio 9.0'), + (9,'C:/Program Files/Microsoft Visual Studio 9.0'), + + (8, "c:/Program Files (x86)/Microsoft Visual Studio 8"), + (8,"c:/Program Files/Microsoft Visual Studio 8"), + + (7, "c:/Program Files/Microsoft Visual Studio .NET 2003"), + (7,"c:/Program Files (x86)/Microsoft Visual Studio .NET 2003") + + ] + for v,dir in prefixes: + #print dir + if os.path.exists(dir): + 
#print 'FOUND', dir + return str(v) + return '' # we don't know + +def _read_registry(root,key,value): + import _winreg + try: + hkey = _winreg.OpenKey(root, key) + except: + return None + try: + (val, typ) = _winreg.QueryValueEx(hkey, value) + except: + _winreg.CloseKey(hkey) + return None + _winreg.CloseKey(hkey) + return val + +def find_msvc(env,version): + import _winreg + vs_ver = str(version) + '.0' + vs_key = 'SOFTWARE\\Microsoft\\VisualStudio\\' + vs_ver + '\\Setup\\VS' + vc_key = 'SOFTWARE\\Microsoft\\VisualStudio\\' + vs_ver + '\\Setup\\VC' + vs_dir = _read_registry(_winreg.HKEY_LOCAL_MACHINE, vs_key, 'ProductDir') + vc_dir = _read_registry(_winreg.HKEY_LOCAL_MACHINE, vc_key, 'ProductDir') + + # On a 64-bit host, look for a 32-bit installation + + if (not vs_dir or not vc_dir): + vs_key = 'SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\' + \ + vs_ver + '\\Setup\\VS' + vc_key = 'SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\' + \ + vs_ver + '\\Setup\\VC' + vs_dir = _read_registry(_winreg.HKEY_LOCAL_MACHINE, + vs_key, 'ProductDir') + vc_dir = _read_registry(_winreg.HKEY_LOCAL_MACHINE, + vc_key, 'ProductDir') + return (vs_dir,vc_dir) + +def _try_to_figure_out_msvs_version_registry(env): + versions = [14,12,11,10,9,8,7,6] + for v in versions: + (vs_dir,vc_dir) = find_msvc(env,v) + if vs_dir and vc_dir: + return (str(v),vs_dir) + return (None,None) + +def set_msvs_env(env): + x64_target=False + if env['host_cpu'] == 'x86-64': + x64_target=True + + x64_host = False + if env['build_cpu'] == 'x86-64': + x64_host=True + + # "express" compiler is 32b only + vc = None + # Verify validity of chosen msvs_version in registry + if env['msvs_version'] != '' : + v = int(env['msvs_version']) + (vs_dir,vc_dir) = find_msvc(env,v) + if not (vs_dir and vc_dir): + warn("Could not find specified version of MSVS. 
Looking around...") + env['msvs_version'] = '' + if env['msvs_version'] == '': + # The chosen msvs_version was not valid we need to search for it + env['msvs_version'] = _try_to_figure_out_msvs_version(env) + # FIXME: could add a knob to just use registry.. + if env['msvs_version'] == '': + env['msvs_version'], vs_dir = \ + _try_to_figure_out_msvs_version_registry(env) + if env['msvs_version'] == None: + die("Did not find MSVS version!") + + vs_dir = None + i = int(env['msvs_version']) + if i == 6: # 32b only + vc = _set_msvs_dev6(env,x64_host, x64_target) + elif i == 7: # 32b only + vc = _set_msvs_dev7(env,x64_host, x64_target) + + elif i == 8: # 32b or 64b + vc = _set_msvs_dev8(env, x64_host, x64_target, vs_dir) + elif i == 9: # 32b or 64b + vc = _set_msvs_dev9(env, x64_host, x64_target, vs_dir) + elif i == 10: # 32b or 64b + vc = _set_msvs_dev10(env, x64_host, x64_target, vs_dir) + elif i == 11: # 32b or 64b + vc = _set_msvs_dev11(env, x64_host, x64_target, vs_dir) + elif i == 12: # 32b or 64b + vc = _set_msvs_dev12(env, x64_host, x64_target, vs_dir) + # And 12 shall be followed by 14. 13? 13 is Right Out! + elif i == 14: # 32b or 64b + vc = _set_msvs_dev14(env, x64_host, x64_target, vs_dir) + else: + die("Unhandled MSVS version: " + env['msvs_version']) + + msgb("FOUND MS VERSION",env['msvs_version']) + return vc + diff --git a/mbuild/plan.py b/mbuild/plan.py new file mode 100755 index 0000000..7ec89d3 --- /dev/null +++ b/mbuild/plan.py @@ -0,0 +1,93 @@ +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +"""Intermediate data structure produced by builders and sent to the +dependence directed acyclic graph (DAG) that sequences execution. + +Users who create their own builders to call python functions should emit +an plan_t object and add it to the DAG. +""" + +class plan_t(object): + """ + An object that the builders create and is passed to the DAG L{dag_t} to + order the tasks. This is used exclusively to create + L{command_t}'s. + """ + def __init__(self, command, args=None, env=None, input=None, output=None, name=None): + """ + Create an input record for the L{dag_t} describing a + command. The command can be a string to execute or a python + function or a list of strings and python functions. The python + function will be passed two arguments: args and env. args is + typically a list, but could be anything. + + The input and output lists of files are used by the L{dag_t} to + order this command relative to other commands. + + When the command is a python function, the python function is + called with two arguments: args and an env of type + L{env_t}. The args can be anything but are typically the + inputs to the python function and any information required to + generate the corresponding outputs. The python functions return + a 2-typle (retcode, stdout). + + The input list: When the command is a python function, the + plan_t's input list contains at least the input files names + passed via args variable. The input list can be a superset + containing more stuff that might trigger the command + execution. 
+ + If the command does not produce a specific output, you can + specify a dummy file name to allow sequencing relative to + other commands. + + @type command: string or python function or a list + @param command: string or python function. + + @type args: list + @param args: (optional) arguments to the command if it is a python function + + @type env: L{env_t} + @param env: (optional) an environment to pass to the python function + + @type input: list + @param input: (optional) files upon which this command depends. + + @type output: list + @param output: (optional) files which depend on this command. + + @type name: string + @param name: (optional) short name to be used to identify the work/task + """ + self.command = command + self.args = args + self.env = env + self.input = input + self.output = output + self.name = name + + def __str__(self): + s = [] + if self.name: + s.append('NAME: ' + str(self.name)) + s.append('CMD: ' + str(self.command)) + s.append('INPUT: ' + str(self.input)) + s.append('OUTPUT: ' + str(self.output)) + return " ".join(s) diff --git a/mbuild/scanner.py b/mbuild/scanner.py new file mode 100755 index 0000000..6ef0c83 --- /dev/null +++ b/mbuild/scanner.py @@ -0,0 +1,104 @@ +# -*- python -*- +# Mark Charney +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +#END_LEGAL + +"""Function for header include scanning""" + +import re +import os +import sys +import base +import util + +class mbuild_header_record_t: + """Stores information about headers that we find""" + def __init__(self, fn, found=True): + self.file_name = fn + self.system = False + self.found = found + def __str__(self): + s = '' + s = self.file_name + if self.system: + s += ' system' + if not self.found: + s += ' not-found' + return s + +def mbuild_compute_path(hname, search_path): + """Return the full path of the header hname, if found and None + otherwise. Search the path in order and see if we find the file""" + for p in search_path: + tname = util.join(p,hname) + tname = os.path.realpath(tname) + #mbuild_base.msgb("TESTING", tname) + if os.path.exists(tname): + return tname + return None + +# FIXME: ignoring system headers for now. +mbuild_include_pattern = re.compile(r'^[ \t]*#[ \t]*include[ \t]+"(?P[^"]+)"') +mbuild_nasm_include_pattern = re.compile(r'^[ \t]*%include[ \t]+"(?P[^"]+)"') + +def mbuild_scan(fn, search_path): + """Given a file name fn, and a list of search paths, scan for + headers in fn and return a list of mbuild_header_record_t's. The + header records indicate if the file is a system include based on + <> symbols or if the file was missing. If the file cannot be + found, we assume it is in the assumed_directory.""" + global mbuild_include_pattern + global mbuild_nasm_include_pattern + + all_names = [] + + if not os.path.exists(fn): + return all_names + + source_path = os.path.dirname(fn) + if source_path == '': + source_path = '.' 
+ aug_search_path = [source_path] + search_path + + for line in file(fn).readlines(): + #print line + hgroup = mbuild_include_pattern.match(line) + if not hgroup: + hgroup = mbuild_nasm_include_pattern.match(line) + if hgroup: + hname = hgroup.group('hdr') + #print hname + full_name = mbuild_compute_path(hname, aug_search_path) + if full_name: + hr = mbuild_header_record_t(full_name) + else: + hr = mbuild_header_record_t(hname, found=False) + all_names.append(hr) + return all_names + + + +def _test_scan(): + paths = ["/home/mjcharne/proj/learn/" ] + all_headers = mbuild_scan("/home/mjcharne/proj/learn/foo.cpp", paths) + for hr in all_headers: + print hr + +if __name__ == '__main__': + _test_scan() + diff --git a/mbuild/util.py b/mbuild/util.py new file mode 100755 index 0000000..abfae0b --- /dev/null +++ b/mbuild/util.py @@ -0,0 +1,1148 @@ +# -*- python -*- +# Mark Charney +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +#END_LEGAL + +"""Basic useful utilities: file copying, removal, permissions, +path-name manipulation, and command execution.""" + +import os +import re +import glob +import sys +import shutil +import stat +import types +import time +import subprocess +import tempfile +import shlex +import mbuild +import traceback +try: + import cPickle as apickle +except: + import pickle as apickle + +from base import * + +def find_python(env): + """return path to NON cygwin""" + pycmd = sys.executable # use whatever the user invoked us with + if env.on_windows() and env.on_cygwin(): + # avoid cygwin python + if pycmd in ['/usr/bin/python', '/bin/python']: + python_commands = [ 'c:/python27/python.exe', + 'c:/python26/python.exe', + 'c:/python25/python.exe' ] + pycmd = None + for p in python_commands: + if os.path.exists(p): + return p + if not pycmd: + mbuild.die("Could not find win32 python at these locations: %s" % + "\n\t" + "\n\t".join(python_commands)) + + return pycmd +def copy_file(src,tgt): + """Copy src to tgt.""" + if verbose(1): + msgb("COPY", tgt + " <- " + src) + shutil.copy(src,tgt) +def move_file(src,tgt): + """Move/Rename src to tgt.""" + if verbose(1): + msgb("MOVE", src + " -> " + tgt) + shutil.move(src,tgt) +def symlink(env,src,tgt): + """Make a symlink from src to target. Not available on windows.""" + if env.on_windows(): + die("symlink() not available on windows") + if verbose(1): + msgb("SYMLINK", src + " -> " + tgt) + os.symlink(src,tgt) + +def copy_tree(src,tgt, ignore_patterns=None, symlinks=False): + """Copy the tree at src to tgt. 
This will first remove tgt if it + already exists.""" + if verbose(1): + msgb("COPYTREE", tgt + " <- " + src) + if not os.path.exists(src): + error_msg("SRC TREE DOES NOT EXIST", src) + raise Exception + if os.path.exists(tgt): + if verbose(1): + msgb("Removing existing target tree", tgt) + shutil.rmtree(tgt, ignore_errors=True) + if verbose(1): + msgb("Copying to tree", tgt) + if ignore_patterns: + sp = shutil.ignore_patterns(ignore_patterns) + else: + sp = None + shutil.copytree(src,tgt,ignore=sp, symlinks=symlinks) + if verbose(1): + msgb("Done copying tree", tgt) + +def cmkdir(path_to_dir): + """Make a directory if it does not exist""" + if not os.path.exists(path_to_dir): + if verbose(1): + msgb("MKDIR", path_to_dir) + os.makedirs(path_to_dir) +def list2string(ls): + """Print a list as a string""" + s = " ".join(ls) + return s + +def remove_file(fn, env=None, quiet=True): + """Remove a file or link if it exists. env parameter is not used.""" + if os.path.exists(fn): + make_writable(fn) + if os.path.exists(fn) or os.path.lexists(fn): + if not quiet: + if verbose(1): + msgb("REMOVING", fn) + os.unlink(fn) + return (0, []) +def remove_tree(dir_name, env=None, dangerous=False): + """Remove a directory if it exists. env parameter is not + used. This will not remove a directory that has a .svn + subdirectory indicating it is a source directory. Warning: It does + not look recursively for .svn subdirectories. + @type dir_name: string + @param dir_name: a directory name + @type env: L{env_t} + @param env: optional. Not currently used. + @type dangerous: bool + @param dangerous: optional. If True,will delete anything including svn trees!! BE CAREFUL! default False. 
+ """ + if verbose(1): + msgb("CHECKING", dir_name) + if os.path.exists(dir_name): + if not dangerous and os.path.exists(os.path.join(dir_name, ".svn")): + s = 'Did not remove directory %s because of a .svn subdirectory' % \ + dir_name + warn(s) + return (1, [ s ]) + if verbose(1): + msgb("REMOVING", dir_name) + make_writable(dir_name) + shutil.rmtree(dir_name, ignore_errors = True) + return (0, []) +def remove_files(lst, env=None): + """Remove all the files in the list of files, lst. The env + parameter is not used""" + for fn in lst: + remove_file(fn) + return (0, []) + +def remove_files_glob(lst,env=None): + """Remove all files in the list of wild card expressions. The env + parameter is not used""" + for fn_glob in lst: + #msgb("REMOVING", fn_glob) + for file_name in glob.glob(fn_glob): + remove_file(file_name) + return (0, []) + +def remove_files_from_tree(dir, file_patterns): + """Remove files that match the re object compiled pattern provided""" + for (dir, subdirs, subfiles) in os.walk(dir): + for file_name in subfiles: + fn = os.path.join(dir,file_name) + if file_patterns.search(fn): + remove_file(fn) + + +_readable_by_all = stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH +_readable_by_ug = stat.S_IRUSR|stat.S_IRGRP +_executable_by_all = stat.S_IXUSR|stat.S_IXGRP|stat.S_IXOTH +_executable_by_ug = stat.S_IXUSR|stat.S_IXGRP +_writeable_by_me = stat.S_IWUSR +_rwx_by_me = stat.S_IWUSR| stat.S_IRUSR|stat.S_IXUSR +_writeable_by_ug = stat.S_IWUSR|stat.S_IWGRP + +def make_writable(fn): + """Make the file or directory readable/writable/executable by me""" + global _rwx_by_me + os.chmod(fn, _rwx_by_me) + +def make_executable(fn): + """Make the file or directory readable & executable by user/group, writable by user""" + global _executable_by_ug + global _readable_by_ug + global _writeable_by_me + os.chmod(fn, _readable_by_ug|_writeable_by_me|_executable_by_ug) + +def modify_dir_tree(path, dir_fn=None, file_fn=None): + """Walk the tree rooted at path and apply the function dir_fn 
to + directories and file_fn to files. This is intended for doing + recursive chmods, etc.""" + if dir_fn: + dir_fn(path) + for (dir, subdirs, subfiles) in os.walk(path): + if dir_fn: + for subdir in subdirs: + dir_fn(os.path.join(dir,subdir)) + if file_fn: + for file_name in subfiles: + file_fn(os.path.join(dir,file_name)) + + +def make_read_only(fn): + """Make the file fn read-only""" + global _readable_by_all + os.chmod(fn, _readable_by_all) + +def make_web_accessible(fn): + """Make the file readable by all and writable by the current owner""" + global _readable_by_all + global _writeable_by_me + if verbose(8): + msgb("make_web_accessible", fn) + os.chmod(fn, _writeable_by_me|_readable_by_all) +def make_web_accessible_dir(dir): + """Make the directory readable and executable by all and writable + by the current owner""" + global _readable_by_all + global _executable_by_all + global _writeable_by_me + if verbose(8): + msgb("make_web_accessible_dir", dir) + os.chmod(dir, _writeable_by_me|_readable_by_all|_executable_by_all) + +def make_documentation_tree_accessible(dir): + """Make the directory teree rooted at dir web-accessible. That is, + the directories are readable and executable by anyone and the + files are readable by anyone.""" + msgb("CHMOD TREE", dir) + modify_dir_tree(dir, make_web_accessible_dir, make_web_accessible) + + + +def prefix_files(dir,input_files): + """Add dir on to the front of the input file or files. Works with + strings or lists of strings. 
+ @type dir: string + @param dir: prefix directory + + @type input_files: string or list of strings + @param input_files: name(s) of files + + @rtype: string or list of strings + @return: input file(s) prefixed with dir sp + """ + if isinstance(input_files,types.ListType): + new_files = map(lambda(x): join(dir, x), input_files) + return new_files + elif isinstance(input_files,types.StringType): + new_file = join(dir, input_files) + return new_file + die("Unhandled type in prefix_files: "+ str(type(input_files))) + +def quote(fn): + """Add quotes around the file nameed fn. Return a string""" + return "\"%s\"" % fn + +def qdip(fn): + """Add quotes to a string if there are spaces in the name""" + if re.search(' ',fn): + return '"%s"' % fn + return fn + + +def touch(fn): + """Open a file for append. Write nothing to it""" + if verbose(): + msgb("TOUCH", fn) + f=open(fn,"a") + f.close() + +############################################################ +if on_native_windows(): + _mysep = "\\" +else: + _mysep = "/" + +def myjoin( *args ): + """join all the args supplied as arguments using _mysep as the + separator. _mysep is a backslash on native windows and a forward + slash everywhere else. 
+ @type args: strings + @param args: path component strings + + @rtype: string + @return: string with _mysep slashes + """ + s = '' + first = True + for a in args: + if first: + first = False + else: + s = s + _mysep + s = s + a + return s + +def strip_quotes(a): + """Conditionally remove leading/trailing quotes from a string + @type a: string + @param a: a string potentially with quotes + + @rtype: string + @return: same string without the leading and trailing quotes + """ + ln = len(a) + if ln >= 2: + strip_quotes = False + if a[0] == '"' and a[-1] == '"': + strip_quotes=True + elif a[0] == "'" and a[-1] == "'": + strip_quotes=True + if strip_quotes: + b = a[1:ln-1] + return b + return a + +def join( *args ): + """join all the args supplied as arguments using a forward slash as + the separator + + @type args: strings + @param args: path component strings + + @rtype: string + @return: string with forward-slashes + """ + s = '' + first = True + for a in args: + ln = len(s) + if first: + first = False + elif ln == 0 or s[-1] != '/': + # if the last character is not a fwd slash already, add a slash + s = s + '/' + a = strip_quotes(a) + s = s + a + return s + + +def flip_slashes(s): + """convert to backslashes to _mysep slashes. _mysep slashes are + defined to be backslashes on native windows and forward slashes + everywhere else. + @type s: string or list of strings + @param s: path name(s) + + @rtype: string or list of strings + @return: string(s) with _mysep slashes + """ + + if on_native_windows(): + return s + if type(s) == types.ListType: + return map(flip_slashes, s) + t = re.sub(r'\\',_mysep,s,0) # replace all + return t + +def posix_slashes(s): + """convert to posix slashes. 
Do not flip slashes immediately before spaces + @type s: string or list of strings + @param s: path name(s) + + @rtype: string or list of strings + @return: string(s) with forward slashes + """ + if type(s) == types.ListType: + return map(posix_slashes, s) + #t = re.sub(r'\\','/',s,0) # replace all + last = len(s)-1 + t=[] + for i,a in enumerate(s): + x=a + if a == '\\': + if i == last: + x = '/' + elif s[i+1] != ' ': + x = '/' + t.append(x) + return ''.join(t) + +def glob(s): + """Run the normal glob.glob() on s but make sure all the slashes + are flipped forward afterwards. This is shorthand for + mbuild.posix_slashes(glob.glob(s))""" + import glob + return posix_slashes(glob.glob(s)) + +def cond_add_quotes(s): + """If there are spaces in the input string s, put quotes around the + string and return it... if there are not already quotes in the + string. + + @type s: string + @param s: path name + + @rtype: string + @return: string with quotes, if necessary + """ + if re.search(r'[ ]',s) and not ( re.search(r'["].*["]',s) or + re.search(r"['].*[']",s) ): + return '\"' + s + '\"' + return s + + +def escape_special_characters(s): + """Add a backslash before characters that have special meanings in + regular expressions. 
Python does not handle backslashes in regular
+    expressions or substitution text so they must be escaped before
+    processing."""
+
+    # NOTE: only the backslash itself is escaped here, despite the
+    # docstring's broader wording; other regex metacharacters pass through.
+    special_chars = r'\\'
+    new_string = ''
+    for c in s:
+        if c in special_chars:
+            new_string += '\\'
+        new_string += c
+    return new_string
+
+###############################################################
+
+if check_python_version(2,5):
+    import hashlib
+else:
+    import sha
+
+def hash_list(list_of_strings):
+    """Compute a sha1 hash of a list of strings and return the hex digest"""
+    if check_python_version(2,5):
+        m = hashlib.sha1()
+    else:
+        m = sha.new()
+    # plain loop: the old map(lambda (x): m.update(x), ...) was called only
+    # for its side effect and built a throwaway list of Nones.
+    for x in list_of_strings:
+        m.update(x)
+    d = m.hexdigest()
+    return d
+
+
+def hash_file(fn):
+    """Return the sha1 hex digest of the contents of file fn, or None if
+    fn does not exist. Calls die() if the file exists but cannot be read."""
+    if os.path.exists(fn):
+        try:
+            # open/close explicitly; the old file(fn).readlines() leaked
+            # the file handle until garbage collection.
+            f = open(fn)
+            try:
+                lines = f.readlines()
+            finally:
+                f.close()
+        except:
+            die("COULD NOT READ: %s" % (fn))
+        signature = hash_list(lines)
+        return signature
+    return None
+
+
+def write_signatures(fn,d):
+    """Write a dictionary of d[file]=hash to the specified file"""
+    # FIXME: binary protocol 2, binary file write DOES NOT WORK ON win32/win64
+    f = open(fn,"wb")
+    apickle.dump(d,f)
+    f.close()
+
+def read_signatures(fn):
+    """Return a dictionary of d[file]=hash from the specified file.
+    Returns None if the file cannot be read or unpickled."""
+    try:
+        # binary mode to match the "wb" used by write_signatures(); reading
+        # a binary pickle in text mode corrupts it on windows.
+        f = open(fn,"rb")
+        d = apickle.load(f)
+        f.close()
+        return d
+    except:
+        return None
+
+
+def hash_string(s):
+    """Compute a sha1 hash of a string and return the hex digest"""
+    if check_python_version(2,5):
+        m = hashlib.sha1()
+    else:
+        m = sha.new()
+    m.update(s)
+    d = m.hexdigest()
+    return d
+
+
+def hash_files(list_of_files, fn):
+    """Hash the files in the list of files and write the hashes to fn"""
+    d = {}
+    for f in list_of_files:
+        d[f] = hash_file(f)
+    write_signatures(fn,d)
+
+def file_hashes_are_valid(list_of_files, fn):
+    """Return true iff the old hashes in the file fn are valid for all
+    of the specified list of files."""
+    if not os.path.exists(fn):
+        return False
+    d = read_signatures(fn)
+    if d == None:
+        return False
+    for f in list_of_files:
+        if os.path.exists(f):
+ nhash = hash_file(f) + else: + return False + if nhash == None: + return False + if f not in d: + return False + elif d[f] != nhash: + return False; + return True + +############################################################### +# Time functions +def get_time_str(): + """@rtype: string + @returns: current time as string + """ + # include time zone + return time.strftime('%Y-%m-%d %H:%M:%S %Z') + +def get_time(): + """@rtype: float + @returns: current time as float + """ + return time.time() + +def get_elapsed_time(start_time, end_time=None): + """compute the elapsed time in seconds or minutes + @type start_time: float + @param start_time: starting time. + @type end_time: float + @param end_time: ending time. + @rtype: string + """ + if end_time == None: + end_time = get_time() + seconds = end_time - start_time + negative_prefix = '' + if seconds < 0: + negative_prefix = '-' + seconds = -seconds + if seconds < 120: + if int(seconds) == 0: + milli_seconds = seconds * 1000 + timestr = "%d" % int(milli_seconds) + suffix = " msecs" + else: + timestr = "%d" % int(seconds) + suffix = " secs" + else: + minutes = int(seconds/60.0) + remainder_seconds = int(seconds - (minutes*60)) + timestr = "%.d:%02d" % (minutes,remainder_seconds) + suffix = " min:sec" + return "".join([negative_prefix, timestr, suffix]) + +def print_elapsed_time(start_time, end_time=None, prefix=None, current=False): + """print the elapsed time in seconds or minutes. 
+ + @type start_time: float + @param start_time: the starting time + @type end_time: float + @param end_time: the ending time (optional) + @type prefix: string + @param prefix: a string to print at the start of the line (optional) + """ + if end_time == None: + end_time = get_time() + ets = "ELAPSED TIME" + if prefix: + s = "%s %s" % (prefix, ets) + else: + s = ets + + t = get_elapsed_time(start_time, end_time) + if current: + t = t + " / NOW: " + get_time_str() + msgb(s,t) + + +############################################################### +def _prepare_cmd(cmd): + """Tokenize the cmd string input. Return as list on non-windows + platforms. On windows, it returns the raw command string.""" + + if on_native_windows(): + # the posix=False is required to keep shlex from eating + # backslashed path characters on windows. But + # the nonposix chokes on /Dfoo="xxx yyy" in that it'll + # split '/Dfoo="xxx' and 'yyy"' in to two different args. + # so we cannot use that + #args = shlex.split(cmd,posix=False) + + # using posix mode (default) means that all commands must must + # forward slashes. So that is annoying and we avoid that + #args = shlex.split(cmd) + + # passing the args through works fine. Make sure not to have + # any carriage returns or leading white space in the supplied + # command. + args = cmd + + else: + args = shlex.split(cmd) + return args + +def _cond_open_input_file(directory,input_file_name): + if input_file_name: + if directory and not os.path.isabs(input_file_name): + fn = os.path.join(directory, input_file_name) + else: + fn = input_file_name + input_file_obj = file(fn,"r") + return input_file_obj + return None + +def run_command(cmd, + separate_stderr=False, + shell_executable=None, + directory=None, + osenv=None, + input_file_name=None, + **kwargs): + """ + Run a command string using the subprocess module. + + @type cmd: string + @param cmd: command line to execut with all args. 
+    @type separate_stderr: bool
+    @param separate_stderr: If True, the return tuple has a list of stderr lines as the 3rd element
+    @type shell_executable: string
+    @param shell_executable: the shell executable
+    @type directory: string
+    @param directory: a directory to change to before running the command.
+    @type osenv: dictionary
+    @param osenv: dict of environment vars to be passed to the new process
+    @type input_file_name: string
+    @param input_file_name: file name to read stdin from. Default none
+
+    @rtype: tuple
+    @return: (return code, list of stdout lines, list of lines of stderr)
+    """
+    use_shell = False
+    if verbose(99):
+        msgb("RUN COMMAND", cmd)
+        msgb("RUN COMMAND repr", repr(cmd))
+    stdout = None
+    stderr = None
+    cmd_args = _prepare_cmd(cmd)
+    try:
+        input_file_obj = _cond_open_input_file(directory, input_file_name)
+
+        if separate_stderr:
+            sub = subprocess.Popen(cmd_args,
+                                   shell=use_shell,
+                                   executable=shell_executable,
+                                   stdin = input_file_obj,
+                                   stdout = subprocess.PIPE,
+                                   stderr = subprocess.PIPE,
+                                   cwd=directory,
+                                   env=osenv,
+                                   **kwargs)
+            (stdout, stderr ) = sub.communicate()
+            if not isinstance(stderr,types.ListType):
+                stderr = [stderr]
+            if not isinstance(stdout,types.ListType):
+                stdout = [stdout]
+            return (sub.returncode, stdout, stderr)
+        else:
+            sub = subprocess.Popen(cmd_args,
+                                   shell=use_shell,
+                                   executable=shell_executable,
+                                   stdin = input_file_obj,
+                                   stdout = subprocess.PIPE,
+                                   stderr = subprocess.STDOUT,
+                                   cwd=directory,
+                                   env=osenv,
+                                   **kwargs)
+            stdout = sub.stdout.readlines()
+            sub.wait()
+            if not isinstance(stdout,types.ListType):
+                stdout = [stdout]
+            return (sub.returncode, stdout, None)
+    except OSError, e:
+        s= ["Execution failed for: %s\n" % (cmd) ]
+        s.append("Result is %s\n" % (str(e)))
+        # Normalize stdout/stderr to lists unconditionally. Popen typically
+        # raises OSError before any output is captured, so stdout is still
+        # None here; the old code normalized only when separate_stderr was
+        # True and crashed with AttributeError on stdout.extend() otherwise,
+        # masking the real failure.
+        if stderr == None:
+            stderr = []
+        elif not isinstance(stderr,types.ListType):
+            stderr = [stderr]
+        if stdout == None:
+            stdout = []
+        elif not isinstance(stdout,types.ListType):
+            stdout = [stdout]
+        # put the error message in stderr if there is a separate
+        # stderr, otherwise put it in stdout.
+        if separate_stderr:
+            stderr.extend(s)
+        else:
+            stdout.extend(s)
+        return (1, stdout, stderr)
+
+
+def run_command_unbufferred(cmd,
+                            prefix_line=None,
+                            shell_executable=None,
+                            directory=None,
+                            osenv=None,
+                            input_file_name=None,
+                            **kwargs):
+    """
+    Run a command string using the subprocess module.
+
+    @type cmd: string
+    @param cmd: command line to execut with all args.
+    @type prefix_line: string
+    @param prefix_line: a string to prefix each output line. Default None
+    @type shell_executable: string
+    @param shell_executable: NOT USED BY THIS FUNCTION
+    @type directory: string
+    @param directory: a directory to change to before running the command.
+    @type osenv: dictionary
+    @param osenv: dict of environment vars to be passed to the new process
+    @type input_file_name: string
+    @param input_file_name: file name to read stdin from. Default none
+
+    @rtype: tuple
+    @return: (return code, list of stdout lines, empty list)
+
+    """
+    use_shell = False
+    if verbose(99):
+        msgb("RUN COMMAND", cmd)
+        msgb("RUN COMMAND repr", repr(cmd))
+    lines = []
+    cmd_args = _prepare_cmd(cmd)
+    try:
+        input_file_obj = _cond_open_input_file(directory, input_file_name)
+        sub = subprocess.Popen(cmd_args,
+                               shell=use_shell,
+                               executable=shell_executable,
+                               stdin = input_file_obj,
+                               stdout = subprocess.PIPE,
+                               stderr = subprocess.STDOUT,
+                               env=osenv,
+                               cwd=directory,
+                               **kwargs)
+        while 1:
+            # FIXME: 2008-12-05 bad for password prompts without newlines.
+ line = sub.stdout.readline() + if line == '': + break + line = line.rstrip() + if prefix_line: + msgn(prefix_line) + msg(line) + lines.append(line + "\n") + + sub.wait() + return (sub.returncode, lines, []) + except OSError, e: + lines.append("Execution failed for: %s\n" % (cmd)) + lines.append("Result is %s\n" % (str(e))) + return (1, lines,[]) + + +def run_command_output_file(cmd, + output_file_name, + shell_executable=None, + directory=None, + osenv=None, + input_file_name=None, + **kwargs): + """ + Run a command string using the subprocess module. + + @type cmd: string + @param cmd: command line to execut with all args. + @type output_file_name: string + @param output_file_name: output file name + @type shell_executable: string + @param shell_executable: the shell executable + @type directory: string + @param directory: a directory to change to before running the command. + @type osenv: dictionary + @param osenv: dict of environment vars to be passed to the new process + @type input_file_name: string + @param input_file_name: file name to read stdin from. 
Default none + + @rtype: tuple + @return: (return code, list of stdout lines) + """ + use_shell = False + if verbose(99): + msgb("RUN COMMAND", cmd) + lines = [] + cmd_args = _prepare_cmd(cmd) + try: + output = file(output_file_name,"w") + input_file_obj = _cond_open_input_file(directory, input_file_name) + sub = subprocess.Popen(cmd_args, + shell=use_shell, + executable=shell_executable, + stdin = input_file_obj, + stdout = subprocess.PIPE, + stderr = subprocess.STDOUT, + env=osenv, + cwd=directory, + **kwargs) + #msgb("RUNNING SUBPROCESS") + while 1: + #msgb("READING OUTPUT") + line = sub.stdout.readline() + if line == '': + break + line = line.rstrip() + output.write(line + "\n") + lines.append(line + "\n") + + output.close() + sub.wait() + return (sub.returncode, lines, []) + except OSError, e: + lines.append("Execution failed for: %s\n" % (cmd)) + lines.append("Result is %s\n" % (str(e))) + return (1, lines,[]) + +def run_cmd_io(cmd, fn_i, fn_o,shell_executable=None, directory=None): + """ + Run a command string using the subprocess module. Read standard + input from fn_i and write stdout/stderr to fn_o. + + @type cmd: string + @param cmd: command line to execut with all args. + @type fn_i: string + @param fn_i: input file name + @type fn_o: string + @param fn_o: output file name + @type shell_executable: string + @param shell_executable: the shell executable + @type directory: string + @param directory: a directory to change to before running the command. 
+
+    @rtype: integer
+    @return: return code
+    """
+    use_shell = False
+    cmd_args = _prepare_cmd(cmd)
+    try:
+        fin = open(fn_i,'r')
+        fout = open(fn_o,'w')
+        sub = subprocess.Popen(cmd_args,
+                               shell=use_shell,
+                               executable=shell_executable,
+                               stdin=fin,
+                               stdout=fout,
+                               stderr=subprocess.STDOUT,
+                               cwd=directory)
+        retval = sub.wait()
+        fin.close()
+        fout.close()
+        return retval
+    except OSError, e:
+        die("Execution failed for cmd %s\nResult is %s\n" % (cmd,str(e)))
+
+def find_dir(d):
+    """Look upwards for a particular filesystem directory d as a
+    subdirectory of one of the ancestors. Return None on failure"""
+    dir = os.getcwd()
+    last = ''
+    while dir != last:
+        target_dir = os.path.join(dir,d)
+        #print "Trying %s" % (target_dir)
+        if os.path.exists(target_dir):
+            return target_dir
+        last = dir
+        (dir,tail) = os.path.split(dir)
+    return None
+
+def peel_dir(s,n):
+    """Remove n trailing path components from s by calling
+    os.path.dirname()"""
+    t = s
+    for i in range(0,n):
+        t = os.path.dirname(t)
+    return t
+
+def get_gcc_version(gcc):
+    """Return the compressed version number of gcc, or 'unknown'."""
+    cmd = gcc + " -dumpversion"
+    try:
+        (retcode, stdout, stderr) = run_command(cmd)
+        if retcode == 0:
+            version = stdout[0]
+            return version.strip()
+        # a nonzero exit status used to fall off the end and return None;
+        # report 'unknown' to match the exception path.
+        return 'unknown'
+    except:
+        return 'unknown'
+
+def get_clang_version(full_path):
+    """Return the clang version as 'major.minor.patchlevel', or 'unknown'."""
+    cmd = full_path + " -dM -E - "
+    try:
+        (retcode, stdout, stderr) = run_command(cmd,
+                                                input_file_name="/dev/null")
+        if retcode == 0:
+            major=minor=patchlevel='x'
+            for line in stdout:
+                line = line.strip()
+                chunks = line.split()
+                if len(chunks) == 3:
+                    if chunks[1] == '__clang_major__':
+                        major = chunks[2]
+                    elif chunks[1] == '__clang_minor__':
+                        minor = chunks[2]
+                    elif chunks[1] == '__clang_patchlevel__':
+                        patchlevel = chunks[2]
+            version = "{}.{}.{}".format(major,minor,patchlevel)
+            return version
+        # a nonzero exit status used to fall off the end and return None;
+        # report 'unknown' to match the exception path.
+        return 'unknown'
+    except:
+        return 'unknown'
+
+# unify names for clang/gcc version checkers
+def compute_clang_version(full_path):
+    return get_clang_version(full_path)
+
+def compute_gcc_version(full_path):
+    return get_gcc_version(full_path)
+
+def gcc_version_test(major,minor,rev,gstr):
+    """Return True if the specified gcc version string (gstr) is at or
+    after the specified major,minor,revision args"""
+
+    n = gstr.split('.')
+    if len(n) not in [2,3]:
+        die("Cannot compute gcc version from input string: [%s]" % (gstr))
+    ga = int(n[0])
+    gb = int(n[1])
+    if len(n) == 2:
+        gc = 0
+    else:
+        gc = int(n[2])
+
+    if ga > major:
+        return True
+    if ga == major and gb > minor:
+        return True
+    if ga == major and gb == minor and gc >= rev:
+        return True
+    return False
+
+import threading
+# requires Python2.6 or later
+class _timed_command_t(threading.Thread):
+    """
+    Internal function to mbuild util.py. Do not call directly.
+
+    Examples of use
+       env = os.environ
+       env['FOOBAR'] = 'hi'
+       # the command a.out prints out the getenv("FOOBAR") value
+       rc = _timed_command_t(["./a.out", "5"], seconds=4, env=env)
+       rc.timed_run()
+
+       rc = _timed_command_t(["/bin/sleep", "5"], seconds=4)
+       rc.timed_run()
+    """
+
+    def __init__(self, cmd,
+                 shell_executable=None,
+                 directory=None,
+                 osenv=None,
+                 seconds=0,
+                 input_file_name=None,
+                 **kwargs):
+        """The kwargs are for the other parameters to Popen"""
+        threading.Thread.__init__(self)
+        self.cmd = cmd
+        self.kwargs = kwargs
+        self.seconds = seconds
+        self.timed_out = False
+        self.sub = None
+        self.osenv= osenv
+        self.input_file_name = input_file_name
+        self.directory = directory
+        self.shell_executable = shell_executable
+        self.exception_type = None
+        self.exception_object = None
+        self.exception_trace = None
+        # These were previously written as "= 0," / "= \"\"," -- the trailing
+        # commas made each attribute a one-element TUPLE.  Downstream code
+        # (run_command_timed) uses exitcode as an int and output/stderr as
+        # strings/lists, so assign plain scalars.
+        self.exitcode = 0
+        self.output = ""
+        self.stderr = ""
+
+    def run(self): # executed by calling start()
+        cmd = self.cmd
+        #run a python command
+        if _is_python_cmd(cmd):
+            kwargs = self.kwargs
+            xenv = kwargs.get('xenv')
+            args_lst = kwargs.get('args_lst')
+            if args_lst == None:
+                args_lst = []
+            if xenv == None:
+                (self.exitcode,self.output,self.stderr) = cmd(*args_lst)
+            else:
+                
(self.exitcode,self.output,self.stderr) = cmd(xenv, *args_lst) + return + + #run an executable + use_shell = False + cmd_args = _prepare_cmd(cmd) + input_file_obj = _cond_open_input_file(self.directory, + self.input_file_name) + try: + self.sub = subprocess.Popen(cmd_args, + shell=use_shell, + executable=self.shell_executable, + cwd=self.directory, + env=self.osenv, + stdin = input_file_obj, + **self.kwargs) + except: + (self.exception_type, + self.exception_object, + self.exception_trace) = sys.exc_info() + else: + self.sub.wait() + + def timed_run(self): + """Returns False if the process times out. Also sets + self.timed_out to True.""" + + self.timed_out=False + self.start() # calls run() + if self.seconds: + self.join(self.seconds) + else: + self.join() + + if self.is_alive(): + try: + if self.sub: + if on_windows(): + # On Windows terminate() does not always kill + # the process So we need specific handling for + # Windows here. + kill_cmd = "taskkill /F /T /PID %i" % (self.sub.pid) + cmd_args = _prepare_cmd(kill_cmd) + subprocess.Popen(cmd_args, shell=True) + else: + self.sub.kill() + except: + pass + + self.join() + self.timed_out=True + return False + return True + + +def _is_python_cmd(cmd): + return isinstance(cmd,types.FunctionType) + + +def run_command_timed( cmd, + shell_executable=None, + directory=None, + osenv=None, + seconds=0, + input_file_name=None, + **kwargs ): + """Run a timed command. kwargs are keyword args for subprocess.Popen. + + @type cmd: string or python function + @param cmd: command to run + + @type shell_executable: string + @param shell_executable: the shell executable + + @type directory: string + @param directory: the directory to run the command in + + @type osenv: dictionary + @param osenv: dict of environment vars to be passed to the new process + + @type seconds: number + @param seconds: maximum execution time in seconds + + @type input_file_name: string + @param input_file_name: input filename when redirecting stdin. 
+ + @type kwargs: keyword args + @param kwargs: keyword args for subprocess.Popen + + @rtype: tuple + return: (return code, list of stdout+stderr lines) + """ + + def _get_exit_code(tc): + exit_code = 399 + if tc.sub: + # if tc.sub does not have a returncode, then something went + # very wrong, usually an exception running the subprocess. + if hasattr(tc.sub, 'returncode'): + exit_code = tc.sub.returncode + return exit_code + + # we use a temporary file to hold the output because killing the + # process disrupts the normal output collection mechanism. + fo = tempfile.SpooledTemporaryFile() + fe = tempfile.SpooledTemporaryFile() + tc = _timed_command_t(cmd, + shell_executable, + directory, + osenv, + seconds, + input_file_name, + stdout=fo, + stderr=fe, + **kwargs) + + tc.timed_run() + + if _is_python_cmd(tc.cmd): + exit_code = tc.exitcode + output = tc.output + stderr = tc.stderr + else: + fo.seek(0) + output = fo.readlines() + fo.close() + fe.seek(0) + stderr = [''.join(fe.readlines())] + fe.close() + exit_code = _get_exit_code(tc) + + nl = '\n' + if tc.timed_out: + stderr.extend([ nl, + 'COMMAND TIMEOUT'+nl, + 'KILLING PROCCESS'+nl]) + if tc.exception_type: + stderr.extend([ nl, + 'COMMAND ENCOUNTERD AN EXCEPTION' + nl]) + stderr.extend(traceback.format_exception(tc.exception_type, + tc.exception_object, + tc.exception_trace)) + + return (exit_code, output, stderr) diff --git a/mbuild/work_queue.py b/mbuild/work_queue.py new file mode 100755 index 0000000..2c04d19 --- /dev/null +++ b/mbuild/work_queue.py @@ -0,0 +1,1020 @@ +# -*- python -*- +# Mark Charney +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +"""Command objects and parallel work queue""" + +import os +import sys +import types +import Queue +from threading import Thread +from collections import deque + +from base import * +from util import * +from dag import * + + +############################################################################ +class dir_cmd_t(object): + """For holding a directory and a command. When you call + execute(), it changes to the directory an executes the command""" + + def __init__(self, dir, command, output_file=None): + self.dir= dir + self.command= command + self.output_file = output_file + def __str__(self): + return "DIR: %s\nCOMMAND: %s" % (self.dir, self.command) + + def execute(self,args=None, env=None): + """Change to the specified directory and execute the command, + unbufferred""" + orig = os.getcwd() + try: + msgb("CHDIR TO", self.dir) + os.chdir(self.dir) + except: + return (-1, ["no such dir: " + self.dir]) + msgb("EXECUTING", self.command) + if self.output_file: + (retcode, out, err) = \ + run_command_output_file(self.command, self.output_file) + msgb("WROTE", self.output_file) + else: + (retcode, out, err) = run_command_unbufferred(self.command) + os.chdir(orig) + if not err: + err = [] + if not out: + out = [] + if err: + return (retcode, out+err) + else: + return (retcode, out) + +class command_t(object): + """The primary data structure used to track jobs in this script. 
It + is created when you add L{plan_t} objects to the DAG + L{dag_t}.""" + + _ids = 0 + + def __init__(self, + command=None, + args=None, + xenv=None, + unbufferred=False, + output_file_name=None, + shell_executable=None, + directory=None, + name=None, + show_output=True, + osenv=None, + seconds=0, + input_file_name=None): + """ + This is the unit of work for the L{work_queue_t}. These are + typically created by the L{dag_t} but they can also be created + by hand and added to the L{work_queue_t} to execute arbitrary + commands. + + @type command: string or python function, or a list of both + @param command: command line string to execute or a python function + + @type args: anything + @param args: (optional) typically a list of arguments for the python function. + + @type xenv: L{env_t} + @param xenv: (optional) environment for used by the python + function. Passed as the second argument to the python function. + + @type osenv: dictionary + @param osenv: (optional) the environment that will be set in the new subprocess. + + @type unbufferred: L{bool} + @param unbufferred: (optional) true if the output should be unbufferred. 
+ + @type output_file_name: string + @param output_file_name: (optional) file name for stderr/stdout + + @type show_output: L{bool} + @param show_output: (optional) show output, default True + + @type input_file_name: string + @param input_file_name: (optional) file name for stdin + + """ + self.id = command_t._ids + command_t._ids += 1 + # store the command as a list + if isinstance(command,types.ListType): + self.command = command + else: + self.command = [ command ] + self.name = name + self.shell_executable = shell_executable + self.args = args + self.xenv = xenv + self.osenv = osenv + self.exit_status = 0 + self.output = [] + self.stderr = [] + self.unbufferred = unbufferred + self.input_file_name = input_file_name + self.output_file_name = output_file_name + self.start_time = 0 + self.end_time = 0 + self.directory = directory + self.show_output = show_output + self.input_file_name = input_file_name + + # Has this command be submitted to the work queue? + self.submitted = False + + # executed is set to True when this command tries to execute. + self.executed = False + + # all prerequisite commands are ready + self.ready = False + + # completed is set to True when this command exits successfully. + self.completed = False + + # things that depend on this command completing sucessfully + self.after_me = [] + + # things that must complete before this command can run + self.before_me = [] + + # from the file DAG. A list of inputs upon which this command depends + self.inputs = [] + # from the file DAG. A list of things generated by this command + self.targets = [] + + # used for special signals to the worker threads to tell them to + # shut down. + self.terminator = False + self.timeout = seconds + + def failed(self): + """ + Return the exit status. 
+ @rtype: bool + @return: True if the command failed (exit status != 0) + """ + if self.exit_status != 0: + return True + return False + + def _complete(self): + self.completed = True + + def _ready(self): + """Return true if all things that must execute before this node + have completed and false otherwise. Updates self.ready.""" + if self.ready: + return True + + for n in self.before_me: + if not n.completed: + return False + + self.ready=True + return True + + def is_python_command(self, i=0): + """Return true if the command list element is a python function + @rtype: bool + """ + if isinstance(self.command[i],types.FunctionType): + return True + return False + + def is_dir_cmd(self, i=0): + """Return true if the command list element is a python dir_cmd_t object + @rtype: bool + """ + if isinstance(self.command[i],dir_cmd_t): + return True + return False + + def has_python_subcommand(self): + """Return true if the command list has a python function + @rtype: bool + """ + for c in self.command: + if isinstance(c,types.FunctionType): + return True + return False + + def is_command_line(self, i=0): + """Return true if the command list element is normal string command + line. 
+ @rtype: bool + """ + if not isinstance(self.command[i],types.FunctionType) and \ + not isinstance(self.command[i],dir_cmd_t): + return True + return False + + def dagkey(self): + s = [] + for i in self.command: + if not isinstance(i,types.FunctionType): + s.append(i) + t = "MBUILD_COMMAND_KEY " + (" - ".join(s)) + return t + + def hash(self): + s = [] + for i in self.command: + if not isinstance(i,types.FunctionType): + s.append(i) + t = " - ".join(s) + h = hash_string(t) + return h + + def add_before_me(self,n): + """Make the current command execute after command n + @type n: L{command_t} + @param n: another (earlier) command + """ + if isinstance(n,types.ListType): + for x in n: + self.before_me.append(x) + x.after_me.append(self) + else: + self.before_me.append(n) + n.after_me.append(self) + + def add_after_me(self,n): + """Make the current command execute before command n. + @type n: L{command_t} + @param n: another (later) command + """ + if isinstance(n, types.ListType): + for x in n: + self.after_me.append(x) + x.before_me.append(self) + else: + self.after_me.append(n) + n.before_me.append(self) + + def _check_afters(self): + """Return a list of after nodes that are as-yet not submitted + but now ready""" + ready = [] + for x in self.after_me: + if not x.submitted and x._ready(): + ready.append(x) + return ready + + def elapsed_time(self): + """Return the elapsed time as an number of seconds""" + if self.end_time == None: + self.end_time = get_time() + return self.end_time - self.start_time + + def elapsed(self): + """Return the elapsed time. + @rtype: string + @returns: the elapsed wall clock time of execution. 
+ """ + if self.end_time == None: + self.end_time = get_time() + elapsed = get_elapsed_time(self.start_time, self.end_time) + return elapsed + + def dump_cmd(self): + return self._pretty_cmd_str() + + def stderr_exists(self): + if self.stderr and len(self.stderr) > 0: + if len(self.stderr) == 1 and len(self.stderr[0]) == 0: + return False + return True + return False + + def stdout_exists(self): + if self.output and len(self.output) > 0: + if len(self.output) == 1 and len(self.output[0]) == 0: + return False + return True + return False + + def _pretty_cmd_str(self): + s = [] + for cmd in self.command: + if isinstance(cmd,types.FunctionType): + s.append("PYTHON FN: " + cmd.__name__) + elif isinstance(cmd,types.StringType): + s.append(cmd) + else: + s.append(str(cmd)) + return " ;;;; ".join(s) + + + def dump(self, tab_output=False, show_output=True): + s = [] + nl = '\n' + #s.append( bracket('ID ', str(self.id))) + #s.append(nl) + + if verbose(1): + pass + elif self.failed(): + pass + elif self.targets: + s.append(bracket('TARGET ', " ".join(self.targets))) + s.append(nl) + if self.name: + s.append(bracket('NAME ', self.name)) + s.append(nl) + if self.command: + s.append(bracket('COMMAND ', self._pretty_cmd_str())) + s.append(nl) + else: + s.append( bracket('COMMAND ', 'none') ) + s.append(nl) + if self.args: + args_string = str(self.args) + print_limit = 400 + if len(args_string) > print_limit: + args_string = args_string[:print_limit] + s.append(bracket('ARGS ', args_string)) + s.append(nl) + if self.xenv: + s.append(bracket('ENV ', 'some env')) + s.append(nl) + #if self.submitted: + # s.append(bracket('START_TIME ', self.start_time)) + # s.append(nl) + if self.input_file_name: + s.append(bracket('INPUT_FILE ', self.input_file_name)) + s.append(nl) + + if self.completed or self.failed(): + if self.exit_status != 0: + s.append(bracket('EXIT_STATUS ', str(self.exit_status))) + s.append(nl) + if self.elapsed_time() > 1: + s.append(bracket('ELAPSED_TIME', 
self.elapsed())) + s.append(nl) + if self.input_file_name: + s.append(bracket('INPUT FILE', self.input_file_name)) + s.append(nl) + if self.output_file_name: + s.append(bracket('OUTPUT FILE', self.output_file_name)) + s.append(nl) + + if self.unbufferred == False and self.output_file_name==None: + if show_output and self.show_output and self.stdout_exists(): + s.append(bracket('OUTPUT')) + s.append(nl) + for line in self.output: + if tab_output: + s.append('\t') + s.append(line) + if show_output and self.show_output and self.stderr_exists(): + s.append(bracket('STDERR')) + s.append(nl) + for line in self.stderr: + if tab_output: + s.append('\t') + s.append(line) + return "".join(s) + + def __str__(self): + return self.dump() + + def _extend_output(self,output): + if output: + if isinstance(output,types.ListType): + self.output.extend(output) + else: + self.output.append(output) + + def _extend_output_stderr(self,output, stderr): + self._extend_output(output) + if stderr: + if isinstance(stderr,types.ListType): + self.stderr.extend(stderr) + else: + self.stderr.append(stderr) + + + def execute(self): + """Execute the command whether it be a python function or a + command string. This is executed by worker threads but is made + available here for potential debugging. Record execution exit/return + status and output. + + Sets the exit_status, output and stderr error fields of the + + command object. 
+ """ + self.executed = True + self.start_time = get_time() + self.output = [] + self.stderr = [] + for cmd in self.command: + try: + if isinstance(cmd, dir_cmd_t): + # execute dir_cmd_t objects + (self.exit_status, output) = cmd.execute( self.args, self.xenv ) + self._extend_output(output) + + elif isinstance(cmd,types.FunctionType): + # execute python functions + (self.exit_status, output) = cmd( self.args, self.xenv ) + self._extend_output(output) + + elif isinstance(cmd,types.StringType): + # execute command strings + if self.output_file_name: + (self.exit_status, output, stderr) = \ + run_command_output_file(cmd, + self.output_file_name, + shell_executable=self.shell_executable, + directory=self.directory, + osenv=self.osenv, + input_file_name=self.input_file_name) + self._extend_output_stderr(output,stderr) + + elif self.unbufferred: + (self.exit_status, output, stderr) = \ + run_command_unbufferred(cmd, + shell_executable= + self.shell_executable, + directory = self.directory, + osenv = self.osenv, + input_file_name=self.input_file_name) + self._extend_output_stderr(output, stderr) + else: + # execute timed_cmd_t objects + (self.exit_status, output, stderr) = \ + run_command_timed(cmd, + shell_executable=self.shell_executable, + directory = self.directory, + osenv = self.osenv, + seconds=self.timeout, + input_file_name = self.input_file_name) + self._extend_output_stderr(output, stderr) + + else: + self.exit_status = 1 + self.extend_output("Unhandled command object: " + self.dump()) + + # stop if something failed + if self.exit_status != 0: + break; + except Exception, e: + self.exit_status = 1 + self.stderr.append("Execution error for: %s\n%s" % (str(e), self.dump())) + break + + self.end_time = get_time() + + + +def _worker_one_task(incoming,outgoing): + """A thread. Takes stuff from the incoming queue and puts stuff on + the outgoing queue. calls execute for each command it takes off the + in queue. 
def _worker(incoming, outgoing):
    """Worker thread body.  Takes commands from the incoming queue,
    executes them, and puts them on the outgoing queue.  Returns when a
    terminator command is received."""
    keep_going = True
    while keep_going:
        keep_going = _worker_one_task(incoming, outgoing)

class work_queue_t(object):
    """This stores the threads and controls their execution"""
    def __init__(self, max_parallelism=4):
        """
        @type max_parallelism: int
        @param max_parallelism: the number of worker threads to start
        """
        max_parallelism = int(max_parallelism)
        if max_parallelism <= 0:
            die("Bad value for --jobs option: " + str(max_parallelism))
        self.max_parallelism = max_parallelism
        self.use_threads = True
        self.threads = []

        # Worker threads can add stuff to the new_queue so we
        # use MT-safe queues for all cross-thread traffic.
        self.new_queue = Queue.Queue(0)
        self.out_queue = Queue.Queue(0)
        self.back_queue = Queue.Queue(0)
        self.pending_commands = deque()

        # status-message rate limiting, in seconds
        self.message_delay = 10
        self.min_message_delay = 10
        self.message_delay_delta = 10  # NOTE: currently unused

        self.job_num = 0
        self.pending = 0

        # Robustness fix: build() normally sets these, but initialize
        # them here so _status() / _wait_for_jobs() cannot raise
        # AttributeError if called before the first build().
        self.show_progress = False
        self.show_errors_only = False
        self.join_timeout = 10.0

        self._clean_slate()

        if self.use_threads:
            if len(self.threads) == 0:
                self._start_daemons()

    def _empty_queue(self, q):
        """Drain queue q, discarding its contents."""
        while not q.empty():
            q.get_nowait()

    def _cleanup(self):
        """After a failed build we want to clean up any in-progress state
        so we can re-use the work queue object."""
        # The new_queue, job_num and pending get updated by add() before
        # we build, so we must clean them up after every build.  Also
        # good hygiene to clean out the task queues that we use to talk
        # to the workers.
        self.pending_commands = deque()
        self._empty_queue(self.new_queue)
        self._empty_queue(self.out_queue)
        self._empty_queue(self.back_queue)
        self.job_num = 0
        self.pending = 0

    def _clean_slate(self):
        """Reset all per-build bookkeeping to its initial state."""
        self.running_commands = []
        self.all_commands = []
        self.running = 0
        self.sent = 0
        self.finished = 0
        self.errors = 0
        self.dag = None

        # for message limiting in _status()
        self.last_time = 0
        self.last_pending = 0
        self.last_finished = 0
        self.last_running = 0

        self.start_time = get_time()
        self.end_time = None

        # dying becomes True when we are trying to stop because of an error
        self.dying = False

        self._empty_queue(self.out_queue)
        self._empty_queue(self.back_queue)

    def clear_commands(self):
        """Remove any previously remembered commands"""
        self.all_commands = []

    def commands(self):
        """Return list of all commands involved in last build"""
        return self.all_commands

    def elapsed_time(self):
        """Return the elapsed time as a number of seconds."""
        if self.end_time == None:
            self.end_time = get_time()
        return self.end_time - self.start_time

    def elapsed(self):
        """Return the elapsed time as a pretty string
        @rtype: string
        @returns: the elapsed wall clock time of execution.
        """
        if self.end_time == None:
            self.end_time = get_time()
        return get_elapsed_time(self.start_time, self.end_time)

    def __del__(self):
        if verbose(3):
            msgb("DEL WORK QUEUE")
        self._terminate()

    def _terminate(self):
        """Shut everything down.  Kill the worker threads if any were
        being used.  This is called when the work_queue_t is garbage
        collected, but can be called directly."""
        self.dying = True
        if self.use_threads:
            self._stop_daemons()
            self._join_threads()

    def _start_daemons(self):
        """Start up a bunch of daemon worker threads to process jobs
        from the queue."""
        for i in range(self.max_parallelism):
            t = Thread(target=_worker, args=(self.out_queue, self.back_queue))
            t.setDaemon(True)
            t.start()
            self.threads.append(t)

    def _stop_daemons(self):
        """Send terminator objects to all the workers"""
        for i in range(self.max_parallelism):
            t = command_t()
            t.terminator = True
            if verbose(3):
                msgb("SENT TERMINATOR", str(i))
            self._start_a_job(t)
        self.threads = []

    def _join_threads(self):
        """Use this when not running threads in daemon-mode"""
        for t in self.threads:
            t.join()
            if verbose(3):
                msgb("WORKER THREAD TERMINATED")

    def _add_one(self, command):
        """Add a single command of type L{command_t} to the list of jobs
        to run.  Completed commands are skipped (their ready successors
        are added instead); already-submitted commands are skipped."""
        # FIXME: make this take a string and build a command_t
        if command.completed:
            if verbose(5):
                msgb("SKIPPING COMPLETED CMD", str(command.command))
            self.add(command._check_afters())
            return
        if command.submitted:
            if verbose(5):
                msgb("SKIPPING SUBMITTED CMD", str(command.command))
            return
        command.submitted = True
        if verbose(6):
            msgb("WQ ADDING", str(command.command))
        self.job_num += 1
        self.new_queue.put(command)
        self.pending += 1

    def add_sequential(self, command_strings, unbufferred=False):
        """
        Add a list of command strings as sequential tasks to the work queue.

        @type command_strings: list of strings
        @param command_strings: command strings to add to the L{work_queue_t}

        @rtype: list of L{command_t}
        @return: the commands created
        """
        last_cmd = None
        cmds = []
        for c in command_strings:
            co = command_t(c, unbufferred=unbufferred)
            cmds.append(co)
            self.add(co)
            # chain each command after its predecessor
            if last_cmd:
                last_cmd.add_after_me(co)
            last_cmd = co
        return cmds

    def add(self, command):
        """Add a command or list of commands of type L{command_t}
        to the list of jobs to run.

        @type command: L{command_t}
        @param command: the command to run
        """
        if verbose(5):
            msgb("ADD CMD", str(type(command)))
        if command:
            if isinstance(command, list):
                for c in command:
                    if verbose(5):
                        msgb("ADD CMD", str(type(c)))
                    self._add_one(c)
            else:
                self._add_one(command)

    def _done(self):
        """Return True when nothing is running and (unless dying)
        nothing is pending."""
        if self.running > 0:
            return False
        if not self.dying and self.pending > 0:
            return False
        return True

    def _status(self):
        """Emit a rate-limited progress message when enabled."""
        if self.show_progress or verbose(2):
            s = ('RUNNING: %d PENDING: %d COMPLETED: %d ' +
                 'ERRORS: %d ELAPSED: %s %s')
            cur_time = get_time()

            changed = False
            if (self.running != self.last_running or
                self.pending != self.last_pending or
                self.finished != self.last_finished):
                changed = True

            if (changed or
                # have we waited sufficiently long?
                cur_time >= self.last_time + self.message_delay):

                # speed back up when anything finishes
                if self.finished != self.last_finished:
                    self.message_delay = self.min_message_delay
                elif self.last_time != 0:
                    # only printing because of timeout delay, so
                    # we increase the time a little bit.
                    self.message_delay += self.min_message_delay

                # store the other limiters for next time
                self.last_time = cur_time
                self.last_pending = self.pending
                self.last_finished = self.finished
                self.last_running = self.running

                msgb('STATUS',
                     s % (self.running,
                          self.pending,
                          self.finished,
                          self.errors,
                          get_elapsed_time(self.start_time, get_time()),
                          self._command_names()))

    def _start_more_jobs(self):
        """If there are jobs to start and we didn't hit our parallelism
        limit, start more jobs.  Return True if anything was started."""
        # copy from new_queue to pending_commands to avoid a data
        # race on iterating over pending commands.
        started = False
        while not self.new_queue.empty():
            self.pending_commands.append(self.new_queue.get())

        ready = deque()
        for cmd in self.pending_commands:
            if cmd._ready():
                ready.append(cmd)

        while self.running < self.max_parallelism and ready:
            cmd = ready.popleft()
            # FIXME: small concern that this could be slow
            self.pending_commands.remove(cmd)
            if verbose(2):
                msgb("LAUNCHING", cmd.dump_cmd())
            self._start_a_job(cmd)
            self.pending -= 1
            started = True
        return started

    def _start_a_job(self, cmd):
        """Private function to kick off a command"""
        self.out_queue.put(cmd)
        self.running_commands.append(cmd)
        if not cmd.terminator:
            self.all_commands.append(cmd)
        self.sent += 1
        self.running += 1

    def _command_names(self):
        """Return a bracketed list of the names of running commands;
        unnamed commands are summarized as a count."""
        s = []
        anonymous_jobs = 0
        for r in self.running_commands:
            if hasattr(r, 'name') and r.name:
                s.append(r.name)
            else:
                anonymous_jobs += 1
        if s:
            if anonymous_jobs:
                s.append('%d-anonymous' % (anonymous_jobs))
            return '[' + ' '.join(s) + ']'
        else:
            return ''

    def _wait_for_jobs(self):
        """Return one command object when it finishes, or None on timeout
        (or other non-keyboard-interrupt exceptions)."""
        if self.running > 0:
            try:
                cmd = self.back_queue.get(block=True,
                                          timeout=self.join_timeout)
                self.running -= 1
                self.finished += 1
                self.running_commands.remove(cmd)
                return cmd
            except Queue.Empty:
                return None
            except KeyboardInterrupt:
                msgb('INTERRUPT')
                self._terminate()
                self.dying = True
                sys.exit(1)
                return None  # NOT REACHED
            except:
                # NOTE(review): deliberately best-effort -- any other
                # error while waiting is treated as a timeout.
                return None
        return None

    def build(self,
              dag=None,
              targets=None,
              die_on_errors=True,
              show_output=True,
              error_limit=0,
              show_progress=False,
              show_errors_only=False,
              join_timeout=10.0):
        """
        This makes the work queue start building stuff.  If no targets
        are specified then all the targets are considered and built if
        necessary.  All commands that get run or generated are stored in
        the all_commands attribute.  That attribute gets re-initialized
        on each call to build.

        @type dag: L{dag_t}
        @param dag: the dependence tree object

        @type targets: list
        @param targets: specific targets to build

        @type die_on_errors: bool
        @param die_on_errors: keep going or die on errors

        @type show_output: bool
        @param show_output: show stdout/stderr (or just buffer it in
        memory for later processing).  Setting this to False is good for
        avoiding voluminous screen output.  The default is True.

        @type show_progress: bool
        @param show_progress: show the running/pending/completed/errors msgs

        @type show_errors_only: bool
        @param show_errors_only: normally print the commands as they complete.
        If True, only show the commands that fail.

        @type join_timeout: float
        @param join_timeout: how long to wait for thread to terminate.
        default 10s
        """
        self._clean_slate()

        self.show_progress = show_progress
        self.join_timeout = join_timeout
        self.errors = 0
        self.show_errors_only = show_errors_only
        self.message_delay = self.min_message_delay
        self.last_time = 0
        self.clear_commands()
        self.dag = dag
        if self.dag:
            for x in self.dag._leaves_with_changes(targets):
                self.add(x.creator)
        okay = self._build_blind(die_on_errors, show_output, error_limit)
        if okay and self.dag:
            did_not_build = self.dag.check_for_skipped()
            if len(did_not_build) > 0:
                # some stuff did not build, force an error status return
                msgb("ERROR: DID NOT BUILD SOME STUFF",
                     "\n\t".join(did_not_build))
                if self.dag:
                    print(self.dag.dump())
                self.end_time = get_time()
                self._cleanup()
                return False
        # normal exit path
        self.end_time = get_time()
        if self.dag:
            self.dag.dag_write_signatures()
        self._cleanup()
        return okay

    def _build_blind(self, die_on_errors=True, show_output=True,
                     error_limit=0):
        """Start running the commands that are pending and kick off
        dependent jobs as those complete.  If die_on_errors is True, the
        default, we stop running new jobs after one job returns a nonzero
        status.  Returns True if no errors"""
        if self.use_threads:
            return self._build_blind_threads(die_on_errors,
                                             show_output,
                                             error_limit)
        else:
            return self._build_blind_no_threads(die_on_errors,
                                                show_output,
                                                error_limit)

    def _build_blind_threads(self,
                             die_on_errors=True,
                             show_output=True,
                             error_limit=0):
        """Threaded build loop: start jobs, harvest completions, and
        enable dependent work until done.  Returns True if no errors."""
        okay = True
        started = False
        while 1:
            c = None
            if started:
                c = self._wait_for_jobs()
            if c:
                if verbose(3):
                    msgb("JOB COMPLETED")
                if c.failed():
                    self.errors += 1
                    okay = False
                    if die_on_errors or (error_limit != 0 and
                                         self.errors > error_limit):
                        warn("Command execution failed. " +
                             "Waiting for remaining jobs and exiting.")
                        self.dying = True

            if not self.dying:
                started |= self._start_more_jobs()
            self._status()

            if c and not self.dying:
                c._complete()
                # Command objects can depend on each other
                # directly.  Enable execution of dependent commands.
                if verbose(3):
                    msgb("ADD CMD-AFTERS")
                self.add(c._check_afters())
                # Or we might find new commands from the file DAG.
                if self.dag:
                    for x in self.dag._enable_successors(c):
                        self.add(x.creator)
            if c and (self.show_errors_only == False or c.failed()):
                print(c.dump(show_output=show_output))
            if self._done():
                break
        return okay

    def _build_blind_no_threads(self, die_on_errors=True,
                                show_output=True, error_limit=0):
        """Single-threaded build loop: execute each task inline and
        enable dependent work until done.  Returns True if no errors."""
        okay = True
        while 1:
            started = False
            if not self.dying:
                started = self._start_more_jobs()
            if started:
                self._status()

            # EXECUTE THE TASK OURSELVES
            if self.running > 0:
                _worker_one_task(self.out_queue, self.back_queue)
            c = self._wait_for_jobs()
            if c:
                if verbose(3):
                    msgb("JOB COMPLETED")
                if c.failed():
                    okay = False
                    self.errors += 1
                    if die_on_errors or (error_limit != 0 and
                                         self.errors > error_limit):
                        warn("Command execution failed. " +
                             "Waiting for remaining jobs and exiting.")
                        self.dying = True
                if not self.dying:
                    c._complete()
                    # Command objects can depend on each other
                    # directly.  Enable execution of dependent commands.
                    if verbose(3):
                        msgb("ADD CMD-AFTERS")
                    self.add(c._check_afters())
                    # Or we might find new commands from the file DAG.
                    if self.dag:
                        for x in self.dag._enable_successors(c):
                            self.add(x.creator)
                if self.show_errors_only == False or c.failed():
                    print(c.dump(show_output=show_output))
            self._status()
            if self._done():
                break
        return okay
#
#END_LEGAL
import os
import sys

def find_dir(d):
    """Walk upward from the current working directory looking for a
    file-system entry named d.

    @type d: string
    @param d: the directory (or file) name to search for
    @rtype: string or None
    @return: the full path to d in the nearest enclosing directory that
    contains it, or None if the search reaches the file-system root
    without finding it.
    """
    # 'cur == prev' detects the root: os.path.split() of the root
    # returns the root itself, so the walk stops making progress.
    cur = os.getcwd()
    prev = ''
    while cur != prev:
        candidate = os.path.join(cur, d)
        if os.path.exists(candidate):
            return candidate
        prev = cur
        (cur, _tail) = os.path.split(cur)
    return None

mbuild_path = find_dir('mbuild')
# BUGFIX: only prepend the path when the search succeeded; previously a
# failed search prepended None to sys.path, producing a confusing error
# long before the ImportError below.
if mbuild_path:
    sys.path = [mbuild_path] + sys.path
import mbuild
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL

# Test: run one shell command through the work queue with its output
# redirected to a file.

import os,sys
sys.path = ['..'] + sys.path
import mbuild

env = mbuild.env_t()
env.parse_args()

env['jobs']=1
work_queue = mbuild.work_queue_t(env['jobs'])
all_cmds = [ '/bin/ls -R ../..' ]
subs = {}
command_list = []
for raw_cmd in all_cmds:
    expanded = raw_cmd % (subs)
    mbuild.msgb('ADDING', expanded)
    work_queue.add(mbuild.command_t(expanded, output_file_name="foo"))
    command_list.append(expanded)


phase = "BUILD"
okay = work_queue.build()
if not okay:
    mbuild.die("[%s] failed. dying..." % phase)
mbuild.msgb(phase, "succeeded")
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +import os,sys +sys.path = ['..'] + sys.path +import mbuild + +env = mbuild.env_t() +env.parse_args() + +env['jobs']=8 +work_queue = mbuild.work_queue_t(env['jobs']) +if False: + all_cmds = [ '/bin/ls -R ../..' ] + subs = {} + command_list = [] + for cmd in all_cmds: + cmd = cmd % (subs) + mbuild.msgb('ADDING', cmd) + c = mbuild.command_t(cmd, output_file_name="foo") + work_queue.add(c) + command_list.append(cmd) + + +phase = "BUILD" +okay = work_queue.build() +if not okay: + mbuild.die("[%s] failed. dying..." % phase) +mbuild.msgb(phase, "succeeded") diff --git a/tests/VersionInfo.rc b/tests/VersionInfo.rc new file mode 100644 index 0000000..baba119 --- /dev/null +++ b/tests/VersionInfo.rc @@ -0,0 +1,35 @@ +#define FOOBARVERSION 23,32 +#define STRFOOBARVERSION "23.32\0" +1 VERSIONINFO +FILEVERSION FOOBARVERSION +PRODUCTVERSION FOOBARVERSION +FILEFLAGSMASK 0x17L +#ifdef _DEBUG +FILEFLAGS 0x1L +#else +FILEFLAGS 0x0L +#endif +FILEOS 0x4L +FILETYPE 0x1L +FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904b0" + BEGIN + VALUE "CompanyName", "Foobar Corporation" + VALUE "FileDescription", "FoobarŪ Engine DLL" + VALUE "FileVersion", STRFOOBARVERSION + VALUE "InternalName", "FoobarŪ Engine" + VALUE "LegalCopyright", "Copyright (C) 2014" + VALUE "OriginalFilename", "vcpu_foobar.dll" + VALUE "ProductName", "FoobarŪ Engine" + VALUE "ProductVersion", STRFOOBARVERSION + END + END + + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1200 + END +END diff --git a/tests/a.py b/tests/a.py new file mode 100755 index 
0000000..e4bb444 --- /dev/null +++ b/tests/a.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +import sys +import mbuild + +env = mbuild.env_t() +env.parse_args() + +if 'clean' in env['targets']: + mbuild.remove_tree(env['build_dir']) + sys.exit(0) +mbuild.cmkdir(env['build_dir']) +if not env.on_windows(): + env['LINK'] = env['CC'] # not g++ for this program + +dep_tracker = mbuild.dag_t() +prog = env.build_dir_join('hello' + env['EXEEXT']) +cmd1 = dep_tracker.add(env, env.cc_compile('hello.c')) +cmd2 = dep_tracker.add(env, env.link(cmd1.targets, prog)) + +work_queue = mbuild.work_queue_t(env['jobs']) +okay = work_queue.build(dag=dep_tracker) +if not okay: + mbuild.die("build failed") +mbuild.msgb("SUCCESS") diff --git a/tests/b.py b/tests/b.py new file mode 100755 index 0000000..cdc8955 --- /dev/null +++ b/tests/b.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +import sys +import mbuild + +env = mbuild.env_t() +env.parse_args() + +if 'clean' in env['targets']: + mbuild.remove_tree(env['build_dir']) + sys.exit(0) +mbuild.cmkdir(env['build_dir']) +env['LINK'] = env['CC'] # not g++ for this program + +dep_tracker = mbuild.dag_t() +prog = env.build_dir_join('hello' + env['EXEEXT']) +cmd1 = dep_tracker.add(env, env.cc_compile('hello.c')) + +# take the nicely created link command and add on the strip command +# sequentially. +plan = env.link(cmd1.targets, prog) +cmds = [plan.command, " strip " + prog] +plan2 = mbuild.plan_t( cmds, env=env, + input=plan.input, output=plan.output) +cmd2 = dep_tracker.add(env, plan2) + +work_queue = mbuild.work_queue_t(env['jobs']) +okay = work_queue.build(dag=dep_tracker) +if not okay: + mbuild.die("build failed") +mbuild.msgb("SUCCESS") diff --git a/tests/c.py b/tests/c.py new file mode 100755 index 0000000..b99be5d --- /dev/null +++ b/tests/c.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL +import sys +# find.py is in the tests dir. It finds mbuild and puts it on the +# sys.path. +import find +import mbuild + +env = mbuild.env_t(init_verbose=0) +env.parse_args() +#mbuild.build_env.set_env_icl(env) +mbuild.cmkdir(env['build_dir']) +dag = mbuild.dag_t() +res = env.compile(dag,['VersionInfo.rc']) +objs = env.compile(dag,['hello.c']) +cmd = dag.add(env, + env.dynamic_lib(objs + res, + env.build_dir_join('hello.dll'))) + +work_queue = mbuild.work_queue_t(env['jobs']) +okay = work_queue.build(dag=dag) +if not okay: + mbuild.die("build failed") +mbuild.msgb("SUCCESS") diff --git a/tests/circular-dep/a.h b/tests/circular-dep/a.h new file mode 100644 index 0000000..1e35996 --- /dev/null +++ b/tests/circular-dep/a.h @@ -0,0 +1,8 @@ + + +#if !defined(AAA_1) +# define AAA_1 + +# include "b.h" + +#endif diff --git a/tests/circular-dep/b.h b/tests/circular-dep/b.h new file mode 100644 index 0000000..c1eddad --- /dev/null +++ b/tests/circular-dep/b.h @@ -0,0 +1,5 @@ +#if !defined(BBB_2) +# define BBB_2 + +# include "c.h" +#endif diff --git a/tests/circular-dep/c.h b/tests/circular-dep/c.h new file mode 100644 index 0000000..1339c9f --- /dev/null +++ b/tests/circular-dep/c.h @@ -0,0 +1,5 @@ +#if !defined(CCC_1) +# define CCC_1 + +# include "a.h" +#endif diff --git a/tests/circular-dep/find.py b/tests/circular-dep/find.py new file mode 100755 index 0000000..8331a18 --- /dev/null +++ b/tests/circular-dep/find.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +# add the right mbuild dir to the sys.path so that we can import mbuild. + +import os +import sys + +def find_dir(d): + dir = os.getcwd() + last = '' + while dir != last: + target_dir = os.path.join(dir,d) + if os.path.exists(target_dir): + return target_dir + last = dir + (dir,tail) = os.path.split(dir) + return None + +# go up an extra level because we are in the mbuild tree. 
+# otherwise we find the subdir instead of the parent dir. +# normally this last os.path.dirname() would not be required. +mbuild_path = os.path.dirname(find_dir('mbuild')) +sys.path = [ mbuild_path ] + sys.path + + diff --git a/tests/circular-dep/m.h b/tests/circular-dep/m.h new file mode 100644 index 0000000..908eddd --- /dev/null +++ b/tests/circular-dep/m.h @@ -0,0 +1,4 @@ +#if !defined(MMM_1) +# define MMM_1 + +#endif diff --git a/tests/circular-dep/main.c b/tests/circular-dep/main.c new file mode 100644 index 0000000..b688fce --- /dev/null +++ b/tests/circular-dep/main.c @@ -0,0 +1,5 @@ +#include "z.h" + +int main() { + return 0; +} diff --git a/tests/circular-dep/mfile.py b/tests/circular-dep/mfile.py new file mode 100755 index 0000000..3d80e9d --- /dev/null +++ b/tests/circular-dep/mfile.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python
# -*- python -*- + +import sys +import find +import mbuild + +def setup(): + env = mbuild.env_t() + env.parse_args() + mbuild.cmkdir(env['build_dir']) + if not env.on_windows(): + env['LINK'] = env['CC'] # not g++ for this program + return env + +def work(env): + #with the env, the dag hash file is put in the build_dir.
+ dag = mbuild.dag_t('circular-test',env=env) + work_queue = mbuild.work_queue_t(env['jobs']) + + env.compile_and_link(dag, ['main.c'], 'main' + env['EXEEXT']) + + okay = work_queue.build(dag=dag) + if not okay: + mbuild.die("build failed") + mbuild.msgb("SUCCESS") + + +if __name__ == "__main__": + env = setup() + work(env) diff --git a/tests/circular-dep/z.h b/tests/circular-dep/z.h new file mode 100644 index 0000000..8f6b169 --- /dev/null +++ b/tests/circular-dep/z.h @@ -0,0 +1,7 @@ + + +#if !defined(ZZZ_1) +# define ZZZ_1 + +# include "a.h" +#endif diff --git a/tests/delay.c b/tests/delay.c new file mode 100644 index 0000000..fdaa999 --- /dev/null +++ b/tests/delay.c @@ -0,0 +1,25 @@ +/*BEGIN_LEGAL + +Copyright (c) 2016 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +END_LEGAL */ +int main(int argc, char** argv) { + unsigned int i,j,n; + n = atoi(argv[1]); + for(i=0;i +int main() { + printf("Hello world\n"); + + return 0; +} diff --git a/tests/negtime.py b/tests/negtime.py new file mode 100755 index 0000000..4d6ef40 --- /dev/null +++ b/tests/negtime.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +import find +import mbuild +print mbuild.get_elapsed_time(10,0) diff --git a/tests/nodag.py b/tests/nodag.py new file mode 100755 index 0000000..bc25715 --- /dev/null +++ b/tests/nodag.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +# Example of using the connections between commands to enforce +# execution ordering. A dag_t requires that you have files and that +# doesn't help for things that are not file oriented. 
+ +import os +import sys +import mbuild + +env = mbuild.env_t() +env.parse_args() +work_queue = mbuild.work_queue_t(env['jobs']) + +last_command_in_sequence = {} +for i in range(0,99): + cmd = "/bin/echo %d" % (i) + c = mbuild.command_t(cmd) + + # break the commmands in to 7 sequences + seq = i % 7 + + # enforce a dependence between commands in the same sequence + try: + prev = last_command_in_sequence[seq] + prev.add_after_me(c) + except: + pass + last_command_in_sequence[seq]=c + mbuild.msgb('ADDING',i) + # add it to the work queue + work_queue.add(c) + +# run the commands. Use --jobs N to set the number of workers to N. +okay = work_queue.build() +if not okay: + mbuild.die("build failed") +mbuild.msgb("SUCCESS") diff --git a/tests/nondep/README.txt b/tests/nondep/README.txt new file mode 100644 index 0000000..7db782c --- /dev/null +++ b/tests/nondep/README.txt @@ -0,0 +1,2 @@ +this is not a dependence driven build. +it is for testing various msvs environments. diff --git a/tests/nondep/__mbuild b/tests/nondep/__mbuild new file mode 100755 index 0000000..f8f853a --- /dev/null +++ b/tests/nondep/__mbuild @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# -*- python -*- +import sys +import os +import glob + +def find_dir(d): + idir = os.getcwd() + last_idir = '' + while idir != last_idir: + mfile = os.path.join(idir,d) + if os.path.exists(mfile): + return mfile + last_idir = idir + idir = os.path.dirname(idir) + print "Could not find %s file, looking upwards"% (mfile) + sys.exit(1) + +#sys.path = [find_dir('mbuild')] + sys.path +sys.path.append('../..') +import mbuild + + +def build(env, phase='BUILD',terminate_on_errors=False): + """Build everything in the work queue""" + okay = env.work_queue.build(die_on_errors=False) + if not okay: + if terminate_on_errors: + mbuild.die("[%s] failed." % phase) + else: + mbuild.msgb("[%s] failed." 
% phase) + else: + mbuild.msgb(phase, "succeeded") + +def compile_and_link(env,fn): + env['file']=fn + (base,ext) = os.path.splitext(fn) + if ext in ['.cpp','.C']: + env['x_compiler'] = "%(CXX)s" + env['tflags'] = "%(CXXFLAGS)s" + else: + env['x_compiler'] = "%(CC)s" + env['tflags'] = "%(CCFLAGS)s" + env['exe'] = "%s.%s.%s.exe" % (base, env['build_os'],env['host_cpu']) + cmd = "%(x_compiler)s %(tflags)s %(file)s %(EXEOUT)s%(exe)s" + cmd = env.expand_string(cmd) + env.work_queue.add(mbuild.command_t(cmd)) + + +def work(): + env = mbuild.env_t() + env.parser.add_option("--build", + dest="build", action="store_true", default=False, + help="Build tests") + + env.parse_args() + env.work_queue = mbuild.work_queue_t(env['jobs']) + + if env['build']: + for s in glob.glob('*.c'): + compile_and_link(env,s) + build(env) + +if __name__ == "__main__": + work() + + diff --git a/tests/nondep/hello.c b/tests/nondep/hello.c new file mode 100644 index 0000000..06cea98 --- /dev/null +++ b/tests/nondep/hello.c @@ -0,0 +1,5 @@ +#include +int main() { + printf("hello world\n"); + return 0; +} diff --git a/tests/sleep.py b/tests/sleep.py new file mode 100755 index 0000000..407ef3f --- /dev/null +++ b/tests/sleep.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +#END_LEGAL + +import os,sys +sys.path = ['..'] + sys.path +import mbuild + +env = mbuild.env_t() +env.parse_args() + +env['jobs']=4 +work_queue = mbuild.work_queue_t(env['jobs']) +all_cmds = 2 * [ './delay 40000' ] +subs = {} +command_list = [] +for cmd in all_cmds: + cmd = cmd % (subs) + mbuild.msgb('ADDING', cmd) + command_list.append(cmd) +work_queue.add_sequential(command_list, unbufferred=True) + + +phase = "BUILD" +okay = work_queue.build(show_progress=True) +if not okay: + mbuild.die("[%s] failed. dying..." % phase) +mbuild.msgb(phase, "succeeded") diff --git a/tests/spew b/tests/spew new file mode 100755 index 0000000..3b26b9e --- /dev/null +++ b/tests/spew @@ -0,0 +1,4 @@ +#!/usr/bin/env python + +for i in range(0,1000*1000*100): + print i diff --git a/tests/stdin.py b/tests/stdin.py new file mode 100755 index 0000000..6e6c88c --- /dev/null +++ b/tests/stdin.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +#END_LEGAL + +import mbuild + +def dump(lines): + if lines: + for line in lines: + line = line.strip() + print "::" + line + else: + print "(EMPTY)" + +env = mbuild.env_t(0) +env.parse_args() + +infile = file('stdin.py') +retval,output,error = mbuild.run_command('cat', stdin=infile) +print "EXIT STATUS ", str(retval) +print "OUTPUT LINES " +dump(output) +print "ERROR LINES " +dump(error) diff --git a/tests/timed3.py b/tests/timed3.py new file mode 100755 index 0000000..faf71ab --- /dev/null +++ b/tests/timed3.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL +import find +import mbuild + +env = mbuild.env_t(0) +env.parse_args() + +retval, output, error = mbuild.run_command_timed('./spew', seconds=2) +print "RETURN CODE ", retval +print "OUTPUT LINES ", len(output) +print "ERROR LINES ", len(error) +for l in error: + print 'ERROR OUTPUT [{}]'.format(l.strip()) diff --git a/tests/timed4.py b/tests/timed4.py new file mode 100755 index 0000000..d5b25d6 --- /dev/null +++ b/tests/timed4.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL +import find +import mbuild + +env = mbuild.env_t(0) +env.parse_args() +work_queue = mbuild.work_queue_t(env['jobs']) + + +c = mbuild.command_t("/bin/sleep 1", seconds=2, show_output=False) +#work_queue.add(c) + +c2 = mbuild.command_t("./spew", seconds=2, show_output=False) +#work_queue.add(c2) + + +def some_python_fn(a,b): + n = 10 + x = 0 + for i in range(0,n): + for j in range(0,n): + for k in range(0,n): + x += i*j*k + return (0, [str(x)], []) + +c3 = mbuild.command_t(some_python_fn, seconds=2, show_output=False) +work_queue.add(c3) + + + +# run the commands. Use --jobs N to set the number of workers to N. +okay = work_queue.build(die_on_errors=False) +if okay: + mbuild.msgb('BUILD', 'pass') +else: + mbuild.msgb('BUILD', 'failed') + +print len(c2.output) +print c2.output[0:10] +print str(c2.stderr) +print str(c3.output) + diff --git a/tests/use-icl-win.py b/tests/use-icl-win.py new file mode 100644 index 0000000..308cde4 --- /dev/null +++ b/tests/use-icl-win.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python +# -*- python -*- +#BEGIN_LEGAL +# +#Copyright (c) 2016 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +#END_LEGAL + +# example of using icc (icl) on windows. +# need to set toolchain and compiler and add lib dir to LIB env var + +import sys +import mbuild + +env = mbuild.env_t() +env.parse_args({ 'toolchain':'C:/icc/win/AVX3/2014-11-21/64/bin/', + 'compiler':'icl' + }) + +icc_lib_dir = 'C:/icc/win/AVX3/2014-11-21/64/lib' +env.osenv_add_to_front('LIB',icc_lib_dir) + +if 'clean' in env['targets']: + mbuild.remove_tree(env['build_dir']) + sys.exit(0) +mbuild.cmkdir(env['build_dir']) +if not env.on_windows(): + env['LINK'] = env['CC'] # not g++ for this program +dep_tracker = mbuild.dag_t() +base='pcommit' +prog = env.build_dir_join(base + env['EXEEXT']) +cmd1 = dep_tracker.add(env, env.cc_compile(base = '.c')) +cmd2 = dep_tracker.add(env, env.link(cmd1.targets, prog)) + +work_queue = mbuild.work_queue_t(env['jobs']) +okay = work_queue.build(dag=dep_tracker) +if not okay: + mbuild.die("build failed") +mbuild.msgb("SUCCESS")