Mirror of https://github.com/libretro/Lakka-LibreELEC.git (synced 2024-11-23 08:20:00 +00:00)
scripts/pkgbuilder.py: enable percent/load/mem info with --progress
This commit is contained in:
parent 8e2c02f464
commit a6f81e49d5
@@ -30,6 +30,7 @@ start_multithread_build() {
   # When building addons, don't halt on error - keep building all packages/addons
   [ "${MTADDONBUILD}" = "yes" ] && buildopts+=" --continue-on-error" || buildopts+=" --halt-on-error"
 
+  [ "${MTPROGRESS}" = "yes" ] && buildopts+=" --progress"
   [ "${MTVERBOSE}" = "yes" ] && buildopts+=" --verbose"
   [ "${MTDEBUG}" = "yes" ] && buildopts+=" --debug"
   if [ "${DISABLE_COLORS}" = "yes" ]; then
scripts/pkgbuilder.py

@@ -15,6 +15,7 @@ import queue
 import subprocess
 import multiprocessing
 import signal
+import fcntl, termios, struct
 
 # Ensure we can output any old crap to stdout and stderr
 sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
@@ -209,6 +210,9 @@ class Generator:
         for name in self.failed:
             yield self.failed[name]
 
+    def completedJobCount(self):
+        return len(self.built)
+
     def totalJobCount(self):
         return self.totalJobs
 
@@ -351,7 +355,7 @@ class BuildProcess(threading.Thread):
 class Builder:
     def __init__(self, maxthreadcount, inputfilename, jobglog, loadstats, stats_interval, \
                  haltonerror=True, failimmediately=True, log_burst=True, log_combine="always", \
-                 autoremove=False, bookends=True, colors=False, debug=False, verbose=False):
+                 bookends=True, autoremove=False, colors=False, progress=False, debug=False, verbose=False):
         if inputfilename == "-":
             plan = json.load(sys.stdin)
         else:
@@ -360,46 +364,9 @@ class Builder:
 
         self.generator = Generator(plan)
 
-        self.joblog = jobglog
-        self.loadstats = loadstats
-        self.stats_interval = int(stats_interval)
-        if self.stats_interval < 1:
-            self.stats_interval = 60
-
-        self.haltonerror = haltonerror
-        self.failimmediately = failimmediately
-        self.log_burst = log_burst
-        self.log_combine = log_combine
-        self.debug = debug
-        self.verbose = verbose
-        self.bookends = bookends
-        self.autoremove = autoremove
-
-        self.colors = (colors == "always" or (colors == "auto" and sys.stderr.isatty()))
-        self.color_code = {}
-        self.color_code["DONE"] = "\033[0;32m" #green
-        self.color_code["FAIL"] = "\033[1;31m" #bold red
-        self.color_code["ACTV"] = "\033[0;33m" #yellow
-        self.color_code["IDLE"] = "\033[0;35m" #magenta
-        self.color_code["INIT"] = "\033[0;36m" #cyan
-        self.color_code["WAIT"] = "\033[0;35m" #magenta
-
-        self.work = queue.Queue()
-        self.complete = queue.Queue()
-
         self.jobtotal = self.generator.totalJobCount()
         self.twidth = len("%d" % self.jobtotal)
 
-        self.joblogfile = None
-        self.loadstatsfile = None
-        self.nextstats = 0
-
-        self.build_start = 0
-
-        # work and completion sequences
-        self.cseq = 0
-        self.wseq = 0
-
         # parse threadcount
         if maxthreadcount.endswith("%"):
             self.threadcount = int(multiprocessing.cpu_count() / 100 * int(args.max_procs.replace("%","")))
@@ -416,10 +383,59 @@ class Builder:
         if args.debug:
             DEBUG("THREADCOUNT#: input arg: %s, computed: %d" % (maxthreadcount, self.threadcount))
 
+        self.joblog = jobglog
+        self.loadstats = loadstats
+        self.stats_interval = int(stats_interval)
+        if self.stats_interval < 1:
+            self.stats_interval = 60
+
+        self.haltonerror = haltonerror
+        self.failimmediately = failimmediately
+        self.log_burst = log_burst
+        self.log_combine = log_combine
+        self.debug = debug
+        self.verbose = verbose
+        self.bookends = bookends
+        self.autoremove = autoremove
+
+        self.stdout_dirty = False
+        self.stderr_dirty = False
+        self.progress_dirty = False
+
+        self.joblogfile = None
+        self.loadstatsfile = None
+        self.nextstats = 0
+        self.build_start = 0
+
+        self.work = queue.Queue()
+        self.complete = queue.Queue()
+        self.processes = []
+
         # Init all processes
         self.processes = []
         for i in range(1, self.threadcount + 1):
-            self.processes.append(BuildProcess(i, self.threadcount, self.jobtotal, haltonerror, self.work, self.complete))
+            self.processes.append(BuildProcess(i, self.threadcount, self.jobtotal, self.haltonerror, self.work, self.complete))
 
+        # work and completion sequences
+        self.wseq = 0
+        self.cseq = 0
+
+        self.progress = progress and sys.stderr.isatty()
+        self.progress_glitch_fix = ""
+        self.rows = self.columns = -1
+        self.original_resize_handler = None
+        if self.progress:
+            self.getTerminalSize()
+            self.resize_handler = signal.signal(signal.SIGWINCH, self.getTerminalSize)
+
+        self.colors = (colors == "always" or (colors == "auto" and sys.stderr.isatty()))
+        self.color_code = {}
+        self.color_code["DONE"] = "\033[0;32m" #green
+        self.color_code["FAIL"] = "\033[1;31m" #bold red
+        self.color_code["ACTV"] = "\033[0;33m" #yellow
+        self.color_code["IDLE"] = "\033[0;35m" #magenta
+        self.color_code["INIT"] = "\033[0;36m" #cyan
+        self.color_code["WAIT"] = "\033[0;35m" #magenta
+
     def build(self):
         if self.joblog:
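
Note: the constructor changes above gate the new progress display on two things: the --progress flag and stderr being an interactive terminal, and they re-read the terminal size on SIGWINCH after a resize. Below is a minimal, standalone sketch of that gating pattern, assuming nothing beyond what the diff shows; the class and variable names are illustrative and shutil is used here instead of the ioctl-based getTerminalSize().

    import shutil
    import signal
    import sys

    class ProgressGate:
        def __init__(self, progress_requested):
            # Only draw progress when stderr is an interactive terminal.
            self.progress = progress_requested and sys.stderr.isatty()
            self.columns = -1
            if self.progress:
                self.on_resize()  # take an initial size reading
                signal.signal(signal.SIGWINCH, self.on_resize)

        def on_resize(self, signum=None, frame=None):
            # Re-query the terminal width whenever the window changes size.
            self.columns = shutil.get_terminal_size().columns

    if __name__ == "__main__":
        gate = ProgressGate(progress_requested=True)
        print("progress enabled:", gate.progress, "columns:", gate.columns, file=sys.stderr)
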
@@ -428,10 +444,11 @@ class Builder:
         if self.loadstats:
             self.loadstatsfile = open(self.loadstats, "w")
 
-        self.startProcesses()
-
         self.build_start = time.time()
 
+        for process in self.processes:
+            process.start()
+
         # Queue new work until no more work is available, and all queued jobs have completed.
         while self.queueWork():
             job = self.getCompletedJob()
@@ -456,11 +473,10 @@ class Builder:
         if self.haltonerror and not self.failimmediately:
             failed = [job for job in self.generator.failedJobs() if job["logfile"]]
             if failed != []:
-                print("\nThe following log(s) for this failure are available:", file=sys.stdout)
+                self.oprint("\nThe following log(s) for this failure are available:")
                 for job in failed:
-                    print(" %s => %s" % (job["name"], job["logfile"]), file=sys.stdout)
-                print("", file=sys.stdout)
-                sys.stdout.flush()
+                    self.oprint(" %s => %s" % (job["name"], job["logfile"]))
+                self.oprint("", flush=True)
             return False
 
         return True
@@ -476,7 +492,7 @@ class Builder:
         if self.haltonerror and self.generator.failedJobCount() != 0:
             if not self.failimmediately and self.generator.activeJobCount() != 0:
                 freeslots = self.threadcount - self.generator.activeJobCount()
-                self.vprint("WAIT", "waiting", ", ".join(self.generator.activeJobNames()))
+                self.show_status("WAIT", "waiting", ", ".join(self.generator.activeJobNames()))
                 DEBUG("Waiting for : %d active, %d idle [%s]" % (self.generator.activeJobCount(), freeslots, ", ".join(self.generator.activeJobNames())))
                 return True
             else:
@@ -487,7 +503,7 @@ class Builder:
             job = self.generator.getNextJob()
 
             if self.verbose:
-                self.vprint("INIT", "submit", job["name"])
+                self.show_status("INIT", "submit", job["name"])
 
             if self.debug:
                 DEBUG("Queueing Job: %s %s" % (job["task"], job["name"]))
@@ -500,7 +516,7 @@ class Builder:
             self.work.put(job)
 
             if self.verbose:
-                self.vprint("ACTV", "active", ", ".join(self.generator.activeJobNames()))
+                self.show_status("ACTV", "active", ", ".join(self.generator.activeJobNames()))
 
             if self.debug:
                 freeslots = self.threadcount - self.generator.activeJobCount()
@@ -512,8 +528,8 @@ class Builder:
             pending = []
             for (i, (package, wants)) in enumerate(self.generator.getStallInfo()):
                 pending.append("%s (wants: %s)" % (package, ", ".join(wants)))
-            self.vprint("ACTV", "active", ", ".join(self.generator.activeJobNames()))
-            self.vprint("IDLE", "stalled", "; ".join(pending), p1=len(pending))
+            self.show_status("ACTV", "active", ", ".join(self.generator.activeJobNames()))
+            self.show_status("IDLE", "stalled", "; ".join(pending), p1=len(pending))
 
             if self.debug:
                 freeslots = self.threadcount - self.generator.activeJobCount()
@@ -553,43 +569,99 @@ class Builder:
             pass
 
     def captureStats(self, finished=False):
+        self.displayProgress()
+        if finished:
+            self.clearProgress()
+            self.flush()
+
         if not self.loadstatsfile:
-            return None
+            if self.progress:
+                now = time.time()
+                return int(now + 1) - now
+            else:
+                return None
 
         now = time.time()
         if now >= self.nextstats or finished:
             self.nextstats = int(now - (now % self.stats_interval)) + self.stats_interval
 
-            loadavg = open("/proc/loadavg", "r").readline().split()
+            loadavg = self.getLoad()
             procs = loadavg[3].split("/")
-            meminfo = dict((i.split()[0].rstrip(':'),int(i.split()[1])) for i in open("/proc/meminfo", "r").readlines())
+            meminfo = self.getMemory()
 
             print("%d %06d %5s %5s %5s %3s %4s %9d %2d %s" % (now, now - self.build_start, \
                   loadavg[0], loadavg[1], loadavg[2], procs[0], procs[1], meminfo["MemAvailable"], \
                   self.generator.activeJobCount(), ",".join(self.generator.activeJobNames())), \
                   file=self.loadstatsfile, flush=True)
 
-        return (self.nextstats - now)
+        if self.progress:
+            return min((self.nextstats - now), int(now + 1) - now)
+        else:
+            return (self.nextstats - now)
 
-    # Output progress info, and links to any relevant logs
+    def displayProgress(self):
+        if self.progress:
+            freeslots = self.threadcount - self.generator.activeJobCount()
+            if self.jobtotal != self.generator.completedJobCount():
+                percent = "%0.2f" % (100 / self.jobtotal * self.generator.completedJobCount())
+            else:
+                percent = "100"
+            loadavg = self.getLoad()
+            meminfo = self.getMemory()
+            available = int(meminfo["MemAvailable"]) / 1024
+
+            lines = [ "",
+                      "%s: %5s%% | load: %s mem: %d MB | failed: %d idle: %d active: %d" % \
+                      (self.secs2hms(time.time() - self.build_start), percent, \
+                       loadavg[0], available, \
+                       self.generator.failedJobCount(), freeslots, self.generator.activeJobCount()),
+                      "Building: %s" % ", ".join(self.generator.activeJobNames())
+                    ]
+
+            columns = self.columns # in theory could change mid-loop
+            output = []
+            for line in lines:
+                output.append(line if len(line) < columns else "%s+" % line[0:columns - 2])
+
+            if not self.progress_glitch_fix:
+                self.progress_glitch_fix = "%s\033[%dA" % ("\n" * len(output), len(output))
+
+            # \033[?7l: disable linewrap
+            # \033[0K: clear cursor to end of line (every line but last)
+            # \033[0J: clear cursor to end of screen (last line)
+            # \033%dA: move cursor up %d lines (move back to "home" position)
+            # \033[?7h: re-enable linewrap
+            #
+            # When the console is resized to a narrower width, lines wider than the
+            # new console width may be wrapped to a second line (depends on console
+            # software, for example PuTTY) so disable line wrapping to prevent this.
+            #
+            self.eprint("\033[?7l%s\033[0J\r\033[%dA\033[?7h" % ("\033[0K\n".join(output), len(output) - 1),
+                        end="\r", isProgress=True)
+            self.progress_dirty = True
+
+    def clearProgress(self):
+        if self.progress and self.progress_dirty:
+            self.progress_dirty = False
+            self.eprint("\033[0J", end="")
+
+    # Output completion info, and links to any relevant logs
     def displayJobStatus(self, job):
         self.cseq += 1
-        self.vprint(job["status"], job["task"], job["name"], p1=self.cseq, p2=self.jobtotal)
+        self.show_status(job["status"], job["task"], job["name"], p1=self.cseq, p2=self.jobtotal)
 
         if job["failed"]:
             if job["logfile"]:
-                print("\nThe following log for this failure is available:\n %s\n" % job["logfile"], \
-                      file=sys.stderr, flush=True)
+                self.eprint("\nThe following log for this failure is available:\n %s\n" % job["logfile"])
 
             if job["failedjobs"] and job["failedjobs"][0]["logfile"]:
                 if len(job["failedjobs"]) == 1:
-                    print("The following log from the failed dependency may be relevant:", file=sys.stderr)
+                    self.eprint("The following log from the failed dependency may be relevant:")
                 else:
-                    print("The following logs from the failed dependencies may be relevant:", file=sys.stderr)
+                    self.eprint("The following logs from the failed dependencies may be relevant:")
                 for fjob in job["failedjobs"]:
-                    print(" %-7s %s => %s" % (fjob["task"], fjob["name"], fjob["logfile"]), file=sys.stderr)
-                print("", file=sys.stderr)
-                sys.stderr.flush()
+                    self.eprint(" %-7s %s => %s" % (fjob["task"], fjob["name"], fjob["logfile"]))
+                self.eprint("")
 
     # If configured, send output for a job (either a logfile, or captured stdout) to stdout
     def processJobOutput(self, job):
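
Note: the escape sequences documented in the displayProgress() comments implement a redraw-in-place status area on stderr: line wrap is disabled, each status line is terminated with clear-to-end-of-line, the rest of the screen is cleared, and the cursor is moved back up so the next refresh overwrites the same rows. A self-contained sketch of the same redraw idea (simplified and illustrative, not the pkgbuilder.py implementation):

    import sys
    import time

    def draw_status(lines):
        # \033[?7l disable line wrap, \033[0K clear to end of line,
        # \033[0J clear to end of screen, \033[<n>A move cursor up n lines,
        # \033[?7h re-enable line wrap.
        body = "\033[0K\n".join(lines)
        sys.stderr.write("\033[?7l%s\033[0J\r\033[%dA\033[?7h\r" % (body, len(lines) - 1))
        sys.stderr.flush()

    if __name__ == "__main__" and sys.stderr.isatty():
        for pct in range(0, 101, 20):
            draw_status(["", "progress: %3d%%" % pct, "building: package-%d" % pct])
            time.sleep(0.2)
        sys.stderr.write("\033[0J")  # clear the status area when finished
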
@@ -600,55 +672,52 @@ class Builder:
         if job["logfile"]:
             if self.log_combine == "always" or (job["failed"] and self.log_combine == "fail"):
                 if self.bookends:
-                    print("<<< %s seq %s <<<" % (job["name"], job["seq"]))
+                    self.oprint("<<< %s seq %s <<<" % (job["name"], job["seq"]))
 
                 try:
                     with open(job["logfile"], "r", encoding="utf-8", errors="replace") as logfile:
                         for line in logfile:
-                            print(line, end="")
-                            if self.debug:
-                                log_size += len(line)
+                            self.oprint(line, end="")
+                            log_size += len(line)
                 except UnicodeDecodeError:
-                    print("\nPKGBUILDER ERROR: UnicodeDecodeError while reading log file %s\n" % job["logfile"], file=sys.stderr, flush=True)
+                    self.eprint("\nPKGBUILDER ERROR: UnicodeDecodeError while reading log file %s\n" % job["logfile"])
 
                 if job["failed"]:
-                    print("\nThe following log for this failure is available:\n %s\n" % job["logfile"])
+                    self.oprint("\nThe following log for this failure is available:\n %s\n" % job["logfile"])
 
                 if self.bookends:
-                    print(">>> %s seq %s >>>" % (job["name"], job["seq"]))
+                    self.oprint(">>> %s seq %s >>>" % (job["name"], job["seq"]))
 
-                sys.stdout.flush()
                 log_processed = True
 
         elif job["cmdproc"]:
             if self.log_combine == "always" or (job["failed"] and self.log_combine == "fail"):
                 if self.bookends:
-                    print("<<< %s" % job["name"])
+                    self.oprint("<<< %s" % job["name"])
 
                 for line in job["cmdproc"].stdout:
-                    print(line, end="")
-                    if self.debug:
-                        log_size += len(line)
+                    self.oprint(line, end="")
+                    log_size += len(line)
 
                 if "autoremove" in job:
                     for line in job["autoremove"].stdout:
-                        print(line, end="")
-                        if self.debug:
-                            log_size += len(line)
+                        self.oprint(line, end="")
+                        log_size += len(line)
                     job["autoremove"] = None
 
                 if self.bookends:
-                    print(">>> %s" % job["name"])
+                    self.oprint(">>> %s" % job["name"])
 
-                sys.stdout.flush()
                 log_processed = True
 
-        log_elapsed = time.time() - log_start
+        if log_processed:
+            self.flush()
 
-        if self.debug and log_processed:
-            log_rate = int(log_size / log_elapsed) if log_elapsed != 0 else 0
-            log_data = ", %s" % "/".join(job["logfile"].split("/")[-2:]) if job["logfile"] else ""
-            DEBUG("WRITING LOG : {0:,} bytes in {1:0.3f} seconds ({2:,d} bytes/sec{3:})".format(log_size, log_elapsed, log_rate, log_data))
+        if self.debug:
+            log_elapsed = time.time() - log_start
+            log_rate = int(log_size / log_elapsed) if log_elapsed != 0 else 0
+            log_data = ", %s" % "/".join(job["logfile"].split("/")[-2:]) if job["logfile"] else ""
+            DEBUG("WRITING LOG : {0:,} bytes in {1:0.3f} seconds ({2:,d} bytes/sec{3:})".format(log_size, log_elapsed, log_rate, log_data))
 
     # Log completion stats for job
     def writeJobLog(self, job):
@@ -683,9 +752,14 @@ class Builder:
 
         self.generator.removed(pkg_name)
 
-    def startProcesses(self):
-        for process in self.processes:
-            process.start()
+    def show_status(self, status, task, data, p1=None, p2=None):
+        p1 = (self.threadcount - self.generator.activeJobCount()) if p1 == None else p1
+        p2 = self.generator.activeJobCount() if p2 == None else p2
 
+        colored_status = "%s%-4s\033[0m" % (self.color_code[status], status) if self.colors else "%-4s" % status
+
+        self.eprint("%s[%0*d/%0*d] [%s] %-7s %s" % \
+                    (self.progress_glitch_fix, self.twidth, p1, self.twidth, p2, colored_status, task, data))
+
     def stopProcesses(self):
         if self.processes:
@@ -694,19 +768,60 @@ class Builder:
             self.processes = None
 
     def cleanup(self):
+        self.clearProgress()
+        self.flush()
+        if self.original_resize_handler != None:
+            signal.signal(signal.SIGWINCH, self.original_resize_handler)
         self.stopProcesses()
 
-    def vprint(self, status, task, data, p1=None, p2=None):
-        p1 = (self.threadcount - self.generator.activeJobCount()) if p1 == None else p1
-        p2 = self.generator.activeJobCount() if p2 == None else p2
-        print("[%0*d/%0*d] [%4s] %-7s %s" %
-              (self.twidth, p1, self.twidth, p2,
-               self.colorise(status), task, data), file=sys.stderr, flush=True)
+    def flush(self):
+        if self.stdout_dirty:
+            sys.stdout.flush()
+            self.stdout_dirty = False
 
-    def colorise(self, item):
-        if self.colors:
-            return "%s%-4s\033[0m" % (self.color_code[item], item)
-        return item
+        if self.stderr_dirty:
+            sys.stderr.flush()
+            self.stderr_dirty = False
+
+    def oprint(self, *args, flush=False, **kwargs):
+        if self.progress_dirty:
+            self.clearProgress()
+
+        if self.stderr_dirty:
+            sys.stderr.flush()
+            self.stderr_dirty = False
+
+        print(*args, **kwargs, file=sys.stdout, flush=flush)
+        self.stdout_dirty = not flush
+
+    def eprint(self, *args, flush=False, isProgress=False, **kwargs):
+        if self.stdout_dirty:
+            sys.stdout.flush()
+            self.stdout_dirty = False
+
+        if not isProgress and self.progress_dirty:
+            self.clearProgress()
+
+        print(*args, **kwargs, file=sys.stderr, flush=flush)
+        self.stderr_dirty = not flush
+
+    def getLoad(self):
+        return open("/proc/loadavg", "r").readline().split()
+
+    def getMemory(self):
+        return dict((i.split()[0].rstrip(':'),int(i.split()[1])) for i in open("/proc/meminfo", "r").readlines())
+
+    def getTerminalSize(self, signum = None, frame = None):
+        h, w, hp, wp = struct.unpack('HHHH',
+            fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ,
+            struct.pack('HHHH', 0, 0, 0, 0)))
+        self.rows = h
+        self.columns = w
+
+    def secs2hms(self, seconds):
+        min, sec = divmod(seconds, 60)
+        hour, min = divmod(min, 60)
+        return "%02d:%02d:%02d" % (hour, min, sec)
 
 def DEBUG(msg):
     if DEBUG_LOG:
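
Note: getLoad(), getMemory() and getTerminalSize() added above read /proc/loadavg, /proc/meminfo and the TIOCGWINSZ ioctl. The snippet below shows the same three queries in isolation so they can be tried outside the builder; it is a Linux-only sketch under those assumptions, not the project's code, and the function names are illustrative.

    import fcntl
    import struct
    import sys
    import termios

    def load_average():
        # The first three fields of /proc/loadavg are the 1/5/15 minute load averages;
        # the fourth is "running/total" processes.
        with open("/proc/loadavg") as f:
            return f.readline().split()

    def memory_info():
        # /proc/meminfo lines look like "MemAvailable:  1234567 kB"; values are in kB.
        with open("/proc/meminfo") as f:
            return {line.split()[0].rstrip(":"): int(line.split()[1]) for line in f}

    def terminal_size(fd=None):
        # TIOCGWINSZ fills a struct winsize: rows, columns, pixel width, pixel height.
        fd = sys.stderr.fileno() if fd is None else fd
        rows, cols, _, _ = struct.unpack("HHHH",
            fcntl.ioctl(fd, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0)))
        return rows, cols

    if __name__ == "__main__":
        print(load_average()[:3], memory_info().get("MemAvailable"), terminal_size())
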
@ -764,6 +879,9 @@ group.add_argument("--fail-after-active", action="store_false", dest="fail_immed
|
|||||||
parser.add_argument("--auto-remove", action="store_true", default=False, \
|
parser.add_argument("--auto-remove", action="store_true", default=False, \
|
||||||
help="Automatically remove redundant source code directories. Default is disabled.")
|
help="Automatically remove redundant source code directories. Default is disabled.")
|
||||||
|
|
||||||
|
parser.add_argument("--progress", action="store_true", default=False, \
|
||||||
|
help="Display progress information. Default is disabled")
|
||||||
|
|
||||||
parser.add_argument("--verbose", action="store_true", default=False, \
|
parser.add_argument("--verbose", action="store_true", default=False, \
|
||||||
help="Output verbose information to stderr.")
|
help="Output verbose information to stderr.")
|
||||||
|
|
||||||
|
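
Note: with the new argument registered, --progress presumably reaches the Builder the same way the existing flags do, and the display is further gated on stderr being a TTY inside __init__ (see the constructor hunk above). A hedged sketch of that wiring; the parser subset and variable names below are illustrative, not copied from pkgbuilder.py.

    import argparse
    import sys

    parser = argparse.ArgumentParser(description="illustrative subset of the pkgbuilder options")
    parser.add_argument("--progress", action="store_true", default=False,
                        help="Display progress information. Default is disabled")
    parser.add_argument("--verbose", action="store_true", default=False,
                        help="Output verbose information to stderr.")
    args = parser.parse_args()

    # In the commit, Builder.__init__ gains a progress=... keyword and also requires
    # sys.stderr.isatty() before any progress output is drawn.
    show_progress = args.progress and sys.stderr.isatty()
    print("progress display active:", show_progress, file=sys.stderr)
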