Directory reorg: move code into redo/, generate binaries in bin/.
It's time to start preparing for a version of redo that doesn't work unless we build it first (because it will rely on C modules, and eventually be rewritten in C altogether). To get rolling, remove the old-style symlinks to the main programs, and rename those programs from redo-*.py to redo/cmd_*.py. We'll also move all library functions into the redo/ dir, which is a more python-style naming convention. Previously, install.do was generating wrappers for installing in /usr/bin, which extend sys.path and then import+run the right file. This made "installed" redo work quite differently from running redo inside its source tree. Instead, let's always generate the wrappers in bin/, and not make anything executable except those wrappers. Since we're generating wrappers anyway, let's actually auto-detect the right version of python for the running system; distros can't seem to agree on what to call their python2 binaries (sigh). We'll fill in the right #! shebang lines. Since we're doing that, we can stop using /usr/bin/env, which will a) make things slightly faster, and b) let us use "python -S", which tells python not to load a bunch of extra crap we're not using, thus improving startup times. Annoyingly, we now have to build redo using minimal/do, then run the tests using bin/redo. To make this less annoying, we add a toplevel ./do script that knows the right steps, and a Makefile (whee!) for people who are used to typing 'make' and 'make test' and 'make clean'.
This commit is contained in:
parent
5bc7c861b6
commit
f6fe00db5c
140 changed files with 256 additions and 99 deletions
1
redo
1
redo
|
|
@ -1 +0,0 @@
|
|||
redo.py
|
||||
4
redo/.gitignore
vendored
Normal file
4
redo/.gitignore
vendored
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
*.pyc
|
||||
sh
|
||||
whichpython
|
||||
python
|
||||
0
redo/__init__.py
Normal file
0
redo/__init__.py
Normal file
6
redo/atoi.py
Normal file
6
redo/atoi.py
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
|
||||
def atoi(v):
    """Convert v to an int, treating None/empty/garbage as 0 (like C atoi)."""
    value = v or 0
    try:
        return int(value)
    except ValueError:
        # non-numeric text: quietly fall back to zero
        return 0
|
||||
528
redo/builder.py
Normal file
528
redo/builder.py
Normal file
|
|
@ -0,0 +1,528 @@
|
|||
import sys, os, errno, stat, signal, time
|
||||
import vars, jwack, state, paths
|
||||
from helpers import unlink, close_on_exec
|
||||
import logs
|
||||
from logs import debug2, err, warn, meta, check_tty
|
||||
|
||||
|
||||
def _nice(t):
    """Render target path t relative to the directory redo was started in."""
    return state.relpath(t, vars.STARTDIR)
|
||||
|
||||
|
||||
def _try_stat(filename):
|
||||
try:
|
||||
return os.lstat(filename)
|
||||
except OSError, e:
|
||||
if e.errno == errno.ENOENT:
|
||||
return None
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
log_reader_pid = None
|
||||
|
||||
|
||||
def close_stdin():
    """Point fd 0 at /dev/null so child processes can't read our stdin."""
    devnull = open('/dev/null')
    os.dup2(devnull.fileno(), 0)
    devnull.close()
|
||||
|
||||
|
||||
def start_stdin_log_reader(status, details, pretty, color,
                           debug_locks, debug_pids):
    """Fork a redo-log child and route our stdout/stderr through it.

    The child acknowledges startup over an ack pipe; only after we see
    REDO-OK do we redirect fds 1 and 2 into the main pipe.  Sets the
    module-level log_reader_pid so await_log_reader() can reap it later.
    """
    global log_reader_pid
    r, w = os.pipe()    # main pipe to redo-log
    ar, aw = os.pipe()  # ack pipe from redo-log --ack-fd
    sys.stdout.flush()
    sys.stderr.flush()
    pid = os.fork()
    if pid:
        # parent
        log_reader_pid = pid
        os.close(r)
        os.close(aw)
        b = os.read(ar, 8)
        if not b:
            # subprocess died without sending us anything: that's bad.
            err('failed to start redo-log subprocess; cannot continue.\n')
            os._exit(99)
        assert b == 'REDO-OK\n'
        # now we know the subproc is running and will report our errors
        # to stderr, so it's okay to lose our own stderr.
        os.close(ar)
        os.dup2(w, 1)
        os.dup2(w, 2)
        os.close(w)
        check_tty(sys.stderr, vars.COLOR)
    else:
        # child
        try:
            os.close(ar)
            os.close(w)
            os.dup2(r, 0)
            os.close(r)
            # redo-log sends to stdout (because if you ask for logs, that's
            # the output you wanted!).  But redo itself sends logs to stderr
            # (because they're incidental to the thing you asked for).
            # To make these semantics work, we point redo-log's stdout at
            # our stderr when we launch it.
            os.dup2(2, 1)
            argv = [
                'redo-log',
                '--recursive', '--follow',
                '--ack-fd', str(aw),
                ('--status' if status and os.isatty(2) else '--no-status'),
                ('--details' if details else '--no-details'),
                ('--pretty' if pretty else '--no-pretty'),
                ('--debug-locks' if debug_locks else '--no-debug-locks'),
                ('--debug-pids' if debug_pids else '--no-debug-pids'),
            ]
            if color != 1:
                # color was explicitly forced on (>=2) or off (<1)
                argv.append('--color' if color >= 2 else '--no-color')
            argv.append('-')
            os.execvp(argv[0], argv)
        except Exception as e:  # pylint: disable=broad-except
            sys.stderr.write('redo-log: exec: %s\n' % e)
        finally:
            # never fall back into the parent's code path
            os._exit(99)
|
||||
|
||||
|
||||
def await_log_reader():
    """Block until the redo-log child (if any) finishes draining our output."""
    if not vars.LOG:
        return
    if log_reader_pid <= 0:
        return
    # never actually close fd#1 or fd#2; insanity awaits.
    # replace it with something else instead.
    # Since our stdout/stderr are attached to redo-log's stdin,
    # this will notify redo-log that it's time to die (after it finishes
    # reading the logs)
    out = open('/dev/tty', 'w')
    os.dup2(out.fileno(), 1)
    os.dup2(out.fileno(), 2)
    os.waitpid(log_reader_pid, 0)
|
||||
|
||||
|
||||
class ImmediateReturn(Exception):
    """Raised to unwind a build step immediately with exit code .rv."""

    def __init__(self, rv):
        Exception.__init__(self, "immediate return with exit code %d" % rv)
        # exit code for the caller to propagate
        self.rv = rv
|
||||
|
||||
|
||||
class BuildJob(object):
    """Manage a single build attempt of one target, from dirty-check
    through running its .do file to renaming outputs into place."""

    def __init__(self, t, sf, lock, shouldbuildfunc, donefunc):
        self.t = t  # original target name, not relative to vars.BASE
        self.sf = sf
        # Find a tmp name whose parent directory actually exists, folding
        # path components ('/' -> '__') until it does.
        tmpbase = t
        while not os.path.isdir(os.path.dirname(tmpbase) or '.'):
            ofs = tmpbase.rfind('/')
            assert ofs >= 0
            tmpbase = tmpbase[:ofs] + '__' + tmpbase[ofs+1:]
        self.tmpname1 = '%s.redo1.tmp' % tmpbase  # captures the script's stdout
        self.tmpname2 = '%s.redo2.tmp' % tmpbase  # the script's $3 output file
        self.lock = lock
        self.shouldbuildfunc = shouldbuildfunc
        self.donefunc = donefunc
        self.before_t = _try_stat(self.t)

    def start(self):
        """Run the job; self.lock must already be held."""
        assert self.lock.owned
        try:
            try:
                is_target, dirty = self.shouldbuildfunc(self.t)
            except state.CyclicDependencyError:
                err('cyclic dependency while checking %s\n' % _nice(self.t))
                raise ImmediateReturn(208)
            if not dirty:
                # target doesn't need to be built; skip the whole task
                if is_target:
                    meta('unchanged', state.target_relpath(self.t))
                return self._after2(0)
        except ImmediateReturn as e:
            return self._after2(e.rv)

        if vars.NO_OOB or dirty == True:  # pylint: disable=singleton-comparison
            self._start_do()
        else:
            self._start_unlocked(dirty)

    def _start_do(self):
        """Locate the .do file for the target and launch it as a job."""
        assert self.lock.owned
        t = self.t
        sf = self.sf
        newstamp = sf.read_stamp()
        if (sf.is_generated and
                newstamp != state.STAMP_MISSING and
                (sf.is_override or state.detect_override(sf.stamp, newstamp))):
            state.warn_override(_nice(t))
            if not sf.is_override:
                warn('%s - old: %r\n' % (_nice(t), sf.stamp))
                warn('%s - new: %r\n' % (_nice(t), newstamp))
                sf.set_override()
            sf.set_checked()
            sf.save()
            return self._after2(0)
        if (os.path.exists(t) and not os.path.isdir(t + '/.')
                and not sf.is_generated):
            # an existing source file that was not generated by us.
            # This step is mentioned by djb in his notes.
            # For example, a rule called default.c.do could be used to try
            # to produce hello.c, but we don't want that to happen if
            # hello.c was created by the end user.
            debug2("-- static (%r)\n" % t)
            sf.set_static()
            sf.save()
            return self._after2(0)
        sf.zap_deps1()
        (dodir, dofile, _, basename, ext) = paths.find_do_file(sf)
        if not dofile:
            if os.path.exists(t):
                sf.set_static()
                sf.save()
                return self._after2(0)
            else:
                err('no rule to redo %r\n' % t)
                return self._after2(1)
        unlink(self.tmpname1)
        unlink(self.tmpname2)
        ffd = os.open(self.tmpname1, os.O_CREAT|os.O_RDWR|os.O_EXCL, 0o666)
        close_on_exec(ffd, True)
        # pylint: disable=attribute-defined-outside-init
        self.f = os.fdopen(ffd, 'w+')
        # this will run in the dofile's directory, so use only basenames here
        arg1 = basename + ext  # target name (including extension)
        arg2 = basename        # target name (without extension)
        argv = ['sh', '-e',
                dofile,
                arg1,
                arg2,
                # temp output file name
                state.relpath(os.path.abspath(self.tmpname2), dodir),
               ]
        if vars.VERBOSE:
            argv[1] += 'v'
        if vars.XTRACE:
            argv[1] += 'x'
        # honor a #! shebang line in the .do file instead of forcing sh -e
        firstline = open(os.path.join(dodir, dofile)).readline().strip()
        if firstline.startswith('#!/'):
            argv[0:2] = firstline[2:].split(' ')
        # make sure to create the logfile *before* writing the meta() about it.
        # that way redo-log won't trace into an obsolete logfile.
        if vars.LOG:
            open(state.logname(self.sf.id), 'w')
        # FIXME: put these variables somewhere else, instead of on-the-fly
        # extending this class!
        # pylint: disable=attribute-defined-outside-init
        self.dodir = dodir
        self.basename = basename
        self.ext = ext
        self.argv = argv
        dof = state.File(name=os.path.join(dodir, dofile))
        dof.set_static()
        dof.save()
        state.commit()
        meta('do', state.target_relpath(t))
        jwack.start_job(t, self._do_subproc, self._after)

    def _start_unlocked(self, dirty):
        # out-of-band redo of some sub-objects.  This happens when we're not
        # quite sure if t needs to be built or not (because some children
        # look dirty, but might turn out to be clean thanks to checksums).
        # We have to call redo-unlocked to figure it all out.
        #
        # Note: redo-unlocked will handle all the updating of sf, so we
        # don't have to do it here, nor call _after1.  However, we have to
        # hold onto the lock because otherwise we would introduce a race
        # condition; that's why it's called redo-unlocked, because it doesn't
        # grab a lock.
        here = os.getcwd()
        def _fix(p):
            return state.relpath(os.path.join(vars.BASE, p), here)
        argv = (['redo-unlocked', _fix(self.sf.name)] +
                list(set(_fix(d.name) for d in dirty)))
        meta('check', state.target_relpath(self.t))
        state.commit()
        def run():
            os.environ['REDO_DEPTH'] = vars.DEPTH + ' '
            # python ignores SIGPIPE
            signal.signal(signal.SIGPIPE, signal.SIG_DFL)
            os.execvp(argv[0], argv)
            assert 0
            # returns only if there's an exception
        def after(t, rv):
            return self._after2(rv)
        jwack.start_job(self.t, run, after)

    def _do_subproc(self):
        """In the child: set up env/fds, then exec the .do script (no return)."""
        # careful: REDO_PWD was the PWD relative to the STARTPATH at the time
        # we *started* building the current target; but that target ran
        # redo-ifchange, and it might have done it from a different directory
        # than we started it in.  So os.getcwd() might be != REDO_PWD right
        # now.
        assert state.is_flushed()
        dn = self.dodir
        newp = os.path.realpath(dn)
        os.environ['REDO_PWD'] = state.relpath(newp, vars.STARTDIR)
        os.environ['REDO_TARGET'] = self.basename + self.ext
        os.environ['REDO_DEPTH'] = vars.DEPTH + ' '
        vars.add_lock(str(self.lock.fid))
        if dn:
            os.chdir(dn)
        os.dup2(self.f.fileno(), 1)
        os.close(self.f.fileno())
        close_on_exec(1, False)
        if vars.LOG:
            cur_inode = str(os.fstat(2).st_ino)
            if not vars.LOG_INODE or cur_inode == vars.LOG_INODE:
                # .do script has *not* redirected stderr, which means we're
                # using redo-log's log saving mode.  That means subprocs
                # should be logged to their own file.  If the .do script
                # *does* redirect stderr, that redirection should be inherited
                # by subprocs, so we'd do nothing.
                logf = open(state.logname(self.sf.id), 'w')
                new_inode = str(os.fstat(logf.fileno()).st_ino)
                os.environ['REDO_LOG'] = '1'  # .do files can check this
                os.environ['REDO_LOG_INODE'] = new_inode
                os.dup2(logf.fileno(), 2)
                close_on_exec(2, False)
                logf.close()
        else:
            if 'REDO_LOG_INODE' in os.environ:
                del os.environ['REDO_LOG_INODE']
            os.environ['REDO_LOG'] = ''
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)  # python ignores SIGPIPE
        if vars.VERBOSE or vars.XTRACE:
            logs.write('* %s' % ' '.join(self.argv).replace('\n', ' '))
        os.execvp(self.argv[0], self.argv)
        # FIXME: it would be nice to log the exit code to logf.
        # But that would have to happen in the parent process, which doesn't
        # have logf open.
        assert 0
        # returns only if there's an exception

    def _after(self, t, rv):
        """In the parent, after the .do subprocess exits: record results."""
        try:
            state.check_sane()
            rv = self._after1(t, rv)
            state.commit()
        finally:
            self._after2(rv)

    def _after1(self, t, rv):
        """Validate build output, rename temp files into place, update state.

        Returns the (possibly replaced) exit code for the job.
        """
        f = self.f
        before_t = self.before_t
        after_t = _try_stat(t)
        st1 = os.fstat(f.fileno())
        st2 = _try_stat(self.tmpname2)
        if (after_t and
                (not before_t or before_t.st_mtime != after_t.st_mtime) and
                not stat.S_ISDIR(after_t.st_mode)):
            err('%s modified %s directly!\n' % (self.argv[2], t))
            err('...you should update $3 (a temp file) or stdout, not $1.\n')
            rv = 206
        elif st2 and st1.st_size > 0:
            err('%s wrote to stdout *and* created $3.\n' % self.argv[2])
            err('...you should write status messages to stderr, not stdout.\n')
            rv = 207
        if rv == 0:
            # FIXME: race condition here between updating stamp/is_generated
            # and actually renaming the files into place.  There needs to
            # be some kind of two-stage commit, I guess.
            if st2:
                # the script created $3: that becomes the target
                try:
                    os.rename(self.tmpname2, t)
                except OSError as e:
                    dnt = os.path.dirname(t)
                    if not os.path.exists(dnt):
                        err('%s: target dir %r does not exist!\n' % (t, dnt))
                    else:
                        err('%s: rename %s: %s\n' % (t, self.tmpname2, e))
                    raise
                os.unlink(self.tmpname1)
            elif st1.st_size > 0:
                # the script wrote to stdout: that becomes the target
                try:
                    os.rename(self.tmpname1, t)
                except OSError as e:
                    if e.errno == errno.ENOENT:
                        unlink(t)
                    else:
                        err('%s: can\'t save stdout to %r: %s\n' %
                            (self.argv[2], t, e.strerror))
                        rv = 1000
                if st2:
                    os.unlink(self.tmpname2)
            else:  # no output generated at all; that's ok
                unlink(self.tmpname1)
                unlink(t)
            sf = self.sf
            sf.refresh()
            sf.is_generated = True
            sf.is_override = False
            if sf.is_checked() or sf.is_changed():
                # it got checked during the run; someone ran redo-stamp.
                # update_stamp would call set_changed(); we don't want that,
                # so only use read_stamp.
                sf.stamp = sf.read_stamp()
            else:
                sf.csum = None
                sf.update_stamp()
                sf.set_changed()
        else:
            unlink(self.tmpname1)
            unlink(self.tmpname2)
            sf = self.sf
            sf.set_failed()
            sf.zap_deps2()
        sf.save()
        f.close()
        meta('done', '%d %s' % (rv, state.target_relpath(self.t)))
        return rv

    def _after2(self, rv):
        """Report completion to donefunc and always release the target lock."""
        try:
            self.donefunc(self.t, rv)
            assert self.lock.owned
        finally:
            self.lock.unlock()
|
||||
|
||||
|
||||
def main(targets, shouldbuildfunc):
    """Build all the given targets, in parallel when tokens allow.

    shouldbuildfunc(t) decides whether each target is dirty.  Returns a
    nonzero exit code if any target failed.
    """
    retcode = [0]  # a list so that it can be reassigned from done()
    if vars.SHUFFLE:
        import random
        random.shuffle(targets)

    locked = []  # (fid, target, name) tuples someone else had locked

    def done(t, rv):
        if rv:
            retcode[0] = 1

    if vars.TARGET and not vars.UNLOCKED:
        me = os.path.join(vars.STARTDIR,
                          os.path.join(vars.PWD, vars.TARGET))
        myfile = state.File(name=me)
        selflock = state.Lock(state.LOG_LOCK_MAGIC + myfile.id)
    else:
        selflock = myfile = me = None

    def cheat():
        if not selflock:
            return 0
        selflock.trylock()
        if not selflock.owned:
            # redo-log already owns it: let's cheat.
            # Give ourselves one extra token so that the "foreground" log
            # can always make progress.
            return 1
        else:
            # redo-log isn't watching us (yet)
            selflock.unlock()
            return 0

    # In the first cycle, we just build as much as we can without worrying
    # about any lock contention.  If someone else has it locked, we move on.
    seen = {}
    lock = None
    for t in targets:
        if not t:
            err('cannot build the empty target ("").\n')
            retcode[0] = 204
            break
        assert state.is_flushed()
        if t in seen:
            continue
        seen[t] = 1
        if not jwack.has_token():
            state.commit()
        jwack.ensure_token_or_cheat(t, cheat)
        if retcode[0] and not vars.KEEP_GOING:
            break
        if not state.check_sane():
            err('.redo directory disappeared; cannot continue.\n')
            retcode[0] = 205
            break
        f = state.File(name=t)
        lock = state.Lock(f.id)
        if vars.UNLOCKED:
            lock.owned = True
        else:
            lock.trylock()
        if not lock.owned:
            meta('locked', state.target_relpath(t))
            locked.append((f.id, t, f.name))
        else:
            # We had to create f before we had a lock, because we need f.id
            # to make the lock.  But someone may have updated the state
            # between then and now.
            # FIXME: separate obtaining the fid from creating the File.
            # FIXME: maybe integrate locking into the File object?
            f.refresh()
            BuildJob(t, f, lock, shouldbuildfunc, done).start()
        state.commit()
        assert state.is_flushed()
        lock = None

    del lock

    # Now we've built all the "easy" ones.  Go back and just wait on the
    # remaining ones one by one.  There's no reason to do it any more
    # efficiently, because if these targets were previously locked, that
    # means someone else was building them; thus, we probably won't need to
    # do anything.  The only exception is if we're invoked as redo instead
    # of redo-ifchange; then we have to redo it even if someone else already
    # did.  But that should be rare.
    while locked or jwack.running():
        state.commit()
        jwack.wait_all()
        assert jwack._mytokens == 0  # pylint: disable=protected-access
        jwack.ensure_token_or_cheat('self', cheat)
        # at this point, we don't have any children holding any tokens, so
        # it's okay to block below.
        if retcode[0] and not vars.KEEP_GOING:
            break
        if locked:
            if not state.check_sane():
                err('.redo directory disappeared; cannot continue.\n')
                retcode[0] = 205
                break
            fid, t, _ = locked.pop(0)
            lock = state.Lock(fid)
            backoff = 0.01
            lock.trylock()
            while not lock.owned:
                # Don't spin with 100% CPU while we fight for the lock.
                import random
                time.sleep(random.random() * min(backoff, 1.0))
                backoff *= 2
                # after printing this line, redo-log will recurse into t,
                # whether it's us building it, or someone else.
                meta('waiting', state.target_relpath(t))
                try:
                    lock.check()
                except state.CyclicDependencyError:
                    err('cyclic dependency while building %s\n' % _nice(t))
                    retcode[0] = 208
                    return retcode[0]
                # this sequence looks a little silly, but the idea is to
                # give up our personal token while we wait for the lock to
                # be released; but we should never run ensure_token() while
                # holding a lock, or we could cause deadlocks.
                jwack.release_mine()
                lock.waitlock()
                # now t is definitely free, so we get to decide whether
                # to build it.
                lock.unlock()
                jwack.ensure_token_or_cheat(t, cheat)
                lock.trylock()
            assert lock.owned
            meta('unlocked', state.target_relpath(t))
            if state.File(name=t).is_failed():
                err('%s: failed in another thread\n' % _nice(t))
                retcode[0] = 2
                lock.unlock()
            else:
                BuildJob(t, state.File(id=fid), lock,
                         shouldbuildfunc, done).start()
            lock = None
        state.commit()
    return retcode[0]
|
||||
2
redo/clean.do
Normal file
2
redo/clean.do
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
redo version/clean
|
||||
rm -f whichpython python *.pyc */*.pyc
|
||||
21
redo/cmd_always.py
Normal file
21
redo/cmd_always.py
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
import sys, os
|
||||
import vars, state
|
||||
|
||||
|
||||
def main():
    """redo-always: mark the current target as always needing a rebuild."""
    try:
        me = os.path.join(vars.STARTDIR,
                          os.path.join(vars.PWD, vars.TARGET))
        cur = state.File(name=me)
        # depend on the magic ALWAYS pseudo-target, which is marked changed
        # on every run, so this target can never be considered up to date
        cur.add_dep('m', state.ALWAYS)
        always = state.File(name=state.ALWAYS)
        always.stamp = state.STAMP_MISSING
        always.set_changed()
        always.save()
        state.commit()
    except KeyboardInterrupt:
        sys.exit(200)


if __name__ == '__main__':
    main()
|
||||
65
redo/cmd_ifchange.py
Normal file
65
redo/cmd_ifchange.py
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
import os, sys, traceback
|
||||
|
||||
import vars_init
|
||||
vars_init.init(sys.argv[1:])
|
||||
|
||||
import vars, state, builder, jwack, deps
|
||||
from logs import debug2, err
|
||||
|
||||
def should_build(t):
    """Decide whether target t needs rebuilding.

    Returns (is_target, dirty).  Raises builder.ImmediateReturn(32) if a
    previous attempt to build t during this run already failed.
    """
    f = state.File(name=t)
    if f.is_failed():
        raise builder.ImmediateReturn(32)
    dirty = deps.isdirty(f, depth='', max_changed=vars.RUNID,
                         already_checked=[])
    # if the only dirty thing is t itself, report plain DIRTY rather than
    # the singleton list [f]
    result = dirty == [f] and deps.DIRTY or dirty
    return f.is_generated, result
|
||||
|
||||
|
||||
def main():
    """redo-ifchange: build the given targets and record them as deps."""
    rv = 202
    try:
        if vars_init.is_toplevel and vars.LOG:
            # we're the outermost redo; spawn the log-reader child that
            # pretty-prints everything we (and our children) emit
            builder.close_stdin()
            builder.start_stdin_log_reader(
                status=True, details=True,
                pretty=True, color=True, debug_locks=False, debug_pids=False)
        if vars.TARGET and not vars.UNLOCKED:
            me = os.path.join(vars.STARTDIR,
                              os.path.join(vars.PWD, vars.TARGET))
            f = state.File(name=me)
            debug2('TARGET: %r %r %r\n'
                   % (vars.STARTDIR, vars.PWD, vars.TARGET))
        else:
            f = me = None
            debug2('redo-ifchange: not adding depends.\n')
        jwack.setup(1)
        try:
            targets = sys.argv[1:]
            if f:
                # record each requested target as a modify-dep of whoever
                # invoked us
                for t in targets:
                    f.add_dep('m', t)
                f.save()
                state.commit()
            rv = builder.main(targets, should_build)
        finally:
            try:
                state.rollback()
            finally:
                try:
                    jwack.force_return_tokens()
                except Exception as e:  # pylint: disable=broad-except
                    traceback.print_exc(100, sys.stderr)
                    err('unexpected error: %r\n' % e)
                    rv = 1
    except KeyboardInterrupt:
        if vars_init.is_toplevel:
            builder.await_log_reader()
        sys.exit(200)
    state.commit()
    if vars_init.is_toplevel:
        builder.await_log_reader()
    sys.exit(rv)


if __name__ == '__main__':
    main()
|
||||
26
redo/cmd_ifcreate.py
Normal file
26
redo/cmd_ifcreate.py
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
import sys, os
|
||||
import vars, state
|
||||
from logs import err
|
||||
|
||||
|
||||
def main():
    """redo-ifcreate: depend on the *nonexistence* of the given targets."""
    try:
        me = os.path.join(vars.STARTDIR,
                          os.path.join(vars.PWD, vars.TARGET))
        cur = state.File(name=me)
        for t in sys.argv[1:]:
            if not t:
                err('cannot build the empty target ("").\n')
                sys.exit(204)
            if os.path.exists(t):
                err('redo-ifcreate: error: %r already exists\n' % t)
                sys.exit(1)
            # creation-dep: if t appears later, the caller becomes dirty
            cur.add_dep('c', t)
        state.commit()
    except KeyboardInterrupt:
        sys.exit(200)


if __name__ == '__main__':
    main()
|
||||
269
redo/cmd_log.py
Normal file
269
redo/cmd_log.py
Normal file
|
|
@ -0,0 +1,269 @@
|
|||
import errno, fcntl, os, re, struct, sys, time
|
||||
import termios
|
||||
from atoi import atoi
|
||||
import options
|
||||
|
||||
optspec = """
redo-log [options...] [targets...]
--
r,recursive show build logs for dependencies too
u,unchanged show lines for dependencies not needing to be rebuilt
f,follow keep watching for more lines to be appended (like tail -f)
no-details only show 'redo' recursion trace, not build output
no-status don't display build summary line in --follow
no-pretty don't pretty-print logs, show raw @@REDO output instead
no-color disable ANSI color; --color to force enable (default: auto)
debug-locks print messages about file locking (useful for debugging)
debug-pids print process ids in log messages (useful for debugging)
ack-fd= (internal use only) print REDO-OK to this fd upon starting
"""
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])
targets = extra

import vars_init
vars_init.init(list(targets))

import vars, logs, state

# module-level state shared between catlog() and main()
topdir = os.getcwd()       # directory redo-log was started from
already = set()            # targets whose logs we've already dumped
depth = []                 # current recursion stack of target names
total_lines = 0            # log lines printed so far (for the status line)
status = None              # currently displayed status line, if any
start_time = time.time()
|
||||
|
||||
|
||||
# regexp for matching "redo" lines in the log, which we use for recursion.
# format:
#    redo path/to/target which might have spaces
#    redo  [unchanged]  path/to/target which might have spaces
#    redo  path/to/target which might have spaces (comment)
# Group 1 is the colon-separated metadata (kind:pid:timestamp...); group 2
# is the free-form text (usually a target path).
REDO_LINE_RE = re.compile(r'^@@REDO:([^@]+)@@ (.*)\n$')
|
||||
|
||||
|
||||
def _atoi(s):
|
||||
try:
|
||||
return int(s)
|
||||
except TypeError:
|
||||
return 0
|
||||
|
||||
|
||||
def _tty_width():
    """Return the terminal width in columns, falling back to $WIDTH or 70."""
    buf = struct.pack("HHHH", 0, 0, 0, 0)
    try:
        buf = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, buf)
    except (IOError, ImportError):
        # stderr isn't a tty (or no termios support): use the env override
        return _atoi(os.environ.get('WIDTH')) or 70
    (ysize, xsize, ypix, xpix) = struct.unpack('HHHH', buf)
    return xsize or 70
|
||||
|
||||
|
||||
def is_locked(fid):
    """True if some process currently holds the build lock for fid."""
    if fid is None:
        return False
    return not state.Lock(fid=fid).trylock()
|
||||
|
||||
|
||||
def _fix_depth():
    """Keep vars.DEPTH (log indentation) in sync with the recursion stack."""
    vars.DEPTH = ' ' * len(depth)
|
||||
|
||||
|
||||
def _rel(top, mydir, path):
    """Render path (given relative to top/mydir) relative to topdir."""
    joined = os.path.join(top, mydir, path)
    return os.path.relpath(joined, topdir)
|
||||
|
||||
|
||||
def catlog(t):
    """Print the saved log for target t, recursing into sub-targets.

    With --follow, keeps tailing the log while the target's build lock is
    held; '-' means read from stdin instead of a saved logfile.
    """
    global total_lines, status
    if t in already:
        return
    if t != '-':
        depth.append(t)
        _fix_depth()
    already.add(t)
    mydir = os.path.dirname(t)
    if t == '-':
        f = sys.stdin
        fid = None
        loglock = None
        logname = None
    else:
        try:
            sf = state.File(name=t, allow_add=False)
        except KeyError:
            sys.stderr.write('redo-log: [%s] %r: not known to redo.\n'
                             % (os.getcwd(), t,))
            sys.exit(24)
        fid = sf.id
        del sf
        state.rollback()
        logname = state.logname(fid)
        loglock = state.Lock(fid + state.LOG_LOCK_MAGIC)
        loglock.waitlock(shared=True)
        f = None
    delay = 0.01
    was_locked = is_locked(fid)
    line_head = ''
    width = _tty_width()
    while 1:
        if not f:
            try:
                f = open(logname)
            except IOError as e:
                if e.errno == errno.ENOENT:
                    # ignore files without logs
                    pass
                else:
                    raise
        if f:
            # Note: normally includes trailing \n.
            # In 'follow' mode, might get a line with no trailing \n
            # (eg. when ./configure is halfway through a test), which we
            # deal with below.
            line = f.readline()
        else:
            line = None
        if not line and (not opt.follow or not was_locked):
            # file not locked, and no new lines: done
            break
        if not line:
            was_locked = is_locked(fid)
            if opt.follow:
                # Don't display status line for extremely short-lived runs
                if opt.status and time.time() - start_time > 1.0:
                    width = _tty_width()
                    head = 'redo %s ' % ('{:,}'.format(total_lines))
                    tail = ''
                    for n in reversed(depth):
                        remain = width - len(head) - len(tail)
                        # always leave room for a final '... ' prefix
                        if remain < len(n) + 4 + 1 or remain <= 4:
                            if len(n) < 6 or remain < 6 + 1 + 4:
                                tail = '... %s' % tail
                            else:
                                start = len(n) - (remain - 3 - 1)
                                tail = '...%s %s' % (n[start:], tail)
                            break
                        elif n != '-':
                            tail = n + ' ' + tail
                    status = head + tail
                    if len(status) > width:
                        sys.stderr.write(
                            '\nOVERSIZE STATUS (%d):\n%r\n'
                            % (len(status), status))
                    assert len(status) <= width
                    sys.stdout.flush()
                    sys.stderr.write('\r%-*.*s\r' % (width, width, status))
                time.sleep(min(delay, 1.0))
                delay += 0.01
            continue
        total_lines += 1
        delay = 0.01
        if not line.endswith('\n'):
            # partial line: stash it until the rest arrives
            line_head += line
            continue
        if line_head:
            line = line_head + line
            line_head = ''
        if status:
            # wipe the status line before printing real output
            sys.stdout.flush()
            sys.stderr.write('\r%-*.*s\r' % (width, width, ''))
            status = None
        g = re.match(REDO_LINE_RE, line)
        if g:
            # FIXME: print prefix if @@REDO is not at start of line.
            # logs.PrettyLog does it, but only if we actually call .write().
            words, text = g.groups()
            kind, pid, when = words.split(':')[0:3]
            pid = atoi(pid)
            relname = _rel(topdir, mydir, text)
            fixname = os.path.normpath(os.path.join(mydir, text))
            if kind == 'unchanged':
                if opt.unchanged:
                    if opt.debug_locks:
                        logs.meta(kind, relname, pid=pid)
                    elif fixname not in already:
                        logs.meta('do', relname, pid=pid)
                        if opt.recursive:
                            # drop our shared log lock while recursing, so
                            # the child target's builder isn't blocked
                            if loglock:
                                loglock.unlock()
                            catlog(os.path.join(mydir, text))
                            if loglock:
                                loglock.waitlock(shared=True)
                        already.add(fixname)
            elif kind in ('do', 'waiting', 'locked', 'unlocked'):
                if opt.debug_locks:
                    logs.meta(kind, relname, pid=pid)
                    logs.write(line.rstrip())
                elif fixname not in already:
                    logs.meta('do', relname, pid=pid)
                    if opt.recursive:
                        assert text
                        if loglock:
                            loglock.unlock()
                        catlog(os.path.join(mydir, text))
                        if loglock:
                            loglock.waitlock(shared=True)
                    already.add(fixname)
            elif kind == 'done':
                rv, name = text.split(' ', 1)
                logs.meta(kind, rv + ' ' + _rel(topdir, mydir, name))
            else:
                logs.write(line.rstrip())
        else:
            if opt.details:
                logs.write(line.rstrip())
    if loglock:
        loglock.unlock()
    if status:
        sys.stdout.flush()
        sys.stderr.write('\r%-*.*s\r' % (width, width, ''))
        status = None
    if line_head:
        # partial line never got terminated
        sys.stdout.write('%s\n' % line_head)
    if t != '-':
        assert depth[-1] == t
        depth.pop(-1)
        _fix_depth()
|
||||
|
||||
|
||||
def main():
    """Entry point for redo-log: replay each target's log, prettified.

    Relies on module globals set up earlier in this file: 'opt' / 'targets'
    (parsed command line), 'topdir', 'logs', 'vars', '_rel' and 'catlog'.
    """
    queue = []
    try:
        if not targets:
            sys.stderr.write(
                'redo-log: give at least one target; ' +
                'maybe "all"?\n')
            sys.exit(1)
        # Status line only makes sense on a tty; --status=2 forces it on.
        if opt.status < 2 and not os.isatty(2):
            opt.status = False
        logs.setup(file=sys.stdout, pretty=opt.pretty, color=opt.color)
        if opt.debug_locks:
            vars.DEBUG_LOCKS = 1
        if opt.debug_pids:
            vars.DEBUG_PIDS = 1
        if opt.ack_fd:
            # Write back to owner, to let them know we started up okay and
            # will be able to see their error output, so it's okay to close
            # their old stderr.
            ack_fd = int(opt.ack_fd)
            assert ack_fd > 2
            if os.write(ack_fd, 'REDO-OK\n') != 8:
                raise Exception('write to ack_fd returned wrong length')
            os.close(ack_fd)
        queue += targets
        while queue:
            t = queue.pop(0)
            if t != '-':
                logs.meta('do', _rel(topdir, '.', t), pid=0)
            catlog(t)
    except KeyboardInterrupt:
        sys.exit(200)
    except IOError, e:
        # EPIPE just means our reader (eg. a pager) went away; not an error.
        if e.errno == errno.EPIPE:
            pass
        else:
            raise


if __name__ == '__main__':
    main()
|
||||
44
redo/cmd_ood.py
Normal file
44
redo/cmd_ood.py
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
import sys, os
|
||||
|
||||
import vars_init
|
||||
vars_init.init([])
|
||||
|
||||
import vars, state, deps
|
||||
from logs import err
|
||||
|
||||
# redo-ood takes no arguments: it always reports every out-of-date target.
if len(sys.argv[1:]) != 0:
    err('%s: no arguments expected.\n' % sys.argv[0])
    sys.exit(1)
|
||||
|
||||
|
||||
# Session-local memo of file ids that have already been dirty-checked.
cache = {}


def is_checked(f):
    """Return truthy if file *f* was already checked during this run."""
    try:
        return cache[f.id]
    except KeyError:
        return 0


def set_checked(f):
    """Remember that file *f* has been checked during this run."""
    cache[f.id] = 1


def log_override(name):
    """Deliberately silent: redo-ood doesn't warn about overridden files."""
    pass
|
||||
|
||||
|
||||
def main():
    """Print every known target that is currently out of date (ood)."""
    cwd = os.getcwd()
    for f in state.files():
        if f.is_target():
            # isdirty() returns CLEAN (0), DIRTY (1), or a nonempty list of
            # files that must be built before certainty is possible; any
            # truthy result means "possibly out of date", so we print it.
            if deps.isdirty(f,
                            depth='',
                            max_changed=vars.RUNID,
                            already_checked=[],
                            is_checked=is_checked,
                            set_checked=set_checked,
                            log_override=log_override):
                print state.relpath(os.path.join(vars.BASE, f.name), cwd)


if __name__ == '__main__':
    main()
|
||||
125
redo/cmd_redo.py
Normal file
125
redo/cmd_redo.py
Normal file
|
|
@ -0,0 +1,125 @@
|
|||
#
|
||||
# Copyright 2010-2018 Avery Pennarun and contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import sys, os, traceback
|
||||
import options
|
||||
from atoi import atoi
|
||||
|
||||
optspec = """
|
||||
redo [targets...]
|
||||
--
|
||||
j,jobs= maximum number of jobs to build at once
|
||||
d,debug print dependency checks as they happen
|
||||
v,verbose print commands as they are read from .do files (variables intact)
|
||||
x,xtrace print commands as they are executed (variables expanded)
|
||||
k,keep-going keep going as long as possible even if some targets fail
|
||||
shuffle randomize the build order to find dependency bugs
|
||||
version print the current version and exit
|
||||
|
||||
redo-log options:
|
||||
no-log don't capture error output, just let it flow straight to stderr
|
||||
no-details only show 'redo' recursion trace (to see more later, use redo-log)
|
||||
no-status don't display build summary line at the bottom of the screen
|
||||
no-pretty don't pretty-print logs, show raw @@REDO output instead
|
||||
no-color disable ANSI color; --color to force enable (default: auto)
|
||||
debug-locks print messages about file locking (useful for debugging)
|
||||
debug-pids print process ids as part of log messages (useful for debugging)
|
||||
"""
|
||||
o = options.Options(optspec)
|
||||
(opt, flags, extra) = o.parse(sys.argv[1:])
|
||||
|
||||
targets = extra
|
||||
|
||||
# --version needs no database or environment setup; handle it first.
if opt.version:
    import version
    print version.TAG
    sys.exit(0)
# Export command-line flags into the environment so that recursive redo
# invocations (launched from .do scripts) inherit the same settings.
if opt.debug:
    os.environ['REDO_DEBUG'] = str(opt.debug or 0)
if opt.verbose:
    os.environ['REDO_VERBOSE'] = '1'
if opt.xtrace:
    os.environ['REDO_XTRACE'] = '1'
if opt.keep_going:
    os.environ['REDO_KEEP_GOING'] = '1'
if opt.shuffle:
    os.environ['REDO_SHUFFLE'] = '1'
if opt.debug_locks:
    os.environ['REDO_DEBUG_LOCKS'] = '1'
if opt.debug_pids:
    os.environ['REDO_DEBUG_PIDS'] = '1'

# This is slightly tricky: the log and pretty options default to true.  We
# want to inherit that 'true' value from parent processes *unless* someone
# explicitly specifies the reverse.
if opt.no_log:
    os.environ['REDO_LOG'] = '0'
if opt.no_pretty:
    os.environ['REDO_PRETTY'] = '0'
if opt.no_color:
    os.environ['REDO_COLOR'] = '0'
|
||||
|
||||
import vars_init
|
||||
vars_init.init(targets)
|
||||
|
||||
import vars, state, builder, jwack
|
||||
from logs import warn, err
|
||||
|
||||
def main():
    """Entry point for 'redo': build the given targets unconditionally."""
    try:
        j = atoi(opt.jobs or 1)
        # At toplevel, detach stdin so subjobs can't accidentally read our
        # terminal; the log reader (if enabled) takes over stdin's fd.
        if vars_init.is_toplevel and (vars.LOG or j > 1):
            builder.close_stdin()
        if vars_init.is_toplevel and vars.LOG:
            builder.start_stdin_log_reader(
                status=opt.status, details=opt.details,
                pretty=opt.pretty, color=opt.color,
                debug_locks=opt.debug_locks, debug_pids=opt.debug_pids)
        # Refuse to clobber files that exist but aren't known targets.
        for t in targets:
            if os.path.exists(t):
                f = state.File(name=t)
                if not f.is_generated:
                    warn('%s: exists and not marked as generated; not redoing.\n'
                         % f.nicename())
        state.rollback()

        if j < 1 or j > 1000:
            # NOTE(review): err() only prints; execution continues into
            # jwack.setup(j), whose assertion fails for j < 1.  Possibly
            # this should sys.exit() here - confirm intended behavior.
            err('invalid --jobs value: %r\n' % opt.jobs)
        jwack.setup(j)
        try:
            assert state.is_flushed()
            # lambda t: (True, True) == "always dirty, always build".
            retcode = builder.main(targets, lambda t: (True, True))
            assert state.is_flushed()
        finally:
            try:
                state.rollback()
            finally:
                try:
                    jwack.force_return_tokens()
                except Exception, e:  # pylint: disable=broad-except
                    traceback.print_exc(100, sys.stderr)
                    err('unexpected error: %r\n' % e)
                    retcode = 1
        if vars_init.is_toplevel:
            builder.await_log_reader()
        sys.exit(retcode)
    except KeyboardInterrupt:
        if vars_init.is_toplevel:
            builder.await_log_reader()
        sys.exit(200)


if __name__ == '__main__':
    main()
|
||||
22
redo/cmd_sources.py
Normal file
22
redo/cmd_sources.py
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
import sys, os
|
||||
|
||||
import vars_init
|
||||
vars_init.init([])
|
||||
|
||||
import state, vars
|
||||
from logs import err
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv[1:]) != 0:
|
||||
err('%s: no arguments expected.\n' % sys.argv[0])
|
||||
sys.exit(1)
|
||||
|
||||
cwd = os.getcwd()
|
||||
for f in state.files():
|
||||
if f.is_source():
|
||||
print state.relpath(os.path.join(vars.BASE, f.name), cwd)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
59
redo/cmd_stamp.py
Normal file
59
redo/cmd_stamp.py
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
import sys, os
|
||||
import vars, state
|
||||
from logs import err, debug2
|
||||
|
||||
|
||||
def main():
    """Checksum stdin and record it as the current target's stamp.

    Reads all of stdin, computes its SHA-1, and (when run inside a redo
    build, ie. vars.TARGET is set) stores the checksum in the state
    database, marking the target changed or merely checked accordingly.
    """
    if sys.argv[1:]:
        err('%s: no arguments expected.\n' % sys.argv[0])
        sys.exit(1)

    if os.isatty(0):
        err('%s: you must provide the data to stamp on stdin\n' % sys.argv[0])
        sys.exit(1)

    # hashlib is only available in python 2.5 or higher, but the 'sha'
    # module produces a DeprecationWarning in python 2.6 or higher.  We want
    # to support python 2.4 and above without any stupid warnings, so let's
    # try using hashlib first, and downgrade if it fails.
    try:
        import hashlib
    except ImportError:
        import sha
        digest = sha.sha()
    else:
        digest = hashlib.sha1()

    while 1:
        chunk = os.read(0, 4096)
        digest.update(chunk)
        if not chunk:
            break

    csum = digest.hexdigest()

    if not vars.TARGET:
        # Not inside a redo build: we consumed stdin, nothing to record.
        sys.exit(0)

    me = os.path.join(vars.STARTDIR,
                      os.path.join(vars.PWD, vars.TARGET))
    f = state.File(name=me)
    changed = (csum != f.csum)
    debug2('%s: old = %s\n' % (f.name, f.csum))
    debug2('%s: sum = %s (%s)\n' % (f.name, csum,
                                    changed and 'changed' or 'unchanged'))
    f.is_generated = True
    f.is_override = False
    f.failed_runid = None
    if changed:
        f.set_changed()  # update_stamp might skip this if mtime is identical
        f.csum = csum
    else:
        # unchanged
        f.set_checked()
    f.save()
    state.commit()


if __name__ == '__main__':
    main()
|
||||
22
redo/cmd_targets.py
Normal file
22
redo/cmd_targets.py
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
import sys, os
|
||||
|
||||
import vars_init
|
||||
vars_init.init([])
|
||||
|
||||
import state, vars
|
||||
from logs import err
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv[1:]) != 0:
|
||||
err('%s: no arguments expected.\n' % sys.argv[0])
|
||||
sys.exit(1)
|
||||
|
||||
cwd = os.getcwd()
|
||||
for f in state.files():
|
||||
if f.is_target():
|
||||
print state.relpath(os.path.join(vars.BASE, f.name), cwd)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
40
redo/cmd_unlocked.py
Normal file
40
redo/cmd_unlocked.py
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
import sys, os
|
||||
import state
|
||||
from logs import err
|
||||
|
||||
|
||||
def main():
    """Build target (argv[1]) without acquiring its lock.

    Our caller (redo itself) already holds the lock on the target, so we
    only build the listed dependencies (argv[2:]) normally, then build the
    target itself with REDO_UNLOCKED set.
    """
    if len(sys.argv[1:]) < 2:
        err('%s: at least 2 arguments expected.\n' % sys.argv[0])
        sys.exit(1)

    target = sys.argv[1]
    deps = sys.argv[2:]

    # A target depending on itself would deadlock below.
    for d in deps:
        assert d != target

    # NOTE(review): 'me' looks unused, but state.File() may register the
    # target in the database as a side effect - confirm before removing.
    me = state.File(name=target)

    # Build the known dependencies of our primary target.  This *does* require
    # grabbing locks.
    os.environ['REDO_NO_OOB'] = '1'
    rv = os.spawnvp(os.P_WAIT, 'redo-ifchange', ['redo-ifchange'] + deps)
    if rv:
        sys.exit(rv)

    # We know our caller already owns the lock on target, so we don't have to
    # acquire another one; tell redo-ifchange about that.  Also, REDO_NO_OOB
    # persists from up above, because we don't want to do OOB now either.
    # (Actually it's most important for the primary target, since it's the one
    # who initiated the OOB in the first place.)
    os.environ['REDO_UNLOCKED'] = '1'
    rv = os.spawnvp(os.P_WAIT, 'redo-ifchange', ['redo-ifchange', target])
    if rv:
        sys.exit(rv)


if __name__ == '__main__':
    main()
|
||||
34
redo/cmd_whichdo.py
Normal file
34
redo/cmd_whichdo.py
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
import sys, os
|
||||
|
||||
import vars_init
|
||||
vars_init.init_no_state()
|
||||
|
||||
import paths
|
||||
from logs import err
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv[1:]) != 1:
|
||||
err('%s: exactly one argument expected.\n' % sys.argv[0])
|
||||
sys.exit(1)
|
||||
|
||||
want = sys.argv[1]
|
||||
if not want:
|
||||
err('cannot build the empty target ("").\n')
|
||||
sys.exit(204)
|
||||
|
||||
abswant = os.path.abspath(want)
|
||||
pdf = paths.possible_do_files(abswant)
|
||||
for dodir, dofile, basedir, basename, ext in pdf:
|
||||
dopath = os.path.join('/', dodir, dofile)
|
||||
relpath = os.path.relpath(dopath, '.')
|
||||
exists = os.path.exists(dopath)
|
||||
assert '\n' not in relpath
|
||||
print relpath
|
||||
if exists:
|
||||
sys.exit(0)
|
||||
sys.exit(1) # no appropriate dofile found
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
122
redo/deps.py
Normal file
122
redo/deps.py
Normal file
|
|
@ -0,0 +1,122 @@
|
|||
import os
|
||||
import vars, state
|
||||
from logs import debug
|
||||
|
||||
CLEAN = 0
|
||||
DIRTY = 1
|
||||
|
||||
def isdirty(f, depth, max_changed,
            already_checked,
            is_checked=state.File.is_checked,
            set_checked=state.File.set_checked_save,
            log_override=state.warn_override):
    """Decide whether target *f* needs to be rebuilt.

    Returns CLEAN (0), DIRTY (1), or a nonempty list of File objects,
    which means "maybe dirty": the listed files must be built first,
    after which the question can be answered definitively.

    depth is an indent string for debug output; max_changed is the newest
    runid the parent may depend on; already_checked guards against cycles.
    The is_checked/set_checked/log_override hooks let callers (eg. redo-ood)
    substitute non-persistent bookkeeping.
    """
    if f.id in already_checked:
        raise state.CyclicDependencyError()
    # make a copy of the list, so upon returning, our parent's copy
    # is unaffected
    already_checked = list(already_checked) + [f.id]

    if vars.DEBUG >= 1:
        debug('%s?%s %r,%r\n'
              % (depth, f.nicename(), f.is_generated, f.is_override))

    if f.failed_runid:
        debug('%s-- DIRTY (failed last time)\n' % depth)
        return DIRTY
    if f.changed_runid is None:
        debug('%s-- DIRTY (never built)\n' % depth)
        return DIRTY
    if f.changed_runid > max_changed:
        debug('%s-- DIRTY (built %d > %d; %d)\n'
              % (depth, f.changed_runid, max_changed, vars.RUNID))
        return DIRTY  # has been built more recently than parent
    if is_checked(f):
        if vars.DEBUG >= 1:
            debug('%s-- CLEAN (checked)\n' % depth)
        return CLEAN  # has already been checked during this session
    if not f.stamp:
        debug('%s-- DIRTY (no stamp)\n' % depth)
        return DIRTY

    newstamp = f.read_stamp()
    if f.stamp != newstamp:
        if newstamp == state.STAMP_MISSING:
            debug('%s-- DIRTY (missing)\n' % depth)
            if f.stamp and f.is_generated:
                # previously was stamped and generated, but suddenly missing.
                # We can safely forget that it is/was a target; if someone
                # does redo-ifchange on it and it doesn't exist, we'll mark
                # it a target again, but if someone creates it by hand,
                # it'll be a source.  This should reduce false alarms when
                # files change from targets to sources as a project evolves.
                debug('%s converted target -> source %r\n' % (depth, f.id))
                f.is_generated = f.failed_runid = 0
                f.save()
                f.refresh()
                assert not f.is_generated
        else:
            debug('%s-- DIRTY (mtime)\n' % depth)
        if f.csum:
            # Checksummed targets might turn out unchanged after a rebuild,
            # so report "maybe dirty" instead of a definite DIRTY.
            return [f]
        else:
            return DIRTY

    must_build = []
    for mode, f2 in f.deps():
        dirty = CLEAN
        if mode == 'c':
            # 'c' = "created" dependency: dirty if the file now exists.
            if os.path.exists(os.path.join(vars.BASE, f2.name)):
                debug('%s-- DIRTY (created)\n' % depth)
                dirty = DIRTY
        elif mode == 'm':
            # 'm' = "modified" dependency: recurse.
            sub = isdirty(f2, depth=depth + '  ',
                          max_changed=max(f.changed_runid,
                                          f.checked_runid),
                          already_checked=already_checked,
                          is_checked=is_checked,
                          set_checked=set_checked,
                          log_override=log_override)
            if sub:
                debug('%s-- DIRTY (sub)\n' % depth)
                dirty = sub
        else:
            assert mode in ('c', 'm')
        if not f.csum:
            # f is a "normal" target: dirty f2 means f is instantly dirty
            if dirty == DIRTY:
                # f2 is definitely dirty, so f definitely needs to
                # redo.
                return DIRTY
            elif isinstance(dirty, list):
                # our child f2 might be dirty, but it's not sure yet.  It's
                # given us a list of targets we have to redo in order to
                # be sure.
                must_build += dirty
        else:
            # f is "checksummable": dirty f2 means f needs to redo,
            # but f might turn out to be clean after that (ie. our parent
            # might not be dirty).
            if dirty == DIRTY:
                # f2 is definitely dirty, so f definitely needs to
                # redo.  However, after that, f might turn out to be
                # unchanged.
                return [f]
            elif isinstance(dirty, list):
                # our child f2 might be dirty, but it's not sure yet.  It's
                # given us a list of targets we have to redo in order to
                # be sure.
                must_build += dirty

    if must_build:
        # f is *maybe* dirty because at least one of its children is maybe
        # dirty.  must_build has accumulated a list of "topmost" uncertain
        # objects in the tree.  If we build all those, we can then
        # redo-ifchange f and it won't have any uncertainty next time.
        return must_build
    debug('%s-- CLEAN\n' % (depth,))

    # if we get here, it's because the target is clean
    if f.is_override:
        log_override(f.name)
    set_checked(f)
    return CLEAN
||||
26
redo/helpers.py
Normal file
26
redo/helpers.py
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
import os, errno, fcntl
|
||||
|
||||
|
||||
def join(between, l):
    """Concatenate the items of *l*, separated by *between*.

    Thin readability wrapper around str.join().
    """
    sep = between
    return sep.join(l)
|
||||
|
||||
|
||||
def unlink(f):
|
||||
"""Delete a file at path 'f' if it currently exists.
|
||||
|
||||
Unlike os.unlink(), does not throw an exception if the file didn't already
|
||||
exist.
|
||||
"""
|
||||
try:
|
||||
os.unlink(f)
|
||||
except OSError, e:
|
||||
if e.errno == errno.ENOENT:
|
||||
pass # it doesn't exist, that's what you asked for
|
||||
|
||||
|
||||
def close_on_exec(fd, yes):
    """Set (yes=True) or clear (yes=False) the FD_CLOEXEC flag on *fd*."""
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    if yes:
        flags |= fcntl.FD_CLOEXEC
    else:
        flags &= ~fcntl.FD_CLOEXEC
    fcntl.fcntl(fd, fcntl.F_SETFD, flags)
|
||||
447
redo/jwack.py
Normal file
447
redo/jwack.py
Normal file
|
|
@ -0,0 +1,447 @@
|
|||
#
|
||||
# Implementation of a GNU make-compatible jobserver.
|
||||
#
|
||||
# The basic idea is that both ends of a pipe (tokenfds) are shared with all
|
||||
# subprocesses. At startup, we write one "token" into the pipe for each
|
||||
# configured job. (So eg. redo -j20 will put 20 tokens in the pipe.) In
|
||||
# order to do work, you must first obtain a token, by reading the other
|
||||
# end of the pipe. When you're done working, you write the token back into
|
||||
# the pipe so that someone else can grab it.
|
||||
#
|
||||
# The toplevel process in the hierarchy is what creates the pipes in the
|
||||
# first place. Then it puts the pipe file descriptor numbers into MAKEFLAGS,
|
||||
# so that subprocesses can pull them back out.
|
||||
#
|
||||
# As usual, edge cases make all this a bit tricky:
|
||||
#
|
||||
# - Every process is defined as owning a token at startup time. This makes
|
||||
# sense because it's backward compatible with single-process make: if a
|
||||
# subprocess neither reads nor writes the pipe, then it has exactly one
|
||||
# token, so it's allowed to do one thread of work.
|
||||
#
|
||||
# - Thus, for symmetry, processes also must own a token at exit time.
|
||||
#
|
||||
# - In turn, to make *that* work, a parent process must destroy *its* token
|
||||
# upon launching a subprocess. (Destroy, not release, because the
|
||||
# subprocess has created its own token.) It can try to obtain another
|
||||
# token, but if none are available, it has to stop work until one of its
|
||||
# subprocesses finishes. When the subprocess finishes, its token is
|
||||
# destroyed, so the parent creates a new one.
|
||||
#
|
||||
# - If our process is going to stop and wait for a lock (eg. because we
|
||||
# depend on a target and someone else is already building that target),
|
||||
# we must give up our token. Otherwise, we're sucking up a "thread" (a
|
||||
# unit of parallelism) just to do nothing. If enough processes are waiting
|
||||
# on a particular lock, then the process building that target might end up
|
||||
# with only a single token, and everything gets serialized.
|
||||
#
|
||||
# - Unfortunately this leads to a problem: if we give up our token, we then
|
||||
# have to re-acquire a token before exiting, even if we want to exit with
|
||||
# an error code.
|
||||
#
|
||||
# - redo-log wants to linearize output so that it always prints log messages
|
||||
# in the order jobs were started; but because of the above, a job being
|
||||
# logged might end up with no tokens for a long time, waiting for some
|
||||
# other branch of the build to complete.
|
||||
#
|
||||
# As a result, we extend beyond GNU make's model and make things even more
|
||||
# complicated. We add a second pipe, cheatfds, which we use to "cheat" on
|
||||
# tokens if our particular job is in the foreground (ie. is the one
|
||||
# currently being tailed by redo-log -f). We add at most one token per
|
||||
# redo-log instance. If we are the foreground task, and we need a token,
|
||||
# and we don't have a token, and we don't have any subtasks (because if we
|
||||
# had a subtask, then we're not in the foreground), we synthesize our own
|
||||
# token by incrementing _mytokens and _cheats, but we don't read from
|
||||
# tokenfds. Then, when it's time to give up our token again, we also won't
|
||||
# write back to tokenfds, so the synthesized token disappears.
|
||||
#
|
||||
# Of course, all that then leads to *another* problem: every process must
|
||||
# hold a *real* token when it exits, because its parent has given up a
|
||||
# *real* token in order to start this subprocess. If we're holding a cheat
|
||||
# token when it's time to exit, then we can't meet this requirement. The
|
||||
# obvious thing to do would be to give up the cheat token and wait for a
|
||||
# real token, but that might take a very long time, and if we're the last
|
||||
# thing preventing our parent from exiting, then redo-log will sit around
|
||||
# following our parent until we finally get a token so we can exit,
|
||||
# defeating the whole purpose of cheating. Instead of waiting, we write our
|
||||
# "cheater" token to cheatfds. Then, any task, upon noticing one of its
|
||||
# subprocesses has finished, will check to see if there are any tokens on
|
||||
# cheatfds; if so, it will remove one of them and *not* re-create its
|
||||
# child's token, thus destroying the cheater token from earlier, and restoring
|
||||
# balance.
|
||||
#
|
||||
# Sorry this is so complicated. I couldn't think of a way to make it
|
||||
# simpler :)
|
||||
#
|
||||
import sys, os, errno, select, fcntl, signal
|
||||
from atoi import atoi
|
||||
from helpers import close_on_exec
|
||||
import state, vars
|
||||
|
||||
# Module-wide jobserver state (shared with children via fork, not threads):
_toplevel = 0     # nonzero (== maxjobs) iff this process created the server
_mytokens = 1     # tokens currently held by this process (incl. our own)
_cheats = 0       # how many of _mytokens are synthesized "cheater" tokens
_tokenfds = None  # (read_fd, write_fd) of the shared token pipe
_cheatfds = None  # (read_fd, write_fd) of the shared cheater-token pipe
_waitfds = {}     # status-pipe read fd -> Job, one per running subjob


def _debug(s):
    # Compiled-out tracing: flip the 0 to 1 to watch jobserver activity.
    if 0:
        sys.stderr.write('jwack#%d: %s' % (os.getpid(), s))
|
||||
|
||||
|
||||
def _create_tokens(n):
    """Materialize *n* new tokens for this process.

    Outstanding cheater tokens are cancelled first; only the remainder
    become real held tokens.
    """
    global _mytokens, _cheats
    assert n >= 0
    assert _cheats >= 0
    remaining = n
    while remaining > 0:
        remaining -= 1
        if _cheats > 0:
            _cheats -= 1
        else:
            _mytokens += 1


def _destroy_tokens(n):
    """Discard *n* held tokens without writing them back to the pipe."""
    global _mytokens
    assert _mytokens >= n
    _mytokens -= n
||||
|
||||
|
||||
def _release(n):
    """Give up *n* held tokens.

    Real tokens are written back to the shared pipe for other processes;
    cheater tokens are simply destroyed (they were never in the pipe).
    """
    global _mytokens, _cheats
    assert n >= 0
    assert _mytokens >= n
    _debug('%d,%d -> release(%d)\n' % (_mytokens, _cheats, n))
    n_to_share = 0
    remaining = n
    while remaining:
        remaining -= 1
        _mytokens -= 1
        if _cheats > 0:
            _cheats -= 1
        else:
            n_to_share += 1
    assert _mytokens >= 0
    assert _cheats >= 0
    if n_to_share:
        _debug('PUT tokenfds %d\n' % n_to_share)
        os.write(_tokenfds[1], 't' * n_to_share)


def _release_except_mine():
    """Release every token we hold except the one that is 'ours'."""
    assert _mytokens > 0
    _release(_mytokens - 1)


def release_mine():
    """Release our own token; we must hold at least one."""
    assert _mytokens >= 1
    _debug('%d,%d -> release_mine()\n' % (_mytokens, _cheats))
    _release(1)
|
||||
|
||||
|
||||
def _timeout(sig, frame):
|
||||
pass
|
||||
|
||||
|
||||
# We make the pipes use the first available fd numbers starting at startfd.
|
||||
# This makes it easier to differentiate different kinds of pipes when using
|
||||
# strace.
|
||||
def _make_pipe(startfd):
|
||||
(a, b) = os.pipe()
|
||||
fds = (fcntl.fcntl(a, fcntl.F_DUPFD, startfd),
|
||||
fcntl.fcntl(b, fcntl.F_DUPFD, startfd + 1))
|
||||
os.close(a)
|
||||
os.close(b)
|
||||
return fds
|
||||
|
||||
|
||||
def _try_read(fd, n):
    """Try to read n bytes from fd.  Returns: '' on EOF, None if EAGAIN."""
    # NOTE(review): despite the 'n' parameter, at most 1 byte is read per
    # call (see os.read below); tokens are consumed one byte at a time.
    assert state.is_flushed()

    # using djb's suggested way of doing non-blocking reads from a blocking
    # socket: http://cr.yp.to/unix/nonblock.html
    # We can't just make the socket non-blocking, because we want to be
    # compatible with GNU Make, and they can't handle it.
    r, w, x = select.select([fd], [], [], 0)
    if not r:
        return None  # try again
    # ok, the socket is readable - but some other process might get there
    # first.  We have to set an alarm() in case our read() gets stuck.
    oldh = signal.signal(signal.SIGALRM, _timeout)
    try:
        signal.setitimer(signal.ITIMER_REAL, 0.01, 0.01)  # emergency fallback
        try:
            b = os.read(fd, 1)
        except OSError, e:
            if e.errno in (errno.EAGAIN, errno.EINTR):
                # interrupted or it was nonblocking
                return None  # try again
            else:
                raise
    finally:
        # Always disarm the timer and restore the previous SIGALRM handler,
        # even if the read raised.
        signal.setitimer(signal.ITIMER_REAL, 0, 0)
        signal.signal(signal.SIGALRM, oldh)
    return b
||||
|
||||
|
||||
def _try_read_all(fd, n):
    """Drain fd via repeated _try_read() calls; return everything read."""
    chunks = []
    while 1:
        chunk = _try_read(fd, n)
        if not chunk:
            # Either EOF ('') or would-block (None): stop draining.
            break
        chunks.append(chunk)
    return ''.join(chunks)
||||
|
||||
|
||||
def setup(maxjobs):
    """Initialize the jobserver, joining an existing one if possible.

    If MAKEFLAGS already advertises --jobserver-auth/--jobserver-fds we
    inherit that (GNU make compatible) token pipe; otherwise we create one
    and become the toplevel.  The redo-specific cheater-token pipe
    (REDO_CHEATFDS) is likewise inherited or created.
    """
    global _tokenfds, _cheatfds, _toplevel
    assert maxjobs > 0
    assert not _tokenfds
    _debug('setup(%d)\n' % maxjobs)

    flags = ' ' + os.getenv('MAKEFLAGS', '') + ' '
    FIND1 = ' --jobserver-auth='  # renamed in GNU make 4.2
    FIND2 = ' --jobserver-fds='   # fallback syntax
    FIND = FIND1
    ofs = flags.find(FIND1)
    if ofs < 0:
        FIND = FIND2
        ofs = flags.find(FIND2)
    if ofs >= 0:
        # Parse the 'R,W' fd pair following the option name.
        s = flags[ofs+len(FIND):]
        (arg, junk) = s.split(' ', 1)
        (a, b) = arg.split(',', 1)
        a = atoi(a)
        b = atoi(b)
        if a <= 0 or b <= 0:
            raise ValueError('invalid --jobserver-auth: %r' % arg)
        try:
            # Probe the fds: make only actually passes them to children of
            # rules explicitly marked recursive (prefixed with '+').
            fcntl.fcntl(a, fcntl.F_GETFL)
            fcntl.fcntl(b, fcntl.F_GETFL)
        except IOError, e:
            if e.errno == errno.EBADF:
                raise ValueError('broken --jobserver-auth from make; ' +
                                 'prefix your Makefile rule with a "+"')
            else:
                raise
        _tokenfds = (a, b)

    cheats = os.getenv('REDO_CHEATFDS', '')
    if cheats:
        (a, b) = cheats.split(',', 1)
        a = atoi(a)
        b = atoi(b)
        if a <= 0 or b <= 0:
            raise ValueError('invalid REDO_CHEATFDS: %r' % cheats)
        _cheatfds = (a, b)
    else:
        _cheatfds = _make_pipe(102)
        os.putenv('REDO_CHEATFDS', '%d,%d' % (_cheatfds[0], _cheatfds[1]))

    if not _tokenfds:
        # need to start a new server
        _toplevel = maxjobs
        _tokenfds = _make_pipe(100)
        # We implicitly own one token already; share out the other n-1.
        _create_tokens(maxjobs - 1)
        _release_except_mine()
        # Advertise under both the new and old option names so that any
        # version of make can join our jobserver.
        os.putenv('MAKEFLAGS',
                  '%s -j --jobserver-auth=%d,%d --jobserver-fds=%d,%d' %
                  (os.getenv('MAKEFLAGS', ''),
                   _tokenfds[0], _tokenfds[1],
                   _tokenfds[0], _tokenfds[1]))
||||
|
||||
|
||||
def _wait(want_token, max_delay):
    """Block until a subjob finishes or (if want_token) a token is readable.

    Reaps any finished subjobs: rebalances the token accounting for their
    disappearance and fires their donefunc(name, rv) callbacks.  Does not
    itself consume a token; the caller reads the token pipe afterward.
    """
    rfds = _waitfds.keys()
    if want_token:
        rfds.append(_tokenfds[0])
    assert rfds
    assert state.is_flushed()
    r, w, x = select.select(rfds, [], [], max_delay)
    _debug('_tokenfds=%r; wfds=%r; readable: %r\n' % (_tokenfds, _waitfds, r))
    for fd in r:
        if fd == _tokenfds[0]:
            # A token became available; our caller will try to read it.
            pass
        else:
            pd = _waitfds[fd]
            _debug("done: %r\n" % pd.name)
            # redo subprocesses are expected to die without releasing their
            # tokens, so things are less likely to get confused if they
            # die abnormally.  That means a token has 'disappeared' and we
            # now need to recreate it.
            b = _try_read(_cheatfds[0], 1)
            _debug('GOT cheatfd\n')
            if b is None:
                _create_tokens(1)
                if has_token():
                    _release_except_mine()
            else:
                # someone exited with _cheats > 0, so we need to compensate
                # by *not* re-creating a token now.
                pass
            os.close(fd)
            del _waitfds[fd]
            rv = os.waitpid(pd.pid, 0)
            assert rv[0] == pd.pid
            _debug("done1: rv=%r\n" % (rv,))
            rv = rv[1]
            # Translate the waitpid() status: normal exits report their
            # status, signal deaths report the negated signal number.
            if os.WIFEXITED(rv):
                pd.rv = os.WEXITSTATUS(rv)
            else:
                pd.rv = -os.WTERMSIG(rv)
            _debug("done2: rv=%d\n" % pd.rv)
            pd.donefunc(pd.name, pd.rv)
||||
|
||||
|
||||
def has_token():
    """Return True if we hold at least one token; None (falsy) otherwise."""
    assert _mytokens >= 0
    return True if _mytokens >= 1 else None
||||
|
||||
|
||||
def ensure_token(reason, max_delay=None):
    """Try to acquire a token, blocking up to max_delay (None = forever).

    May return without a token when max_delay expires; callers that must
    hold one check has_token() afterward.  'reason' is for debug tracing.
    """
    global _mytokens
    assert state.is_flushed()
    assert _mytokens <= 1
    while 1:
        if _mytokens >= 1:
            _debug("_mytokens is %d\n" % _mytokens)
            assert _mytokens == 1
            _debug('(%r) used my own token...\n' % reason)
            break
        assert _mytokens < 1
        _debug('(%r) waiting for tokens...\n' % reason)
        _wait(want_token=1, max_delay=max_delay)
        # _wait() may have reaped a child and recreated our token.
        if _mytokens >= 1:
            break
        assert _mytokens < 1
        b = _try_read(_tokenfds[0], 1)
        _debug('GOT tokenfd\n')
        if b == '':
            raise Exception('unexpected EOF on token read')
        if b:
            _mytokens += 1
            _debug('(%r) got a token (%r).\n' % (reason, b))
            break
        # b is None: someone else grabbed the token first; maybe retry.
        if max_delay != None:
            break
    assert _mytokens <= 1
||||
|
||||
|
||||
def ensure_token_or_cheat(reason, cheatfunc):
    """Acquire a token, or - if completely idle and cheatfunc() permits
    (returns n > 0) - synthesize n 'cheater' tokens instead of waiting."""
    global _mytokens, _cheats
    backoff = 0.01
    while not has_token():
        while running() and not has_token():
            # If we already have a subproc running, then effectively we
            # already have a token.  Don't create a cheater token unless
            # we're completely idle.
            ensure_token(reason, max_delay=None)
        # Poll with exponential backoff so we re-ask cheatfunc regularly.
        ensure_token(reason, max_delay=min(1.0, backoff))
        backoff *= 2
        if not has_token():
            assert _mytokens == 0
            n = cheatfunc()
            _debug('%s: %s: cheat = %d\n' % (vars.TARGET, reason, n))
            if n > 0:
                # Synthesized tokens: counted in both _mytokens and _cheats
                # so release/exit paths know not to write them to the pipe.
                _mytokens += n
                _cheats += n
                break
||||
|
||||
|
||||
def running():
    """Return the number of subjobs currently in flight (truthy if any)."""
    active = _waitfds
    return len(active)
||||
|
||||
|
||||
def wait_all():
    """Release all our tokens and wait for every subjob to finish.

    On return this process holds *no* tokens at all; the caller must
    reacquire one before doing more work.  The toplevel process also
    self-checks that the global token count still balances.
    """
    _debug("%d,%d -> wait_all\n" % (_mytokens, _cheats))
    assert state.is_flushed()
    while 1:
        while _mytokens >= 1:
            release_mine()
        if not running():
            break
        _debug("wait_all: wait()\n")
        _wait(want_token=0, max_delay=None)
    _debug("wait_all: empty list\n")
    if _toplevel:
        # If we're the toplevel and we're sure no child processes remain,
        # then we know we're totally idle.  Self-test to ensure no tokens
        # mysteriously got created/destroyed.
        tokens = _try_read_all(_tokenfds[0], 8192)
        cheats = _try_read_all(_cheatfds[0], 8192)
        _debug('toplevel: GOT %d tokens and %d cheats\n'
               % (len(tokens), len(cheats)))
        if len(tokens) - len(cheats) != _toplevel:
            raise Exception('on exit: expected %d tokens; found %r-%r'
                            % (_toplevel, len(tokens), len(cheats)))
        os.write(_tokenfds[1], tokens)
    # note: when we return, we have *no* tokens, not even our own!
    # If caller wants to continue, they have to obtain one right away.
|
||||
|
||||
def force_return_tokens():
    """Emergency cleanup (eg. on fatal error): forget all subjobs and put
    every token we can account for back into the shared pipes."""
    n = len(_waitfds)
    _debug('%d,%d -> %d jobs left in force_return_tokens\n'
           % (_mytokens, _cheats, n))
    for k in list(_waitfds):
        del _waitfds[k]
    # Each abandoned subjob took a real token with it; recreate those.
    _create_tokens(n)
    if has_token():
        _release_except_mine()
    assert _mytokens == 1, 'mytokens=%d' % _mytokens
    assert _cheats <= _mytokens, 'mytokens=%d cheats=%d' % (_mytokens, _cheats)
    assert _cheats in (0, 1), 'cheats=%d' % _cheats
    if _cheats:
        _debug('%d,%d -> force_return_tokens: recovering final token\n'
               % (_mytokens, _cheats))
        # Our last token is a cheater: destroy it and announce that on the
        # cheatfds pipe so whoever reaps us won't recreate a real one.
        _destroy_tokens(_cheats)
        os.write(_cheatfds[1], 't' * _cheats)
    assert state.is_flushed()
||||
|
||||
|
||||
def _pre_job(r, w, pfn):
    """Child-side setup hook: drop the parent's read end, run the callback.

    'w' is intentionally kept open; closing it (usually by exiting) is how
    the parent learns the job is finished.
    """
    os.close(r)
    if pfn:
        pfn()
|
||||
|
||||
|
||||
class Job(object):
    """Bookkeeping record for one running child process."""

    def __init__(self, name, pid, donefunc):
        # name: human-readable reason/target for this job.
        # pid: the forked child's process id.
        # rv: exit status; None until the child is reaped.
        # donefunc: callback invoked when the job completes.
        self.name = name
        self.pid = pid
        self.rv = None
        self.donefunc = donefunc

    def __repr__(self):
        return 'Job(%s,%d)' % (self.name, self.pid)
|
||||
|
||||
|
||||
def start_job(reason, jobfunc, donefunc):
    """Fork a child process to run jobfunc(); register donefunc for later.

    The caller must hold exactly one token; it is conceptually transferred
    to the child (so ours is destroyed here to keep the pool balanced).
    Job completion is detected as EOF on a pipe whose write end the child
    holds until it exits.
    """
    assert state.is_flushed()
    assert _mytokens <= 1
    assert _mytokens == 1
    # Subprocesses always start with 1 token, so we have to destroy ours
    # in order for the universe to stay in balance.
    _destroy_tokens(1)
    r, w = _make_pipe(50)
    pid = os.fork()
    if pid == 0:
        # child
        os.close(r)
        rv = 201  # default: report failure if jobfunc never returns a value
        try:
            try:
                rv = jobfunc() or 0
                _debug('jobfunc completed (%r, %r)\n' % (jobfunc, rv))
            except Exception:  # pylint: disable=broad-except
                import traceback
                traceback.print_exc()
        finally:
            _debug('exit: %d\n' % rv)
            # os._exit, not sys.exit: never run the parent's cleanup
            # handlers in the forked child.
            os._exit(rv)
    close_on_exec(r, True)
    os.close(w)
    pd = Job(reason, pid, donefunc)
    # Parent: watch the read end; EOF means the child finished.
    _waitfds[r] = pd
|
||||
154
redo/logs.py
Normal file
154
redo/logs.py
Normal file
|
|
@ -0,0 +1,154 @@
|
|||
import os, re, sys, time
|
||||
import vars
|
||||
|
||||
# ANSI escape sequences; filled in by check_tty() before first use.
RED = GREEN = YELLOW = BOLD = PLAIN = None


def check_tty(file, color):
    """Initialize the module-level ANSI color codes.

    Color is enabled when 'color' is truthy and 'file' is a real tty with a
    capable $TERM, or forced unconditionally when color >= 2.  Otherwise all
    codes become empty strings (plain output).
    """
    global RED, GREEN, YELLOW, BOLD, PLAIN
    color_ok = file.isatty() and (os.environ.get('TERM') or 'dumb') != 'dumb'
    if (color and color_ok) or color >= 2:
        # ...use ANSI formatting codes.
        # pylint: disable=bad-whitespace
        RED    = "\x1b[31m"
        GREEN  = "\x1b[32m"
        YELLOW = "\x1b[33m"
        BOLD   = "\x1b[1m"
        PLAIN  = "\x1b[m"
    else:
        RED    = ""
        GREEN  = ""
        YELLOW = ""
        BOLD   = ""
        PLAIN  = ""
|
||||
|
||||
|
||||
class RawLog(object):
    """Log sink that writes lines through unmodified, one per call."""

    def __init__(self, file):
        self.file = file

    def write(self, s):
        """Write a single line (s must contain no newline) and flush."""
        assert '\n' not in s
        # Flush our own stdio first so log output interleaves sensibly
        # with anything the process printed directly.
        sys.stdout.flush()
        sys.stderr.flush()
        out = self.file
        out.write(s + '\n')
        out.flush()
|
||||
|
||||
|
||||
# Structured control messages embedded in the log stream by meta().
REDO_RE = re.compile(r'@@REDO:([^@]+)@@ (.*)$')


class PrettyLog(object):
    """Log sink that renders @@REDO control messages as human output.

    Lines matching REDO_RE become colored "redo ..." status lines; anything
    else is passed through unchanged.
    """

    def __init__(self, file):
        self.topdir = os.getcwd()
        self.file = file

    def _pretty(self, pid, color, s):
        # Emit one formatted status line, indented by the recursion DEPTH.
        if vars.DEBUG_PIDS:
            redo = '%-6d redo ' % pid
        else:
            redo = 'redo '
        self.file.write(
            ''.join([color, redo, vars.DEPTH,
                     BOLD if color else '', s, PLAIN, '\n']))

    def write(self, s):
        assert '\n' not in s
        sys.stdout.flush()
        sys.stderr.flush()
        g = REDO_RE.match(s)
        if g:
            # NOTE: 'all' shadows the builtin; kept as-is here.
            all = g.group(0)
            # Anything preceding the control marker is passed through raw.
            self.file.write(s[:-len(all)])
            words = g.group(1).split(':')
            text = g.group(2)
            # Marker fields are kind:pid:timestamp (timestamp unused here).
            kind, pid, _ = words[0:3]
            pid = int(pid)
            if kind == 'unchanged':
                self._pretty(pid, '', '%s (unchanged)' % text)
            elif kind == 'check':
                self._pretty(pid, GREEN, '(%s)' % text)
            elif kind == 'do':
                self._pretty(pid, GREEN, text)
            elif kind == 'done':
                # text is "<exitcode> <targetname>"
                rv, name = text.split(' ', 1)
                rv = int(rv)
                if rv:
                    self._pretty(pid, RED, '%s (exit %d)' % (name, rv))
                elif vars.VERBOSE or vars.XTRACE or vars.DEBUG:
                    self._pretty(pid, GREEN, '%s (done)' % name)
                    self.file.write('\n')
            elif kind == 'locked':
                if vars.DEBUG_LOCKS:
                    self._pretty(pid, GREEN, '%s (locked...)' % text)
            elif kind == 'waiting':
                if vars.DEBUG_LOCKS:
                    self._pretty(pid, GREEN, '%s (WAITING)' % text)
            elif kind == 'unlocked':
                if vars.DEBUG_LOCKS:
                    self._pretty(pid, GREEN, '%s (...unlocked!)' % text)
            elif kind == 'error':
                self.file.write(''.join([RED, 'redo: ',
                                         BOLD, text, PLAIN, '\n']))
            elif kind == 'warning':
                self.file.write(''.join([YELLOW, 'redo: ',
                                         BOLD, text, PLAIN, '\n']))
            elif kind == 'debug':
                self._pretty(pid, '', text)
            else:
                assert 0, 'Unexpected @@REDO kind: %r' % kind
        else:
            self.file.write(s + '\n')
        self.file.flush()
|
||||
|
||||
|
||||
# The active log sink (a RawLog or PrettyLog); installed by setup().
_log = None


def setup(file, pretty, color):
    """(Re)configure the module-level log sink.

    Pretty (human-readable, optionally colored) output is used when asked
    for explicitly or when vars.PRETTY is set; otherwise lines pass
    through raw.
    """
    global _log
    if pretty or vars.PRETTY:
        check_tty(file, color=color)
        _log = PrettyLog(file=file)
    else:
        _log = RawLog(file=file)
|
||||
|
||||
|
||||
# FIXME: explicitly initialize in each program, for clarity
setup(file=sys.stderr, pretty=vars.PRETTY, color=vars.COLOR)


def write(s):
    # Route a single log line to whichever sink setup() installed.
    _log.write(s)
|
||||
|
||||
|
||||
def meta(kind, s, pid=None):
    """Emit a structured @@REDO control message through the active log.

    'kind' must not contain ':' or '@' (they delimit the marker), and the
    payload must be a single line.  Defaults to the current process id.
    """
    assert ':' not in kind
    assert '@' not in kind
    assert '\n' not in s
    if pid is None:
        pid = os.getpid()
    marker = '@@REDO:%s:%d:%.4f@@ %s' % (kind, pid, time.time(), s)
    write(marker)
|
||||
|
||||
def err(s):
    """Log an error message."""
    meta('error', s.rstrip())


def warn(s):
    """Log a warning message."""
    meta('warning', s.rstrip())


def _debug_at(level, s):
    # Shared gate for the three debug verbosity levels.
    if vars.DEBUG >= level:
        meta('debug', s.rstrip())


def debug(s):
    """Log a debug message at verbosity level 1."""
    _debug_at(1, s)


def debug2(s):
    """Log a debug message at verbosity level 2."""
    _debug_at(2, s)


def debug3(s):
    """Log a debug message at verbosity level 3."""
    _debug_at(3, s)
|
||||
273
redo/options.py
Normal file
273
redo/options.py
Normal file
|
|
@ -0,0 +1,273 @@
|
|||
# pylint: skip-file
|
||||
#
|
||||
# Copyright 2011 Avery Pennarun and options.py contributors.
|
||||
# All rights reserved.
|
||||
#
|
||||
# (This license applies to this file but not necessarily the other files in
|
||||
# this package.)
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# 1. Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
#
|
||||
# 2. Redistributions in binary form must reproduce the above copyright
|
||||
# notice, this list of conditions and the following disclaimer in
|
||||
# the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY AVERY PENNARUN ``AS IS'' AND ANY
|
||||
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
|
||||
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
"""Command-line options parser.
|
||||
With the help of an options spec string, easily parse command-line options.
|
||||
|
||||
An options spec is made up of two parts, separated by a line with two dashes.
|
||||
The first part is the synopsis of the command and the second one specifies
|
||||
options, one per line.
|
||||
|
||||
Each non-empty line in the synopsis gives a set of options that can be used
|
||||
together.
|
||||
|
||||
Option flags must be at the begining of the line and multiple flags are
|
||||
separated by commas. Usually, options have a short, one character flag, and a
|
||||
longer one, but the short one can be omitted.
|
||||
|
||||
Long option flags are used as the option's key for the OptDict produced when
|
||||
parsing options.
|
||||
|
||||
When the flag definition is ended with an equal sign, the option takes one
|
||||
string as an argument. Otherwise, the option does not take an argument and
|
||||
corresponds to a boolean flag that is true when the option is given on the
|
||||
command line.
|
||||
|
||||
The option's description is found at the right of its flags definition, after
|
||||
one or more spaces. The description ends at the end of the line. If the
|
||||
description contains text enclosed in square brackets, the enclosed text will
|
||||
be used as the option's default value.
|
||||
|
||||
Options can be put in different groups. Options in the same group must be on
|
||||
consecutive lines. Groups are formed by inserting a line that begins with a
|
||||
space. The text on that line will be output after an empty line.
|
||||
"""
|
||||
import sys, os, textwrap, getopt, re, struct
|
||||
|
||||
class OptDict:
    """Dictionary that exposes keys as attributes.

    Keys can be set or accessed with a "no-" or "no_" prefix to negate the
    value.
    """

    def __init__(self):
        self._opts = {}

    @staticmethod
    def _split_negation(key):
        # Return (bare_key, negated?) for a possibly "no-"/"no_" prefixed key.
        if key[:3] in ('no-', 'no_'):
            return key[3:], True
        return key, False

    def __setitem__(self, key, value):
        key, negated = self._split_negation(key)
        self._opts[key] = (not value) if negated else value

    def __getitem__(self, key):
        key, negated = self._split_negation(key)
        value = self._opts[key]
        return (not value) if negated else value

    def __getattr__(self, key):
        return self[key]
|
||||
|
||||
|
||||
def _default_onabort(msg):
    """Default abort handler after usage() output: exit with status 97."""
    sys.exit(97)
|
||||
|
||||
|
||||
def _intify(v):
    """Return int(v) only when it round-trips exactly; otherwise v itself.

    This converts clean numeric strings like '5' to 5 but leaves values
    such as '05' or ' 5' (where str(int(v)) != v) untouched.
    """
    try:
        n = int(v or '')
    except ValueError:
        return v
    return n if str(n) == v else v
|
||||
|
||||
|
||||
def _atoi(v):
    """Parse v as an int like C's atoi(): any failure yields 0."""
    if not v:
        return 0
    try:
        return int(v)
    except ValueError:
        return 0
|
||||
|
||||
|
||||
def _remove_negative_kv(k, v):
    """Strip a 'no-'/'no_' prefix from key k, negating v when present."""
    if k[:3] in ('no-', 'no_'):
        return k[3:], not v
    return k, v
|
||||
|
||||
def _remove_negative_k(k):
    """Return the option key k with any 'no-'/'no_' prefix removed."""
    stripped, _ = _remove_negative_kv(k, None)
    return stripped
|
||||
|
||||
|
||||
def _tty_width():
    """Best-effort terminal width for help formatting; falls back to 70.

    Uses the TIOCGWINSZ ioctl on stderr when available, else the $WIDTH
    environment variable, else 70.
    """
    winsize = struct.pack("HHHH", 0, 0, 0, 0)
    try:
        import fcntl, termios
        winsize = fcntl.ioctl(sys.stderr.fileno(), termios.TIOCGWINSZ, winsize)
    except (IOError, ImportError):
        return _atoi(os.environ.get('WIDTH')) or 70
    (_rows, cols, _xpix, _ypix) = struct.unpack('HHHH', winsize)
    return cols or 70
|
||||
|
||||
|
||||
class Options:
    """Option parser.

    When constructed, a string called an option spec must be given.  It
    specifies the synopsis and option flags and their description.  For more
    information about option specs, see the docstring at the top of this file.

    Two optional arguments specify an alternative parsing function and an
    alternative behaviour on abort (after having output the usage string).

    By default, the parser function is getopt.gnu_getopt, and the abort
    behaviour is to exit the program.
    """
    def __init__(self, optspec, optfunc=getopt.gnu_getopt,
                 onabort=_default_onabort):
        self.optspec = optspec
        self._onabort = onabort
        self.optfunc = optfunc
        # flag name (short or long) -> canonical long name
        self._aliases = {}
        # getopt-style accumulated short/long option definitions
        self._shortopts = 'h?'
        self._longopts = ['help', 'usage']
        # flag name -> whether it takes an argument
        self._hasparms = {}
        # flag name -> default value parsed from [brackets] in the spec
        self._defaults = {}
        # _gen_usage also populates the tables above as a side effect.
        self._usagestr = self._gen_usage()

    def _gen_usage(self):
        # Render the optspec into the help text, while registering every
        # flag into _aliases/_hasparms/_defaults/_shortopts/_longopts.
        out = []
        lines = self.optspec.strip().split('\n')
        lines.reverse()
        first_syn = True
        # Part 1 (up to the '--' separator): synopsis lines.
        while lines:
            l = lines.pop()
            if l == '--': break
            out.append('%s: %s\n' % (first_syn and 'usage' or '   or', l))
            first_syn = False
        out.append('\n')
        last_was_option = False
        # Part 2: one option definition (or group header) per line.
        while lines:
            l = lines.pop()
            if l.startswith(' '):
                # Line starting with a space begins a new option group.
                out.append('%s%s\n' % (last_was_option and '\n' or '',
                                       l.lstrip()))
                last_was_option = False
            elif l:
                (flags, extra) = l.split(' ', 1)
                extra = extra.strip()
                # Trailing '=' on the flags means the option takes a value.
                if flags.endswith('='):
                    flags = flags[:-1]
                    has_parm = 1
                else:
                    has_parm = 0
                # A [bracketed] suffix in the description is the default.
                g = re.search(r'\[([^\]]*)\]$', extra)
                if g:
                    defval = g.group(1)
                else:
                    defval = None
                flagl = flags.split(',')
                flagl_nice = []
                for _f in flagl:
                    f,dvi = _remove_negative_kv(_f, _intify(defval))
                    self._aliases[f] = _remove_negative_k(flagl[0])
                    self._hasparms[f] = has_parm
                    self._defaults[f] = dvi
                    if f == '#':
                        # Special "numeric" option: -0 through -9.
                        self._shortopts += '0123456789'
                        flagl_nice.append('-#')
                    elif len(f) == 1:
                        self._shortopts += f + (has_parm and ':' or '')
                        flagl_nice.append('-' + f)
                    else:
                        # Long options also get an attribute-safe alias
                        # (dashes etc. replaced with underscores).
                        f_nice = re.sub(r'\W', '_', f)
                        self._aliases[f_nice] = _remove_negative_k(flagl[0])
                        self._longopts.append(f + (has_parm and '=' or ''))
                        self._longopts.append('no-' + f)
                        flagl_nice.append('--' + _f)
                flags_nice = ', '.join(flagl_nice)
                if has_parm:
                    flags_nice += ' ...'
                prefix = '    %-20s  ' % flags_nice
                argtext = '\n'.join(textwrap.wrap(extra, width=_tty_width(),
                                                  initial_indent=prefix,
                                                  subsequent_indent=' '*28))
                out.append(argtext + '\n')
                last_was_option = True
            else:
                out.append('\n')
                last_was_option = False
        return ''.join(out).rstrip() + '\n'

    def usage(self, msg=""):
        """Print usage string to stderr and abort."""
        sys.stderr.write(self._usagestr)
        if msg:
            sys.stderr.write(msg)
        e = self._onabort and self._onabort(msg) or None
        if e:
            raise e

    def fatal(self, msg):
        """Print an error message to stderr and abort with usage string."""
        msg = '\nerror: %s\n' % msg
        return self.usage(msg)

    def parse(self, args):
        """Parse a list of arguments and return (options, flags, extra).

        In the returned tuple, "options" is an OptDict with known options,
        "flags" is a list of option flags that were used on the command-line,
        and "extra" is a list of positional arguments.
        """
        try:
            (flags,extra) = self.optfunc(args, self._shortopts, self._longopts)
        except getopt.GetoptError, e:
            self.fatal(e)

        opt = OptDict()

        # Start every known option at its declared default value.
        for k,v in self._defaults.iteritems():
            k = self._aliases[k]
            opt[k] = v

        for (k,v) in flags:
            k = k.lstrip('-')
            if k in ('h', '?', 'help', 'usage'):
                self.usage()
            if k.startswith('no-'):
                k = self._aliases[k[3:]]
                v = 0
            elif (self._aliases.get('#') and
                  k in ('0','1','2','3','4','5','6','7','8','9')):
                v = int(k)  # guaranteed to be exactly one digit
                k = self._aliases['#']
                opt['#'] = v
            else:
                k = self._aliases[k]
                if not self._hasparms[k]:
                    assert(v == '')
                    # Repeating a boolean flag increments it (e.g. -vv).
                    v = (opt._opts.get(k) or 0) + 1
                else:
                    v = _intify(v)
            opt[k] = v
        # Mirror every alias so all spellings are readable on the result.
        for (f1,f2) in self._aliases.iteritems():
            opt[f1] = opt._opts.get(f2)
        return (opt,flags,extra)
|
||||
49
redo/paths.py
Normal file
49
redo/paths.py
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
import os
|
||||
import vars
|
||||
from logs import debug2
|
||||
|
||||
|
||||
def _default_do_files(filename):
    """Yield (dofile, basename, ext) candidates for default*.do rules.

    For 'a.b.c' this produces default.b.c.do, default.c.do and default.do,
    in that order, each paired with the matching basename and extension.
    """
    parts = filename.split('.')
    for split_at in range(1, len(parts) + 1):
        basename = '.'.join(parts[:split_at])
        ext = '.'.join(parts[split_at:])
        if ext:
            ext = '.' + ext
        yield ("default%s.do" % ext), basename, ext
|
||||
|
||||
|
||||
def possible_do_files(t):
    """Yield (dodir, dofile, basedir, basename, ext) candidates for target t.

    The first candidate is the specific "<name>.do" beside the target;
    after that, default*.do files are tried in the target's directory and
    then each parent directory in turn.
    """
    dirname, filename = os.path.split(t)
    yield (os.path.join(vars.BASE, dirname), "%s.do" % filename,
           '', filename, '')

    # It's important to try every possibility in a directory before resorting
    # to a parent directory.  Think about nested projects: we don't want
    # ../../default.o.do to take precedence over ../default.do, because
    # the former one might just be an artifact of someone embedding my project
    # into theirs as a subdir.  When they do, my rules should still be used
    # for building my project in *all* cases.
    t = os.path.normpath(os.path.join(vars.BASE, t))
    dirname, filename = os.path.split(t)
    dir_parts = dirname.split('/')
    # t is absolute, so dir_parts[0] is always ''; there is no need to
    # walk all the way down to depth 0.
    for depth in range(len(dir_parts), 0, -1):
        basedir = '/'.join(dir_parts[:depth])
        subdir = '/'.join(dir_parts[depth:])
        for dofile, basename, ext in _default_do_files(filename):
            yield (basedir, dofile,
                   subdir, os.path.join(subdir, basename), ext)
|
||||
|
||||
|
||||
def find_do_file(f):
    """Locate the .do file that builds f, recording dependencies as we go.

    Returns (dodir, dofile, basedir, basename, ext) for the first existing
    candidate, after registering it as a modification ('m') dependency.
    Every nonexistent candidate tried first is registered as a creation
    ('c') dependency, so creating it later triggers a rebuild.  Returns a
    tuple of Nones when no .do file exists at all.
    """
    for dodir, dofile, basedir, basename, ext in possible_do_files(f.name):
        candidate = os.path.join(dodir, dofile)
        debug2('%s: %s:%s ?\n' % (f.name, dodir, dofile))
        if os.path.exists(candidate):
            f.add_dep('m', candidate)
            return dodir, dofile, basedir, basename, ext
        f.add_dep('c', candidate)
    return None, None, None, None, None
|
||||
7
redo/python.do
Normal file
7
redo/python.do
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
# Build ./python: a tiny /bin/sh wrapper that execs whichever python
# interpreter the 'whichpython' target detected for this system.
redo-ifchange whichpython
read py <whichpython
# $3 is redo's temporary output file for this target.
cat >$3 <<-EOF
#!/bin/sh
exec $py "\$@"
EOF
# The wrapper is meant to be run directly, so make it executable.
chmod a+x $3
|
||||
66
redo/sh.do
Normal file
66
redo/sh.do
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
# Build ./sh: a symlink to the most POSIX-strict shell on this system that
# passes t/shelltest.od.  All progress output goes to stderr.
exec >&2
redo-ifchange ../t/shelltest.od

rm -rf $1.new
mkdir $1.new

GOOD=
WARN=

# Note: list low-functionality, maximally POSIX-like shells before more
# powerful ones.  We want weaker shells to take precedence, as long as they
# pass the tests, because weaker shells are more likely to point out when you
# use some non-portable feature.
for sh in dash /usr/xpg4/bin/sh ash posh mksh ksh ksh88 ksh93 pdksh \
	zsh bash busybox /bin/sh; do
	printf "%-30s" "Testing $sh..."
	FOUND=`which $sh 2>/dev/null` || { echo "missing"; continue; }

	# It's important for the file to actually be named 'sh'.  Some
	# shells (like bash and zsh) only go into POSIX-compatible mode if
	# they have that name.  If they're not in POSIX-compatible mode,
	# they'll fail the test.
	rm -f $1.new/sh
	ln -s $FOUND $1.new/sh
	SH=$PWD/$1.new/sh

	# Run the shell test suite; its exit code classifies the shell.
	set +e
	( cd ../t && $SH shelltest.od ) >shelltest.tmp 2>&1
	RV=$?
	set -e

	# Collect "warning:"/"failed:" messages; remember the last line in
	# case the shell crashed outright.
	msgs=
	crash=
	while read line; do
		#echo "line: '$line'" >&2
		stripw=${line#warning: }
		stripf=${line#failed: }
		crash=$line
		[ "$line" = "$stripw" ] || msgs="$msgs W$stripw"
		[ "$line" = "$stripf" ] || msgs="$msgs F$stripf"
	done <shelltest.tmp
	rm -f shelltest.tmp
	msgs=${msgs# }
	crash=${crash##*:}
	crash=${crash# }

	# 40 = fully passing, 41 = failed, 42 = passed with warnings.
	case $RV in
		40) echo "ok $msgs"; [ -n "$GOOD" ] || GOOD=$FOUND ;;
		41) echo "failed $msgs" ;;
		42) echo "warnings $msgs"; [ -n "$WARN" ] || WARN=$FOUND ;;
		*) echo "crash $crash" ;;
	esac
done

rm -rf $1.new $3

# Prefer a fully-passing shell; fall back to a warnings-only one.
if [ -n "$GOOD" ]; then
	echo "Selected perfect shell: $GOOD"
	ln -s $GOOD $3
elif [ -n "$WARN" ]; then
	echo "Selected mostly good shell: $WARN"
	ln -s $WARN $3
else
	echo "No good shells found! Maybe install dash, bash, or zsh."
	exit 13
fi
|
||||
501
redo/state.py
Normal file
501
redo/state.py
Normal file
|
|
@ -0,0 +1,501 @@
|
|||
import sys, os, errno, stat, fcntl, sqlite3
|
||||
import vars
|
||||
from helpers import unlink, close_on_exec, join
|
||||
from logs import warn, debug2, debug3
|
||||
|
||||
# When the module is imported, change the process title.
# We do it here because this module is imported by all the scripts.
try:
    from setproctitle import setproctitle
except ImportError:
    # setproctitle is optional; without it, ps just shows the interpreter.
    pass
else:
    cmdline = sys.argv[:]
    # Show just the program name (e.g. "redo") instead of its full path.
    cmdline[0] = os.path.splitext(os.path.basename(cmdline[0]))[0]
    setproctitle(" ".join(cmdline))
|
||||
|
||||
# Bump when the sqlite schema changes incompatibly; db() refuses to open
# databases with a different version.
SCHEMA_VER = 2
# sqlite busy timeout, in seconds.
TIMEOUT = 60

ALWAYS = '//ALWAYS'  # an invalid filename that is always marked as dirty
STAMP_DIR = 'dir'    # the stamp of a directory; mtime is unhelpful
STAMP_MISSING = '0'  # the stamp of a nonexistent file

LOG_LOCK_MAGIC = 0x10000000  # fid offset for "log locks"
|
||||
|
||||
|
||||
class CyclicDependencyError(Exception):
    """Raised when a target (transitively) depends on itself."""
    pass
|
||||
|
||||
|
||||
def _connect(dbfile):
    """Open the sqlite database at dbfile, tuned for redo's access pattern."""
    conn = sqlite3.connect(dbfile, timeout=TIMEOUT)
    # Speed over durability: redo can always rebuild after a crash.
    conn.execute("pragma synchronous = off")
    # WAL lets concurrent redo processes read while one writes.
    conn.execute("pragma journal_mode = WAL")
    conn.text_factory = str
    return conn
|
||||
|
||||
|
||||
# We need to keep a process-wide fd open for all access to the lock file.
# Because POSIX lock files are insane, if you close *one* fd pointing
# at a given inode, it will immediately release *all* locks on that inode from
# your pid, even if those locks are on a different fd.  This is literally
# never what you want.  To avoid the problem, always use just a single fd.
_lockfile = None


# Singleton connection returned by db().
_db = None
def db():
    """Return the process-wide sqlite connection, creating it on first use.

    On first call this also creates the .redo dir and lock file if needed,
    creates or validates the schema, and allocates vars.RUNID for this run.
    Exits the program if an incompatible schema version is found.
    """
    global _db, _lockfile
    if _db:
        return _db

    dbdir = '%s/.redo' % vars.BASE
    dbfile = '%s/db.sqlite3' % dbdir
    try:
        os.mkdir(dbdir)
    except OSError, e:
        if e.errno == errno.EEXIST:
            pass  # if it exists, that's okay
        else:
            raise

    # See the note on _lockfile above: exactly one fd, kept open forever.
    _lockfile = os.open(os.path.join(vars.BASE, '.redo/locks'),
                        os.O_RDWR | os.O_CREAT, 0666)
    close_on_exec(_lockfile, True)

    must_create = not os.path.exists(dbfile)
    if not must_create:
        _db = _connect(dbfile)
        try:
            row = _db.cursor().execute("select version from Schema").fetchone()
        except sqlite3.OperationalError:
            row = None
        ver = row and row[0] or None
        if ver != SCHEMA_VER:
            # Don't use err() here because this might happen before
            # redo-log spawns.
            sys.stderr.write(
                'redo: %s: found v%s (expected v%s)\n'
                % (dbfile, ver, SCHEMA_VER))
            sys.stderr.write('redo: manually delete .redo dir to start over.\n')
            sys.exit(1)
    if must_create:
        unlink(dbfile)
        _db = _connect(dbfile)
        _db.execute("create table Schema "
                    "    (version int)")
        _db.execute("create table Runid "
                    "    (id integer primary key autoincrement)")
        _db.execute("create table Files "
                    "    (name not null primary key, "
                    "     is_generated int, "
                    "     is_override int, "
                    "     checked_runid int, "
                    "     changed_runid int, "
                    "     failed_runid int, "
                    "     stamp, "
                    "     csum)")
        _db.execute("create table Deps "
                    "    (target int, "
                    "     source int, "
                    "     mode not null, "
                    "     delete_me int, "
                    "     primary key (target,source))")
        _db.execute("insert into Schema (version) values (?)", [SCHEMA_VER])
        # eat the '0' runid and File id.
        # Because of the cheesy way t/flush-cache is implemented, leave a
        # lot of runids available before the "first" one so that we
        # can adjust cached values to be before the first value.
        _db.execute("insert into Runid values (1000000000)")
        _db.execute("insert into Files (name) values (?)", [ALWAYS])

    if not vars.RUNID:
        # Allocate a fresh run id and publish it to child redo processes.
        _db.execute("insert into Runid values "
                    "     ((select max(id)+1 from Runid))")
        vars.RUNID = _db.execute("select last_insert_rowid()").fetchone()[0]
        os.environ['REDO_RUNID'] = str(vars.RUNID)

    _db.commit()
    return _db
|
||||
|
||||
|
||||
def init():
    # Force the database (and .redo dir, lockfile, RUNID) to exist now.
    db()
|
||||
|
||||
|
||||
# Count of uncommitted writes; lets commit()/rollback() skip no-op calls.
_wrote = 0
def _write(q, l):
    # All database mutations funnel through here.  Writes become no-ops
    # once the .redo dir has vanished (see check_sane/_insane).
    if _insane:
        return
    global _wrote
    _wrote += 1
    db().execute(q, l)
|
||||
|
||||
|
||||
def commit():
    """Commit any pending database writes (no-op when nothing is pending)."""
    if _insane:
        return
    global _wrote
    if _wrote:
        db().commit()
        _wrote = 0
|
||||
|
||||
|
||||
def rollback():
    """Discard any pending database writes (no-op when nothing is pending)."""
    if _insane:
        return
    global _wrote
    if _wrote:
        db().rollback()
        _wrote = 0
|
||||
|
||||
|
||||
def is_flushed():
    # True when there are no uncommitted database writes outstanding.
    return not _wrote
|
||||
|
||||
|
||||
# Sticky flag: set once the .redo dir disappears; disables all db writes.
_insane = None
def check_sane():
    """Return False (permanently) once the .redo directory has been removed."""
    global _insane
    if not _insane:
        _insane = not os.path.exists('%s/.redo' % vars.BASE)
    return not _insane
|
||||
|
||||
|
||||
# Cached os.getcwd(); assumed stable for the process lifetime.
_cwd = None
def relpath(t, base):
    """Return path t expressed relative to directory base.

    t is first made absolute against the (cached) current directory; base
    is normalized but assumed absolute.
    """
    global _cwd
    if not _cwd:
        _cwd = os.getcwd()
    t = os.path.normpath(os.path.join(_cwd, t))
    base = os.path.normpath(base)
    tparts = t.split('/')
    bparts = base.split('/')
    # Strip the common leading components from both lists.
    # NOTE(review): the lists are popped while zip() iterates; this relies
    # on python2's eager zip() — confirm before porting to python3, where
    # lazy zip would skip elements.
    for tp, bp in zip(tparts, bparts):
        if tp != bp:
            break
        tparts.pop(0)
        bparts.pop(0)
    # Each remaining component of base becomes one '..' step.
    while bparts:
        tparts.insert(0, '..')
        bparts.pop(0)
    return join('/', tparts)
|
||||
|
||||
|
||||
# Return a path for t, if cwd were the dirname of vars.TARGET.
# This is tricky!  STARTDIR+PWD is the directory for the *dofile*, when
# the dofile was started.  However, inside the dofile, someone may have done
# a chdir to anywhere else.  vars.TARGET is relative to the dofile path, so
# we have to first figure out where the dofile was, then find TARGET relative
# to that, then find t relative to that.
#
# FIXME: find some cleaner terminology for all these different paths.
def target_relpath(t):
    """Return t rewritten relative to the directory containing vars.TARGET."""
    dofile_dir = os.path.abspath(os.path.join(vars.STARTDIR, vars.PWD))
    target_dir = os.path.abspath(
        os.path.dirname(os.path.join(dofile_dir, vars.TARGET)))
    return relpath(t, target_dir)
|
||||
|
||||
|
||||
def detect_override(stamp1, stamp2):
    """Determine if two stamps differ in a way that means manual override.

    When two stamps differ at all, that means the source is dirty and so we
    need to rebuild.  If they differ in mtime or size, then someone has surely
    edited the file, and we don't want to trample their changes.

    But if the only difference is something else (like ownership, st_mode,
    etc) then that might be a false positive; it's annoying to mark as
    overridden in that case, so we return False.  (It's still dirty though!)
    """
    if stamp1 == stamp2:
        return False
    # Only the first two '-'-separated fields (mtime and size) count as
    # evidence of a human edit; anything past that is incidental.
    return stamp1.split('-', 2)[0:2] != stamp2.split('-', 2)[0:2]
|
||||
|
||||
|
||||
def warn_override(name):
    # Tell the user a hand-edited target is being left alone, not rebuilt.
    warn('%s - you modified it; skipping\n' % name)
|
||||
|
||||
|
||||
# Column order must match the unpacking in File._init_from_cols.
_file_cols = ['rowid', 'name', 'is_generated', 'is_override',
              'checked_runid', 'changed_runid', 'failed_runid',
              'stamp', 'csum']
|
||||
class File(object):
|
||||
# use this mostly to avoid accidentally assigning to typos
|
||||
__slots__ = ['id'] + _file_cols[1:]
|
||||
|
||||
# These warnings are a result of the weird way this class is
|
||||
# initialized, which we should fix, and then re-enable warning.
|
||||
# pylint: disable=attribute-defined-outside-init
|
||||
def _init_from_idname(self, id, name, allow_add):
|
||||
q = ('select %s from Files ' % join(', ', _file_cols))
|
||||
if id != None:
|
||||
q += 'where rowid=?'
|
||||
l = [id]
|
||||
elif name != None:
|
||||
name = (name == ALWAYS) and ALWAYS or relpath(name, vars.BASE)
|
||||
q += 'where name=?'
|
||||
l = [name]
|
||||
else:
|
||||
raise Exception('name or id must be set')
|
||||
d = db()
|
||||
row = d.execute(q, l).fetchone()
|
||||
if not row:
|
||||
if not name:
|
||||
raise KeyError('No file with id=%r name=%r' % (id, name))
|
||||
elif not allow_add:
|
||||
raise KeyError('No file with name=%r' % (name,))
|
||||
try:
|
||||
_write('insert into Files (name) values (?)', [name])
|
||||
except sqlite3.IntegrityError:
|
||||
# some parallel redo probably added it at the same time; no
|
||||
# big deal.
|
||||
pass
|
||||
row = d.execute(q, l).fetchone()
|
||||
assert row
|
||||
return self._init_from_cols(row)
|
||||
|
||||
def _init_from_cols(self, cols):
|
||||
(self.id, self.name, self.is_generated, self.is_override,
|
||||
self.checked_runid, self.changed_runid, self.failed_runid,
|
||||
self.stamp, self.csum) = cols
|
||||
if self.name == ALWAYS and self.changed_runid < vars.RUNID:
|
||||
self.changed_runid = vars.RUNID
|
||||
|
||||
def __init__(self, id=None, name=None, cols=None, allow_add=True):
|
||||
if cols:
|
||||
self._init_from_cols(cols)
|
||||
else:
|
||||
self._init_from_idname(id, name, allow_add=allow_add)
|
||||
|
||||
def __repr__(self):
|
||||
return "File(%r)" % (self.nicename(),)
|
||||
|
||||
def refresh(self):
|
||||
self._init_from_idname(self.id, None, allow_add=False)
|
||||
|
||||
def save(self):
|
||||
cols = join(', ', ['%s=?'%i for i in _file_cols[2:]])
|
||||
_write('update Files set '
|
||||
' %s '
|
||||
' where rowid=?' % cols,
|
||||
[self.is_generated, self.is_override,
|
||||
self.checked_runid, self.changed_runid, self.failed_runid,
|
||||
self.stamp, self.csum,
|
||||
self.id])
|
||||
|
||||
def set_checked(self):
|
||||
self.checked_runid = vars.RUNID
|
||||
|
||||
def set_checked_save(self):
|
||||
self.set_checked()
|
||||
self.save()
|
||||
|
||||
def set_changed(self):
|
||||
debug2('BUILT: %r (%r)\n' % (self.name, self.stamp))
|
||||
self.changed_runid = vars.RUNID
|
||||
self.failed_runid = None
|
||||
self.is_override = False
|
||||
|
||||
def set_failed(self):
|
||||
debug2('FAILED: %r\n' % self.name)
|
||||
self.update_stamp()
|
||||
self.failed_runid = vars.RUNID
|
||||
if self.stamp != STAMP_MISSING:
|
||||
# if we failed and the target file still exists,
|
||||
# then we're generated.
|
||||
self.is_generated = True
|
||||
else:
|
||||
# if the target file now does *not* exist, then go back to
|
||||
# treating this as a source file. Since it doesn't exist,
|
||||
# if someone tries to rebuild it immediately, it'll go
|
||||
# back to being a target. But if the file is manually
|
||||
# created before that, we don't need a "manual override"
|
||||
# warning.
|
||||
self.is_generated = False
|
||||
|
||||
def set_static(self):
|
||||
self.update_stamp(must_exist=True)
|
||||
self.failed_runid = None
|
||||
self.is_override = False
|
||||
self.is_generated = False
|
||||
|
||||
def set_override(self):
|
||||
self.update_stamp()
|
||||
self.failed_runid = None
|
||||
self.is_override = True
|
||||
|
||||
def update_stamp(self, must_exist=False):
|
||||
newstamp = self.read_stamp()
|
||||
if must_exist and newstamp == STAMP_MISSING:
|
||||
raise Exception("%r does not exist" % self.name)
|
||||
if newstamp != self.stamp:
|
||||
debug2("STAMP: %s: %r -> %r\n" % (self.name, self.stamp, newstamp))
|
||||
self.stamp = newstamp
|
||||
self.set_changed()
|
||||
|
||||
def is_source(self):
|
||||
if self.name.startswith('//'):
|
||||
return False # special name, ignore
|
||||
newstamp = self.read_stamp()
|
||||
if (self.is_generated and
|
||||
(not self.is_failed() or newstamp != STAMP_MISSING) and
|
||||
not self.is_override and
|
||||
self.stamp == newstamp):
|
||||
# target is as we left it
|
||||
return False
|
||||
if ((not self.is_generated or self.stamp != newstamp) and
|
||||
newstamp == STAMP_MISSING):
|
||||
# target has gone missing after the last build.
|
||||
# It's not usefully a source *or* a target.
|
||||
return False
|
||||
return True
|
||||
|
||||
def is_target(self):
|
||||
if not self.is_generated:
|
||||
return False
|
||||
if self.is_source():
|
||||
return False
|
||||
return True
|
||||
|
||||
def is_checked(self):
|
||||
return self.checked_runid and self.checked_runid >= vars.RUNID
|
||||
|
||||
def is_changed(self):
|
||||
return self.changed_runid and self.changed_runid >= vars.RUNID
|
||||
|
||||
def is_failed(self):
    """True if this file's build failed during the current run."""
    rid = self.failed_runid
    return rid and rid >= vars.RUNID
|
||||
|
||||
def deps(self):
    """Yield (mode, File) for each recorded dependency of this target.

    mode is 'c' (a file redo-ifcreate'd) or 'm' (redo-ifchange'd).
    Overridden and non-generated files yield nothing: their recorded
    deps are irrelevant.
    """
    if self.is_override or not self.is_generated:
        return
    q = ('select Deps.mode, Deps.source, %s '
         '  from Files '
         '  join Deps on Files.rowid = Deps.source '
         '  where target=?' % join(', ', _file_cols[1:]))
    for row in db().execute(q, [self.id]).fetchall():
        mode = row[0]
        cols = row[1:]
        assert mode in ('c', 'm')
        yield mode, File(cols=cols)
|
||||
|
||||
def zap_deps1(self):
    """Mark all this target's deps for deletion (phase 1; see zap_deps2).

    Deps that get re-added during the build lose the mark; zap_deps2
    then deletes whichever ones are still marked.
    """
    debug2('zap-deps1: %r\n' % self.name)
    _write('update Deps set delete_me=? where target=?', [True, self.id])
|
||||
|
||||
def zap_deps2(self):
    """Delete any deps still marked from zap_deps1 (ie. not re-added)."""
    debug2('zap-deps2: %r\n' % self.name)
    _write('delete from Deps where target=? and delete_me=1', [self.id])
|
||||
|
||||
def add_dep(self, mode, dep):
    """Record that this target depends on the file named 'dep'.

    mode is 'c' (ifcreate) or 'm' (ifchange).  Inserting with
    delete_me=False also clears any pending zap_deps1 mark.
    """
    src = File(name=dep)
    debug3('add-dep: "%s" < %s "%s"\n' % (self.name, mode, src.name))
    # a file must never depend on itself
    assert self.id != src.id
    _write("insert or replace into Deps "
           "  (target, mode, source, delete_me) values (?,?,?,?)",
           [self.id, mode, src.id, False])
|
||||
|
||||
def _read_stamp_st(self, statfunc):
    """Stat this file with statfunc; return (is_symlink, stamp).

    statfunc is os.stat or os.lstat.  The stamp is STAMP_MISSING if
    the file doesn't exist, STAMP_DIR for a directory, or a string
    built from the file's metadata.  The exact stamp format matters:
    stamps are persisted in the state db and compared across runs.
    """
    try:
        st = statfunc(os.path.join(vars.BASE, self.name))
    except OSError:
        return False, STAMP_MISSING
    if stat.S_ISDIR(st.st_mode):
        # directories change too much; detect only existence.
        return False, STAMP_DIR
    else:
        # a "unique identifier" stamp for a regular file
        return (
            stat.S_ISLNK(st.st_mode),
            '-'.join(str(s) for s in
                     ('%.6f' % st.st_mtime, st.st_size, st.st_ino,
                      st.st_mode, st.st_uid, st.st_gid))
        )
|
||||
|
||||
def read_stamp(self):
    """Return the current on-disk stamp of this file.

    If the file is a symlink, we care about the link object itself
    *and* the target of the link: if either changes, we're considered
    dirty.  (detect_override(), by contrast, cares only about the link
    itself, which is why _read_stamp_st reports them separately.)
    """
    is_link, link_stamp = self._read_stamp_st(os.lstat)
    if not is_link:
        return link_stamp
    _, target_stamp = self._read_stamp_st(os.stat)
    return link_stamp + '+' + target_stamp
|
||||
|
||||
def nicename(self):
    """Return this file's path relative to where redo was started."""
    fullpath = os.path.join(vars.BASE, self.name)
    return relpath(fullpath, vars.STARTDIR)
|
||||
|
||||
|
||||
def files():
    """Yield a File for every file known to the database, sorted by name."""
    q = ('select %s from Files order by name' % join(', ', _file_cols))
    for cols in db().execute(q).fetchall():
        yield File(cols=cols)
|
||||
|
||||
|
||||
def logname(fid):
    """Given the id of a File, return the filename of its build log."""
    logdir = os.path.join(vars.BASE, '.redo')
    return os.path.join(logdir, 'log.%d' % fid)
|
||||
|
||||
|
||||
# FIXME: I really want to use fcntl F_SETLK, F_SETLKW, etc here. But python
|
||||
# doesn't do the lockdata structure in a portable way, so we have to use
|
||||
# fcntl.lockf() instead. Usually this is just a wrapper for fcntl, so it's
|
||||
# ok, but it doesn't have F_GETLK, so we can't report which pid owns the lock.
|
||||
# That makes debugging a bit harder. When we someday port to C, we can do that.
|
||||
_locks = {}
|
||||
class Lock(object):
    """An advisory lock on a single File, via fcntl byte-range locks.

    Each File id maps to one byte of the shared lock file, so locking
    byte `fid` locks exactly one target.  Python 2's fcntl.lockf()
    doesn't expose F_GETLK, so we can't report *who* owns a lock.
    """
    def __init__(self, fid):
        # whether this process currently owns the lock
        self.owned = False
        self.fid = fid
        assert _lockfile >= 0
        # only one Lock object per fid may exist in this process
        assert _locks.get(fid, 0) == 0
        _locks[fid] = 1

    def __del__(self):
        _locks[self.fid] = 0
        if self.owned:
            self.unlock()

    def check(self):
        """Raise CyclicDependencyError if a parent redo holds this lock."""
        assert not self.owned
        if str(self.fid) in vars.get_locks():
            # Lock already held by parent: cyclic dependence
            raise CyclicDependencyError()

    def trylock(self):
        """Take the lock if it's free; never block.

        Returns true if we now own the lock.
        """
        self.check()
        assert not self.owned
        try:
            fcntl.lockf(_lockfile, fcntl.LOCK_EX|fcntl.LOCK_NB, 1, self.fid)
        except IOError, e:
            if e.errno in (errno.EAGAIN, errno.EACCES):
                pass  # someone else has it locked
            else:
                raise
        else:
            self.owned = True
        return self.owned

    def waitlock(self, shared=False):
        """Block until we can take the lock (shared or exclusive)."""
        self.check()
        assert not self.owned
        fcntl.lockf(
            _lockfile,
            fcntl.LOCK_SH if shared else fcntl.LOCK_EX,
            1, self.fid)
        self.owned = True

    def unlock(self):
        """Release the lock; it's an error if we don't own it."""
        if not self.owned:
            raise Exception("can't unlock %r - we don't own it"
                            % self.fid)
        fcntl.lockf(_lockfile, fcntl.LOCK_UN, 1, self.fid)
        self.owned = False
|
||||
46
redo/vars.py
Normal file
46
redo/vars.py
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
import os
|
||||
from atoi import atoi
|
||||
|
||||
# Every redo command except the toplevel one inherits its settings from
# the parent redo via environment variables; refuse to run otherwise.
if not os.environ.get('REDO'):
    import sys
    sys.stderr.write('%s: error: must be run from inside a .do\n'
                     % sys.argv[0])
    sys.exit(100)

# Settings inherited from the parent redo process via the environment.
PWD = os.environ.get('REDO_PWD', '')
TARGET = os.environ.get('REDO_TARGET', '')
DEPTH = os.environ.get('REDO_DEPTH', '')
DEBUG = atoi(os.environ.get('REDO_DEBUG', ''))
DEBUG_LOCKS = os.environ.get('REDO_DEBUG_LOCKS', '') and 1 or 0
DEBUG_PIDS = os.environ.get('REDO_DEBUG_PIDS', '') and 1 or 0
VERBOSE = os.environ.get('REDO_VERBOSE', '') and 1 or 0
XTRACE = os.environ.get('REDO_XTRACE', '') and 1 or 0
KEEP_GOING = os.environ.get('REDO_KEEP_GOING', '') and 1 or 0
LOG = atoi(os.environ.get('REDO_LOG', '1')) # defaults on
LOG_INODE = os.environ.get('REDO_LOG_INODE', '')
COLOR = atoi(os.environ.get('REDO_COLOR', '1')) # defaults on
# subprocesses mustn't pretty-print if a parent is running redo-log
PRETTY = (not LOG) and atoi(os.environ.get('REDO_PRETTY', '1'))
SHUFFLE = os.environ.get('REDO_SHUFFLE', '') and 1 or 0
STARTDIR = os.environ.get('REDO_STARTDIR', '')
RUNID = atoi(os.environ.get('REDO_RUNID')) or None
BASE = os.environ['REDO_BASE']
# normalize: strip any trailing slashes from the base dir
while BASE and BASE.endswith('/'):
    BASE = BASE[:-1]

UNLOCKED = os.environ.get('REDO_UNLOCKED', '') and 1 or 0
os.environ['REDO_UNLOCKED'] = '' # not inheritable by subprocesses

NO_OOB = os.environ.get('REDO_NO_OOB', '') and 1 or 0
os.environ['REDO_NO_OOB'] = '' # not inheritable by subprocesses
|
||||
|
||||
|
||||
def get_locks():
    """Get the list of held locks."""
    held = os.environ.get('REDO_LOCKS', '')
    return held.split(':')

def add_lock(name):
    """Add a lock to the list of held locks."""
    # use a set so adding an already-held lock is a no-op
    held = set(get_locks())
    held.add(name)
    os.environ['REDO_LOCKS'] = ':'.join(held)
|
||||
53
redo/vars_init.py
Normal file
53
redo/vars_init.py
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
import sys, os
|
||||
|
||||
|
||||
is_toplevel = False
|
||||
|
||||
|
||||
def init_no_state():
    """Fill in placeholder redo env vars without touching the state db.

    For commands that only need the env vars to exist (unlike init(),
    this doesn't call state.init()).
    """
    global is_toplevel
    env = os.environ
    if not env.get('REDO'):
        env['REDO'] = 'NOT_DEFINED'
        is_toplevel = True
    if not env.get('REDO_BASE'):
        env['REDO_BASE'] = 'NOT_DEFINED'
|
||||
|
||||
|
||||
def init(targets):
    """Initialize the redo environment for the given target list.

    On the toplevel invocation (no inherited REDO env var), this sets
    up PATH, REDO, REDO_BASE, REDO_STARTDIR and the lock list, then
    opens the state database.  Child invocations inherit all of that
    through the environment.

    Note: may append 'all' to the (mutable) targets list.
    """
    global is_toplevel
    if not os.environ.get('REDO'):
        # toplevel call to redo
        is_toplevel = True
        if len(targets) == 0:
            targets.append('all')
        # Prepend likely locations of the redo support programs to PATH
        # so .do scripts can find them, whether we're installed or
        # running straight out of the source tree.
        exenames = [os.path.abspath(sys.argv[0]),
                    os.path.realpath(sys.argv[0])]
        dirnames = [os.path.dirname(p) for p in exenames]
        trynames = ([os.path.abspath(p+'/../lib/redo') for p in dirnames] +
                    [p+'/../redo' for p in dirnames] +
                    dirnames)
        # drop duplicate candidate dirs, preserving order
        seen = {}
        dirs = []
        for k in trynames:
            if not seen.get(k):
                seen[k] = 1
                dirs.append(k)
        os.environ['PATH'] = ':'.join(dirs) + ':' + os.environ['PATH']
        os.environ['REDO'] = os.path.abspath(sys.argv[0])

    if not os.environ.get('REDO_BASE'):
        # Pick the project base dir: walk up from the common prefix of
        # all targets (and cwd) looking for an existing .redo dir; fall
        # back to the common prefix itself.
        base = os.path.commonprefix([os.path.abspath(os.path.dirname(t))
                                     for t in targets] + [os.getcwd()])
        bsplit = base.split('/')
        for i in range(len(bsplit)-1, 0, -1):
            newbase = '/'.join(bsplit[:i])
            if os.path.exists(newbase + '/.redo'):
                base = newbase
                break
        os.environ['REDO_BASE'] = base
        os.environ['REDO_STARTDIR'] = os.getcwd()

    # must import state only after REDO_BASE is known
    import state
    state.init()

    # make sure the lock list exists so children can extend it
    os.environ['REDO_LOCKS'] = os.environ.get('REDO_LOCKS', '')
|
||||
1
redo/version/.gitattributes
vendored
Normal file
1
redo/version/.gitattributes
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
gitvars.pre export-subst
|
||||
3
redo/version/.gitignore
vendored
Normal file
3
redo/version/.gitignore
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
/vars
|
||||
/gitvars
|
||||
/_version.py
|
||||
1
redo/version/__init__.py
Normal file
1
redo/version/__init__.py
Normal file
|
|
@ -0,0 +1 @@
|
|||
from _version import COMMIT, TAG, DATE
|
||||
3
redo/version/_version.py.do
Normal file
3
redo/version/_version.py.do
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
# _version.py is just a copy of the generated 'vars' file, whose
# shell-style NAME='value' lines are also valid python assignments.
redo-ifchange vars
cat vars
|
||||
|
||||
1
redo/version/all.do
Normal file
1
redo/version/all.do
Normal file
|
|
@ -0,0 +1 @@
|
|||
# default target: build the version vars and the python version module
redo-ifchange vars _version.py
|
||||
3
redo/version/clean.do
Normal file
3
redo/version/clean.do
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
# remove generated files and editor backup droppings
rm -f *~ .*~ *.pyc _version.py vars gitvars
|
||||
|
||||
|
||||
28
redo/version/gitvars.do
Normal file
28
redo/version/gitvars.do
Normal file
|
|
@ -0,0 +1,28 @@
|
|||
redo-ifchange gitvars.pre prodname

read PROD <prodname
exec >$3

# Fix each line from gitvars.pre where git may or may not have already
# substituted the variables. If someone generated a tarball with 'git archive',
# then the data will have been substituted already. If we're in a checkout of
# the git repo, then it won't, but we can just ask git to do the substitutions
# right now.
while read line; do
	# Lines *may* be of the form: $Format: ... $
	x=${line#\$Format:}  # remove prefix
	if [ "$x" != "$line" ]; then
		# git didn't substitute it
		redo-always  # get this from the git repo
		x=${x%\$}  # remove trailing $
		if [ "$x" = "%d" ]; then
			# %d (ref names) needs 'git describe', not 'git log'
			tag=$(git describe --match="$PROD-*")
			x="(tag: $tag)"
		else
			x=$(git log -1 --pretty=format:"$x")
		fi
	fi
	echo "$x"
done <gitvars.pre

# stamp on content, so downstream targets only rebuild when the
# substituted values actually change
redo-stamp <$3
|
||||
3
redo/version/gitvars.pre
Normal file
3
redo/version/gitvars.pre
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
$Format:%H$
|
||||
$Format:%d$
|
||||
$Format:%ci$
|
||||
1
redo/version/prodname
Normal file
1
redo/version/prodname
Normal file
|
|
@ -0,0 +1 @@
|
|||
redo
|
||||
40
redo/version/vars.do
Normal file
40
redo/version/vars.do
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
redo-ifchange gitvars prodname

read PROD <prodname

# gitvars contains three lines: commit hash, ref name list, commit date.
exec <gitvars
read COMMIT
read NAMES
read DATE

# the list of names is of the form:
#   (x,y,tag: $PROD-####,tag: $PROD-####,a,b)
# The entries we want are the ones starting with "tag: $PROD-" since those
# refer to the right actual git tags.
names_to_tag()
{
	x=${1#\(}  # strip leading paren
	x=${x%\)}  # strip trailing paren
	cur=
	while [ "$cur" != "$x" ]; do
		x=${x# }  # strip leading space
		x=${x#tag: }  # strip "tag: " marker, if any
		cur=${x%%,*}  # first remaining comma-separated entry
		tagpost=${cur#$PROD-}
		if [ "$cur" != "$tagpost" ]; then
			# entry started with "$PROD-": that's our tag
			echo "$tagpost"
			return 0
		fi
		x=${x#*,}  # advance to the next entry
	done
	# no matching tag: fall back to a 7-char abbreviated commit id
	commitpost=${COMMIT#???????}
	commitpre=${COMMIT%$commitpost}
	echo "unknown-$commitpre"
}


sTAG=$(names_to_tag "$NAMES")

# emit shell (and python) compatible variable assignments
echo "COMMIT='$COMMIT'"
echo "TAG='$sTAG'"
echo "DATE='${DATE%% *}'"
|
||||
13
redo/whichpython.do
Normal file
13
redo/whichpython.do
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
# Find a working python2 interpreter, whatever this distro calls it,
# and write its name to $3.  Fails (exit 10) if none is found.
exec >&2
for py in python2.7 python2 python; do
	echo "Trying: $py"
	cmd=$(command -v "$py")
	# intentionally using the 'print statement' (as opposed to print
	# function) here, to rule out any python3 interpreters
	out=$($cmd -c 'print "success"' 2>/dev/null) || true
	if [ "$out" = "success" ]; then
		echo $cmd >$3
		exit 0
	fi
done
exit 10
|
||||
Loading…
Add table
Add a link
Reference in a new issue