Merge branch 'sqlite'

This replaces the .redo state directory with an sqlite database instead,
improving correctness and sometimes performance.
This commit is contained in:
Avery Pennarun 2010-12-10 05:43:43 -08:00
commit b1bb48a029
15 changed files with 385 additions and 277 deletions

View file

@ -1,9 +1,10 @@
import sys, os, random, errno, stat
import sys, os, errno, stat
import vars, jwack, state
from helpers import log, log_, debug2, err, unlink, close_on_exec
from helpers import log, log_, debug2, err, warn, unlink, close_on_exec
def _possible_do_files(t):
t = os.path.join(vars.BASE, t)
yield "%s.do" % t, t, ''
dirname,filename = os.path.split(t)
l = filename.split('.')
@ -16,14 +17,14 @@ def _possible_do_files(t):
os.path.join(dirname, basename), ext)
def _find_do_file(t):
for dofile,basename,ext in _possible_do_files(t):
debug2('%s: %s ?\n' % (t, dofile))
def _find_do_file(f):
for dofile,basename,ext in _possible_do_files(f.name):
debug2('%s: %s ?\n' % (f.name, dofile))
if os.path.exists(dofile):
state.add_dep(t, 'm', dofile)
f.add_dep('m', dofile)
return dofile,basename,ext
else:
state.add_dep(t, 'c', dofile)
f.add_dep('c', dofile)
return None,None,None
@ -42,8 +43,9 @@ def _try_stat(filename):
class BuildJob:
def __init__(self, t, lock, shouldbuildfunc, donefunc):
self.t = t
def __init__(self, t, sf, lock, shouldbuildfunc, donefunc):
self.t = t # original target name, not relative to vars.BASE
self.sf = sf
self.tmpname = '%s.redo.tmp' % t
self.lock = lock
self.shouldbuildfunc = shouldbuildfunc
@ -53,12 +55,13 @@ class BuildJob:
def start(self):
assert(self.lock.owned)
t = self.t
sf = self.sf
tmpname = self.tmpname
if not self.shouldbuildfunc(t):
# target doesn't need to be built; skip the whole task
return self._after2(0)
if (os.path.exists(t) and not os.path.exists(t + '/.')
and not state.is_generated(t)):
and not sf.is_generated):
# an existing source file that was not generated by us.
# This step is mentioned by djb in his notes.
# For example, a rule called default.c.do could be used to try
@ -67,20 +70,21 @@ class BuildJob:
# FIXME: always refuse to redo any file that was modified outside
# of redo? That would make it easy for someone to override a
# file temporarily, and could be undone by deleting the file.
state.unmark_as_generated(t)
state.stamp_and_maybe_built(t)
debug2("-- static (%r)\n" % t)
sf.set_static()
sf.save()
return self._after2(0)
state.start(t)
(dofile, basename, ext) = _find_do_file(t)
sf.zap_deps()
(dofile, basename, ext) = _find_do_file(sf)
if not dofile:
if os.path.exists(t):
state.unmark_as_generated(t)
state.stamp_and_maybe_built(t)
sf.is_generated = False
sf.set_static()
sf.save()
return self._after2(0)
else:
err('no rule to make %r\n' % t)
return self._after2(1)
state.stamp_and_maybe_built(dofile)
unlink(tmpname)
ffd = os.open(tmpname, os.O_CREAT|os.O_RDWR|os.O_EXCL, 0666)
close_on_exec(ffd, True)
@ -97,13 +101,20 @@ class BuildJob:
if vars.VERBOSE or vars.XTRACE: log_('\n')
log('%s\n' % _nice(t))
self.argv = argv
sf.is_generated = True
sf.save()
dof = state.File(name=dofile)
dof.set_static()
dof.save()
state.commit()
jwack.start_job(t, self._do_subproc, self._after)
def _do_subproc(self):
# careful: REDO_PWD was the PWD relative to the STARTPATH at the time
# we *started* building the current target; but that target ran
# redo-ifchange, and it might have done it from a different directory
# than we started it in. So os.getcwd() might be != REDO_PWD right now.
# than we started it in. So os.getcwd() might be != REDO_PWD right
# now.
dn = os.path.dirname(self.t)
newp = os.path.realpath(dn)
os.environ['REDO_PWD'] = state.relpath(newp, vars.STARTDIR)
@ -121,7 +132,9 @@ class BuildJob:
def _after(self, t, rv):
try:
state.check_sane()
rv = self._after1(t, rv)
state.commit()
finally:
self._after2(rv)
@ -153,11 +166,17 @@ class BuildJob:
os.rename(tmpname, t)
else:
unlink(tmpname)
state.built(t)
state.stamp(t)
sf = self.sf
sf.is_generated=True
sf.update_stamp()
sf.set_changed()
sf.save()
else:
unlink(tmpname)
state.unstamp(t)
sf = self.sf
sf.stamp = None
sf.set_changed()
sf.save()
f.close()
if rv != 0:
err('%s: exit code %d\n' % (_nice(t),rv))
@ -177,6 +196,7 @@ class BuildJob:
def main(targets, shouldbuildfunc):
retcode = [0] # a list so that it can be reassigned from done()
if vars.SHUFFLE:
import random
random.shuffle(targets)
locked = []
@ -191,45 +211,60 @@ def main(targets, shouldbuildfunc):
# In the first cycle, we just build as much as we can without worrying
# about any lock contention. If someone else has it locked, we move on.
for t in targets:
if not jwack.has_token():
state.commit()
jwack.get_token(t)
if retcode[0] and not vars.KEEP_GOING:
break
if not state.is_sane():
if not state.check_sane():
err('.redo directory disappeared; cannot continue.\n')
retcode[0] = 205
break
lock = state.Lock(t)
f = state.File(name=t)
lock = state.Lock(f.id)
lock.trylock()
if not lock.owned:
if vars.DEBUG_LOCKS:
log('%s (locked...)\n' % _nice(t))
locked.append(t)
locked.append((f.id,t))
else:
BuildJob(t, lock, shouldbuildfunc, done).start()
BuildJob(t, f, lock, shouldbuildfunc, done).start()
# Now we've built all the "easy" ones. Go back and just wait on the
# remaining ones one by one. This is technically non-optimal; we could
# use select.select() to wait on more than one at a time. But it should
# be rare enough that it doesn't matter, and the logic is easier this way.
# remaining ones one by one. There's no reason to do it any more
# efficiently, because if these targets were previously locked, that
# means someone else was building them; thus, we probably won't need to
# do anything. The only exception is if we're invoked as redo instead
# of redo-ifchange; then we have to redo it even if someone else already
# did. But that should be rare.
while locked or jwack.running():
state.commit()
jwack.wait_all()
# at this point, we don't have any children holding any tokens, so
# it's okay to block below.
if retcode[0] and not vars.KEEP_GOING:
break
if locked:
if not state.is_sane():
if not state.check_sane():
err('.redo directory disappeared; cannot continue.\n')
retcode[0] = 205
break
t = locked.pop(0)
lock = state.Lock(t)
fid,t = locked.pop(0)
lock = state.Lock(fid)
lock.trylock()
if not lock.owned:
if vars.DEBUG_LOCKS and len(locked) >= 1:
warn('%s (WAITING)\n' % _nice(t))
lock.waitlock()
assert(lock.owned)
if vars.DEBUG_LOCKS:
log('%s (...unlocked!)\n' % _nice(t))
if state.stamped(t) == None:
if state.File(name=t).stamp == None:
err('%s: failed in another thread\n' % _nice(t))
retcode[0] = 2
lock.unlock()
else:
BuildJob(t, lock, shouldbuildfunc, done).start()
BuildJob(t, state.File(id=fid), lock,
shouldbuildfunc, done).start()
state.commit()
return retcode[0]

View file

@ -15,24 +15,6 @@ def unlink(f):
pass # it doesn't exist, that's what you asked for
def mkdirp(d, mode=None):
"""Recursively create directories on path 'd'.
Unlike os.makedirs(), it doesn't raise an exception if the last element of
the path already exists.
"""
try:
if mode:
os.makedirs(d, mode)
else:
os.makedirs(d)
except OSError, e:
if e.errno == errno.EEXIST:
pass
else:
raise
def log_(s):
sys.stdout.flush()
if vars.DEBUG_PIDS:
@ -52,13 +34,20 @@ def _cerr(s):
def _bwerr(s):
log_('redo: %s%s' % (vars.DEPTH, s))
def _cwarn(s):
log_('\x1b[33mredo: %s\x1b[1m%s\x1b[m' % (vars.DEPTH, s))
def _bwwarn(s):
log_('redo: %s%s' % (vars.DEPTH, s))
if os.isatty(2):
log = _clog
err = _cerr
warn = _cwarn
else:
log = _bwlog
err = _bwerr
warn = _bwwarn
def debug(s):

View file

@ -1,7 +1,7 @@
#
# beware the jobberwack
#
import sys, os, errno, select, fcntl
import sys, os, errno, select, fcntl, signal
import atoi
_toplevel = 0
@ -24,22 +24,35 @@ def _release(n):
_mytokens = 1
def _timeout(sig, frame):
pass
def _try_read(fd, n):
# FIXME: this isn't actually safe, because GNU make can't handle it if
# the socket is nonblocking. Ugh. That means we'll have to do their
# horrible SIGCHLD hack after all.
fcntl.fcntl(_fds[0], fcntl.F_SETFL, os.O_NONBLOCK)
# using djb's suggested way of doing non-blocking reads from a blocking
# socket: http://cr.yp.to/unix/nonblock.html
# We can't just make the socket non-blocking, because we want to be
# compatible with GNU Make, and they can't handle it.
r,w,x = select.select([fd], [], [], 0)
if not r:
return '' # try again
# ok, the socket is readable - but some other process might get there
# first. We have to set an alarm() in case our read() gets stuck.
oldh = signal.signal(signal.SIGALRM, _timeout)
try:
signal.alarm(1) # emergency fallback
try:
b = os.read(_fds[0], 1)
except OSError, e:
if e.errno == errno.EAGAIN:
return ''
if e.errno in (errno.EAGAIN, errno.EINTR):
# interrupted or it was nonblocking
return '' # try again
else:
raise
finally:
fcntl.fcntl(_fds[0], fcntl.F_SETFL, 0)
return b and b or None
signal.alarm(0)
signal.signal(signal.SIGALRM, oldh)
return b and b or None # None means EOF
def setup(maxjobs):
@ -70,7 +83,11 @@ def setup(maxjobs):
if maxjobs and not _fds:
# need to start a new server
_toplevel = maxjobs
_fds = os.pipe()
_fds1 = os.pipe()
_fds = (fcntl.fcntl(_fds1[0], fcntl.F_DUPFD, 100),
fcntl.fcntl(_fds1[1], fcntl.F_DUPFD, 101))
os.close(_fds1[0])
os.close(_fds1[1])
_release(maxjobs-1)
os.putenv('MAKEFLAGS',
'%s --jobserver-fds=%d,%d -j' % (os.getenv('MAKEFLAGS'),
@ -105,6 +122,11 @@ def wait(want_token):
pd.donefunc(pd.name, pd.rv)
def has_token():
if _mytokens >= 1:
return True
def get_token(reason):
global _mytokens
assert(_mytokens <= 1)
@ -149,8 +171,8 @@ def wait_all():
bb += b
if not b: break
if len(bb) != _toplevel-1:
raise Exception('on exit: expected %d tokens; found only %d'
% (_toplevel-1, len(b)))
raise Exception('on exit: expected %d tokens; found only %r'
% (_toplevel-1, len(bb)))
os.write(_fds[1], bb)

View file

@ -1,61 +1,61 @@
#!/usr/bin/python
import sys, os, errno, stat
import vars, state, builder, jwack
from helpers import debug, debug2, err, mkdirp, unlink
from helpers import debug, debug2, err, unlink
def dirty_deps(t, depth):
try:
st = os.stat(t)
realtime = st.st_mtime
except OSError:
st = None
realtime = 0
def dirty_deps(f, depth, max_changed):
if vars.DEBUG >= 1: debug('%s?%s\n' % (depth, f.name))
debug('%s?%s\n' % (depth, t))
if state.isbuilt(t):
if f.changed_runid == None:
debug('%s-- DIRTY (never built)\n' % depth)
return True
if f.changed_runid > max_changed:
debug('%s-- DIRTY (built)\n' % depth)
return True # has already been built during this session
if state.ismarked(t):
debug('%s-- CLEAN (marked)\n' % depth)
return True # has been built more recently than parent
if f.is_checked():
if vars.DEBUG >= 1: debug('%s-- CLEAN (checked)\n' % depth)
return False # has already been checked during this session
stamptime = state.stamped(t)
if stamptime == None:
if not f.stamp:
debug('%s-- DIRTY (no stamp)\n' % depth)
return True
if stamptime != realtime and not (st and stat.S_ISDIR(st.st_mode)):
if f.stamp != f.read_stamp():
debug('%s-- DIRTY (mtime)\n' % depth)
return True
for mode,name in state.deps(t):
for mode,f2 in f.deps():
if mode == 'c':
if os.path.exists(name):
if os.path.exists(os.path.join(vars.BASE, f2.name)):
debug('%s-- DIRTY (created)\n' % depth)
return True
elif mode == 'm':
if dirty_deps(os.path.join(vars.BASE, name), depth + ' '):
if dirty_deps(f2, depth = depth + ' ',
max_changed = f.changed_runid):
debug('%s-- DIRTY (sub)\n' % depth)
state.unstamp(t) # optimization for future callers
return True
state.mark(t)
f.set_checked()
f.save()
return False
def should_build(t):
return not state.isbuilt(t) and dirty_deps(t, depth = '')
f = state.File(name=t)
return dirty_deps(f, depth = '', max_changed = vars.RUNID)
rv = 202
try:
me = os.path.join(vars.STARTDIR,
os.path.join(vars.PWD, vars.TARGET))
f = state.File(name=me)
debug2('TARGET: %r %r %r\n' % (vars.STARTDIR, vars.PWD, vars.TARGET))
try:
targets = sys.argv[1:]
for t in targets:
state.add_dep(me, 'm', t)
f.add_dep('m', t)
f.save()
rv = builder.main(targets, should_build)
finally:
jwack.force_return_tokens()

View file

@ -1,15 +1,16 @@
#!/usr/bin/python
import sys, os
import vars, state
from helpers import err, mkdirp
from helpers import err
try:
me = state.File(name=vars.TARGET)
for t in sys.argv[1:]:
if os.path.exists(t):
err('redo-ifcreate: error: %r already exists\n' % t)
sys.exit(1)
else:
state.add_dep(vars.TARGET, 'c', t)
me.add_dep('c', t)
except KeyboardInterrupt:
sys.exit(200)

396
state.py
View file

@ -1,29 +1,114 @@
import sys, os, errno, glob
import sys, os, errno, glob, stat, fcntl, sqlite3
import vars
from helpers import unlink, err, debug2, debug3, mkdirp, close_on_exec
from helpers import unlink, err, debug2, debug3, close_on_exec
import helpers
SCHEMA_VER=1
TIMEOUT=60
def _connect(dbfile):
_db = sqlite3.connect(dbfile, timeout=TIMEOUT)
_db.execute("pragma synchronous = off")
_db.execute("pragma journal_mode = PERSIST")
return _db
_db = None
_lockfile = None
def db():
global _db, _lockfile
if _db:
return _db
dbdir = '%s/.redo' % vars.BASE
dbfile = '%s/db.sqlite3' % dbdir
try:
os.mkdir(dbdir)
except OSError, e:
if e.errno == errno.EEXIST:
pass # if it exists, that's okay
else:
raise
_lockfile = os.open(os.path.join(vars.BASE, '.redo/locks'),
os.O_RDWR | os.O_CREAT, 0666)
close_on_exec(_lockfile, True)
must_create = not os.path.exists(dbfile)
if not must_create:
_db = _connect(dbfile)
try:
row = _db.cursor().execute("select version from Schema").fetchone()
except sqlite3.OperationalError:
row = None
ver = row and row[0] or None
if ver != SCHEMA_VER:
err("state database: discarding v%s (wanted v%s)\n"
% (ver, SCHEMA_VER))
must_create = True
_db = None
if must_create:
unlink(dbfile)
_db = _connect(dbfile)
_db.execute("create table Schema "
" (version int)")
_db.execute("create table Runid "
" (id integer primary key autoincrement)")
_db.execute("create table Files "
" (name not null primary key, "
" is_generated int, "
" checked_runid int, "
" changed_runid int, "
" stamp, "
" csum)")
_db.execute("create table Deps "
" (target int, "
" source int, "
" mode not null, "
" primary key (target,source))")
_db.execute("insert into Schema (version) values (?)", [SCHEMA_VER])
# eat the '0' runid and File id
_db.execute("insert into Runid default values")
_db.execute("insert into Files (name) values (?)", [''])
if not vars.RUNID:
_db.execute("insert into Runid default values")
vars.RUNID = _db.execute("select last_insert_rowid()").fetchone()[0]
os.environ['REDO_RUNID'] = str(vars.RUNID)
_db.commit()
return _db
def init():
# FIXME: just wiping out all the locks is kind of cheating. But we
# only do this from the toplevel redo process, so unless the user
# deliberately starts more than one redo on the same repository, it's
# sort of ok.
mkdirp('%s/.redo' % vars.BASE)
for f in glob.glob('%s/.redo/lock*' % vars.BASE):
os.unlink(f)
for f in glob.glob('%s/.redo/mark^*' % vars.BASE):
os.unlink(f)
for f in glob.glob('%s/.redo/built^*' % vars.BASE):
os.unlink(f)
db()
_wrote = 0
def _write(q, l):
if _insane:
return
global _wrote
_wrote += 1
#helpers.log_('W: %r %r\n' % (q,l))
db().execute(q, l)
def commit():
if _insane:
return
global _wrote
if _wrote:
#helpers.log_("COMMIT (%d)\n" % _wrote)
db().commit()
_wrote = 0
_insane = None
def is_sane():
global _insane
def check_sane():
global _insane, _writable
if not _insane:
_insane = not os.path.exists('%s/.redo' % vars.BASE)
if _insane:
err('.redo directory disappeared; cannot continue.\n')
return not _insane
@ -46,185 +131,154 @@ def relpath(t, base):
return '/'.join(tparts)
def _sname(typ, t):
# FIXME: t.replace(...) is non-reversible and non-unique here!
tnew = relpath(t, vars.BASE)
v = vars.BASE + ('/.redo/%s^%s' % (typ, tnew.replace('/', '^')))
if vars.DEBUG >= 3:
debug3('sname: (%r) %r -> %r\n' % (os.getcwd(), t, tnew))
return v
class File(object):
# use this mostly to avoid accidentally assigning to typos
__slots__ = ['id', 'name', 'is_generated',
'checked_runid', 'changed_runid',
'stamp', 'csum']
def _init_from_cols(self, cols):
(self.id, self.name, self.is_generated,
self.checked_runid, self.changed_runid,
self.stamp, self.csum) = cols
def add_dep(t, mode, dep):
sn = _sname('dep', t)
def __init__(self, id=None, name=None, cols=None):
if cols:
return self._init_from_cols(cols)
q = ('select rowid, name, is_generated, checked_runid, changed_runid, '
' stamp, csum '
' from Files ')
if id != None:
q += 'where rowid=?'
l = [id]
elif name != None:
name = relpath(name, vars.BASE)
q += 'where name=?'
l = [name]
else:
raise Exception('name or id must be set')
d = db()
row = d.execute(q, l).fetchone()
if not row:
if not name:
raise Exception('File with id=%r not found and '
'name not given' % id)
try:
_write('insert into Files (name) values (?)', [name])
except sqlite3.IntegrityError:
# some parallel redo probably added it at the same time; no
# big deal.
pass
row = d.execute(q, l).fetchone()
assert(row)
self._init_from_cols(row)
def save(self):
_write('update Files set '
' is_generated=?, checked_runid=?, changed_runid=?, '
' stamp=?, csum=? '
' where rowid=?',
[self.is_generated,
self.checked_runid, self.changed_runid,
self.stamp, self.csum,
self.id])
def set_checked(self):
self.checked_runid = vars.RUNID
def set_changed(self):
debug2('BUILT: %r (%r)\n' % (self.name, self.stamp))
self.changed_runid = vars.RUNID
def set_static(self):
self.update_stamp()
def update_stamp(self):
newstamp = self.read_stamp()
if newstamp != self.stamp:
debug2("STAMP: %s: %r -> %r\n" % (self.name, self.stamp, newstamp))
self.stamp = newstamp
self.set_changed()
def is_changed(self):
return self.changed_runid and self.changed_runid >= vars.RUNID
def is_checked(self):
return self.checked_runid and self.checked_runid >= vars.RUNID
def deps(self):
q = ('select Deps.mode, Deps.source, '
' name, is_generated, checked_runid, changed_runid, '
' stamp, csum '
' from Files '
' join Deps on Files.rowid = Deps.source '
' where target=?')
for row in db().execute(q, [self.id]).fetchall():
mode = row[0]
cols = row[1:]
assert(mode in ('c', 'm'))
yield mode,File(cols=cols)
def zap_deps(self):
debug2('zap-deps: %r\n' % self.name)
_write('delete from Deps where target=?', [self.id])
def add_dep(self, mode, dep):
src = File(name=dep)
reldep = relpath(dep, vars.BASE)
debug2('add-dep: %r < %s %r\n' % (sn, mode, reldep))
debug2('add-dep: %r < %s %r\n' % (self.name, mode, reldep))
assert(src.name == reldep)
_write("insert or replace into Deps "
" (target, mode, source) values (?,?,?)",
[self.id, mode, src.id])
open(sn, 'a').write('%s %s\n' % (mode, reldep))
def deps(t):
for line in open(_sname('dep', t)).readlines():
assert(line[0] in ('c','m'))
assert(line[1] == ' ')
assert(line[-1] == '\n')
mode = line[0]
name = line[2:-1]
yield mode,name
def _stampname(t):
return _sname('stamp', t)
def stamp(t):
mark(t)
stampfile = _stampname(t)
newstampfile = _sname('stamp' + str(os.getpid()), t)
depfile = _sname('dep', t)
if not os.path.exists(vars.BASE + '/.redo'):
# .redo might not exist in a 'make clean' target
return
open(newstampfile, 'w').close()
def read_stamp(self):
try:
mtime = os.stat(t).st_mtime
st = os.stat(os.path.join(vars.BASE, self.name))
except OSError:
mtime = 0
os.utime(newstampfile, (mtime, mtime))
os.rename(newstampfile, stampfile)
open(depfile, 'a').close()
def unstamp(t):
unlink(_stampname(t))
unlink(_sname('dep', t))
def unmark_as_generated(t):
unstamp(t)
unlink(_sname('gen', t))
def stamped(t):
try:
stamptime = os.stat(_stampname(t)).st_mtime
except OSError, e:
if e.errno == errno.ENOENT:
return None
return '0' # does not exist
if stat.S_ISDIR(st.st_mode):
return 'dir' # the timestamp of a directory is meaningless
else:
raise
return stamptime
def built(t):
try:
open(_sname('built', t), 'w').close()
except IOError, e:
if e.errno == errno.ENOENT:
pass # may happen if someone deletes our .redo dir
else:
raise
_builts = {}
def isbuilt(t):
if _builts.get(t):
return True
if os.path.exists(_sname('built', t)):
_builts[t] = True
return True
# stamps the given input file, but only considers it to have been "built" if its
# mtime has changed. This is useful for static (non-generated) files.
def stamp_and_maybe_built(t):
if stamped(t) != os.stat(t).st_mtime:
built(t)
stamp(t)
def mark(t):
try:
open(_sname('mark', t), 'w').close()
except IOError, e:
if e.errno == errno.ENOENT:
pass # may happen if someone deletes our .redo dir
else:
raise
_marks = {}
def ismarked(t):
if _marks.get(t):
return True
if os.path.exists(_sname('mark', t)):
_marks[t] = True
return True
def is_generated(t):
return os.path.exists(_sname('gen', t))
def start(t):
unstamp(t)
open(_sname('dep', t), 'w').close()
open(_sname('gen', t), 'w').close() # it's definitely a generated file
# a "unique identifier" stamp for a regular file
return str((st.st_ctime, st.st_mtime, st.st_size, st.st_ino))
# FIXME: I really want to use fcntl F_SETLK, F_SETLKW, etc here. But python
# doesn't do the lockdata structure in a portable way, so we have to use
# fcntl.lockf() instead. Usually this is just a wrapper for fcntl, so it's
# ok, but it doesn't have F_GETLK, so we can't report which pid owns the lock.
# That makes debugging a bit harder.  When we someday port to C, we can do that.
class Lock:
def __init__(self, t):
def __init__(self, fid):
assert(_lockfile >= 0)
self.owned = False
self.rfd = self.wfd = None
self.lockname = _sname('lock', t)
self.fid = fid
def __del__(self):
if self.owned:
self.unlock()
def trylock(self):
assert(not self.owned)
try:
os.mkfifo(self.lockname, 0600)
self.owned = True
self.rfd = os.open(self.lockname, os.O_RDONLY|os.O_NONBLOCK)
self.wfd = os.open(self.lockname, os.O_WRONLY)
close_on_exec(self.rfd, True)
close_on_exec(self.wfd, True)
except OSError, e:
if e.errno == errno.EEXIST:
pass
fcntl.lockf(_lockfile, fcntl.LOCK_EX|fcntl.LOCK_NB, 1, self.fid)
except IOError, e:
if e.errno in (errno.EAGAIN, errno.EACCES):
pass # someone else has it locked
else:
raise
else:
self.owned = True
def waitlock(self):
while not self.owned:
self.wait()
self.trylock()
assert(self.owned)
assert(not self.owned)
fcntl.lockf(_lockfile, fcntl.LOCK_EX, 1, self.fid)
self.owned = True
def unlock(self):
if not self.owned:
raise Exception("can't unlock %r - we don't own it"
% self.lockname)
unlink(self.lockname)
# ping any connected readers
os.close(self.rfd)
os.close(self.wfd)
self.rfd = self.wfd = None
fcntl.lockf(_lockfile, fcntl.LOCK_UN, 1, self.fid)
self.owned = False
def wait(self):
if self.owned:
raise Exception("can't wait on %r - we own it" % self.lockname)
try:
# open() will finish only when a writer exists and does close()
fd = os.open(self.lockname, os.O_RDONLY)
try:
os.read(fd, 1)
finally:
os.close(fd)
except OSError, e:
if e.errno == errno.ENOENT:
pass # it's not even unlocked or was unlocked earlier
else:
raise

View file

@ -2,15 +2,15 @@ rm -f chdir1
redo chdir2
redo chdir3
. ./flush-cache.sh
./flush-cache.sh
redo-ifchange chdir3
rm -f chdir1
. ./flush-cache.sh
./flush-cache.sh
redo-ifchange chdir3
[ -e chdir1 ] || exit 77
rm -f chdir1
. ./flush-cache.sh
./flush-cache.sh
redo-ifchange chdir3
[ -e chdir1 ] || exit 78

View file

@ -1,10 +1,10 @@
rm -f *.out *.log
. ../../flush-cache.sh
../../flush-cache.sh
redo-ifchange 1.out 2.out
[ "$(cat 1.log | wc -l)" = 1 ] || exit 55
[ "$(cat 2.log | wc -l)" = 1 ] || exit 56
. ../../flush-cache.sh
../../flush-cache.sh
touch 1.in
redo-ifchange 1.out 2.out
[ "$(cat 1.log | wc -l)" = 2 ] || exit 57

View file

@ -1,11 +1,11 @@
rm -f log dir1/log dir1/stinky
touch t1.do
. ../../flush-cache.sh
../../flush-cache.sh
redo t1
touch t1.do
. ../../flush-cache.sh
../../flush-cache.sh
redo t1
. ../../flush-cache.sh
../../flush-cache.sh
redo-ifchange t1
C1="$(wc -l <dir1/log)"
C2="$(wc -l <log)"

View file

@ -3,7 +3,7 @@ rm -f static.log
redo static1 static2
touch static.in
. ../flush-cache.sh
../flush-cache.sh
redo-ifchange static1 static2
COUNT=$(wc -l <static.log)

View file

@ -1,19 +1,19 @@
rm -f genfile2 genfile2.do genfile.log
echo echo hello >genfile2.do
. ../flush-cache.sh
../flush-cache.sh
redo genfile1
# this will cause a rebuild:
# genfile1 depends on genfile2 depends on genfile2.do
rm -f genfile2.do
. ../flush-cache.sh
../flush-cache.sh
redo-ifchange genfile1
# but genfile2.do was gone last time, so genfile2 no longer depends on it.
# thus, it can be considered up-to-date. Prior versions of redo had a bug
# where the dependency on genfile2.do was never dropped.
. ../flush-cache.sh
../flush-cache.sh
redo-ifchange genfile1
COUNT=$(wc -l <genfile.log)

View file

@ -6,6 +6,7 @@ if [ -e t1a ]; then
else
BEFORE=
fi
../flush-cache.sh
redo-ifchange t1a # it definitely had to rebuild because t1dep changed
AFTER="$(cat t1a)"
if [ "$BEFORE" = "$AFTER" ]; then

9
t/flush-cache.sh Normal file → Executable file
View file

@ -1,3 +1,8 @@
#!/bin/sh
#echo "Flushing redo cache..." >&2
find "$REDO_BASE/.redo" -name 'built^*' -o -name 'mark^*' |
xargs rm -f >&2
(
echo ".timeout 5000"
echo "pragma synchronous = off;"
echo "update Files set checked_runid=null, " \
" changed_runid=changed_runid-1;"
) | sqlite3 "$REDO_BASE/.redo/db.sqlite3"

View file

@ -1,7 +1,7 @@
rm -f makedir.log
redo makedir
touch makedir/outfile
. ./flush-cache.sh
./flush-cache.sh
redo-ifchange makedir
COUNT=$(wc -l <makedir.log)
[ "$COUNT" = 1 ] || exit 99

View file

@ -18,6 +18,7 @@ XTRACE = os.environ.get('REDO_XTRACE', '') and 1 or 0
KEEP_GOING = os.environ.get('REDO_KEEP_GOING', '') and 1 or 0
SHUFFLE = os.environ.get('REDO_SHUFFLE', '') and 1 or 0
STARTDIR = os.environ['REDO_STARTDIR']
RUNID = atoi.atoi(os.environ.get('REDO_RUNID')) or None
BASE = os.environ['REDO_BASE']
while BASE and BASE.endswith('/'):
BASE = BASE[:-1]