Change locking stuff from fifos to fcntl.lockf().
This should reduce filesystem grinding a bit, and makes the code simpler. It's also theoretically a bit more portable, since I'm guessing fifo semantics aren't the same on win32 if we ever get there. Also, a major problem with the old fifo-based system was that if a redo process died without cleaning up after itself, it wouldn't delete its lockfiles, so we had to wipe them all at the beginning of each build. Now we don't need to; in theory, multiple copies of redo can now poke at the same tree at the same time without stepping on each other.
This commit is contained in:
parent
10afd9000f
commit
84169c5d27
2 changed files with 58 additions and 79 deletions
49
builder.py
49
builder.py
|
|
@ -43,8 +43,9 @@ def _try_stat(filename):
|
|||
|
||||
|
||||
class BuildJob:
|
||||
def __init__(self, t, lock, shouldbuildfunc, donefunc):
|
||||
self.t = t
|
||||
def __init__(self, t, sf, lock, shouldbuildfunc, donefunc):
|
||||
self.t = t # original target name, not relative to vars.BASE
|
||||
self.sf = sf
|
||||
self.tmpname = '%s.redo.tmp' % t
|
||||
self.lock = lock
|
||||
self.shouldbuildfunc = shouldbuildfunc
|
||||
|
|
@ -54,13 +55,13 @@ class BuildJob:
|
|||
def start(self):
|
||||
assert(self.lock.owned)
|
||||
t = self.t
|
||||
f = state.File(name=t)
|
||||
sf = self.sf
|
||||
tmpname = self.tmpname
|
||||
if not self.shouldbuildfunc(t):
|
||||
# target doesn't need to be built; skip the whole task
|
||||
return self._after2(0)
|
||||
if (os.path.exists(t) and not os.path.exists(t + '/.')
|
||||
and not f.is_generated):
|
||||
and not sf.is_generated):
|
||||
# an existing source file that was not generated by us.
|
||||
# This step is mentioned by djb in his notes.
|
||||
# For example, a rule called default.c.do could be used to try
|
||||
|
|
@ -70,16 +71,16 @@ class BuildJob:
|
|||
# of redo? That would make it easy for someone to override a
|
||||
# file temporarily, and could be undone by deleting the file.
|
||||
debug2("-- static (%r)\n" % t)
|
||||
f.set_static()
|
||||
f.save()
|
||||
sf.set_static()
|
||||
sf.save()
|
||||
return self._after2(0)
|
||||
f.zap_deps()
|
||||
(dofile, basename, ext) = _find_do_file(f)
|
||||
sf.zap_deps()
|
||||
(dofile, basename, ext) = _find_do_file(sf)
|
||||
if not dofile:
|
||||
if os.path.exists(t):
|
||||
f.is_generated = False
|
||||
f.set_static()
|
||||
f.save()
|
||||
sf.is_generated = False
|
||||
sf.set_static()
|
||||
sf.save()
|
||||
return self._after2(0)
|
||||
else:
|
||||
err('no rule to make %r\n' % t)
|
||||
|
|
@ -100,8 +101,8 @@ class BuildJob:
|
|||
if vars.VERBOSE or vars.XTRACE: log_('\n')
|
||||
log('%s\n' % _nice(t))
|
||||
self.argv = argv
|
||||
f.is_generated = True
|
||||
f.save()
|
||||
sf.is_generated = True
|
||||
sf.save()
|
||||
dof = state.File(name=dofile)
|
||||
dof.set_static()
|
||||
dof.save()
|
||||
|
|
@ -165,14 +166,14 @@ class BuildJob:
|
|||
os.rename(tmpname, t)
|
||||
else:
|
||||
unlink(tmpname)
|
||||
sf = state.File(name=t)
|
||||
sf = self.sf
|
||||
sf.is_generated=True
|
||||
sf.update_stamp()
|
||||
sf.set_changed()
|
||||
sf.save()
|
||||
else:
|
||||
unlink(tmpname)
|
||||
sf = state.File(name=t)
|
||||
sf = self.sf
|
||||
sf.stamp = None
|
||||
sf.set_changed()
|
||||
sf.save()
|
||||
|
|
@ -219,18 +220,19 @@ def main(targets, shouldbuildfunc):
|
|||
err('.redo directory disappeared; cannot continue.\n')
|
||||
retcode[0] = 205
|
||||
break
|
||||
lock = state.Lock(t)
|
||||
f = state.File(name=t)
|
||||
lock = state.Lock(f.id)
|
||||
lock.trylock()
|
||||
if not lock.owned:
|
||||
if vars.DEBUG_LOCKS:
|
||||
log('%s (locked...)\n' % _nice(t))
|
||||
locked.append(t)
|
||||
locked.append((f.id,t))
|
||||
else:
|
||||
BuildJob(t, lock, shouldbuildfunc, done).start()
|
||||
BuildJob(t, f, lock, shouldbuildfunc, done).start()
|
||||
|
||||
# Now we've built all the "easy" ones. Go back and just wait on the
|
||||
# remaining ones one by one. This is technically non-optimal; we could
|
||||
# use select.select() to wait on more than one at a time. But it should
|
||||
# remaining ones one by one. This is non-optimal; we could go faster if
|
||||
# we could wait on multiple locks at once. But it should
|
||||
# be rare enough that it doesn't matter, and the logic is easier this way.
|
||||
while locked or jwack.running():
|
||||
state.commit()
|
||||
|
|
@ -244,8 +246,8 @@ def main(targets, shouldbuildfunc):
|
|||
err('.redo directory disappeared; cannot continue.\n')
|
||||
retcode[0] = 205
|
||||
break
|
||||
t = locked.pop(0)
|
||||
lock = state.Lock(t)
|
||||
fid,t = locked.pop(0)
|
||||
lock = state.Lock(fid)
|
||||
lock.waitlock()
|
||||
assert(lock.owned)
|
||||
if vars.DEBUG_LOCKS:
|
||||
|
|
@ -255,6 +257,7 @@ def main(targets, shouldbuildfunc):
|
|||
retcode[0] = 2
|
||||
lock.unlock()
|
||||
else:
|
||||
BuildJob(t, lock, shouldbuildfunc, done).start()
|
||||
BuildJob(t, state.File(id=fid), lock,
|
||||
shouldbuildfunc, done).start()
|
||||
state.commit()
|
||||
return retcode[0]
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue