2010-12-10 02:58:13 -08:00
|
|
|
import sys, os, errno, glob, stat, fcntl, sqlite3
|
2010-11-19 03:03:05 -08:00
|
|
|
import vars
|
2010-12-19 01:38:38 -08:00
|
|
|
from helpers import unlink, close_on_exec, join
|
2010-12-19 02:31:40 -08:00
|
|
|
from log import warn, err, debug2, debug3
|
2010-11-21 02:08:05 -08:00
|
|
|
|
2018-10-11 08:43:59 +01:00
|
|
|
# When the module is imported, change the process title.
# We do it here because this module is imported by all the scripts.
try:
    from setproctitle import setproctitle
except ImportError:
    # setproctitle is optional; without it the title just stays as-is.
    pass
else:
    cmdline = list(sys.argv)
    # Show just the bare command name, without path or extension.
    cmdline[0] = os.path.splitext(os.path.basename(cmdline[0]))[0]
    setproctitle(" ".join(cmdline))
|
|
|
|
|
|
2010-12-09 01:56:17 -08:00
|
|
|
SCHEMA_VER = 1   # current db schema version; mismatches force a rebuild
TIMEOUT = 60     # seconds to wait for the sqlite lock before giving up

# Magic stamp/name values.
ALWAYS = '//ALWAYS'   # an invalid filename that is always marked as dirty
STAMP_DIR = 'dir'     # the stamp of a directory; mtime is unhelpful
STAMP_MISSING = '0'   # the stamp of a nonexistent file
|
|
|
|
|
|
|
|
|
|
|
2016-11-27 23:35:28 -08:00
|
|
|
class CyclicDependencyError(Exception):
    """Raised when a target turns out to depend (perhaps indirectly) on itself."""
|
|
|
|
|
|
|
|
|
|
|
2010-12-09 04:58:05 -08:00
|
|
|
def _connect(dbfile):
    """Open dbfile with the connection settings the state db wants."""
    conn = sqlite3.connect(dbfile, timeout=TIMEOUT)
    # The db is regenerable, so trade durability for speed.
    conn.execute("pragma synchronous = off")
    # WAL journaling lets readers keep going while one writer is active.
    conn.execute("pragma journal_mode = WAL")
    # Hand back plain byte strings instead of unicode objects.
    conn.text_factory = str
    return conn
|
|
|
|
|
|
|
|
|
|
|
2010-12-07 02:17:22 -08:00
|
|
|
_db = None
def db():
    """Return the process-wide db connection, opening it on first use.

    Creates the .redo dir and the database if missing, discards and
    recreates the database on a schema version mismatch, and allocates
    vars.RUNID (also exported as REDO_RUNID) if this process has none.
    """
    global _db
    if _db:
        return _db

    dbdir = '%s/.redo' % vars.BASE
    dbfile = '%s/db.sqlite3' % dbdir
    try:
        os.mkdir(dbdir)
    except OSError as e:
        # EEXIST just means another process created it first; that's okay.
        if e.errno != errno.EEXIST:
            raise

    must_create = not os.path.exists(dbfile)
    if not must_create:
        _db = _connect(dbfile)
        try:
            row = _db.cursor().execute("select version from Schema").fetchone()
        except sqlite3.OperationalError:
            row = None  # no Schema table at all: treat as unversioned
        # (The old 'row and row[0] or None' idiom would misreport a stored
        # version of 0 as None; be explicit instead.)
        ver = row[0] if row else None
        if ver != SCHEMA_VER:
            err("state database: discarding v%s (wanted v%s)\n"
                % (ver, SCHEMA_VER))
            must_create = True
            _db = None
    if must_create:
        unlink(dbfile)
        _db = _connect(dbfile)
        _db.execute("create table Schema "
                    " (version int)")
        _db.execute("create table Runid "
                    " (id integer primary key autoincrement)")
        _db.execute("create table Files "
                    " (name not null primary key, "
                    " is_generated int, "
                    " is_override int, "
                    " checked_runid int, "
                    " changed_runid int, "
                    " failed_runid int, "
                    " stamp, "
                    " csum)")
        _db.execute("create table Deps "
                    " (target int, "
                    " source int, "
                    " mode not null, "
                    " delete_me int, "
                    " primary key (target,source))")
        _db.execute("insert into Schema (version) values (?)", [SCHEMA_VER])
        # eat the '0' runid and File id
        _db.execute("insert into Runid values "
                    " ((select max(id)+1 from Runid))")
        _db.execute("insert into Files (name) values (?)", [ALWAYS])

    if not vars.RUNID:
        _db.execute("insert into Runid values "
                    " ((select max(id)+1 from Runid))")
        vars.RUNID = _db.execute("select last_insert_rowid()").fetchone()[0]
        os.environ['REDO_RUNID'] = str(vars.RUNID)

    _db.commit()
    return _db
|
|
|
|
|
|
2010-11-21 02:08:05 -08:00
|
|
|
|
|
|
|
|
def init():
    """Make sure the state database is opened and initialized."""
    db()
|
2010-11-19 03:03:05 -08:00
|
|
|
|
|
|
|
|
|
2010-12-09 02:44:33 -08:00
|
|
|
_wrote = 0  # number of writes since the last commit/rollback

def _write(q, l):
    """Run the modifying query q with parameters l, tracking dirtiness.

    Silently does nothing once the state dir has been declared insane.
    """
    global _wrote
    if _insane:
        return
    _wrote += 1
    db().execute(q, l)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def commit():
    """Flush any pending writes to the database."""
    global _wrote
    if _insane or not _wrote:
        return
    db().commit()
    _wrote = 0
|
|
|
|
|
|
|
|
|
|
|
2018-10-06 04:36:24 -04:00
|
|
|
def rollback():
    """Discard any pending, uncommitted writes."""
    global _wrote
    if _insane or not _wrote:
        return
    db().rollback()
    _wrote = 0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def is_flushed():
    """Return true when there are no writes waiting to be committed."""
    return _wrote == 0
|
|
|
|
|
|
|
|
|
|
|
2010-11-22 03:34:37 -08:00
|
|
|
_insane = None  # set truthy once the .redo dir is found to be gone

def check_sane():
    """Return whether the .redo state dir still exists (result is cached)."""
    # NOTE(review): _writable is declared global here but never assigned
    # in this block; presumably used elsewhere — confirm before removing.
    global _insane, _writable
    if _insane:
        return False
    _insane = not os.path.exists('%s/.redo' % vars.BASE)
    return not _insane
|
|
|
|
|
|
|
|
|
|
|
2010-11-24 03:45:38 -08:00
|
|
|
_cwd = None  # cached result of os.getcwd()

def relpath(t, base):
    """Return t (resolved against the cwd) as a path relative to base."""
    global _cwd
    if not _cwd:
        _cwd = os.getcwd()
    t = os.path.normpath(os.path.join(_cwd, t))
    base = os.path.normpath(base)
    tparts = t.split('/')
    bparts = base.split('/')
    # Count the shared leading components, then drop them from both sides.
    common = 0
    while (common < len(tparts) and common < len(bparts)
           and tparts[common] == bparts[common]):
        common += 1
    tparts = tparts[common:]
    bparts = bparts[common:]
    # One '..' for each remaining component of base, then the rest of t.
    return join('/', ['..'] * len(bparts) + tparts)
|
2010-11-21 04:57:04 -08:00
|
|
|
|
|
|
|
|
|
2010-12-19 02:31:40 -08:00
|
|
|
def warn_override(name):
    """Tell the user that a hand-modified target is being skipped."""
    warn('%s - you modified it; skipping\n' % name)
|
|
|
|
|
|
|
|
|
|
|
2010-12-19 01:38:38 -08:00
|
|
|
_file_cols = ['rowid', 'name', 'is_generated', 'is_override',
|
|
|
|
|
'checked_runid', 'changed_runid', 'failed_runid',
|
|
|
|
|
'stamp', 'csum']
|
2010-12-07 02:17:22 -08:00
|
|
|
class File(object):
|
2010-12-10 02:58:13 -08:00
|
|
|
# use this mostly to avoid accidentally assigning to typos
|
2010-12-19 01:38:38 -08:00
|
|
|
__slots__ = ['id'] + _file_cols[1:]
|
2010-12-09 02:13:36 -08:00
|
|
|
|
redo-log: capture and linearize the output of redo builds.
redo now saves the stderr from every .do script, for every target, into
a file in the .redo directory. That means you can look up the logs
from the most recent build of any target using the new redo-log
command, for example:
redo-log -r all
The default is to show logs non-recursively, that is, it'll show when a
target does redo-ifchange on another target, but it won't recurse into
the logs for the latter target. With -r (recursive), it does. With -u
(unchanged), it does even if redo-ifchange discovered that the target
was already up-to-date; in that case, it prints the logs of the *most
recent* time the target was generated.
With --no-details, redo-log will show only the 'redo' lines, not the
other log messages. For very noisy build systems (like recursing into
a 'make' instance) this can be helpful to get an overview of what
happened, without all the cruft.
You can use the -f (follow) option like tail -f, to follow a build
that's currently in progress until it finishes. redo itself spins up a
copy of redo-log -r -f while it runs, so you can see what's going on.
Still broken in this version:
- No man page or new tests yet.
- ANSI colors don't yet work (unless you use --raw-logs, which gives
the old-style behaviour).
- You can't redirect the output of a sub-redo to a file or a
pipe right now, because redo-log is eating it.
- The regex for matching 'redo' lines in the log is very gross.
Instead, we should put the raw log files in a more machine-parseable
format, and redo-log should turn that into human-readable format.
- redo-log tries to "linearize" the logs, which makes them
comprehensible even for a large parallel build. It recursively shows
log messages for each target in depth-first tree order (by tracing
into a new target every time it sees a 'redo' line). This works
really well, but in some specific cases, the "topmost" redo instance
can get stuck waiting for a jwack token, which makes it look like the
whole build has stalled, when really redo-log is just waiting a long
time for a particular subprocess to be able to continue. We'll need to
add a specific workaround for that.
2018-11-03 22:09:18 -04:00
|
|
|
def _init_from_idname(self, id, name, allow_add):
|
2010-12-19 01:38:38 -08:00
|
|
|
q = ('select %s from Files ' % join(', ', _file_cols))
|
2010-12-07 02:17:22 -08:00
|
|
|
if id != None:
|
|
|
|
|
q += 'where rowid=?'
|
|
|
|
|
l = [id]
|
|
|
|
|
elif name != None:
|
2010-12-11 07:02:45 -08:00
|
|
|
name = (name==ALWAYS) and ALWAYS or relpath(name, vars.BASE)
|
2010-12-07 02:17:22 -08:00
|
|
|
q += 'where name=?'
|
|
|
|
|
l = [name]
|
2010-11-21 04:14:52 -08:00
|
|
|
else:
|
2010-12-07 02:17:22 -08:00
|
|
|
raise Exception('name or id must be set')
|
|
|
|
|
d = db()
|
|
|
|
|
row = d.execute(q, l).fetchone()
|
|
|
|
|
if not row:
|
|
|
|
|
if not name:
|
redo-log: capture and linearize the output of redo builds.
redo now saves the stderr from every .do script, for every target, into
a file in the .redo directory. That means you can look up the logs
from the most recent build of any target using the new redo-log
command, for example:
redo-log -r all
The default is to show logs non-recursively, that is, it'll show when a
target does redo-ifchange on another target, but it won't recurse into
the logs for the latter target. With -r (recursive), it does. With -u
(unchanged), it does even if redo-ifchange discovered that the target
was already up-to-date; in that case, it prints the logs of the *most
recent* time the target was generated.
With --no-details, redo-log will show only the 'redo' lines, not the
other log messages. For very noisy build systems (like recursing into
a 'make' instance) this can be helpful to get an overview of what
happened, without all the cruft.
You can use the -f (follow) option like tail -f, to follow a build
that's currently in progress until it finishes. redo itself spins up a
copy of redo-log -r -f while it runs, so you can see what's going on.
Still broken in this version:
- No man page or new tests yet.
- ANSI colors don't yet work (unless you use --raw-logs, which gives
the old-style behaviour).
- You can't redirect the output of a sub-redo to a file or a
pipe right now, because redo-log is eating it.
- The regex for matching 'redo' lines in the log is very gross.
Instead, we should put the raw log files in a more machine-parseable
format, and redo-log should turn that into human-readable format.
- redo-log tries to "linearize" the logs, which makes them
comprehensible even for a large parallel build. It recursively shows
log messages for each target in depth-first tree order (by tracing
into a new target every time it sees a 'redo' line). This works
really well, but in some specific cases, the "topmost" redo instance
can get stuck waiting for a jwack token, which makes it look like the
whole build has stalled, when really redo-log is just waiting a long
time for a particular subprocess to be able to continue. We'll need to
add a specific workaround for that.
2018-11-03 22:09:18 -04:00
|
|
|
raise KeyError('No file with id=%r name=%r' % (id, name))
|
|
|
|
|
elif not allow_add:
|
|
|
|
|
raise KeyError('No file with name=%r' % (name,))
|
2010-12-09 03:33:53 -08:00
|
|
|
try:
|
|
|
|
|
_write('insert into Files (name) values (?)', [name])
|
|
|
|
|
except sqlite3.IntegrityError:
|
|
|
|
|
# some parallel redo probably added it at the same time; no
|
|
|
|
|
# big deal.
|
|
|
|
|
pass
|
2010-12-07 02:17:22 -08:00
|
|
|
row = d.execute(q, l).fetchone()
|
|
|
|
|
assert(row)
|
2010-12-11 02:17:51 -08:00
|
|
|
return self._init_from_cols(row)
|
|
|
|
|
|
|
|
|
|
def _init_from_cols(self, cols):
|
|
|
|
|
(self.id, self.name, self.is_generated, self.is_override,
|
|
|
|
|
self.checked_runid, self.changed_runid, self.failed_runid,
|
|
|
|
|
self.stamp, self.csum) = cols
|
2010-12-11 07:02:45 -08:00
|
|
|
if self.name == ALWAYS and self.changed_runid < vars.RUNID:
|
|
|
|
|
self.changed_runid = vars.RUNID
|
2010-12-11 02:17:51 -08:00
|
|
|
|
redo-log: capture and linearize the output of redo builds.
redo now saves the stderr from every .do script, for every target, into
a file in the .redo directory. That means you can look up the logs
from the most recent build of any target using the new redo-log
command, for example:
redo-log -r all
The default is to show logs non-recursively, that is, it'll show when a
target does redo-ifchange on another target, but it won't recurse into
the logs for the latter target. With -r (recursive), it does. With -u
(unchanged), it does even if redo-ifchange discovered that the target
was already up-to-date; in that case, it prints the logs of the *most
recent* time the target was generated.
With --no-details, redo-log will show only the 'redo' lines, not the
other log messages. For very noisy build systems (like recursing into
a 'make' instance) this can be helpful to get an overview of what
happened, without all the cruft.
You can use the -f (follow) option like tail -f, to follow a build
that's currently in progress until it finishes. redo itself spins up a
copy of redo-log -r -f while it runs, so you can see what's going on.
Still broken in this version:
- No man page or new tests yet.
- ANSI colors don't yet work (unless you use --raw-logs, which gives
the old-style behaviour).
- You can't redirect the output of a sub-redo to a file or a
pipe right now, because redo-log is eating it.
- The regex for matching 'redo' lines in the log is very gross.
Instead, we should put the raw log files in a more machine-parseable
format, and redo-log should turn that into human-readable format.
- redo-log tries to "linearize" the logs, which makes them
comprehensible even for a large parallel build. It recursively shows
log messages for each target in depth-first tree order (by tracing
into a new target every time it sees a 'redo' line). This works
really well, but in some specific cases, the "topmost" redo instance
can get stuck waiting for a jwack token, which makes it look like the
whole build has stalled, when really redo-log is just waiting a long
time for a particular subprocess to be able to continue. We'll need to
add a specific workaround for that.
2018-11-03 22:09:18 -04:00
|
|
|
def __init__(self, id=None, name=None, cols=None, allow_add=True):
|
2010-12-11 02:17:51 -08:00
|
|
|
if cols:
|
|
|
|
|
return self._init_from_cols(cols)
|
|
|
|
|
else:
|
redo-log: capture and linearize the output of redo builds.
redo now saves the stderr from every .do script, for every target, into
a file in the .redo directory. That means you can look up the logs
from the most recent build of any target using the new redo-log
command, for example:
redo-log -r all
The default is to show logs non-recursively, that is, it'll show when a
target does redo-ifchange on another target, but it won't recurse into
the logs for the latter target. With -r (recursive), it does. With -u
(unchanged), it does even if redo-ifchange discovered that the target
was already up-to-date; in that case, it prints the logs of the *most
recent* time the target was generated.
With --no-details, redo-log will show only the 'redo' lines, not the
other log messages. For very noisy build systems (like recursing into
a 'make' instance) this can be helpful to get an overview of what
happened, without all the cruft.
You can use the -f (follow) option like tail -f, to follow a build
that's currently in progress until it finishes. redo itself spins up a
copy of redo-log -r -f while it runs, so you can see what's going on.
Still broken in this version:
- No man page or new tests yet.
- ANSI colors don't yet work (unless you use --raw-logs, which gives
the old-style behaviour).
- You can't redirect the output of a sub-redo to a file or a
pipe right now, because redo-log is eating it.
- The regex for matching 'redo' lines in the log is very gross.
Instead, we should put the raw log files in a more machine-parseable
format, and redo-log should turn that into human-readable format.
- redo-log tries to "linearize" the logs, which makes them
comprehensible even for a large parallel build. It recursively shows
log messages for each target in depth-first tree order (by tracing
into a new target every time it sees a 'redo' line). This works
really well, but in some specific cases, the "topmost" redo instance
can get stuck waiting for a jwack token, which makes it look like the
whole build has stalled, when really redo-log is just waiting a long
time for a particular subprocess to be able to continue. We'll need to
add a specific workaround for that.
2018-11-03 22:09:18 -04:00
|
|
|
return self._init_from_idname(id, name, allow_add=allow_add)
|
2010-12-11 02:17:51 -08:00
|
|
|
|
2015-05-06 17:56:14 -04:00
|
|
|
def __repr__(self):
|
|
|
|
|
return "File(%r)" % (self.nicename(),)
|
|
|
|
|
|
2010-12-11 02:17:51 -08:00
|
|
|
def refresh(self):
|
redo-log: capture and linearize the output of redo builds.
redo now saves the stderr from every .do script, for every target, into
a file in the .redo directory. That means you can look up the logs
from the most recent build of any target using the new redo-log
command, for example:
redo-log -r all
The default is to show logs non-recursively, that is, it'll show when a
target does redo-ifchange on another target, but it won't recurse into
the logs for the latter target. With -r (recursive), it does. With -u
(unchanged), it does even if redo-ifchange discovered that the target
was already up-to-date; in that case, it prints the logs of the *most
recent* time the target was generated.
With --no-details, redo-log will show only the 'redo' lines, not the
other log messages. For very noisy build systems (like recursing into
a 'make' instance) this can be helpful to get an overview of what
happened, without all the cruft.
You can use the -f (follow) option like tail -f, to follow a build
that's currently in progress until it finishes. redo itself spins up a
copy of redo-log -r -f while it runs, so you can see what's going on.
Still broken in this version:
- No man page or new tests yet.
- ANSI colors don't yet work (unless you use --raw-logs, which gives
the old-style behaviour).
- You can't redirect the output of a sub-redo to a file or a
pipe right now, because redo-log is eating it.
- The regex for matching 'redo' lines in the log is very gross.
Instead, we should put the raw log files in a more machine-parseable
format, and redo-log should turn that into human-readable format.
- redo-log tries to "linearize" the logs, which makes them
comprehensible even for a large parallel build. It recursively shows
log messages for each target in depth-first tree order (by tracing
into a new target every time it sees a 'redo' line). This works
really well, but in some specific cases, the "topmost" redo instance
can get stuck waiting for a jwack token, which makes it look like the
whole build has stalled, when really redo-log is just waiting a long
time for a particular subprocess to be able to continue. We'll need to
add a specific workaround for that.
2018-11-03 22:09:18 -04:00
|
|
|
self._init_from_idname(self.id, None, allow_add=False)
|
2010-12-07 02:17:22 -08:00
|
|
|
|
|
|
|
|
def save(self):
|
2010-12-19 01:38:38 -08:00
|
|
|
cols = join(', ', ['%s=?'%i for i in _file_cols[2:]])
|
2010-12-09 02:44:33 -08:00
|
|
|
_write('update Files set '
|
2010-12-19 01:38:38 -08:00
|
|
|
' %s '
|
|
|
|
|
' where rowid=?' % cols,
|
2010-12-10 22:42:33 -08:00
|
|
|
[self.is_generated, self.is_override,
|
2010-12-10 20:53:31 -08:00
|
|
|
self.checked_runid, self.changed_runid, self.failed_runid,
|
2010-12-09 02:44:33 -08:00
|
|
|
self.stamp, self.csum,
|
|
|
|
|
self.id])
|
2010-12-07 02:17:22 -08:00
|
|
|
|
|
|
|
|
def set_checked(self):
|
|
|
|
|
self.checked_runid = vars.RUNID
|
2010-12-10 20:53:31 -08:00
|
|
|
|
2010-12-19 03:39:37 -08:00
|
|
|
def set_checked_save(self):
|
|
|
|
|
self.set_checked()
|
|
|
|
|
self.save()
|
|
|
|
|
|
2010-12-07 02:17:22 -08:00
|
|
|
def set_changed(self):
|
|
|
|
|
debug2('BUILT: %r (%r)\n' % (self.name, self.stamp))
|
|
|
|
|
self.changed_runid = vars.RUNID
|
2010-12-10 22:42:33 -08:00
|
|
|
self.failed_runid = None
|
|
|
|
|
self.is_override = False
|
2010-12-07 02:17:22 -08:00
|
|
|
|
2010-12-10 20:53:31 -08:00
|
|
|
def set_failed(self):
|
|
|
|
|
debug2('FAILED: %r\n' % self.name)
|
2010-12-10 22:42:33 -08:00
|
|
|
self.update_stamp()
|
2010-12-10 20:53:31 -08:00
|
|
|
self.failed_runid = vars.RUNID
|
2010-12-10 22:42:33 -08:00
|
|
|
self.is_generated = True
|
2010-12-10 20:53:31 -08:00
|
|
|
|
2010-12-07 02:17:22 -08:00
|
|
|
def set_static(self):
|
2011-01-17 23:57:20 -08:00
|
|
|
self.update_stamp(must_exist=True)
|
2010-12-10 22:42:33 -08:00
|
|
|
self.is_override = False
|
|
|
|
|
self.is_generated = False
|
|
|
|
|
|
|
|
|
|
def set_override(self):
|
|
|
|
|
self.update_stamp()
|
|
|
|
|
self.is_override = True
|
2010-12-07 02:17:22 -08:00
|
|
|
|
2011-01-17 23:57:20 -08:00
|
|
|
def update_stamp(self, must_exist=False):
|
2010-12-07 02:17:22 -08:00
|
|
|
newstamp = self.read_stamp()
|
2011-01-17 23:57:20 -08:00
|
|
|
if must_exist and newstamp == STAMP_MISSING:
|
|
|
|
|
raise Exception("%r does not exist" % self.name)
|
2010-12-07 02:17:22 -08:00
|
|
|
if newstamp != self.stamp:
|
|
|
|
|
debug2("STAMP: %s: %r -> %r\n" % (self.name, self.stamp, newstamp))
|
|
|
|
|
self.stamp = newstamp
|
|
|
|
|
self.set_changed()
|
|
|
|
|
|
2010-12-10 20:53:31 -08:00
|
|
|
def is_checked(self):
|
|
|
|
|
return self.checked_runid and self.checked_runid >= vars.RUNID
|
|
|
|
|
|
2010-12-07 02:17:22 -08:00
|
|
|
def is_changed(self):
|
|
|
|
|
return self.changed_runid and self.changed_runid >= vars.RUNID
|
|
|
|
|
|
2010-12-10 20:53:31 -08:00
|
|
|
def is_failed(self):
|
|
|
|
|
return self.failed_runid and self.failed_runid >= vars.RUNID
|
2010-12-07 02:17:22 -08:00
|
|
|
|
|
|
|
|
def deps(self):
|
2010-12-19 01:38:38 -08:00
|
|
|
q = ('select Deps.mode, Deps.source, %s '
|
2010-12-09 02:13:36 -08:00
|
|
|
' from Files '
|
|
|
|
|
' join Deps on Files.rowid = Deps.source '
|
2010-12-19 01:38:38 -08:00
|
|
|
' where target=?' % join(', ', _file_cols[1:]))
|
2010-12-09 02:13:36 -08:00
|
|
|
for row in db().execute(q, [self.id]).fetchall():
|
|
|
|
|
mode = row[0]
|
|
|
|
|
cols = row[1:]
|
2010-12-07 02:17:22 -08:00
|
|
|
assert(mode in ('c', 'm'))
|
2010-12-09 02:13:36 -08:00
|
|
|
yield mode,File(cols=cols)
|
2010-12-07 02:17:22 -08:00
|
|
|
|
2010-12-11 22:59:55 -08:00
|
|
|
def zap_deps1(self):
|
|
|
|
|
debug2('zap-deps1: %r\n' % self.name)
|
|
|
|
|
_write('update Deps set delete_me=? where target=?', [True, self.id])
|
|
|
|
|
|
|
|
|
|
def zap_deps2(self):
|
|
|
|
|
debug2('zap-deps2: %r\n' % self.name)
|
|
|
|
|
_write('delete from Deps where target=? and delete_me=1', [self.id])
|
2010-12-07 02:17:22 -08:00
|
|
|
|
|
|
|
|
def add_dep(self, mode, dep):
|
|
|
|
|
src = File(name=dep)
|
2010-12-19 05:47:38 -08:00
|
|
|
debug3('add-dep: "%s" < %s "%s"\n' % (self.name, mode, src.name))
|
2010-12-11 06:16:32 -08:00
|
|
|
assert(self.id != src.id)
|
2010-12-09 02:44:33 -08:00
|
|
|
_write("insert or replace into Deps "
|
2010-12-11 22:59:55 -08:00
|
|
|
" (target, mode, source, delete_me) values (?,?,?,?)",
|
|
|
|
|
[self.id, mode, src.id, False])
|
2010-12-07 02:17:22 -08:00
|
|
|
|
|
|
|
|
def read_stamp(self):
|
|
|
|
|
try:
|
2018-10-06 00:14:02 -04:00
|
|
|
st = os.lstat(os.path.join(vars.BASE, self.name))
|
2010-12-07 02:17:22 -08:00
|
|
|
except OSError:
|
2010-12-10 22:42:33 -08:00
|
|
|
return STAMP_MISSING
|
2010-12-07 02:17:22 -08:00
|
|
|
if stat.S_ISDIR(st.st_mode):
|
2010-12-10 22:42:33 -08:00
|
|
|
return STAMP_DIR
|
2010-11-21 00:54:35 -08:00
|
|
|
else:
|
2010-12-07 02:17:22 -08:00
|
|
|
# a "unique identifier" stamp for a regular file
|
2016-11-23 15:50:33 -08:00
|
|
|
return str((st.st_mtime, st.st_size, st.st_ino,
|
|
|
|
|
st.st_mode, st.st_uid, st.st_gid))
|
2010-11-19 03:03:05 -08:00
|
|
|
|
2010-12-11 21:19:15 -08:00
|
|
|
def nicename(self):
|
|
|
|
|
return relpath(os.path.join(vars.BASE, self.name), vars.STARTDIR)
|
|
|
|
|
|
|
|
|
|
|
2010-12-19 01:38:38 -08:00
|
|
|
def files():
    """Yield a File object for every row in Files, ordered by name."""
    q = 'select %s from Files order by name' % join(', ', _file_cols)
    for cols in db().execute(q).fetchall():
        yield File(cols=cols)
|
|
|
|
|
|
2010-11-19 03:03:05 -08:00
|
|
|
|
redo-log: capture and linearize the output of redo builds.
redo now saves the stderr from every .do script, for every target, into
a file in the .redo directory. That means you can look up the logs
from the most recent build of any target using the new redo-log
command, for example:
redo-log -r all
The default is to show logs non-recursively, that is, it'll show when a
target does redo-ifchange on another target, but it won't recurse into
the logs for the latter target. With -r (recursive), it does. With -u
(unchanged), it does even if redo-ifchange discovered that the target
was already up-to-date; in that case, it prints the logs of the *most
recent* time the target was generated.
With --no-details, redo-log will show only the 'redo' lines, not the
other log messages. For very noisy build systems (like recursing into
a 'make' instance) this can be helpful to get an overview of what
happened, without all the cruft.
You can use the -f (follow) option like tail -f, to follow a build
that's currently in progress until it finishes. redo itself spins up a
copy of redo-log -r -f while it runs, so you can see what's going on.
Still broken in this version:
- No man page or new tests yet.
- ANSI colors don't yet work (unless you use --raw-logs, which gives
the old-style behaviour).
- You can't redirect the output of a sub-redo to a file or a
pipe right now, because redo-log is eating it.
- The regex for matching 'redo' lines in the log is very gross.
Instead, we should put the raw log files in a more machine-parseable
format, and redo-log should turn that into human-readable format.
- redo-log tries to "linearize" the logs, which makes them
comprehensible even for a large parallel build. It recursively shows
log messages for each target in depth-first tree order (by tracing
into a new target every time it sees a 'redo' line). This works
really well, but in some specific cases, the "topmost" redo instance
can get stuck waiting for a jwack token, which makes it look like the
whole build has stalled, when really redo-log is just waiting a long
time for a particular subprocess to be able to continue. We'll need to
add a specific workaround for that.
2018-11-03 22:09:18 -04:00
|
|
|
def logname(fid):
    """Given the id of a File, return the filename of its build log."""
    redodir = os.path.join(vars.BASE, '.redo')
    return os.path.join(redodir, 'log.%d' % fid)
|
|
|
|
|
|
|
|
|
|
|
2010-12-10 02:58:13 -08:00
|
|
|
# FIXME: I really want to use fcntl F_SETLK, F_SETLKW, etc here.  But python
# doesn't do the lockdata structure in a portable way, so we have to use
# fcntl.lockf() instead.  Usually this is just a wrapper for fcntl, so it's
# ok, but it doesn't have F_GETLK, so we can't report which pid owns the lock.
# The makes debugging a bit harder.  When we someday port to C, we can do that.
_locks = {}
class Lock:
    """An fcntl.lockf()-based exclusive lock on a particular File id."""

    def __init__(self, fid):
        """Open (creating if needed) the lock file for File id fid."""
        self.owned = False
        self.fid = fid
        self.lockfile = None
        self.lockfile = os.open(os.path.join(vars.BASE, '.redo/lock.%d' % fid),
                                os.O_RDWR | os.O_CREAT, 0o666)
        close_on_exec(self.lockfile, True)
        # Only one Lock object per fid may exist in this process at a time.
        assert(_locks.get(fid, 0) == 0)
        _locks[fid] = 1

    def __del__(self):
        _locks[self.fid] = 0
        if self.owned:
            self.unlock()
        if self.lockfile is not None:
            os.close(self.lockfile)

    def check(self):
        """Raise CyclicDependencyError if an ancestor already holds us."""
        assert(not self.owned)
        if str(self.fid) in vars.get_locks():
            # Lock already held by parent: cyclic dependence
            raise CyclicDependencyError()

    def trylock(self):
        """Try to take the lock without blocking; return whether we own it."""
        self.check()
        try:
            fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0)
        except IOError as e:
            if e.errno in (errno.EAGAIN, errno.EACCES):
                pass  # someone else has it locked
            else:
                raise
        else:
            self.owned = True
        return self.owned

    def waitlock(self):
        """Block until we can take the lock."""
        self.check()
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX, 0, 0)
        self.owned = True

    def unlock(self):
        """Release the lock; raises if we don't own it."""
        if not self.owned:
            # (Bugfix: this used to reference self.lockname, which doesn't
            # exist, so the error itself raised AttributeError.)
            raise Exception("can't unlock %r - we don't own it"
                            % self.fid)
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN, 0, 0)
        self.owned = False
|