diff --git a/builder.py b/builder.py index 1125262..46109c3 100644 --- a/builder.py +++ b/builder.py @@ -197,7 +197,7 @@ class BuildJob: firstline = open(os.path.join(dodir, dofile)).readline().strip() if firstline.startswith('#!/'): argv[0:2] = firstline[2:].split(' ') - # make sure to create the logfile *before* writing the log about it. + # make sure to create the logfile *before* writing the meta() about it. # that way redo-log won't trace into an obsolete logfile. if vars.LOG: open(state.logname(self.sf.id), 'w') self.dodir = dodir diff --git a/state.py b/state.py index 72e76bd..4a65193 100644 --- a/state.py +++ b/state.py @@ -89,9 +89,11 @@ def db(): " delete_me int, " " primary key (target,source))") _db.execute("insert into Schema (version) values (?)", [SCHEMA_VER]) - # eat the '0' runid and File id - _db.execute("insert into Runid values " - " ((select max(id)+1 from Runid))") + # eat the '0' runid and File id. + # Because of the cheesy way t/flush-cache is implemented, leave a + # lot of runids available before the "first" one so that we + # can adjust cached values to be before the first value. + _db.execute("insert into Runid values (1000000000)") _db.execute("insert into Files (name) values (?)", [ALWAYS]) if not vars.RUNID: @@ -289,11 +291,13 @@ class File(object): def set_static(self): self.update_stamp(must_exist=True) + self.failed_runid = None self.is_override = False self.is_generated = False def set_override(self): self.update_stamp() + self.failed_runid = None self.is_override = True def update_stamp(self, must_exist=False): diff --git a/t/flush-cache b/t/flush-cache index f96abf1..90b226a 100755 --- a/t/flush-cache +++ b/t/flush-cache @@ -9,6 +9,27 @@ sys.stderr.write("Flushing redo cache...\n") db_file = os.path.join(os.environ["REDO_BASE"], ".redo/db.sqlite3") db = sqlite3.connect(db_file, timeout=5000) +# This is very (overly) tricky. Every time we flush the cache, we run an +# atomic transaction that subtracts 1 from all checked_runid and +# changed_runid values across the entire system. Then when checking +# dependencies, we can see if changed_runid for a given dependency is +# greater than checked_runid for the target, and their *relative* values +# will still be intact! So if a dependency had been built during the +# current run, it will act as if a *previous* run built the dependency but +# the current target was built even earlier. Meanwhile, checked_runid is +# less than REDO_RUNID, so everything will still need to be rechecked. +# +# A second tricky point is that failed_runid is usually null (unless +# building a given target really did fail last time). (null - 1) is still +# null, so this transaction doesn't change failed_runid at all unless it +# really did fail. +# +# Finally, an even more insane problem is that since we decrement these +# values more than once per run, they end up decreasing fairly rapidly. +# But 0 is special! Some code treats failed_runid==0 as if it were null, +# so when we decrement all the way to zero, we get a spurious test failure. +# To avoid this, we initialize the runid to a very large number at database +# creation time. db.executescript("pragma synchronous = off;" "update Files set checked_runid=checked_runid-1, " " changed_runid=changed_runid-1, "