Fix "up-to-date" logic.

commit b7df226f0d
parent 3bdc96019f
Author: Daniel Jones
Date:   2013-04-24 11:36:24 -07:00
3 changed files with 34 additions and 15 deletions

@@ -1,5 +1,6 @@
 # Short-term
+* Expand ${foo}.
 * Unit tests.
 * Expanding regex matches in targets.
 * Dummy rule for multiple explicit targets
@@ -14,7 +15,4 @@
 * An attribute to demand n processors for a particular rule. This way
   resource hog rules can be run on their own without disabling parallel
   make.
-* A switch that prints the rules that will be executed and prompts to user
-  to do so. I often find myself doing `mk -n` before `mk` to make sure my
-  rules aren't bogus.

@@ -29,6 +29,7 @@ type nodeStatus int
 const (
 	nodeStatusReady nodeStatus = iota
 	nodeStatusStarted
+	nodeStatusNop
 	nodeStatusDone
 	nodeStatusFailed
 )

mk.go

@@ -90,6 +90,7 @@ func mkNode(g *graph, u *node, dryrun bool) {
 			wd, _ := os.Getwd()
 			mkError(fmt.Sprintf("don't know how to make %s in %s\n", u.name, wd))
 		}
+		finalstatus = nodeStatusNop
 		return
 	}
@@ -118,17 +119,13 @@ func mkNode(g *graph, u *node, dryrun bool) {
 	e.r.mutex.Lock()
 	for i := range prereqs {
 		prereqs[i].mutex.Lock()
-		// needs to be built?
-		u.updateTimestamp()
-		if !prereqs[i].exists || rebuildall || (u.exists && u.t.Before(prereqs[i].t)) {
-			switch prereqs[i].status {
-			case nodeStatusReady:
-				go mkNode(g, prereqs[i], dryrun)
-				fallthrough
-			case nodeStatusStarted:
-				prereqs[i].listeners = append(prereqs[i].listeners, prereqstat)
-				pending++
-			}
+		switch prereqs[i].status {
+		case nodeStatusReady:
+			go mkNode(g, prereqs[i], dryrun)
+			fallthrough
+		case nodeStatusStarted:
+			prereqs[i].listeners = append(prereqs[i].listeners, prereqstat)
+			pending++
 		}
 		prereqs[i].mutex.Unlock()
 	}
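
The hunk above simplifies prerequisite scheduling: every prerequisite is now visited unconditionally, and the decision about whether the target itself needs rebuilding is deferred until after they finish (next hunk). The scheduling is a fan-out/fan-in pattern: each ready prerequisite gets its own goroutine, the parent registers a listener channel and bumps a pending counter, then drains one status per prerequisite. The sketch below is a minimal standalone illustration of that pattern under assumed names (`buildStatus`, `prereq`, `build`), not mk's actual code.

```go
// Minimal fan-out/fan-in sketch (assumed names, not mk's code):
// build each prerequisite in its own goroutine, count it as pending,
// then drain one status per prerequisite before settling a final status.
package main

import (
	"fmt"
	"time"
)

type buildStatus int

const (
	statusNop buildStatus = iota // nothing needed to be done
	statusDone                   // a recipe actually ran
	statusFailed                 // a recipe failed
)

type prereq struct {
	name string
	work time.Duration
}

// build stands in for mkNode: do the work, then report a status.
func build(p prereq, report chan<- buildStatus) {
	time.Sleep(p.work)
	report <- statusDone
}

func main() {
	prereqs := []prereq{
		{"a.o", 10 * time.Millisecond},
		{"b.o", 5 * time.Millisecond},
	}

	report := make(chan buildStatus)
	pending := 0
	for _, p := range prereqs {
		go build(p, report) // fan out
		pending++           // remember to wait for this one
	}

	final := statusNop
	for ; pending > 0; pending-- { // fan in
		switch <-report {
		case statusFailed:
			final = statusFailed
		case statusDone:
			if final != statusFailed {
				final = statusDone
			}
		}
	}
	fmt.Println("final status:", final) // prints 1 (statusDone)
}
```

The fallthrough in the real code matters: a prerequisite that was just started falls into the "started" case, so the parent always registers itself as a listener and never misses that prerequisite's completion message.
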
@@ -143,13 +140,36 @@ func mkNode(g *graph, u *node, dryrun bool) {
 		}
 	}
 
+	uptodate := true
+	if !e.r.attributes.virtual {
+		u.updateTimestamp()
+		if u.exists {
+			for i := range prereqs {
+				if u.t.Before(prereqs[i].t) || prereqs[i].status == nodeStatusDone {
+					uptodate = false
+				}
+			}
+		} else {
+			uptodate = false
+		}
+	} else {
+		uptodate = false
+	}
+
+	if rebuildall {
+		uptodate = false
+	}
+
 	// execute the recipe, unless the prereqs failed
-	if finalstatus != nodeStatusFailed && len(e.r.recipe) > 0 {
+	if !uptodate && finalstatus != nodeStatusFailed && len(e.r.recipe) > 0 {
 		reserveSubproc()
 		if !dorecipe(u.name, u, e, dryrun) {
 			finalstatus = nodeStatusFailed
 		}
+		u.updateTimestamp()
 		finishSubproc()
+	} else if finalstatus != nodeStatusFailed {
+		finalstatus = nodeStatusNop
 	}
 }
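
The core of the commit is the new `uptodate` computation in the hunk above: a target counts as current only if its rule is not virtual, the target exists, no prerequisite is newer than it, no prerequisite's recipe just ran (`nodeStatusDone`), and `rebuildall` is not set. Below is a simplified, self-contained restatement of that predicate; the `target` and `prereq` structs and the `upToDate` helper are illustrative stand-ins for mk's `node` type, not its API.

```go
// A simplified restatement of the new up-to-date test, using hypothetical
// target/prereq structs instead of mk's node type.
package main

import (
	"fmt"
	"time"
)

type target struct {
	exists  bool
	modTime time.Time
	virtual bool // rule carries the "virtual" attribute: never up to date
}

type prereq struct {
	modTime time.Time
	rebuilt bool // this prerequisite's recipe just ran (nodeStatusDone)
}

// upToDate mirrors the logic added to mkNode: a non-virtual target that
// exists is up to date only if no prerequisite is newer, none was just
// rebuilt, and the rebuildall flag was not set.
func upToDate(t target, prereqs []prereq, rebuildall bool) bool {
	if t.virtual || !t.exists || rebuildall {
		return false
	}
	for _, p := range prereqs {
		if t.modTime.Before(p.modTime) || p.rebuilt {
			return false
		}
	}
	return true
}

func main() {
	now := time.Now()
	t := target{exists: true, modTime: now}
	prereqs := []prereq{
		{modTime: now.Add(-time.Hour)},                  // older: fine
		{modTime: now.Add(-time.Minute), rebuilt: true}, // just rebuilt: stale
	}
	fmt.Println(upToDate(t, prereqs, false)) // false
}
```

Treating a prerequisite with status `nodeStatusDone` as invalidating presumably covers prerequisites whose rebuild would not be visible through timestamps alone; when the check passes, mkNode now skips the recipe entirely and reports `nodeStatusNop` instead of `nodeStatusDone`.
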