Variable expansion bug.
This commit is contained in:
parent
8cbbe9c53c
commit
587bdd6c63
8 changed files with 99 additions and 79 deletions
|
|
@ -52,6 +52,9 @@ improvements.
|
|||
|
||||
1. A clean, modern implementation in Go, that doesn't depend on the whole Plan
|
||||
9 stack.
|
||||
1. Parallel by default. Modern computers can build more than one C file at a
|
||||
time. Cases that should not be run in parallel are the exception. Use
|
||||
`-p=1` if this is the case.
|
||||
1. Use Go regular expressions, which are perl-like. The original mk used plan9
|
||||
regex, which few people know or care to learn.
|
||||
1. Allow blank lines in recipes. A recipe is any indented block of text, and
|
||||
|
|
@ -60,8 +63,6 @@ improvements.
|
|||
1. Add an 'S' attribute to execute recipes with programs other than sh. This
|
||||
way, you don't have to separate your six line python script into its own
|
||||
file. Just stick it directly in the mkfile.
|
||||
1. Use sh syntax for command insertion (i.e. backticks) rather than rc shell
|
||||
syntax.
|
||||
1. Pretty colors.
|
||||
|
||||
# Current State
|
||||
|
|
|
|||
20
TODO.md
Normal file
20
TODO.md
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
|
||||
# Short-term
|
||||
* Unit tests.
|
||||
* Expanding regex matches in targets.
|
||||
* Dummy rule for multiple explicit targets.
|
||||
* Expand `$newprereq`.
|
||||
* Expand `$alltargets`.
|
||||
* Man page.
|
||||
* Namelist syntax.
|
||||
* Environment variables.
|
||||
|
||||
# Long-term
|
||||
* Nicer syntax for alternative-shell rules.
|
||||
* An attribute to demand n processors for a particular rule. This way
|
||||
resource hog rules can be run on their own without disabling parallel
|
||||
make.
|
||||
* A switch that prints the rules that will be executed and prompts the user
|
||||
to do so. I often find myself doing `mk -n` before `mk` to make sure my
|
||||
rules aren't bogus.
|
||||
|
||||
|
|
@ -13,12 +13,13 @@ func expand(input string, vars map[string][]string, expandBackticks bool) []stri
|
|||
expanded := ""
|
||||
var i, j int
|
||||
for i = 0; i < len(input); {
|
||||
j = i + strings.IndexAny(input[i:], "\"'`$\\")
|
||||
j = strings.IndexAny(input[i:], "\"'`$\\")
|
||||
|
||||
if j < 0 {
|
||||
expanded += input[i:]
|
||||
break
|
||||
}
|
||||
j += i
|
||||
|
||||
expanded += input[i:j]
|
||||
c, w := utf8.DecodeRuneInString(input[j:])
|
||||
|
|
@ -155,8 +156,8 @@ func expandSigil(input string, vars map[string][]string) ([]string, int) {
|
|||
if ok {
|
||||
return varvals, offset
|
||||
} else {
|
||||
return []string{"$" + input[:offset]}, offset
|
||||
}
|
||||
return []string{"$" + input[:offset]}, offset
|
||||
}
|
||||
}
|
||||
|
||||
return []string{"$" + input}, len(input)
|
||||
|
|
|
|||
35
graph.go
35
graph.go
|
|
@ -56,7 +56,6 @@ type node struct {
|
|||
flags nodeFlag // bitwise combination of node flags
|
||||
}
|
||||
|
||||
|
||||
// Update a node's timestamp and 'exists' flag.
|
||||
func (u *node) updateTimestamp() {
|
||||
info, err := os.Stat(u.name)
|
||||
|
|
@ -67,7 +66,7 @@ func (u *node) updateTimestamp() {
|
|||
} else {
|
||||
_, ok := err.(*os.PathError)
|
||||
if ok {
|
||||
u.t = time.Unix(0, 0)
|
||||
u.t = time.Unix(0, 0)
|
||||
u.exists = false
|
||||
} else {
|
||||
mkError(err.Error())
|
||||
|
|
@ -78,7 +77,7 @@ func (u *node) updateTimestamp() {
|
|||
// Create a new node
|
||||
func (g *graph) newnode(name string) *node {
|
||||
u := &node{name: name}
|
||||
u.updateTimestamp()
|
||||
u.updateTimestamp()
|
||||
g.nodes[name] = u
|
||||
return u
|
||||
}
|
||||
|
|
@ -110,10 +109,10 @@ func buildgraph(rs *ruleSet, target string) *graph {
|
|||
// keep track of how many times each rule is visited, to avoid cycles.
|
||||
rulecnt := make([]int, len(rs.rules))
|
||||
g.root = applyrules(rs, g, target, rulecnt)
|
||||
g.cyclecheck(g.root)
|
||||
g.cyclecheck(g.root)
|
||||
g.root.flags |= nodeFlagProbable
|
||||
g.vacuous(g.root)
|
||||
g.ambiguous(g.root)
|
||||
g.vacuous(g.root)
|
||||
g.ambiguous(g.root)
|
||||
|
||||
return g
|
||||
}
|
||||
|
|
@ -283,16 +282,16 @@ func (g *graph) vacuous(u *node) bool {
|
|||
|
||||
// Check for cycles
|
||||
func (g *graph) cyclecheck(u *node) {
|
||||
if u.flags & nodeFlagCycle != 0 && len(u.prereqs) > 0 {
|
||||
mkError(fmt.Sprintf("cycle in the graph detected at target %s", u.name))
|
||||
}
|
||||
u.flags |= nodeFlagCycle
|
||||
for i := range u.prereqs {
|
||||
if u.prereqs[i].v != nil {
|
||||
g.cyclecheck(u.prereqs[i].v)
|
||||
}
|
||||
}
|
||||
u.flags &= ^nodeFlagCycle
|
||||
if u.flags&nodeFlagCycle != 0 && len(u.prereqs) > 0 {
|
||||
mkError(fmt.Sprintf("cycle in the graph detected at target %s", u.name))
|
||||
}
|
||||
u.flags |= nodeFlagCycle
|
||||
for i := range u.prereqs {
|
||||
if u.prereqs[i].v != nil {
|
||||
g.cyclecheck(u.prereqs[i].v)
|
||||
}
|
||||
}
|
||||
u.flags &= ^nodeFlagCycle
|
||||
|
||||
}
|
||||
|
||||
|
|
@ -314,11 +313,11 @@ func (g *graph) ambiguous(u *node) {
|
|||
} else {
|
||||
if le.r.equivRecipe(e.r) {
|
||||
if le.r.ismeta && !e.r.ismeta {
|
||||
mkPrintRecipe(u.name, le.r.recipe)
|
||||
mkPrintRecipe(u.name, le.r.recipe)
|
||||
le.togo = true
|
||||
le = e
|
||||
} else if !le.r.ismeta && e.r.ismeta {
|
||||
mkPrintRecipe(u.name, e.r.recipe)
|
||||
mkPrintRecipe(u.name, e.r.recipe)
|
||||
e.togo = true
|
||||
continue
|
||||
}
|
||||
|
|
|
|||
41
lex.go
41
lex.go
|
|
@ -1,10 +1,9 @@
|
|||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type tokenType int
|
||||
|
|
@ -16,7 +15,7 @@ const nonBareRunes = " \t\n\r\\=:#'\""
|
|||
|
||||
// Return true if the string contains whitespace only.
|
||||
func onlyWhitespace(s string) bool {
|
||||
return strings.IndexAny(s, " \t\r\n") < 0
|
||||
return strings.IndexAny(s, " \t\r\n") < 0
|
||||
}
|
||||
|
||||
const (
|
||||
|
|
@ -86,9 +85,9 @@ type lexer struct {
|
|||
type lexerStateFun func(*lexer) lexerStateFun
|
||||
|
||||
func (l *lexer) lexerror(what string) {
|
||||
if l.errmsg == "" {
|
||||
l.errmsg = what
|
||||
}
|
||||
if l.errmsg == "" {
|
||||
l.errmsg = what
|
||||
}
|
||||
l.emit(tokenError)
|
||||
}
|
||||
|
||||
|
|
@ -183,9 +182,9 @@ func (l *lexer) acceptUntil(invalid string) {
|
|||
l.next()
|
||||
}
|
||||
|
||||
if l.peek() == eof {
|
||||
l.lexerror(fmt.Sprintf("end of file encountered while looking for one of: %s", invalid))
|
||||
}
|
||||
if l.peek() == eof {
|
||||
l.lexerror(fmt.Sprintf("end of file encountered while looking for one of: %s", invalid))
|
||||
}
|
||||
}
|
||||
|
||||
// Skip characters from the valid string until the next is not.
|
||||
|
|
@ -203,9 +202,9 @@ func (l *lexer) skipUntil(invalid string) {
|
|||
l.skip()
|
||||
}
|
||||
|
||||
if l.peek() == eof {
|
||||
l.lexerror(fmt.Sprintf("end of file encountered while looking for one of: %s", invalid))
|
||||
}
|
||||
if l.peek() == eof {
|
||||
l.lexerror(fmt.Sprintf("end of file encountered while looking for one of: %s", invalid))
|
||||
}
|
||||
}
|
||||
|
||||
// Start a new lexer to lex the given input.
|
||||
|
|
@ -269,9 +268,9 @@ func lexTopLevel(l *lexer) lexerStateFun {
|
|||
return lexBackQuotedWord
|
||||
}
|
||||
|
||||
if strings.IndexRune(nonBareRunes, c) >= 0 {
|
||||
l.lexerror(fmt.Sprintf("expected a unquoted string, but found '%c'", c))
|
||||
}
|
||||
if strings.IndexRune(nonBareRunes, c) >= 0 {
|
||||
l.lexerror(fmt.Sprintf("expected a unquoted string, but found '%c'", c))
|
||||
}
|
||||
|
||||
return lexBareWord
|
||||
}
|
||||
|
|
@ -313,9 +312,9 @@ func lexDoubleQuotedWord(l *lexer) lexerStateFun {
|
|||
}
|
||||
}
|
||||
|
||||
if l.peek() == eof {
|
||||
l.lexerror("end of file encountered while parsing a quoted string.")
|
||||
}
|
||||
if l.peek() == eof {
|
||||
l.lexerror("end of file encountered while parsing a quoted string.")
|
||||
}
|
||||
|
||||
l.next() // '"'
|
||||
return lexBareWord
|
||||
|
|
@ -344,9 +343,9 @@ func lexRecipe(l *lexer) lexerStateFun {
|
|||
}
|
||||
}
|
||||
|
||||
if !onlyWhitespace(l.input[l.start:l.pos]) {
|
||||
l.emit(tokenRecipe)
|
||||
}
|
||||
if !onlyWhitespace(l.input[l.start:l.pos]) {
|
||||
l.emit(tokenRecipe)
|
||||
}
|
||||
return lexTopLevel
|
||||
}
|
||||
|
||||
|
|
|
|||
30
mk.go
30
mk.go
|
|
@ -63,7 +63,7 @@ func mk(rs *ruleSet, target string, dryrun bool) {
|
|||
if g.root.exists && !rebuildall {
|
||||
return
|
||||
}
|
||||
mkNode(g, g.root)
|
||||
mkNode(g, g.root)
|
||||
}
|
||||
|
||||
// Build a target in the graph.
|
||||
|
|
@ -128,7 +128,7 @@ func mkNode(g *graph, u *node) {
|
|||
for i := range prereqs {
|
||||
prereqs[i].mutex.Lock()
|
||||
// needs to be built?
|
||||
u.updateTimestamp()
|
||||
u.updateTimestamp()
|
||||
if !prereqs[i].exists || e.r.attributes.virtual || rebuildall || (u.exists && u.t.Before(prereqs[i].t)) {
|
||||
switch prereqs[i].status {
|
||||
case nodeStatusReady:
|
||||
|
|
@ -233,16 +233,16 @@ func main() {
|
|||
targets := flag.Args()
|
||||
|
||||
// build the first non-meta rule in the makefile, if none are given explicitly
|
||||
if len(targets) == 0 {
|
||||
for i := range rs.rules {
|
||||
if !rs.rules[i].ismeta {
|
||||
for j := range rs.rules[i].targets {
|
||||
targets = append(targets, rs.rules[i].targets[j].spat)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(targets) == 0 {
|
||||
for i := range rs.rules {
|
||||
if !rs.rules[i].ismeta {
|
||||
for j := range rs.rules[i].targets {
|
||||
targets = append(targets, rs.rules[i].targets[j].spat)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(targets) == 0 {
|
||||
fmt.Println("mk: nothing to mk")
|
||||
|
|
@ -251,7 +251,7 @@ func main() {
|
|||
|
||||
// TODO: For multiple targets, we should add a dummy rule that depends on
|
||||
// all let mk handle executing each.
|
||||
for _, target := range targets {
|
||||
mk(rs, target, dryrun)
|
||||
}
|
||||
for _, target := range targets {
|
||||
mk(rs, target, dryrun)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
38
parse.go
38
parse.go
|
|
@ -5,10 +5,10 @@ package main
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
type parser struct {
|
||||
|
|
@ -20,10 +20,10 @@ type parser struct {
|
|||
|
||||
// Pretty errors.
|
||||
func (p *parser) parseError(context string, expected string, found token) {
|
||||
mkPrintError(fmt.Sprintf("%s:%d: syntax error: ", p.name, found.line))
|
||||
mkPrintError(fmt.Sprintf("%s:%d: syntax error: ", p.name, found.line))
|
||||
mkPrintError(fmt.Sprintf("while %s, expected %s but found '%s'.\n",
|
||||
context, expected, found.String()))
|
||||
mkError("")
|
||||
mkError("")
|
||||
}
|
||||
|
||||
// More basic errors.
|
||||
|
|
@ -65,7 +65,7 @@ func parseInto(input string, name string, rules *ruleSet) {
|
|||
state := parseTopLevel
|
||||
for t := range tokens {
|
||||
if t.typ == tokenError {
|
||||
p.basicErrorAtLine(l.errmsg, t.line)
|
||||
p.basicErrorAtLine(l.errmsg, t.line)
|
||||
break
|
||||
}
|
||||
|
||||
|
|
@ -135,7 +135,7 @@ func parsePipeInclude(p *parser, t token) parserStateFun {
|
|||
p.tokenbuf = append(p.tokenbuf, t)
|
||||
|
||||
default:
|
||||
p.parseError("parsing piped include", "a shell command", t)
|
||||
p.parseError("parsing piped include", "a shell command", t)
|
||||
}
|
||||
|
||||
return parsePipeInclude
|
||||
|
|
@ -145,25 +145,25 @@ func parsePipeInclude(p *parser, t token) parserStateFun {
|
|||
func parseRedirInclude(p *parser, t token) parserStateFun {
|
||||
switch t.typ {
|
||||
case tokenNewline:
|
||||
filename := ""
|
||||
for i := range p.tokenbuf {
|
||||
filename += p.tokenbuf[i].val
|
||||
}
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
p.basicErrorAtToken(fmt.Sprintf("cannot open %s", filename), p.tokenbuf[0])
|
||||
}
|
||||
input, _ := ioutil.ReadAll(file)
|
||||
parseInto(string(input), filename, p.rules)
|
||||
filename := ""
|
||||
for i := range p.tokenbuf {
|
||||
filename += p.tokenbuf[i].val
|
||||
}
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
p.basicErrorAtToken(fmt.Sprintf("cannot open %s", filename), p.tokenbuf[0])
|
||||
}
|
||||
input, _ := ioutil.ReadAll(file)
|
||||
parseInto(string(input), filename, p.rules)
|
||||
|
||||
p.clear()
|
||||
return parseTopLevel
|
||||
p.clear()
|
||||
return parseTopLevel
|
||||
|
||||
case tokenWord:
|
||||
p.tokenbuf = append(p.tokenbuf, t)
|
||||
|
||||
default:
|
||||
p.parseError("parsing include", "a file name", t)
|
||||
p.parseError("parsing include", "a file name", t)
|
||||
}
|
||||
|
||||
return parseRedirInclude
|
||||
|
|
|
|||
|
|
@ -152,7 +152,7 @@ func subprocess(program string,
|
|||
var outbytes []byte
|
||||
outbytes, err = cmd.Output()
|
||||
output = string(outbytes)
|
||||
if output[len(output)-1] == '\n' {
|
||||
if len(output) > 0 && output[len(output)-1] == '\n' {
|
||||
output = output[:len(output)-1]
|
||||
}
|
||||
} else {
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue