Tokenize output from shell expansions. (Fixes #1)
parent a005edc4a7
commit 6ef6f52936

2 changed files with 39 additions and 51 deletions
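For context on the fix: previously the entire stdout of a backtick expansion was spliced into the surrounding word as one string; with this commit it is lexed into separate words. A minimal self-contained sketch of the new behavior, using os/exec and strings.Fields as stand-ins for the project's subprocess helper and lexWords tokenizer:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// tokenizeBackQuoted approximates the new expandBackQuoted behavior:
// run the quoted command and split its output into words, rather than
// returning the raw output as a single string.
func tokenizeBackQuoted(cmd string) []string {
	out, _ := exec.Command("sh", "-c", cmd).Output() // errors ignored, as in the TODO in the diff
	return strings.Fields(string(out))               // stand-in for lexWords
}

func main() {
	// Old behavior would yield the single string "a.c b.c\n";
	// new behavior yields two separate words.
	fmt.Println(tokenizeBackQuoted("echo a.c b.c")) // [a.c b.c]
}
```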
expand.go (53 changes)
@@ -42,12 +42,18 @@ func expand(input string, vars map[string][]string, expandBackticks bool) []string {
 
 		case '`':
 			if expandBackticks {
-				out, off = expandBackQuoted(input[i:], vars)
+				var outparts []string
+				outparts, off = expandBackQuoted(input[i:], vars)
+				if len(outparts) > 0 {
+					outparts[0] = expanded + outparts[0]
+					expanded = outparts[len(outparts)-1]
+					parts = append(parts, outparts[:len(outparts)-1]...)
+				}
 			} else {
 				out = input
 				off = len(input)
-			}
-			expanded += out
+				expanded += out
+			}
 
 		case '$':
 			var outparts []string
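The splice logic above is the subtle part: a backtick can occur mid-word, so the first token continues the partial word accumulated in `expanded`, the last token becomes the new partial word (text after the closing backtick will attach to it), and only the tokens in between are emitted as complete words. A standalone sketch of that merge, with illustrative names:

```go
// spliceTokens folds tokenized backtick output into the word being
// built: the first token extends the current partial word, middle
// tokens are emitted as complete words, and the last token starts the
// next partial word. With a single token, nothing is emitted and the
// partial word simply grows.
func spliceTokens(expanded string, outparts, parts []string) (string, []string) {
	if len(outparts) > 0 {
		outparts[0] = expanded + outparts[0]
		expanded = outparts[len(outparts)-1]
		parts = append(parts, outparts[:len(outparts)-1]...)
	}
	return expanded, parts
}
```

For example, expanding ``pre`echo a b c`post`` with `expanded == "pre"` emits the complete words "prea" and "b", while "c" becomes the new partial word, so "post" later attaches to it as "cpost".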
@@ -251,46 +257,21 @@ func expandSuffixes(input string, stem string) string {
 // TODO: expand RegexpRefs
 
 // Expand a backtick quoted string, by executing the contents.
-func expandBackQuoted(input string, vars map[string][]string) (string, int) {
+func expandBackQuoted(input string, vars map[string][]string) ([]string, int) {
 	// TODO: expand sigils?
 	j := strings.Index(input, "`")
 	if j < 0 {
-		return input, len(input)
+		return []string{input}, len(input)
 	}
 
 	// TODO: handle errors
 	output, _ := subprocess("sh", nil, input[:j], true)
-	return output, (j + 1)
-}
-
-// Split a string on whitespace taking into account escaping and quoting.
-//func splitQuoted(input string) []string {
-//parts := make([]string, 0)
-//var i, j int
-//i = 0
-//for {
-//// skip all unescaped whitespace
-//for i < len(input) {
-//c, w := utf8.DecodeRuneInString(input[i:])
-//if strings.IndexRune(" \t", c) < 0 {
-//break
-//}
-//i += w
-//}
-
-//if i >= len(input) {
-//break
-//}
-
-//// Ugh. Will this take into account quoting in variables?
-
-//switch c {
-//case '"':
-//case '\'':
-//default:
-
-//}
-//}
-
-//return parts
-//}
+
+	parts := make([]string, 0)
+	_, tokens := lexWords(output)
+	for t := range tokens {
+		parts = append(parts, t.val)
+	}
+
+	return parts, (j + 1)
+}
lex.go (15 changes)
@@ -78,6 +78,7 @@ type lexer struct {
 	col       int    // column within input
 	errmsg    string // set to an appropriate error message when necessary
 	indented  bool   // true if the only whitespace so far on this line
+	barewords bool   // lex only a sequence of words
 }
 
 // A lexerStateFun is simultaneously the the state of the lexer and the next
@@ -214,6 +215,12 @@ func lex(input string) (*lexer, chan token) {
 	return l, l.output
 }
 
+func lexWords(input string) (*lexer, chan token) {
+	l := &lexer{input: input, output: make(chan token), line: 1, col: 0, indented: true, barewords: true}
+	go l.run()
+	return l, l.output
+}
+
 func (l *lexer) run() {
 	for state := lexTopLevel; state != nil; {
 		state = state(l)
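lexWords reuses the existing state-machine lexer with barewords set, so input is tokenized purely as a sequence of words and lexing stops at the first newline. Within this package, the intended use looks roughly like the following (a hypothetical helper, not part of the commit):

```go
// wordsOf collects the word tokens of one line of command output. The
// output channel is closed when the lexer's state machine returns nil,
// so the range loop terminates on its own.
func wordsOf(output string) []string {
	words := make([]string, 0)
	_, tokens := lexWords(output)
	for t := range tokens {
		words = append(words, t.val)
	}
	return words
}

// wordsOf("a.c  b.c\tc.c\n") should yield []string{"a.c", "b.c", "c.c"},
// assuming the lexer emits one token per whitespace-separated word.
```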
@@ -221,18 +228,18 @@ func (l *lexer) run() {
 	close(l.output)
 }
 
-// What do we need?
-// A function that consumes non-newline whitespace.
-// A way of determining if the current line might be a recipe.
-
 func lexTopLevel(l *lexer) lexerStateFun {
 	for {
 		l.skipRun(" \t\r")
 		// emit a newline token if we are ending a non-empty line.
 		if l.peek() == '\n' && !l.indented {
 			l.next()
-			l.emit(tokenNewline)
+			if l.barewords {
+				return nil
+			} else {
+				l.emit(tokenNewline)
+			}
 		}
 		l.skipRun(" \t\r\n")
 
 		if l.peek() == '\\' && l.peekN(1) == '\n' {
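The `return nil` in the barewords branch is what lets the `for t := range tokens` loop in expandBackQuoted terminate: a nil state function ends run, run closes the output channel, and a closed channel ends any range loop over it. The shutdown pattern, reduced to a sketch with hypothetical names:

```go
// stateFn mirrors lexerStateFun: each state returns the next state,
// and nil means the lexer is done.
type stateFn func(out chan<- string) stateFn

// run drives the state machine; closing the channel at the end is what
// terminates any consumer ranging over it.
func run(start stateFn, out chan<- string) {
	for state := start; state != nil; {
		state = state(out)
	}
	close(out)
}
```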