import io
import shlex
import string
import unittest


# The original test data set was from shellwords, by Hartmut Goebel.
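# Each test-data line has the form
#   input|expected-token-1|expected-token-2|...|
# i.e. the string to split followed by the tokens it should produce, all
# separated by "|" (setUp() splits each line on "|" and drops the trailing
# empty field).  A literal \n in the input field stands for a real newline.
#
# data holds the results expected from the non-POSIX compatibility parser,
# which keeps quote and backslash characters in the tokens, e.g.
#   list(shlex.shlex('a "b c"')) == ['a', '"b c"']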

data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|\|x|bar|
\ x bar|\|x|bar|
\ bar|\|bar|
foo \x bar|foo|\|x|bar|
foo \ x bar|foo|\|x|bar|
foo \ bar|foo|\|bar|
foo "bar" bla|foo|"bar"|bla|
"foo" "bar" "bla"|"foo"|"bar"|"bla"|
"foo" bar "bla"|"foo"|bar|"bla"|
"foo" bar bla|"foo"|bar|bla|
foo 'bar' bla|foo|'bar'|bla|
'foo' 'bar' 'bla'|'foo'|'bar'|'bla'|
'foo' bar 'bla'|'foo'|bar|'bla'|
'foo' bar bla|'foo'|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foo"bar"bar"fasel"|baz|
blurb foo'bar'bar'fasel' baz|blurb|foo'bar'bar'fasel'|baz|
""|""|
''|''|
foo "" bar|foo|""|bar|
foo '' bar|foo|''|bar|
foo "" "" "" bar|foo|""|""|""|bar|
foo '' '' '' bar|foo|''|''|''|bar|
\""|\|""|
"\"|"\"|
"foo\ bar"|"foo\ bar"|
"foo\\ bar"|"foo\\ bar"|
"foo\\ bar\"|"foo\\ bar\"|
"foo\\" bar\""|"foo\\"|bar|\|""|
"foo\\ bar\" dfadf"|"foo\\ bar\"|dfadf"|
"foo\\\ bar\" dfadf"|"foo\\\ bar\"|dfadf"|
"foo\\\x bar\" dfadf"|"foo\\\x bar\"|dfadf"|
"foo\x bar\" dfadf"|"foo\x bar\"|dfadf"|
\''|\|''|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
"foo\\\x bar\" df'a\ 'df'|"foo\\\x bar\"|df'a|\|'df'|
\"foo"|\|"foo"|
\"foo"\x|\|"foo"|\|x|
"foo\x"|"foo\x"|
"foo\ "|"foo\ "|
foo\ xx|foo|\|xx|
foo\ x\x|foo|\|x|\|x|
foo\ x\x\""|foo|\|x|\|x|\|""|
"foo\ x\x"|"foo\ x\x"|
"foo\ x\x\\"|"foo\ x\x\\"|
"foo\ x\x\\""foobar"|"foo\ x\x\\"|"foobar"|
"foo\ x\x\\"\''"foobar"|"foo\ x\x\\"|\|''|"foobar"|
"foo\ x\x\\"\'"fo'obar"|"foo\ x\x\\"|\|'"fo'|obar"|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|"foo\ x\x\\"|\|'"fo'|obar"|'don'|\|''|t'|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
foo\ bar|foo|\|bar|
foo#bar\nbaz|foobaz|
:-) ;-)|:|-|)|;|-|)|
áéíóú|á|é|í|ó|ú|
"""
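
# posix_data holds the results expected from shlex.split(), which parses in
# POSIX mode: quotes are removed and backslash escapes are applied, e.g.
#   shlex.split('a "b c"') == ['a', 'b c']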
posix_data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|x|bar|
\ x bar| x|bar|
\ bar| bar|
foo \x bar|foo|x|bar|
foo \ x bar|foo| x|bar|
foo \ bar|foo| bar|
foo "bar" bla|foo|bar|bla|
"foo" "bar" "bla"|foo|bar|bla|
"foo" bar "bla"|foo|bar|bla|
"foo" bar bla|foo|bar|bla|
foo 'bar' bla|foo|bar|bla|
'foo' 'bar' 'bla'|foo|bar|bla|
'foo' bar 'bla'|foo|bar|bla|
'foo' bar bla|foo|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foobarbarfasel|baz|
blurb foo'bar'bar'fasel' baz|blurb|foobarbarfasel|baz|
""||
''||
foo "" bar|foo||bar|
foo '' bar|foo||bar|
foo "" "" "" bar|foo||||bar|
foo '' '' '' bar|foo||||bar|
\"|"|
"\""|"|
"foo\ bar"|foo\ bar|
"foo\\ bar"|foo\ bar|
"foo\\ bar\""|foo\ bar"|
"foo\\" bar\"|foo\|bar"|
"foo\\ bar\" dfadf"|foo\ bar" dfadf|
"foo\\\ bar\" dfadf"|foo\\ bar" dfadf|
"foo\\\x bar\" dfadf"|foo\\x bar" dfadf|
"foo\x bar\" dfadf"|foo\x bar" dfadf|
\'|'|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
"foo\\\x bar\" df'a\ 'df"|foo\\x bar" df'a\ 'df|
\"foo|"foo|
\"foo\x|"foox|
"foo\x"|foo\x|
"foo\ "|foo\ |
foo\ xx|foo xx|
foo\ x\x|foo xx|
foo\ x\x\"|foo xx"|
"foo\ x\x"|foo\ x\x|
"foo\ x\x\\"|foo\ x\x\|
"foo\ x\x\\""foobar"|foo\ x\x\foobar|
"foo\ x\x\\"\'"foobar"|foo\ x\x\'foobar|
"foo\ x\x\\"\'"fo'obar"|foo\ x\x\'fo'obar|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|foo\ x\x\'fo'obar|don't|
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\|foo\ x\x\'fo'obar|don't|\|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
foo\ bar|foo bar|
foo#bar\nbaz|foo|baz|
:-) ;-)|:-)|;-)|
áéíóú|áéíóú|
"""


class ShlexTest(unittest.TestCase):
    def setUp(self):
        self.data = [x.split("|")[:-1]
                     for x in data.splitlines()]
        self.posix_data = [x.split("|")[:-1]
                           for x in posix_data.splitlines()]
        for item in self.data:
            item[0] = item[0].replace(r"\n", "\n")
        for item in self.posix_data:
            item[0] = item[0].replace(r"\n", "\n")

    def splitTest(self, data, comments):
        for i in range(len(data)):
            l = shlex.split(data[i][0], comments=comments)
            self.assertEqual(l, data[i][1:],
                             "%s: %s != %s" %
                             (data[i][0], l, data[i][1:]))

    def oldSplit(self, s):
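        # Collect tokens one at a time through the old get_token() interface;
        # in non-POSIX mode get_token() returns the empty string at end of
        # input, which terminates the loop.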
        ret = []
        lex = shlex.shlex(io.StringIO(s))
        tok = lex.get_token()
        while tok:
            ret.append(tok)
            tok = lex.get_token()
        return ret

    def testSplitPosix(self):
        """Test data splitting with posix parser"""
        self.splitTest(self.posix_data, comments=True)

    def testCompat(self):
        """Test compatibility interface"""
        for i in range(len(self.data)):
            l = self.oldSplit(self.data[i][0])
            self.assertEqual(l, self.data[i][1:],
                             "%s: %s != %s" %
                             (self.data[i][0], l, self.data[i][1:]))

    def testSyntaxSplitAmpersandAndPipe(self):
        """Test handling of syntax splitting of &, |"""
        # Could take these forms: &&, &, |&, ;&, ;;&
        # of course, the same applies to | and ||
        # these should all parse to the same output
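        # (punctuation_chars=True selects the default set "();<>|&", and a
        # run of punctuation characters is returned as a single token, which
        # is why e.g. "&&" and ";;&" stay in one piece)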
        for delimiter in ('&&', '&', '|&', ';&', ';;&',
                          '||', '|', '&|', ';|', ';;|'):
            src = ['echo hi %s echo bye' % delimiter,
                   'echo hi%secho bye' % delimiter]
            ref = ['echo', 'hi', delimiter, 'echo', 'bye']
            for ss in src:
                s = shlex.shlex(ss, punctuation_chars=True)
                result = list(s)
                self.assertEqual(ref, result, "While splitting '%s'" % ss)

    def testSyntaxSplitSemicolon(self):
        """Test handling of syntax splitting of ;"""
        # Could take these forms: ;, ;;, ;&, ;;&
        # these should all parse to the same output
        for delimiter in (';', ';;', ';&', ';;&'):
            src = ['echo hi %s echo bye' % delimiter,
                   'echo hi%s echo bye' % delimiter,
                   'echo hi%secho bye' % delimiter]
            ref = ['echo', 'hi', delimiter, 'echo', 'bye']
            for ss in src:
                s = shlex.shlex(ss, punctuation_chars=True)
                result = list(s)
                self.assertEqual(ref, result, "While splitting '%s'" % ss)

    def testSyntaxSplitRedirect(self):
        """Test handling of syntax splitting of >"""
        # of course, the same applies to <, |
        # these should all parse to the same output
        for delimiter in ('<', '|'):
            src = ['echo hi %s out' % delimiter,
                   'echo hi%s out' % delimiter,
                   'echo hi%sout' % delimiter]
            ref = ['echo', 'hi', delimiter, 'out']
            for ss in src:
                s = shlex.shlex(ss, punctuation_chars=True)
                result = list(s)
                self.assertEqual(ref, result, "While splitting '%s'" % ss)

    def testSyntaxSplitParen(self):
        """Test handling of syntax splitting of ()"""
        # these should all parse to the same output
        src = ['( echo hi )',
               '(echo hi)']
        ref = ['(', 'echo', 'hi', ')']
        for ss in src:
            s = shlex.shlex(ss, punctuation_chars=True)
            result = list(s)
            self.assertEqual(ref, result, "While splitting '%s'" % ss)

    def testSyntaxSplitCustom(self):
        """Test handling of syntax splitting with custom chars"""
        ref = ['~/a', '&', '&', 'b-c', '--color=auto', '||', 'd', '*.py?']
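        # (with punctuation_chars="|", "||" is a punctuation run and stays a
        # single token; "&" is in neither punctuation_chars nor wordchars, so
        # each "&" comes back as a one-character token; and because
        # punctuation_chars is non-empty, characters such as ~-./*?= count as
        # word characters, keeping '~/a', '--color=auto' and '*.py?' intact)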
        ss = "~/a && b-c --color=auto || d *.py?"
        s = shlex.shlex(ss, punctuation_chars="|")
        result = list(s)
        self.assertEqual(ref, result, "While splitting '%s'" % ss)

    def testTokenTypes(self):
        """Test that tokens are split with types as expected."""
        for source, expected in (
            ('a && b || c',
             [('a', 'a'), ('&&', 'c'), ('b', 'a'),
              ('||', 'c'), ('c', 'a')]),
        ):
            s = shlex.shlex(source, punctuation_chars=True)
            observed = []
            while True:
                t = s.get_token()
                if t == s.eof:
                    break
                if t[0] in s.punctuation_chars:
                    tt = 'c'
                else:
                    tt = 'a'
                observed.append((t, tt))
            self.assertEqual(observed, expected)

    def testPunctuationInWordChars(self):
        """Test that any punctuation chars are removed from wordchars"""
        s = shlex.shlex('a_b__c', punctuation_chars='_')
        self.assertNotIn('_', s.wordchars)
        self.assertEqual(list(s), ['a', '_', 'b', '__', 'c'])

    def testPunctuationWithWhitespaceSplit(self):
        """Test that with whitespace_split, behaviour is as expected"""
        s = shlex.shlex('a && b || c', punctuation_chars='&')
        # whitespace_split is False, so splitting will be based on
        # punctuation_chars
        self.assertEqual(list(s), ['a', '&&', 'b', '|', '|', 'c'])
        s = shlex.shlex('a && b || c', punctuation_chars='&')
        s.whitespace_split = True
        # whitespace_split is True, so splitting will be based on
        # white space
        self.assertEqual(list(s), ['a', '&&', 'b', '||', 'c'])

    def testEmptyStringHandling(self):
        """Test that parsing of empty strings is correctly handled."""
        # see Issue #21999
        expected = ['', ')', 'abc']
        for punct in (False, True):
            s = shlex.shlex("'')abc", posix=True, punctuation_chars=punct)
            slist = list(s)
            self.assertEqual(slist, expected)
        expected = ["''", ')', 'abc']
        s = shlex.shlex("'')abc", punctuation_chars=True)
        self.assertEqual(list(s), expected)

    def testQuote(self):
        safeunquoted = string.ascii_letters + string.digits + '@%_-+=:,./'
        unicode_sample = '\xe9\xe0\xdf'  # e + acute accent, a + grave, sharp s
        unsafe = '"`$\\!' + unicode_sample
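
        # quote() wraps anything containing characters outside the safe set
        # (and the empty string) in single quotes; an embedded single quote
        # is closed, escaped and reopened ('"'"'), which is what the expected
        # strings below spell out.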
        self.assertEqual(shlex.quote(''), "''")
        self.assertEqual(shlex.quote(safeunquoted), safeunquoted)
        self.assertEqual(shlex.quote('test file name'), "'test file name'")
        for u in unsafe:
            self.assertEqual(shlex.quote('test%sname' % u),
                             "'test%sname'" % u)
        for u in unsafe:
            self.assertEqual(shlex.quote("test%s'name'" % u),
                             "'test%s'\"'\"'name'\"'\"''" % u)


# Allow this test to be used with old shlex.py
if not getattr(shlex, "split", None):
    for methname in dir(ShlexTest):
        if methname.startswith("test") and methname != "testCompat":
            delattr(ShlexTest, methname)

if __name__ == "__main__":
    unittest.main()