Merged revisions 78608 via svnmerge from

svn+ssh://pythondev@svn.python.org/python/branches/py3k

................
  r78608 | victor.stinner | 2010-03-03 01:18:49 +0100 (Wed, 03 Mar 2010) | 12 lines

  Merged revisions 78603 via svnmerge from
  svn+ssh://pythondev@svn.python.org/python/trunk

  ........
    r78603 | victor.stinner | 2010-03-03 00:20:02 +0100 (Wed, 03 Mar 2010) | 5 lines

    Issue #7820: The parser tokenizer restores all bytes in the right order
    if the BOM check fails.

    Fix an assertion in pydebug mode.
  ........
................
Victor Stinner 2010-03-03 00:22:21 +00:00
parent 117ff17da3
commit 151205f24f
3 changed files with 42 additions and 23 deletions


@@ -44,6 +44,17 @@ class PEP263Test(unittest.TestCase):
         self.assertEqual(len(d['a']), len(d['b']))
         self.assertEqual(ascii(d['a']), ascii(d['b']))
 
+    def test_issue7820(self):
+        # Ensure that check_bom() restores all bytes in the right order if
+        # check_bom() fails in pydebug mode: a buffer starts with the first
+        # byte of a valid BOM, but next bytes are different
+
+        # one byte in common with the UTF-16-LE BOM
+        self.assertRaises(SyntaxError, eval, b'\xff\x20')
+
+        # two bytes in common with the UTF-8 BOM
+        self.assertRaises(SyntaxError, eval, b'\xef\xbb\x20')
+
 
 def test_main():
     support.run_unittest(PEP263Test)
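What the new test pins down is easiest to see in isolation: if the BOM check reads ahead and then gives up, the input must afterwards look as if it had never been touched. Below is a minimal stand-alone sketch of that contract, using hypothetical get_byte()/unget_byte() helpers rather than the CPython tokenizer itself, walking the same two inputs the test feeds to eval():

/* Stand-alone sketch, not the CPython code: a failed BOM probe must push
   every byte it read back onto the stream, most recently read first. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct stream {
    const unsigned char *buf;
    size_t len;
    size_t pos;
};

static int get_byte(struct stream *s)
{
    return s->pos < s->len ? (int)s->buf[s->pos++] : EOF;
}

/* Debug-style unget: rewinds one position and insists the pushed-back
   byte is exactly what sits in the buffer there. */
static void unget_byte(int c, struct stream *s)
{
    assert(s->pos > 0);
    s->pos--;
    assert((int)s->buf[s->pos] == c);
}

/* Simplified UTF-8 BOM probe: returns 1 if a full EF BB BF BOM was
   consumed, 0 otherwise.  On failure, restore the stream completely. */
static int check_utf8_bom(struct stream *s)
{
    int ch1 = get_byte(s);
    if (ch1 == EOF)
        return 0;
    if (ch1 != 0xEF) {
        unget_byte(ch1, s);
        return 0;
    }
    int ch2 = get_byte(s);
    if (ch2 != 0xBB) {
        if (ch2 != EOF)
            unget_byte(ch2, s);
        unget_byte(ch1, s);
        return 0;
    }
    int ch3 = get_byte(s);
    if (ch3 != 0xBF) {
        if (ch3 != EOF)
            unget_byte(ch3, s);
        unget_byte(ch2, s);
        unget_byte(ch1, s);
        return 0;
    }
    return 1;
}

int main(void)
{
    /* The two inputs from test_issue7820. */
    struct stream a = { (const unsigned char *)"\xff\x20", 2, 0 };
    struct stream b = { (const unsigned char *)"\xef\xbb\x20", 3, 0 };

    assert(check_utf8_bom(&a) == 0 && a.pos == 0);  /* 1 byte read, 1 restored */
    assert(check_utf8_bom(&b) == 0 && b.pos == 0);  /* 3 bytes read, 3 restored */
    puts("stream restored byte-for-byte in both cases");
    return 0;
}

Both probes fail, and both streams end with their read position back at zero, which is why the offending bytes can still be reported as an ordinary SyntaxError.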


@@ -12,6 +12,9 @@ What's New in Python 3.1.2?
 Core and Builtins
 -----------------
 
+- Issue #7820: The parser tokenizer restores all bytes in the right order if
+  the BOM check fails.
+
 - Handle errors from looking up __prepare__ correctly.
 
 - Issue #5939: Add additional runtime checking to ensure a valid capsule


@@ -316,46 +316,51 @@ check_bom(int get_char(struct tok_state *),
           int set_readline(struct tok_state *, const char *),
           struct tok_state *tok)
 {
-    int ch = get_char(tok);
+    int ch1, ch2, ch3;
+    ch1 = get_char(tok);
     tok->decoding_state = STATE_RAW;
-    if (ch == EOF) {
+    if (ch1 == EOF) {
         return 1;
-    } else if (ch == 0xEF) {
-        ch = get_char(tok);
-        if (ch != 0xBB) {
-            unget_char(ch, tok);
-            unget_char(0xEF, tok);
-            /* any token beginning with '\xEF' is a bad token */
+    } else if (ch1 == 0xEF) {
+        ch2 = get_char(tok);
+        if (ch2 != 0xBB) {
+            unget_char(ch2, tok);
+            unget_char(ch1, tok);
             return 1;
         }
-        ch = get_char(tok);
-        if (ch != 0xBF) {
-            unget_char(ch, tok);
-            unget_char(0xBB, tok);
-            unget_char(0xEF, tok);
-            /* any token beginning with '\xEF' is a bad token */
+        ch3 = get_char(tok);
+        if (ch3 != 0xBF) {
+            unget_char(ch3, tok);
+            unget_char(ch2, tok);
+            unget_char(ch1, tok);
             return 1;
         }
 #if 0
     /* Disable support for UTF-16 BOMs until a decision
        is made whether this needs to be supported.  */
-    } else if (ch == 0xFE) {
-        ch = get_char(tok);
-        if (ch != 0xFF)
-            goto NON_BOM;
+    } else if (ch1 == 0xFE) {
+        ch2 = get_char(tok);
+        if (ch2 != 0xFF) {
+            unget_char(ch2, tok);
+            unget_char(ch1, tok);
+            return 1;
+        }
         if (!set_readline(tok, "utf-16-be"))
             return 0;
         tok->decoding_state = STATE_NORMAL;
-    } else if (ch == 0xFF) {
-        ch = get_char(tok);
-        if (ch != 0xFE)
-            goto NON_BOM;
+    } else if (ch1 == 0xFF) {
+        ch2 = get_char(tok);
+        if (ch2 != 0xFE) {
+            unget_char(ch2, tok);
+            unget_char(ch1, tok);
+            return 1;
+        }
         if (!set_readline(tok, "utf-16-le"))
             return 0;
         tok->decoding_state = STATE_NORMAL;
 #endif
     } else {
-        unget_char(ch, tok);
+        unget_char(ch1, tok);
         return 1;
     }
     if (tok->encoding != NULL)
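The heart of the fix is the unget discipline: the rewritten check_bom() pushes back the bytes it actually read (ch2 then ch1, or ch3, ch2, ch1), most recently read first, instead of hard-coded BOM constants, and the disabled UTF-16 branches now restore the stream the same way rather than jumping to the old NON_BOM label. Below is a rough model of why that ordering matters for a pointer-rewinding unget, with hypothetical bget()/bunget() names and assuming, per the commit message, that the pydebug assertion sits in the buffer-based unget path:

#include <assert.h>
#include <stddef.h>

/* Hypothetical buffer-backed reader, loosely modeled on the tokenizer's
   string source: unget only rewinds a position and, in a debug build,
   insists the caller pushes back exactly the byte that lives there. */
struct buf { const unsigned char *p; size_t pos; };

static int bget(struct buf *b)
{
    return b->p[b->pos++];
}

static void bunget(int c, struct buf *b)
{
    b->pos--;
    assert((int)b->p[b->pos] == c);
}

int main(void)
{
    struct buf b = { (const unsigned char *)"\xef\xbb\x20", 0 };
    int ch1 = bget(&b);   /* 0xEF: first byte of a UTF-8 BOM */
    int ch2 = bget(&b);   /* 0xBB: second byte of a UTF-8 BOM */
    int ch3 = bget(&b);   /* 0x20: not 0xBF, so this is not a BOM */

    /* Restore newest first, as the patched check_bom() does: every unget
       rewinds onto the byte it claims to push back, and the reader ends
       up at byte 0 again. */
    bunget(ch3, &b);
    bunget(ch2, &b);
    bunget(ch1, &b);
    assert(b.pos == 0);

    /* Restoring oldest first would rewind onto 0x20 while claiming to
       push back 0xEF, so the debug assertion would fire:
           bunget(ch1, &b);    // p[2] is 0x20, not 0xEF            */
    return 0;
}

With an ungetc()-style pushback the same newest-first discipline is also what makes the bytes come back out in their original order on the next read.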