From f066c1ba3755e844be42ce9bc2344d34a14c5701 Mon Sep 17 00:00:00 2001
From: Guido van Rossum
Date: Sat, 4 Aug 2007 17:55:43 +0000
Subject: [PATCH] Make test_tokenize really pass -- don't add extra output.

---
 Lib/test/test_tokenize.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
index 788a04b989f..9ef65639114 100644
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -98,7 +98,7 @@ _PRINT_WORKING_MSG_INTERVAL = 5 * 60
 # and tokenized again from the latter.  The test fails if the second
 # tokenization doesn't match the first.
 def test_roundtrip(f):
-    ## print 'Testing:', f
+    ## print('Testing:', f)
     # Get the encoding first
     fobj = open(f, encoding="latin-1")
     first2lines = fobj.readline() + fobj.readline()
@@ -106,7 +106,7 @@ def test_roundtrip(f):
     m = re.search(r"coding:\s*(\S+)", first2lines)
     if m:
         encoding = m.group(1)
-        print(" coding:", encoding)
+        ## print(" coding:", encoding)
     else:
         encoding = "utf-8"
     fobj = open(f, encoding=encoding)
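
Note: the roundtrip property that the patched comments describe (source is
tokenized, reconstructed from the tokens, and tokenized again; the test fails
if the second tokenization differs from the first) can be sketched as below.
This is a minimal illustration, not the test module's actual code; the
`roundtrip` helper name and the comparison of (type, string) pairs are
assumptions for this sketch.

    import io
    import tokenize

    def roundtrip(source):
        # Tokenize the source, rebuild it with untokenize(), then
        # tokenize the rebuilt text and compare (type, string) pairs.
        tokens1 = list(tokenize.generate_tokens(io.StringIO(source).readline))
        rebuilt = tokenize.untokenize(tokens1)
        tokens2 = list(tokenize.generate_tokens(io.StringIO(rebuilt).readline))
        return [t[:2] for t in tokens1] == [t[:2] for t in tokens2]

    # Example: a line with a comment should survive the roundtrip.
    assert roundtrip("x = 1  # comment\n")

The change itself is purely about output hygiene: the two uncommented
print() calls wrote progress text to stdout during the run, which made the
test's captured output differ from the expected output, so they are
commented out (and the stale Python 2 print statement in the first comment
is updated to call syntax).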