mirror of
https://github.com/python/cpython.git
synced 2024-12-01 05:45:40 +08:00
f3d7dbe72e
svn+ssh://pythondev@svn.python.org/python/branches/py3k ................ r75231 | benjamin.peterson | 2009-10-04 09:49:41 -0500 (Sun, 04 Oct 2009) | 166 lines Merged revisions 74779-74786,74793,74795,74811,74860-74861,74863,74876,74886,74896,74901,74903,74908,74912,74930,74933,74943,74946,74952-74955,75015,75019,75032,75068,75076,75095,75098,75102,75129,75139,75230 via svnmerge from svn+ssh://pythondev@svn.python.org/python/trunk ........ r74779 | michael.foord | 2009-09-13 11:13:36 -0500 (Sun, 13 Sep 2009) | 1 line Change to tutorial wording for reading text / binary files on Windows. Issue #6301. ........ r74780 | michael.foord | 2009-09-13 11:40:02 -0500 (Sun, 13 Sep 2009) | 1 line Objects that compare equal automatically pass or fail assertAlmostEqual and assertNotAlmostEqual tests on unittest.TestCase. Issue 6567. ........ r74781 | michael.foord | 2009-09-13 11:46:19 -0500 (Sun, 13 Sep 2009) | 1 line Note that sys._getframe is not guaranteed to exist in all implementations of Python, and a corresponding note in inspect.currentframe. Issue 6712. ........ r74782 | michael.foord | 2009-09-13 12:07:46 -0500 (Sun, 13 Sep 2009) | 1 line Tutorial tweaks. Issue 6849. ........ r74783 | michael.foord | 2009-09-13 12:28:35 -0500 (Sun, 13 Sep 2009) | 1 line unittest.TestLoader.loadTestsFromName honors the loader suiteClass attribute. Issue 6866. ........ r74784 | georg.brandl | 2009-09-13 13:15:07 -0500 (Sun, 13 Sep 2009) | 1 line Typo fix. ........ r74785 | michael.foord | 2009-09-13 14:07:03 -0500 (Sun, 13 Sep 2009) | 1 line Test discovery in unittest will only attempt to import modules that are importable; i.e. their names are valid Python identifiers. If an import fails during discovery this will be recorded as an error and test discovery will continue. Issue 6568. ........ r74786 | michael.foord | 2009-09-13 14:08:18 -0500 (Sun, 13 Sep 2009) | 1 line Remove an extraneous space in unittest documentation. ........ 
r74793 | georg.brandl | 2009-09-14 09:50:47 -0500 (Mon, 14 Sep 2009) | 1 line #6908: fix association of hashlib hash attributes. ........ r74795 | benjamin.peterson | 2009-09-14 22:36:26 -0500 (Mon, 14 Sep 2009) | 1 line Py_SetPythonHome uses static storage #6913 ........ r74811 | georg.brandl | 2009-09-15 15:26:59 -0500 (Tue, 15 Sep 2009) | 1 line Add Armin Ronacher. ........ r74860 | benjamin.peterson | 2009-09-16 21:46:54 -0500 (Wed, 16 Sep 2009) | 1 line kill bare except ........ r74861 | benjamin.peterson | 2009-09-16 22:18:28 -0500 (Wed, 16 Sep 2009) | 1 line pep 8 defaults ........ r74863 | benjamin.peterson | 2009-09-16 22:27:33 -0500 (Wed, 16 Sep 2009) | 1 line rationalize a bit ........ r74876 | georg.brandl | 2009-09-17 11:15:53 -0500 (Thu, 17 Sep 2009) | 1 line #6932: remove paragraph that advises relying on __del__ being called. ........ r74886 | benjamin.peterson | 2009-09-17 16:33:46 -0500 (Thu, 17 Sep 2009) | 1 line use macros ........ r74896 | georg.brandl | 2009-09-18 02:22:41 -0500 (Fri, 18 Sep 2009) | 1 line #6936: for interactive use, quit() is just fine. ........ r74901 | georg.brandl | 2009-09-18 04:14:52 -0500 (Fri, 18 Sep 2009) | 1 line #6905: use better exception messages in inspect when the argument is of the wrong type. ........ r74903 | georg.brandl | 2009-09-18 04:18:27 -0500 (Fri, 18 Sep 2009) | 1 line #6938: "ident" is always a string, so use a format code which works. ........ r74908 | georg.brandl | 2009-09-18 08:57:11 -0500 (Fri, 18 Sep 2009) | 1 line Use str.format() to fix beginner's mistake with %-style string formatting. ........ r74912 | georg.brandl | 2009-09-18 11:19:56 -0500 (Fri, 18 Sep 2009) | 1 line Optimize optimization and fix method name in docstring. ........ r74930 | georg.brandl | 2009-09-18 16:21:41 -0500 (Fri, 18 Sep 2009) | 1 line #6925: rewrite docs for locals() and vars() a bit. ........ 
r74933 | georg.brandl | 2009-09-18 16:35:59 -0500 (Fri, 18 Sep 2009) | 1 line #6930: clarify description about byteorder handling in UTF decoder routines. ........ r74943 | georg.brandl | 2009-09-19 02:35:07 -0500 (Sat, 19 Sep 2009) | 1 line #6944: the argument to PyArg_ParseTuple should be a tuple, otherwise a SystemError is set. Also clean up another usage of PyArg_ParseTuple. ........ r74946 | georg.brandl | 2009-09-19 03:43:16 -0500 (Sat, 19 Sep 2009) | 1 line Update bug tracker reference. ........ r74952 | georg.brandl | 2009-09-19 05:42:34 -0500 (Sat, 19 Sep 2009) | 1 line #6946: fix duplicate index entries for datetime classes. ........ r74953 | georg.brandl | 2009-09-19 07:04:16 -0500 (Sat, 19 Sep 2009) | 1 line Fix references to threading.enumerate(). ........ r74954 | georg.brandl | 2009-09-19 08:13:56 -0500 (Sat, 19 Sep 2009) | 1 line Add Doug. ........ r74955 | georg.brandl | 2009-09-19 08:20:49 -0500 (Sat, 19 Sep 2009) | 1 line Add Mark Summerfield. ........ r75015 | georg.brandl | 2009-09-22 05:55:08 -0500 (Tue, 22 Sep 2009) | 1 line Fix encoding name. ........ r75019 | vinay.sajip | 2009-09-22 12:23:41 -0500 (Tue, 22 Sep 2009) | 1 line Fixed a typo, and added sections on optimization and using arbitrary objects as messages. ........ r75032 | benjamin.peterson | 2009-09-22 17:15:28 -0500 (Tue, 22 Sep 2009) | 1 line fix typos/rephrase ........ r75068 | benjamin.peterson | 2009-09-25 21:57:59 -0500 (Fri, 25 Sep 2009) | 1 line comment out ugly xxx ........ r75076 | vinay.sajip | 2009-09-26 09:53:32 -0500 (Sat, 26 Sep 2009) | 1 line Tidied up name of parameter in StreamHandler ........ r75095 | michael.foord | 2009-09-27 14:15:41 -0500 (Sun, 27 Sep 2009) | 1 line Test creation moved from TestProgram.parseArgs to TestProgram.createTests exclusively. Issue 6956. ........ r75098 | michael.foord | 2009-09-27 15:08:23 -0500 (Sun, 27 Sep 2009) | 1 line Documentation improvement for load_tests protocol in unittest. Issue 6515. ........ 
r75102 | skip.montanaro | 2009-09-27 21:12:27 -0500 (Sun, 27 Sep 2009) | 3 lines Patch from Thomas Barr so that csv.Sniffer will set doublequote property. Closes issue 6606. ........ r75129 | vinay.sajip | 2009-09-29 02:08:54 -0500 (Tue, 29 Sep 2009) | 1 line Issue #7014: logging: Improved IronPython 2.6 compatibility. ........ r75139 | raymond.hettinger | 2009-09-29 13:53:24 -0500 (Tue, 29 Sep 2009) | 3 lines Issue 7008: Better document str.title and show how to work around the apostrophe problem. ........ r75230 | benjamin.peterson | 2009-10-04 08:38:38 -0500 (Sun, 04 Oct 2009) | 1 line test logging ........ ................
435 lines
15 KiB
Python
435 lines
15 KiB
Python
|
|
"""
|
|
csv.py - read/write/investigate CSV files
|
|
"""
|
|
|
|
import re
|
|
from _csv import Error, __version__, writer, reader, register_dialect, \
|
|
unregister_dialect, get_dialect, list_dialects, \
|
|
field_size_limit, \
|
|
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
|
|
__doc__
|
|
from _csv import Dialect as _Dialect
|
|
|
|
from io import StringIO
|
|
|
|
# Names exported by "from csv import *": the constants/functions re-exported
# from the C accelerator module _csv, plus the pure-Python classes below.
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
            "Error", "Dialect", "__doc__", "excel", "excel_tab",
            "field_size_limit", "reader", "writer",
            "register_dialect", "get_dialect", "list_dialects", "Sniffer",
            "unregister_dialect", "__version__", "DictReader", "DictWriter" ]
|
|
|
|
class Dialect:
    """Base class describing a CSV dialect.

    Meant to be subclassed (see csv.excel); instantiating Dialect itself
    leaves ``_valid`` False.  Valid attributes are: delimiter, quotechar,
    escapechar, doublequote, skipinitialspace, lineterminator, quoting.
    """
    _name = ""
    _valid = False
    # Placeholders; concrete subclasses override these.
    delimiter = None
    quotechar = None
    escapechar = None
    doublequote = None
    skipinitialspace = None
    lineterminator = None
    quoting = None

    def __init__(self):
        # Only subclasses count as valid dialect definitions.
        if type(self) is not Dialect:
            self._valid = True
        self._validate()

    def _validate(self):
        """Let the C layer check the attributes; re-raise as csv.Error."""
        try:
            _Dialect(self)
        except TypeError as err:
            # Converted for backward compatibility with py2.3 callers.
            raise Error(str(err))
|
|
|
|
class excel(Dialect):
    """Describe the usual properties of Excel-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True            # embedded quotes are doubled, not escaped
    skipinitialspace = False
    lineterminator = '\r\n'
    quoting = QUOTE_MINIMAL
# Make the dialect available to reader()/writer() under the name "excel".
register_dialect("excel", excel)
|
|
|
|
class excel_tab(excel):
    """Describe the usual properties of Excel-generated TAB-delimited files."""
    delimiter = '\t'
# Make the dialect available to reader()/writer() under the name "excel-tab".
register_dialect("excel-tab", excel_tab)
|
|
|
|
|
|
class DictReader:
    """Iterate over a CSV file, yielding each row as a dict.

    If *fieldnames* is omitted, the first row of the file is consumed and
    used as the keys.  Surplus values in an over-long row are collected in
    a list under *restkey*; fields missing from a short row are filled
    with *restval*.
    """

    def __init__(self, f, fieldnames=None, restkey=None, restval=None,
                 dialect="excel", *args, **kwds):
        self._fieldnames = fieldnames   # keys for the per-row dict
        self.restkey = restkey          # key under which long-row extras go
        self.restval = restval          # filler value for short rows
        self.reader = reader(f, dialect, *args, **kwds)
        self.dialect = dialect
        self.line_num = 0

    def __iter__(self):
        return self

    @property
    def fieldnames(self):
        """The field names, read lazily from the first row if needed."""
        if self._fieldnames is None:
            try:
                self._fieldnames = next(self.reader)
            except StopIteration:
                # Empty input: leave fieldnames as None.
                pass
        self.line_num = self.reader.line_num
        return self._fieldnames

    @fieldnames.setter
    def fieldnames(self, value):
        self._fieldnames = value

    def __next__(self):
        if self.line_num == 0:
            # Accessed only for its side effect: consume the header row.
            self.fieldnames
        row = next(self.reader)
        self.line_num = self.reader.line_num

        # Unlike the basic reader we skip blank rows entirely; returning
        # them would produce a dict full of None values.
        while not row:
            row = next(self.reader)

        result = dict(zip(self.fieldnames, row))
        n_fields = len(self.fieldnames)
        n_values = len(row)
        if n_values > n_fields:
            # Long row: stash the surplus values under restkey.
            result[self.restkey] = row[n_fields:]
        elif n_values < n_fields:
            # Short row: pad the missing fields with restval.
            for missing in self.fieldnames[n_values:]:
                result[missing] = self.restval
        return result
|
|
|
|
|
|
class DictWriter:
    """Write dictionaries to a CSV file as rows ordered by field name.

    *fieldnames* fixes the column order.  Keys missing from a row dict are
    written as *restval*; keys not listed in *fieldnames* either raise
    ValueError (extrasaction="raise", the default) or are silently dropped
    (extrasaction="ignore").
    """

    def __init__(self, f, fieldnames, restval="", extrasaction="raise",
                 dialect="excel", *args, **kwds):
        self.fieldnames = fieldnames    # list of keys for the dict
        self.restval = restval          # for writing short dicts
        if extrasaction.lower() not in ("raise", "ignore"):
            raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
                             % extrasaction)
        self.extrasaction = extrasaction
        self.writer = writer(f, dialect, *args, **kwds)

    def writeheader(self):
        """Write a header row built from the configured fieldnames."""
        header = dict(zip(self.fieldnames, self.fieldnames))
        self.writerow(header)

    def _dict_to_list(self, rowdict):
        """Validate (if requested) and order rowdict's values by fieldnames."""
        if self.extrasaction == "raise":
            wrong_fields = [k for k in rowdict if k not in self.fieldnames]
            if wrong_fields:
                raise ValueError("dict contains fields not in fieldnames: "
                                 + ", ".join(wrong_fields))
        return [rowdict.get(key, self.restval) for key in self.fieldnames]

    def writerow(self, rowdict):
        return self.writer.writerow(self._dict_to_list(rowdict))

    def writerows(self, rowdicts):
        # Convert lazily; the underlying writer consumes any iterable.
        return self.writer.writerows(map(self._dict_to_list, rowdicts))
|
|
|
|
# Guard Sniffer's type checking against builds that exclude complex()
try:
    complex
except NameError:
    # Degrade gracefully: has_header() will probe floats instead.
    complex = float
|
|
|
|
class Sniffer:
|
|
'''
|
|
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
|
|
Returns a Dialect object.
|
|
'''
|
|
def __init__(self):
|
|
# in case there is more than one possible delimiter
|
|
self.preferred = [',', '\t', ';', ' ', ':']
|
|
|
|
|
|
def sniff(self, sample, delimiters=None):
|
|
"""
|
|
Returns a dialect (or None) corresponding to the sample
|
|
"""
|
|
|
|
quotechar, doublequote, delimiter, skipinitialspace = \
|
|
self._guess_quote_and_delimiter(sample, delimiters)
|
|
if not delimiter:
|
|
delimiter, skipinitialspace = self._guess_delimiter(sample,
|
|
delimiters)
|
|
|
|
if not delimiter:
|
|
raise Error("Could not determine delimiter")
|
|
|
|
class dialect(Dialect):
|
|
_name = "sniffed"
|
|
lineterminator = '\r\n'
|
|
quoting = QUOTE_MINIMAL
|
|
# escapechar = ''
|
|
|
|
dialect.doublequote = doublequote
|
|
dialect.delimiter = delimiter
|
|
# _csv.reader won't accept a quotechar of ''
|
|
dialect.quotechar = quotechar or '"'
|
|
dialect.skipinitialspace = skipinitialspace
|
|
|
|
return dialect
|
|
|
|
|
|
def _guess_quote_and_delimiter(self, data, delimiters):
|
|
"""
|
|
Looks for text enclosed between two identical quotes
|
|
(the probable quotechar) which are preceded and followed
|
|
by the same character (the probable delimiter).
|
|
For example:
|
|
,'some text',
|
|
The quote with the most wins, same with the delimiter.
|
|
If there is no quotechar the delimiter can't be determined
|
|
this way.
|
|
"""
|
|
|
|
matches = []
|
|
for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
|
|
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
|
|
'(?P<delim>>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?"
|
|
'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
|
|
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
|
|
matches = regexp.findall(data)
|
|
if matches:
|
|
break
|
|
|
|
if not matches:
|
|
# (quotechar, doublequote, delimiter, skipinitialspace)
|
|
return ('', False, None, 0)
|
|
quotes = {}
|
|
delims = {}
|
|
spaces = 0
|
|
for m in matches:
|
|
n = regexp.groupindex['quote'] - 1
|
|
key = m[n]
|
|
if key:
|
|
quotes[key] = quotes.get(key, 0) + 1
|
|
try:
|
|
n = regexp.groupindex['delim'] - 1
|
|
key = m[n]
|
|
except KeyError:
|
|
continue
|
|
if key and (delimiters is None or key in delimiters):
|
|
delims[key] = delims.get(key, 0) + 1
|
|
try:
|
|
n = regexp.groupindex['space'] - 1
|
|
except KeyError:
|
|
continue
|
|
if m[n]:
|
|
spaces += 1
|
|
|
|
quotechar = max(quotes, key=quotes.get)
|
|
|
|
if delims:
|
|
delim = max(delims, key=delims.get)
|
|
skipinitialspace = delims[delim] == spaces
|
|
if delim == '\n': # most likely a file with a single column
|
|
delim = ''
|
|
else:
|
|
# there is *no* delimiter, it's a single column of quoted data
|
|
delim = ''
|
|
skipinitialspace = 0
|
|
|
|
# if we see an extra quote between delimiters, we've got a
|
|
# double quoted format
|
|
dq_regexp = re.compile(r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
|
|
{'delim':delim, 'quote':quotechar}, re.MULTILINE)
|
|
|
|
|
|
|
|
if dq_regexp.search(data):
|
|
doublequote = True
|
|
else:
|
|
doublequote = False
|
|
|
|
return (quotechar, doublequote, delim, skipinitialspace)
|
|
|
|
|
|
def _guess_delimiter(self, data, delimiters):
|
|
"""
|
|
The delimiter /should/ occur the same number of times on
|
|
each row. However, due to malformed data, it may not. We don't want
|
|
an all or nothing approach, so we allow for small variations in this
|
|
number.
|
|
1) build a table of the frequency of each character on every line.
|
|
2) build a table of freqencies of this frequency (meta-frequency?),
|
|
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
|
|
7 times in 2 rows'
|
|
3) use the mode of the meta-frequency to determine the /expected/
|
|
frequency for that character
|
|
4) find out how often the character actually meets that goal
|
|
5) the character that best meets its goal is the delimiter
|
|
For performance reasons, the data is evaluated in chunks, so it can
|
|
try and evaluate the smallest portion of the data possible, evaluating
|
|
additional chunks as necessary.
|
|
"""
|
|
|
|
data = list(filter(None, data.split('\n')))
|
|
|
|
ascii = [chr(c) for c in range(127)] # 7-bit ASCII
|
|
|
|
# build frequency tables
|
|
chunkLength = min(10, len(data))
|
|
iteration = 0
|
|
charFrequency = {}
|
|
modes = {}
|
|
delims = {}
|
|
start, end = 0, min(chunkLength, len(data))
|
|
while start < len(data):
|
|
iteration += 1
|
|
for line in data[start:end]:
|
|
for char in ascii:
|
|
metaFrequency = charFrequency.get(char, {})
|
|
# must count even if frequency is 0
|
|
freq = line.count(char)
|
|
# value is the mode
|
|
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
|
|
charFrequency[char] = metaFrequency
|
|
|
|
for char in charFrequency.keys():
|
|
items = list(charFrequency[char].items())
|
|
if len(items) == 1 and items[0][0] == 0:
|
|
continue
|
|
# get the mode of the frequencies
|
|
if len(items) > 1:
|
|
modes[char] = max(items, key=lambda x: x[1])
|
|
# adjust the mode - subtract the sum of all
|
|
# other frequencies
|
|
items.remove(modes[char])
|
|
modes[char] = (modes[char][0], modes[char][1]
|
|
- sum(item[1] for item in items))
|
|
else:
|
|
modes[char] = items[0]
|
|
|
|
# build a list of possible delimiters
|
|
modeList = modes.items()
|
|
total = float(chunkLength * iteration)
|
|
# (rows of consistent data) / (number of rows) = 100%
|
|
consistency = 1.0
|
|
# minimum consistency threshold
|
|
threshold = 0.9
|
|
while len(delims) == 0 and consistency >= threshold:
|
|
for k, v in modeList:
|
|
if v[0] > 0 and v[1] > 0:
|
|
if ((v[1]/total) >= consistency and
|
|
(delimiters is None or k in delimiters)):
|
|
delims[k] = v
|
|
consistency -= 0.01
|
|
|
|
if len(delims) == 1:
|
|
delim = list(delims.keys())[0]
|
|
skipinitialspace = (data[0].count(delim) ==
|
|
data[0].count("%c " % delim))
|
|
return (delim, skipinitialspace)
|
|
|
|
# analyze another chunkLength lines
|
|
start = end
|
|
end += chunkLength
|
|
|
|
if not delims:
|
|
return ('', 0)
|
|
|
|
# if there's more than one, fall back to a 'preferred' list
|
|
if len(delims) > 1:
|
|
for d in self.preferred:
|
|
if d in delims.keys():
|
|
skipinitialspace = (data[0].count(d) ==
|
|
data[0].count("%c " % d))
|
|
return (d, skipinitialspace)
|
|
|
|
# nothing else indicates a preference, pick the character that
|
|
# dominates(?)
|
|
items = [(v,k) for (k,v) in delims.items()]
|
|
items.sort()
|
|
delim = items[-1][1]
|
|
|
|
skipinitialspace = (data[0].count(delim) ==
|
|
data[0].count("%c " % delim))
|
|
return (delim, skipinitialspace)
|
|
|
|
|
|
def has_header(self, sample):
|
|
# Creates a dictionary of types of data in each column. If any
|
|
# column is of a single type (say, integers), *except* for the first
|
|
# row, then the first row is presumed to be labels. If the type
|
|
# can't be determined, it is assumed to be a string in which case
|
|
# the length of the string is the determining factor: if all of the
|
|
# rows except for the first are the same length, it's a header.
|
|
# Finally, a 'vote' is taken at the end for each column, adding or
|
|
# subtracting from the likelihood of the first row being a header.
|
|
|
|
rdr = reader(StringIO(sample), self.sniff(sample))
|
|
|
|
header = next(rdr) # assume first row is header
|
|
|
|
columns = len(header)
|
|
columnTypes = {}
|
|
for i in range(columns): columnTypes[i] = None
|
|
|
|
checked = 0
|
|
for row in rdr:
|
|
# arbitrary number of rows to check, to keep it sane
|
|
if checked > 20:
|
|
break
|
|
checked += 1
|
|
|
|
if len(row) != columns:
|
|
continue # skip rows that have irregular number of columns
|
|
|
|
for col in list(columnTypes.keys()):
|
|
|
|
for thisType in [int, float, complex]:
|
|
try:
|
|
thisType(row[col])
|
|
break
|
|
except (ValueError, OverflowError):
|
|
pass
|
|
else:
|
|
# fallback to length of string
|
|
thisType = len(row[col])
|
|
|
|
if thisType != columnTypes[col]:
|
|
if columnTypes[col] is None: # add new column type
|
|
columnTypes[col] = thisType
|
|
else:
|
|
# type is inconsistent, remove column from
|
|
# consideration
|
|
del columnTypes[col]
|
|
|
|
# finally, compare results against first row and "vote"
|
|
# on whether it's a header
|
|
hasHeader = 0
|
|
for col, colType in columnTypes.items():
|
|
if type(colType) == type(0): # it's a length
|
|
if len(header[col]) != colType:
|
|
hasHeader += 1
|
|
else:
|
|
hasHeader -= 1
|
|
else: # attempt typecast
|
|
try:
|
|
colType(header[col])
|
|
except (ValueError, TypeError):
|
|
hasHeader += 1
|
|
else:
|
|
hasHeader -= 1
|
|
|
|
return hasHeader > 0
|