"""
csv.py - read/write/investigate CSV files
""" import re
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect

from collections import OrderedDict
from io import StringIO

__all__ = ["QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "__doc__", "excel", "excel_tab",
"field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter",
"unix_dialect"] class Dialect:
"""Describe a CSV dialect. This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting. """
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
    quoting = None

    def __init__(self):
if self.__class__ != Dialect:
self._valid = True
        self._validate()

    def _validate(self):
try:
_Dialect(self)
except TypeError as e:
# We do this for compatibility with py2.3
            raise Error(str(e))

class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)

class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)

class unix_dialect(Dialect):
"""Describe the usual properties of Unix-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\n'
quoting = QUOTE_ALL
register_dialect("unix", unix_dialect)
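
# Usage sketch (illustrative, not part of the module source; run as a separate
# script). Defining and registering a custom Dialect subclass follows the same
# pattern as excel/unix_dialect above; the dialect name 'pipes' and the file
# 'data.txt' are hypothetical.
#
#     import csv
#
#     class pipes(csv.Dialect):
#         delimiter = '|'
#         quotechar = '"'
#         doublequote = True
#         skipinitialspace = False
#         lineterminator = '\r\n'
#         quoting = csv.QUOTE_MINIMAL
#
#     csv.register_dialect('pipes', pipes)
#
#     with open('data.txt', newline='') as f:
#         for row in csv.reader(f, dialect='pipes'):
#             print(row)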

class DictReader:
    def __init__(self, f, fieldnames=None, restkey=None, restval=None,
                 dialect="excel", *args, **kwds):
self._fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
        self.line_num = 0

    def __iter__(self):
        return self

    @property
def fieldnames(self):
if self._fieldnames is None:
try:
self._fieldnames = next(self.reader)
except StopIteration:
pass
self.line_num = self.reader.line_num
        return self._fieldnames

    @fieldnames.setter
def fieldnames(self, value):
        self._fieldnames = value

    def __next__(self):
if self.line_num == 0:
# Used only for its side effect.
self.fieldnames
row = next(self.reader)
        self.line_num = self.reader.line_num

        # unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = next(self.reader)
d = OrderedDict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
        return d
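
# Usage sketch (illustrative, not part of the module source; run as a separate
# script). DictReader maps each row onto the header row (or an explicit
# fieldnames list); extra fields land under restkey and missing ones get
# restval. The file 'people.csv' with 'name' and 'age' columns is hypothetical.
#
#     import csv
#
#     with open('people.csv', newline='') as f:
#         rdr = csv.DictReader(f, restkey='extra', restval='')
#         for row in rdr:
#             print(row['name'], row['age'])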

class DictWriter:
    def __init__(self, f, fieldnames, restval="", extrasaction="raise",
                 dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
% extrasaction)
self.extrasaction = extrasaction
        self.writer = writer(f, dialect, *args, **kwds)

    def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
        self.writerow(header)

    def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
wrong_fields = rowdict.keys() - self.fieldnames
if wrong_fields:
raise ValueError("dict contains fields not in fieldnames: "
+ ", ".join([repr(x) for x in wrong_fields]))
        return (rowdict.get(key, self.restval) for key in self.fieldnames)

    def writerow(self, rowdict):
        return self.writer.writerow(self._dict_to_list(rowdict))

    def writerows(self, rowdicts):
        return self.writer.writerows(map(self._dict_to_list, rowdicts))
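
# Usage sketch (illustrative, not part of the module source; run as a separate
# script). DictWriter pulls values out of each dict in fieldnames order;
# writeheader() emits the field names as the first row, and extrasaction
# defaults to 'raise', which rejects unexpected keys. The file 'people.csv'
# is hypothetical.
#
#     import csv
#
#     with open('people.csv', 'w', newline='') as f:
#         w = csv.DictWriter(f, fieldnames=['name', 'age'], restval='n/a')
#         w.writeheader()
#         w.writerow({'name': 'Ada', 'age': 36})
#         w.writerow({'name': 'Bob'})    # missing 'age' -> written as 'n/a'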

# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
    complex = float


class Sniffer:
'''
"Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
Returns a Dialect object.
'''
def __init__(self):
# in case there is more than one possible delimiter
        self.preferred = [',', '\t', ';', ' ', ':']

    def sniff(self, sample, delimiters=None):
"""
Returns a dialect (or None) corresponding to the sample
""" quotechar, doublequote, delimiter, skipinitialspace = \
self._guess_quote_and_delimiter(sample, delimiters)
if not delimiter:
delimiter, skipinitialspace = self._guess_delimiter(sample,
                                                                delimiters)

        if not delimiter:
            raise Error("Could not determine delimiter")

        class dialect(Dialect):
_name = "sniffed"
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
            # escapechar = ''

        dialect.doublequote = doublequote
dialect.delimiter = delimiter
# _csv.reader won't accept a quotechar of ''
dialect.quotechar = quotechar or '"'
        dialect.skipinitialspace = skipinitialspace

        return dialect

    def _guess_quote_and_delimiter(self, data, delimiters):
"""
Looks for text enclosed between two identical quotes
(the probable quotechar) which are preceded and followed
by the same character (the probable delimiter).
For example:
,'some text',
The quote with the most wins, same with the delimiter.
If there is no quotechar the delimiter can't be determined
this way.
""" matches = []
for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?",
                      r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',   # ,".*?"
r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" (no delim, no space)
regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
matches = regexp.findall(data)
if matches:
                break

        if not matches:
# (quotechar, doublequote, delimiter, skipinitialspace)
return ('', False, None, 0)
quotes = {}
delims = {}
spaces = 0
groupindex = regexp.groupindex
for m in matches:
n = groupindex['quote'] - 1
key = m[n]
if key:
quotes[key] = quotes.get(key, 0) + 1
try:
n = groupindex['delim'] - 1
key = m[n]
except KeyError:
continue
if key and (delimiters is None or key in delimiters):
delims[key] = delims.get(key, 0) + 1
try:
n = groupindex['space'] - 1
except KeyError:
continue
if m[n]:
                spaces += 1

        quotechar = max(quotes, key=quotes.get)

        if delims:
delim = max(delims, key=delims.get)
skipinitialspace = delims[delim] == spaces
if delim == '\n': # most likely a file with a single column
delim = ''
else:
# there is *no* delimiter, it's a single column of quoted data
delim = ''
            skipinitialspace = 0

        # if we see an extra quote between delimiters, we've got a
# double quoted format
dq_regexp = re.compile(
r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
                               {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)

        if dq_regexp.search(data):
doublequote = True
else:
            doublequote = False

        return (quotechar, doublequote, delim, skipinitialspace)

    def _guess_delimiter(self, data, delimiters):
"""
The delimiter /should/ occur the same number of times on
each row. However, due to malformed data, it may not. We don't want
an all or nothing approach, so we allow for small variations in this
number.
1) build a table of the frequency of each character on every line.
2) build a table of frequencies of this frequency (meta-frequency?),
e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
7 times in 2 rows'
3) use the mode of the meta-frequency to determine the /expected/
frequency for that character
4) find out how often the character actually meets that goal
5) the character that best meets its goal is the delimiter
For performance reasons, the data is evaluated in chunks, so it can
try and evaluate the smallest portion of the data possible, evaluating
additional chunks as necessary.
""" data = list(filter(None, data.split('\n'))) ascii = [chr(c) for c in range(127)] # 7-bit ASCII # build frequency tables
chunkLength = min(10, len(data))
iteration = 0
charFrequency = {}
modes = {}
delims = {}
start, end = 0, min(chunkLength, len(data))
while start < len(data):
iteration += 1
for line in data[start:end]:
for char in ascii:
metaFrequency = charFrequency.get(char, {})
# must count even if frequency is 0
freq = line.count(char)
# value is the mode
metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
                    charFrequency[char] = metaFrequency

            for char in charFrequency.keys():
items = list(charFrequency[char].items())
if len(items) == 1 and items[0][0] == 0:
continue
# get the mode of the frequencies
if len(items) > 1:
modes[char] = max(items, key=lambda x: x[1])
# adjust the mode - subtract the sum of all
# other frequencies
items.remove(modes[char])
modes[char] = (modes[char][0], modes[char][1]
- sum(item[1] for item in items))
else:
                    modes[char] = items[0]

            # build a list of possible delimiters
modeList = modes.items()
total = float(chunkLength * iteration)
# (rows of consistent data) / (number of rows) = 100%
consistency = 1.0
# minimum consistency threshold
threshold = 0.9
while len(delims) == 0 and consistency >= threshold:
for k, v in modeList:
if v[0] > 0 and v[1] > 0:
if ((v[1]/total) >= consistency and
(delimiters is None or k in delimiters)):
delims[k] = v
                consistency -= 0.01

            if len(delims) == 1:
delim = list(delims.keys())[0]
skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
                return (delim, skipinitialspace)

            # analyze another chunkLength lines
start = end
            end += chunkLength

        if not delims:
            return ('', 0)

        # if there's more than one, fall back to a 'preferred' list
if len(delims) > 1:
for d in self.preferred:
if d in delims.keys():
skipinitialspace = (data[0].count(d) ==
data[0].count("%c " % d))
                    return (d, skipinitialspace)

        # nothing else indicates a preference, pick the character that
# dominates(?)
items = [(v,k) for (k,v) in delims.items()]
items.sort()
        delim = items[-1][1]

        skipinitialspace = (data[0].count(delim) ==
data[0].count("%c " % delim))
        return (delim, skipinitialspace)

    def has_header(self, sample):
# Creates a dictionary of types of data in each column. If any
# column is of a single type (say, integers), *except* for the first
# row, then the first row is presumed to be labels. If the type
# can't be determined, it is assumed to be a string in which case
# the length of the string is the determining factor: if all of the
# rows except for the first are the same length, it's a header.
# Finally, a 'vote' is taken at the end for each column, adding or
        # subtracting from the likelihood of the first row being a header.

        rdr = reader(StringIO(sample), self.sniff(sample))

        header = next(rdr) # assume first row is header

        columns = len(header)
columnTypes = {}
        for i in range(columns): columnTypes[i] = None

        checked = 0
for row in rdr:
# arbitrary number of rows to check, to keep it sane
if checked > 20:
break
            checked += 1

            if len(row) != columns:
                continue # skip rows that have irregular number of columns

            for col in list(columnTypes.keys()):

                for thisType in [int, float, complex]:
try:
thisType(row[col])
break
except (ValueError, OverflowError):
pass
else:
# fallback to length of string
                    thisType = len(row[col])

                if thisType != columnTypes[col]:
if columnTypes[col] is None: # add new column type
columnTypes[col] = thisType
else:
# type is inconsistent, remove column from
# consideration
                        del columnTypes[col]

        # finally, compare results against first row and "vote"
# on whether it's a header
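        # Illustration (added note, not in the original source): if a column
        # parsed as int on every sampled row, columnTypes[col] holds the int
        # type and the vote is +1 if int(header[col]) fails, -1 if it
        # succeeds; for string columns the stored value is a length, and the
        # vote compares len(header[col]) against it.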
hasHeader = 0
for col, colType in columnTypes.items():
if type(colType) == type(0): # it's a length
if len(header[col]) != colType:
hasHeader += 1
else:
hasHeader -= 1
else: # attempt typecast
try:
colType(header[col])
except (ValueError, TypeError):
hasHeader += 1
else:
                    hasHeader -= 1

        return hasHeader > 0
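
A short, self-contained demonstration of the Sniffer (not part of the module source above; the sample data is made up): sniff() guesses the dialect from a text sample, and has_header() votes on whether the first row looks like column labels.

import csv

sample = "name;age;city\nAda;36;London\nBob;41;Paris\n"

sniffer = csv.Sniffer()
dialect = sniffer.sniff(sample)          # guesses ';' as the delimiter here
print(dialect.delimiter)
print(sniffer.has_header(sample))        # True for this sample

for row in csv.reader(sample.splitlines(), dialect):
    print(row)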
