I. English Data Cleaning

English data cleaning means expanding contractions, removing non-alphabetic symbols, normalizing acronyms and proper nouns, and reducing words to their stems or lemmas.

1. The basic cleaning approach

Remove non-alphabetic symbols and expand common English contractions.

# coding=utf-8
import re

class rule:
    # Regex that replaces every character other than letters and apostrophes
    # with a space (this drops double quotes, periods, commas, digits, etc.)
    pat_letter = re.compile(r"[^a-zA-Z ']+")  # keep the apostrophe
    # Patterns for expanding common English contractions
    pat_is = re.compile(r"(it|he|she|that|this|there|here)('s)", re.I)
    pat_s = re.compile(r"([a-zA-Z])('s)")      # possessive, e.g. today's
    pat_not = re.compile(r"([a-zA-Z])(n't)")   # contraction of "not"
    pat_would = re.compile(r"([a-zA-Z])('d)")  # contraction of "would"
    pat_will = re.compile(r"([a-zA-Z])('ll)")  # contraction of "will"
    pat_am = re.compile(r"([Ii])('m)")         # contraction of "am"
    pat_are = re.compile(r"([a-zA-Z])('re)")   # contraction of "are"
    pat_ve = re.compile(r"([a-zA-Z])('ve)")    # contraction of "have"

def replace_abbreviations(text):
    new_text = text
    new_text = rule.pat_letter.sub(' ', new_text).strip().lower()
    new_text = rule.pat_is.sub(r"\1 is", new_text)  # \1 is the first captured group
    new_text = rule.pat_s.sub(r"\1 ", new_text)
    new_text = rule.pat_not.sub(r"\1 not", new_text)
    new_text = rule.pat_would.sub(r"\1 would", new_text)
    new_text = rule.pat_will.sub(r"\1 will", new_text)
    new_text = rule.pat_am.sub(r"\1 am", new_text)
    new_text = rule.pat_are.sub(r"\1 are", new_text)
    new_text = rule.pat_ve.sub(r"\1 have", new_text)
    new_text = new_text.replace("'", ' ')
    return new_text

if __name__ == '__main__':
    text = "there're many recen't 't extensions of this basic idea to include attention. 120,yes's it's"
    text = replace_abbreviations(text)
    print(text)  # there are many rece not t extensions of this basic idea to include attention yes it is
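
Note that replace_abbreviations applies pat_is before pat_s on purpose: both patterns match a trailing 's, and whichever runs first consumes it. A quick check of that order dependence (our addition, not part of the original script):

import re

pat_is = re.compile(r"(it|he|she|that|this|there|here)('s)", re.I)
pat_s = re.compile(r"([a-zA-Z])('s)")

text = "it's today's news"
# Correct order: pat_is expands "it's" to "it is" before pat_s can strip the 's
print(pat_s.sub(r"\1 ", pat_is.sub(r"\1 is", text)))  # it is today  news
# Reversed order: pat_s strips the 's from "it's" first, so "is" is never restored
print(pat_is.sub(r"\1 is", pat_s.sub(r"\1 ", text)))  # it  today  news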

2. A more detailed approach

In addition to expanding common contractions, this version also normalizes some proper nouns and spaces out punctuation.

import re

def clean_text(text):
    """
    Clean an English text string.
    :param text: the input string
    :return: the string after cleaning
    """
    # expand contractions
    text = re.sub(r"can't", "can not", text)
    text = re.sub(r"cannot", "can not ", text)
    text = re.sub(r"what's", "what is", text)
    text = re.sub(r"What's", "what is", text)
    text = re.sub(r"'ve ", " have ", text)
    text = re.sub(r"n't", " not ", text)
    text = re.sub(r"i'm", "i am ", text)
    text = re.sub(r"I'm", "i am ", text)
    text = re.sub(r"'re", " are ", text)
    text = re.sub(r"'d", " would ", text)
    text = re.sub(r"'ll", " will ", text)
    text = re.sub(r" e mail ", " email ", text)
    text = re.sub(r" e \- mail ", " email ", text)
    text = re.sub(r" e\-mail ", " email ", text)
    # spelling correction and proper-noun normalization
    text = re.sub(r"ph\.d", "phd", text)
    text = re.sub(r"PhD", "phd", text)
    text = re.sub(r" e g ", " eg ", text)
    text = re.sub(r" fb ", " facebook ", text)
    text = re.sub(r"facebooks", " facebook ", text)
    text = re.sub(r"facebooking", " facebook ", text)
    text = re.sub(r" usa ", " america ", text)
    text = re.sub(r" us ", " america ", text)
    text = re.sub(r" u s ", " america ", text)
    text = re.sub(r" U\.S\. ", " america ", text)
    text = re.sub(r" US ", " america ", text)
    text = re.sub(r" American ", " america ", text)
    text = re.sub(r" America ", " america ", text)
    text = re.sub(r" mbp ", " macbook-pro ", text)
    text = re.sub(r" mac ", " macbook ", text)
    text = re.sub(r"macbook pro", "macbook-pro", text)
    text = re.sub(r"macbook-pros", "macbook-pro", text)
    # spell out single digits
    text = re.sub(r" 1 ", " one ", text)
    text = re.sub(r" 2 ", " two ", text)
    text = re.sub(r" 3 ", " three ", text)
    text = re.sub(r" 4 ", " four ", text)
    text = re.sub(r" 5 ", " five ", text)
    text = re.sub(r" 6 ", " six ", text)
    text = re.sub(r" 7 ", " seven ", text)
    text = re.sub(r" 8 ", " eight ", text)
    text = re.sub(r" 9 ", " nine ", text)
    text = re.sub(r"googling", " google ", text)
    text = re.sub(r"googled", " google ", text)
    text = re.sub(r"googleable", " google ", text)
    text = re.sub(r"googles", " google ", text)
    text = re.sub(r"dollars", " dollar ", text)
    # space out punctuation
    text = re.sub(r"\+", " + ", text)
    text = re.sub(r"'", " ", text)
    text = re.sub(r"-", " - ", text)
    text = re.sub(r"/", " / ", text)
    text = re.sub(r"\\", r" \\ ", text)
    text = re.sub(r"=", " = ", text)
    text = re.sub(r"\^", " ^ ", text)
    text = re.sub(r":", " : ", text)
    text = re.sub(r"\.", " . ", text)
    text = re.sub(r",", " , ", text)
    text = re.sub(r"\?", " ? ", text)
    text = re.sub(r"!", " ! ", text)
    text = re.sub(r"\"", ' " ', text)
    text = re.sub(r"&", " & ", text)
    text = re.sub(r"\|", " | ", text)
    text = re.sub(r";", " ; ", text)
    text = re.sub(r"\(", " ( ", text)
    text = re.sub(r"\)", " ) ", text)
    # replace symbols with words
    text = re.sub(r"&", " and ", text)
    text = re.sub(r"\|", " or ", text)
    text = re.sub(r"=", " equal ", text)
    text = re.sub(r"\+", " plus ", text)
    text = re.sub(r"\$", " dollar ", text)
    # collapse runs of whitespace into single spaces
    text = ' '.join(text.split())
    return text

if __name__ == '__main__':
    text = "there're many recen't 't extensions of this basic idea to include attention. 120,yes's it's"
    text = clean_text(text)
    print(text)  # there are many rece not t extensions of this basic idea to include attention . 120 , yes s it s
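
One caveat: rules written with explicit spaces, such as " us " or " 1 ", only match when the token has a space on both sides, so they miss words at the very beginning or end of the string. A minimal workaround (our addition, not part of the original) is to pad the text before cleaning:

def clean_text_padded(text):
    # Pad with spaces so space-delimited rules like " us " also match at the
    # string edges; the final ' '.join(text.split()) removes the padding again.
    return clean_text(' ' + text + ' ')

if __name__ == '__main__':
    print(clean_text_padded('us citizens pay in dollars'))  # america citizens pay in dollar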

3. A pipeline that also reduces words to their base forms

Remove symbols, expand contractions, and reduce each word to its base form (lemma).

# coding=utf-8
import re
import nltk
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize

class rule:
    # Regex that replaces every character other than letters and apostrophes
    # with a space (this drops double quotes, periods, commas, digits, etc.)
    pat_letter = re.compile(r"[^a-zA-Z ']+")  # keep the apostrophe
    # Patterns for expanding common English contractions
    pat_is = re.compile(r"(it|he|she|that|this|there|here)('s)", re.I)
    pat_s = re.compile(r"([a-zA-Z])('s)")      # possessive, e.g. today's
    pat_not = re.compile(r"([a-zA-Z])(n't)")   # contraction of "not"
    pat_would = re.compile(r"([a-zA-Z])('d)")  # contraction of "would"
    pat_will = re.compile(r"([a-zA-Z])('ll)")  # contraction of "will"
    pat_am = re.compile(r"([Ii])('m)")         # contraction of "am"
    pat_are = re.compile(r"([a-zA-Z])('re)")   # contraction of "are"
    pat_ve = re.compile(r"([a-zA-Z])('ve)")    # contraction of "have"

def replace_abbreviations(text):
    new_text = text
    new_text = rule.pat_letter.sub(' ', new_text).strip().lower()
    new_text = rule.pat_is.sub(r"\1 is", new_text)  # \1 is the first captured group
    new_text = rule.pat_s.sub(r"\1 ", new_text)
    new_text = rule.pat_not.sub(r"\1 not", new_text)
    new_text = rule.pat_would.sub(r"\1 would", new_text)
    new_text = rule.pat_will.sub(r"\1 will", new_text)
    new_text = rule.pat_am.sub(r"\1 am", new_text)
    new_text = rule.pat_are.sub(r"\1 are", new_text)
    new_text = rule.pat_ve.sub(r"\1 have", new_text)
    new_text = new_text.replace("'", ' ')
    return new_text

# Treebank tags and WordNet POS constants overlap; map the former to the latter
def get_wordnet_pos(treebank_tag):
    if treebank_tag.startswith('J'):
        return nltk.corpus.wordnet.ADJ
    elif treebank_tag.startswith('V'):
        return nltk.corpus.wordnet.VERB
    elif treebank_tag.startswith('N'):
        return nltk.corpus.wordnet.NOUN
    elif treebank_tag.startswith('R'):  # adverb
        return nltk.corpus.wordnet.ADV
    else:
        return ''

def merge(words):
    lmtzr = WordNetLemmatizer()
    new_words = ''
    words = nltk.pos_tag(word_tokenize(words))  # tags look like [('bigger', 'JJR')]
    for word in words:
        pos = get_wordnet_pos(word[1])
        if pos:
            # lemmatize() reduces the word to its base form for the given POS
            word = lmtzr.lemmatize(word[0], pos)
            new_words += ' ' + word
        else:
            new_words += ' ' + word[0]
    return new_words

def clear_data(text):
    text = replace_abbreviations(text)
    text = merge(text)
    text = text.strip()
    return text

if __name__ == '__main__':
    text = "there're many recen't 't extensions of this basic had idea to include attention. 120,had"
    text = clear_data(text)
    print(text)  # there be many rece not t extension of this basic have idea to include attention have
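
The overview at the top also mentions stemming, while this section only lemmatizes. For comparison, here is a minimal sketch (our addition) using NLTK's PorterStemmer, which strips suffixes by rule instead of looking up dictionary base forms the way the lemmatizer does:

from nltk.stem import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer

stemmer = PorterStemmer()
lmtzr = WordNetLemmatizer()
for w in ['extensions', 'had', 'attention']:
    # stem() applies suffix-stripping rules and may produce non-words;
    # lemmatize() maps to a real dictionary form for the given POS
    print(w, stemmer.stem(w), lmtzr.lemmatize(w, 'v'))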

II. Chinese Data Cleaning

The main step is removing stop words. Stop words are high-frequency pronouns, conjunctions, prepositions, and similar words that carry no signal for text classification. In practice you maintain a stop-word list and delete any word on it during feature extraction, which is essentially a form of feature selection. For a concrete list, see HanLP's stop-word table: https://github.com/hankcs/HanLP
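
A minimal sketch of stop-word filtering with jieba (our addition; the file name stopwords.txt is an assumption, use whatever list you maintain, e.g. one exported from the HanLP repository above):

# coding=utf-8
import jieba

def remove_stopwords(text, stopwords_path='stopwords.txt'):
    # Load the stop-word list, one word per line (the file name is hypothetical)
    with open(stopwords_path, encoding='utf-8') as f:
        stopwords = set(line.strip() for line in f)
    # Segment with jieba, then drop stop words and pure-whitespace tokens
    words = jieba.lcut(text)
    return ' '.join(w for w in words if w.strip() and w not in stopwords)

if __name__ == '__main__':
    print(remove_stopwords('我们在这里讨论的是中文文本的数据清洗'))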
