nlp英文的数据清洗代码
一、英文数据清洗
英文数据清洗通常包括:还原缩写、去除非字母符号、处理专有名词的缩写,以及提取词干和词形还原。
1.常规的清洗方式
去除非字母符号并还原常用缩写
#coding=utf-8
import jieba
import unicodedata
import sys,re,collections,nltk
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
class rule:
    """Pre-compiled regex patterns for cleaning English text."""

    # Replace every run of characters that is not a letter, a space, or an
    # apostrophe with a single space; the apostrophe is kept so that the
    # contraction patterns below can still match.
    pat_letter = re.compile(r"[^a-zA-Z ']+")
    # Patterns that expand common English contractions.
    pat_is = re.compile(r"(it|he|she|that|this|there|here)('s)", re.I)  # "it's" -> "it is"
    pat_s = re.compile(r"([a-zA-Z])('s)")       # possessive, e.g. "today's" -> "today"
    pat_not = re.compile(r"([a-zA-Z])(n't)")    # "isn't" -> "is not"
    pat_would = re.compile(r"([a-zA-Z])('d)")   # "'d" -> "would"
    pat_will = re.compile(r"([a-zA-Z])('ll)")   # "'ll" -> "will"
    pat_am = re.compile(r"([Ii])('m)")          # "I'm" -> "I am" (was [I|i], which also matched '|')
    pat_are = re.compile(r"([a-zA-Z])('re)")    # "'re" -> "are"
    pat_ve = re.compile(r"([a-zA-Z])('ve)")     # "'ve" -> "have"


def replace_abbreviations(text):
    """Lower-case *text*, strip non-letter characters, and expand contractions.

    :param text: raw English string
    :return: cleaned, lower-cased string (repeated spaces may remain)
    """
    new_text = rule.pat_letter.sub(' ', text).strip().lower()
    # \1 in each replacement refers to the first captured group
    # (the word/letter immediately before the contraction suffix).
    new_text = rule.pat_is.sub(r"\1 is", new_text)
    new_text = rule.pat_s.sub(r"\1 ", new_text)
    new_text = rule.pat_not.sub(r"\1 not", new_text)
    new_text = rule.pat_would.sub(r"\1 would", new_text)
    new_text = rule.pat_will.sub(r"\1 will", new_text)
    new_text = rule.pat_am.sub(r"\1 am", new_text)
    new_text = rule.pat_are.sub(r"\1 are", new_text)
    new_text = rule.pat_ve.sub(r"\1 have", new_text)
    # Any apostrophe that survived the patterns above becomes a space.
    new_text = new_text.replace('\'', ' ')
    return new_text


if __name__ == '__main__':
    text = 'there\'re many recen\'t \'t extensions of this basic idea to include attention. 120,yes\'s it\'s'
    text = replace_abbreviations(text)
    print(text)  # there are many rece not t extensions of this basic idea to include attention yes it is
2.详细的处理方式
在还原普通缩写的基础上,还引入了一些专有名词的处理和标点符号的处理
import re
def clean_text(text):
    """Clean English text: expand contractions, normalize a few proper
    nouns and spellings, space out punctuation, and collapse whitespace.

    :param text: the string of text
    :return: text string after cleaning

    NOTE(review): the rules are order-dependent — e.g. '&', '|', '=', '+'
    are first spaced in the punctuation section and then rewritten by the
    symbol-replacement section; the net effect is the word form ("and",
    "or", "equal", "plus").
    """
    # acronym / contraction expansion
    text = re.sub(r"can\'t", "can not", text)
    text = re.sub(r"cannot", "can not ", text)
    text = re.sub(r"what\'s", "what is", text)
    text = re.sub(r"What\'s", "what is", text)
    text = re.sub(r"\'ve ", " have ", text)
    text = re.sub(r"n\'t", " not ", text)
    text = re.sub(r"i\'m", "i am ", text)
    text = re.sub(r"I\'m", "i am ", text)
    text = re.sub(r"\'re", " are ", text)
    text = re.sub(r"\'d", " would ", text)
    text = re.sub(r"\'ll", " will ", text)
    text = re.sub(r" e mail ", " email ", text)
    text = re.sub(r" e \- mail ", " email ", text)
    text = re.sub(r" e\-mail ", " email ", text)
    # spelling correction / proper-noun normalization
    text = re.sub(r"ph\.d", "phd", text)
    text = re.sub(r"PhD", "phd", text)
    text = re.sub(r" e g ", " eg ", text)
    text = re.sub(r" fb ", " facebook ", text)
    text = re.sub(r"facebooks", " facebook ", text)
    text = re.sub(r"facebooking", " facebook ", text)
    text = re.sub(r" usa ", " america ", text)
    text = re.sub(r" us ", " america ", text)
    text = re.sub(r" u s ", " america ", text)
    text = re.sub(r" U\.S\. ", " america ", text)
    text = re.sub(r" US ", " america ", text)
    text = re.sub(r" American ", " america ", text)
    text = re.sub(r" America ", " america ", text)
    text = re.sub(r" mbp ", " macbook-pro ", text)
    text = re.sub(r" mac ", " macbook ", text)
    text = re.sub(r"macbook pro", "macbook-pro", text)
    text = re.sub(r"macbook-pros", "macbook-pro", text)
    # spell out isolated single digits
    text = re.sub(r" 1 ", " one ", text)
    text = re.sub(r" 2 ", " two ", text)
    text = re.sub(r" 3 ", " three ", text)
    text = re.sub(r" 4 ", " four ", text)
    text = re.sub(r" 5 ", " five ", text)
    text = re.sub(r" 6 ", " six ", text)
    text = re.sub(r" 7 ", " seven ", text)
    text = re.sub(r" 8 ", " eight ", text)
    text = re.sub(r" 9 ", " nine ", text)
    text = re.sub(r"googling", " google ", text)
    text = re.sub(r"googled", " google ", text)
    text = re.sub(r"googleable", " google ", text)
    text = re.sub(r"googles", " google ", text)
    text = re.sub(r"dollars", " dollar ", text)
    # punctuation: surround with spaces so tokens split cleanly
    text = re.sub(r"\+", " + ", text)
    text = re.sub(r"'", " ", text)
    text = re.sub(r"-", " - ", text)
    text = re.sub(r"/", " / ", text)
    # r" \\ " in the replacement template is a single literal backslash;
    # the original " \ " relied on an ambiguous escape in the template.
    text = re.sub(r"\\", r" \\ ", text)
    text = re.sub(r"=", " = ", text)
    text = re.sub(r"\^", " ^ ", text)
    text = re.sub(r":", " : ", text)
    text = re.sub(r"\.", " . ", text)
    text = re.sub(r",", " , ", text)
    text = re.sub(r"\?", " ? ", text)
    text = re.sub(r"!", " ! ", text)
    text = re.sub(r"\"", " \" ", text)
    text = re.sub(r"&", " & ", text)
    text = re.sub(r"\|", " | ", text)
    text = re.sub(r";", " ; ", text)
    text = re.sub(r"\(", " ( ", text)
    # BUG FIX: the original replaced ')' with ' ( ' (copy-paste error).
    text = re.sub(r"\)", " ) ", text)
    # symbol replacement
    text = re.sub(r"&", " and ", text)
    text = re.sub(r"\|", " or ", text)
    text = re.sub(r"=", " equal ", text)
    text = re.sub(r"\+", " plus ", text)
    text = re.sub(r"\$", " dollar ", text)
    # remove extra space
    text = ' '.join(text.split())
    return text


if __name__ == '__main__':
    text = 'there\'re many recen\'t \'t extensions of this basic idea to include attention. 120,yes\'s it\'s'
    text = clean_text(text)
    print(text)  # there are many rece not t extensions of this basic idea to include attention . 120 , yes s it s
3.包括有处理词根词缀的处理方式
去除符号、还原缩写、获取词根。
#coding=utf-8
import jieba
import unicodedata
import sys,re,collections,nltk
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
class rule:
    """Pre-compiled regex patterns for cleaning English text."""

    # Replace every run of characters that is not a letter, a space, or an
    # apostrophe with a single space; the apostrophe is kept so that the
    # contraction patterns below can still match.
    pat_letter = re.compile(r"[^a-zA-Z ']+")
    # Patterns that expand common English contractions.
    pat_is = re.compile(r"(it|he|she|that|this|there|here)('s)", re.I)  # "it's" -> "it is"
    pat_s = re.compile(r"([a-zA-Z])('s)")       # possessive, e.g. "today's" -> "today"
    pat_not = re.compile(r"([a-zA-Z])(n't)")    # "isn't" -> "is not"
    pat_would = re.compile(r"([a-zA-Z])('d)")   # "'d" -> "would"
    pat_will = re.compile(r"([a-zA-Z])('ll)")   # "'ll" -> "will"
    pat_am = re.compile(r"([Ii])('m)")          # "I'm" -> "I am" (was [I|i], which also matched '|')
    pat_are = re.compile(r"([a-zA-Z])('re)")    # "'re" -> "are"
    pat_ve = re.compile(r"([a-zA-Z])('ve)")     # "'ve" -> "have"


def replace_abbreviations(text):
    """Lower-case *text*, strip non-letter characters, and expand contractions.

    :param text: raw English string
    :return: cleaned, lower-cased string (repeated spaces may remain)
    """
    new_text = rule.pat_letter.sub(' ', text).strip().lower()
    # \1 in each replacement refers to the first captured group
    # (the word/letter immediately before the contraction suffix).
    new_text = rule.pat_is.sub(r"\1 is", new_text)
    new_text = rule.pat_s.sub(r"\1 ", new_text)
    new_text = rule.pat_not.sub(r"\1 not", new_text)
    new_text = rule.pat_would.sub(r"\1 would", new_text)
    new_text = rule.pat_will.sub(r"\1 will", new_text)
    new_text = rule.pat_am.sub(r"\1 am", new_text)
    new_text = rule.pat_are.sub(r"\1 are", new_text)
    new_text = rule.pat_ve.sub(r"\1 have", new_text)
    # Any apostrophe that survived the patterns above becomes a space.
    new_text = new_text.replace('\'', ' ')
    return new_text
def get_wordnet_pos(treebank_tag):
    """Map a Penn Treebank POS tag prefix to the matching WordNet POS constant.

    :param treebank_tag: tag string produced by nltk.pos_tag (e.g. 'JJR')
    :return: a WordNet POS constant, or '' when the tag has no WordNet
             counterpart (the caller then keeps the word untouched)
    """
    if treebank_tag.startswith('J'):
        return nltk.corpus.wordnet.ADJ
    elif treebank_tag.startswith('V'):
        return nltk.corpus.wordnet.VERB
    elif treebank_tag.startswith('N'):
        return nltk.corpus.wordnet.NOUN
    elif treebank_tag.startswith('R'):  # adverb
        return nltk.corpus.wordnet.ADV
    else:
        return ''


def merge(words):
    """POS-tag *words* and lemmatize every token to its base form.

    :param words: whitespace-separated English text
    :return: string of lemmatized tokens, each preceded by a single space
    """
    lmtzr = WordNetLemmatizer()
    new_words = ''
    # pos_tag output looks like [('bigger', 'JJR')]
    words = nltk.pos_tag(word_tokenize(words))
    for word in words:
        pos = get_wordnet_pos(word[1])
        if pos:
            # lemmatize() reduces the word to the base form for this POS
            word = lmtzr.lemmatize(word[0], pos)
            new_words += ' ' + word
        else:
            new_words += ' ' + word[0]
    return new_words


def clear_data(text):
    """Full cleaning pipeline: expand contractions, then lemmatize.

    :param text: raw English string
    :return: cleaned, lemmatized string with outer whitespace stripped
    """
    text = replace_abbreviations(text)
    text = merge(text)
    text = text.strip()
    return text
if __name__ == '__main__':
    # Demo: contractions are expanded and every token is lemmatized
    # ("extensions" -> "extension", "had" -> "have", "are" -> "be").
    text = 'there\'re many recen\'t \'t extensions of this basic had idea to include attention. 120,had'
    text = clear_data(text)
    print(text)  # there be many rece not t extension of this basic have idea to include attention have
二、中文数据清洗
去除一些停用词。停用词是文本中高频出现的代词、连词、介词等对文本分类无意义的词;通常的做法是维护一个停用词表,在特征提取过程中删除停用词表中出现的词,这本质上属于特征选择的一部分。具体可参考 HanLP 的停用词表:https://github.com/hankcs/HanLP
nlp英文的数据清洗代码的更多相关文章
- JavaScript验证字符串只能包含数字或者英文字符的代码实例
验证字符串只能包含数字或者英文字符的代码实例:本章节分享一段代码实例,它实现了验证字符串内容是否只包含英文字符或者数字.代码实例如下: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 ...
- [NLP] The Annotated Transformer 代码修正
1. RuntimeError: "exp" not implemented for 'torch.LongTensor' class PositionalEncoding(nn. ...
- NLP整体流程的代码
import nltk import numpy as np import re from nltk.corpus import stopwords # 1 分词1 text = "Sent ...
- 8个数据清洗Python代码,复制可用,最长11行 | 资源
最近,大数据工程师Kin Lim Lee在Medium上发表了一篇文章,介绍了8个用于数据清洗的Python代码. 数据清洗,是进行数据分析和使用数据训练模型的必经之路,也是最耗费数据科学家/程序员精 ...
- git入门(4)团队中git保管代码常用操作
在团队中协作代码时候,一定要熟练使用以下git命令,不至于把代码库弄乱, PS:一定要提交自己代码(git push)时候,先进行更新本地代码库(git pull),不然提交异常 git常用命令 1· ...
- 快看Sample代码,速学Swift语言(1)-语法速览
Swift是苹果推出的一个比较新的语言,它除了借鉴语言如C#.Java等内容外,好像还采用了很多JavaScript脚本里面的一些脚本语法,用起来感觉非常棒,作为一个使用C#多年的技术控,对这种比较超 ...
- IDEA 代码规范插件
前言 在工作过程中,每个人的代码习惯都不同,在一起工作做同一个项目,如果按照自己的习惯来,有可能造成代码维护困难,开发进度缓慢等. 代码规范的重要性 谷歌发布的代码规范中指出,80% 的缺失是由 20 ...
- ph 提交代码的步骤;
ph 提交代码的步骤: git status 查看状态: ls -ah 查看文件: git stash list 查看本地缓存的文件: git branch 查看本地的分支: git checkout ...
- cucumber java从入门到精通(2)用代码定义步骤
cucumber java从入门到精通(2)用代码定义步骤 上一节里我们定义了feature文件,feature文件就是自然语言描述的用例文件,它有一定的章法,具体的潜规则是: 使用Feature关键 ...
随机推荐
- 模拟实现 Promise(小白版)
模拟实现 Promise(小白版) 本篇来讲讲如何模拟实现一个 Promise 的基本功能,网上这类文章已经很多,本篇笔墨会比较多,因为想用自己的理解,用白话文来讲讲 Promise 的基本规范,参考 ...
- 初探three.js几何体
今天说说three.js的几何体,常见的几何体今天就不说了,今天说一说如何画直线,圆弧线,以及高级几何体. 1. 画一条直线 画直线我们使用THREE.Geometry()对象. //给空白几何体添加 ...
- tabhost改变标签颜色
package uiframe.zyx.uiframe.com.uiframe.fragments;import android.os.Bundle;import android.support.an ...
- Oracle GoldenGate for Sql Server连接ODBC失败的处理方法
Oracle GoldenGate for Sql Server连接oracle数据库的时候还是比较容易的,命令行下面只要: GGSCI> dblogin useridalias [ alias ...
- Django day03之学习知识点
今日是路由层学习: 3.路由匹配 3.1 正则表达式的特点: 一旦正则表达式能够匹配到内容,会立刻结束匹配关系 直接执行对应的函数.相当于采用就近原则,一旦找到就不再继续往下走了 重点: 正则表达式开 ...
- 更改CSDN博客皮肤的一种简易方法
CSDN改版后,皮肤设置变得不能够更改了,不过下面这种方法依然可以做到: 首先来到博客设置的主页面:. 接下来按ctrl + shift + i进入 如下页面,然后点击图中红色标记圈起来的选择元素按钮 ...
- c++之基础数据类型
c++规定了在创建一个变量或者常量时,必须先要指定相应的数据类型,否发无法将变量分配给内存. 1.整型 数据类型 占用空间 取值范围 short 2字节 -2^15-2^15-1 int 4字节 -2 ...
- php 弱类型总结
0x01 前言 最近CTF比赛,不止一次的出了php弱类型的题目,借此想总结一下关于php弱类型以及绕过方式 0x02 知识介绍 php中有两种比较的符号 == 与 === <?php $a = ...
- 解决mysql导入数据量很大导致失败及查找my.ini 位置(my.ini)在哪
数据库数据量很大的数据库导入到本地时,会等很久,然而等很久之后还是显示失败: 这是就要看看自己本地的没mysql是否设置了超时等待,如果报相关time_out这些,可以把mysql.ini尾部添加ma ...
- Linux重器之 Vim 实用命令
Vim 常用的命令 光标定位; hjkl 上下左右移动 0 $ 跳到行首或行尾 gg shift+G 跳到整个文件的开头行或者结尾行 1G ,2G,3G........NG ,跳到第1.2.3 ...