# coding: utf-8

# In[18]:

import pandas as pd
import numpy as np
from sklearn import tree
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import binarize
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import Normalizer
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score,recall_score,average_precision_score,auc

# In[32]:

data=pd.read_csv(r"D:\Users\sgg91044\Desktop\bad_wafer_data_pivot.csv")

# In[33]:

data.head()

# In[34]:

index=data.drop(columns=["defect_count","ETCM_PHA4","ETCM_PHB4","ETCM_PHC4","HELK_MAX.","HELK_MEAN","HELK_SD","LOWERCHM_PRESS","PBK4","RR13_MAX.","RR13_MEAN","RR23_MAX.","RR23_MEAN","THR3_MAX.","THR3_MAX._DIFF","THR3_MEAN","THR3_MEAN_DIFF","THR3_MEAN_SLOPE","THR3_SD"])
index=index.drop(columns="Target")
index

# In[35]:

data=data.drop(columns=["lotid","Step","Recipie_Name","defect_count"])
data.head()

# In[36]:

ohe = OneHotEncoder()
le = LabelEncoder()

# In[37]:

data.head()

# In[40]:

data["eqp_encoded"] = le.fit_transform(data.iloc[:,0])
data["slot_encoded"] = le.fit_transform(data.iloc[:,1])
data['chamber_encoded'] = le.fit_transform(data.iloc[:,2])
data.head()

# In[41]:

data=data.drop(columns=["eqpid","slotid","Chamber"])
data.head()

# In[42]:

nz = Normalizer()
# Normalizer rescales each sample (row) of the selected column blocks to unit norm
data.iloc[:,10:12]=pd.DataFrame(nz.fit_transform(data.iloc[:,10:12]),columns=data.iloc[:,10:12].columns)
data.iloc[:,0:3]=pd.DataFrame(nz.fit_transform(data.iloc[:,0:3]),columns=data.iloc[:,0:3].columns)
data.head()

# In[43]:

def cleaning():
    # Re-run the full preprocessing pipeline from the raw CSV and return the cleaned frame
    data = pd.read_csv(r"D:\Users\sgg91044\Desktop\bad_wafer_data_pivot.csv")
    data = data.drop(columns=["lotid","Step","Recipie_Name","defect_count"])
    # Label-encode the categorical columns, then drop the original text columns
    le = LabelEncoder()
    data["eqp_encoded"] = le.fit_transform(data.iloc[:,0])
    data["slot_encoded"] = le.fit_transform(data.iloc[:,1])
    data['chamber_encoded'] = le.fit_transform(data.iloc[:,2])
    data = data.drop(columns=["eqpid","slotid","Chamber"])
    # Row-normalize the same column blocks as above
    nz = Normalizer()
    data.iloc[:,10:12] = pd.DataFrame(nz.fit_transform(data.iloc[:,10:12]), columns=data.iloc[:,10:12].columns)
    data.iloc[:,0:3] = pd.DataFrame(nz.fit_transform(data.iloc[:,0:3]), columns=data.iloc[:,0:3].columns)
    return data
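
# In[ ]:

# cleaning() is defined above but never called, and the model-related imports
# (train_test_split, SVC, GridSearchCV, classification_report, confusion_matrix)
# are never used in this snippet. The cell below is a minimal, hypothetical sketch
# of how they could fit together, assuming "Target" is the class label and the
# cleaned frame has no missing values; the SVC parameter grid is illustrative only.
clean = cleaning()
X = clean.drop(columns="Target")
y = clean["Target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Grid-search an RBF-kernel SVC over a small illustrative parameter grid
grid = GridSearchCV(SVC(kernel="rbf"), param_grid={"C": [0.1, 1, 10], "gamma": [0.01, 0.1, 1]}, cv=3)
grid.fit(X_train, y_train)

# Evaluate the selected model on the held-out split
y_pred = grid.predict(X_test)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))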
