More related articles on 【吴裕雄 python 机器学习-KNN(2)】

import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors, datasets
from sklearn.model_selection import train_test_split

def create_regression_data(n):
    '''
    Create the dataset used by the regression model
    '''
    X = 5 * np.random.rand(n, 1)
    y = np.sin(X).ravel()
    # every …

import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors, datasets
from sklearn.model_selection import train_test_split

def load_classification_data():
    # use the handwritten-digit recognition dataset (Digit Dataset) that ships with scikit-learn
    digits = datasets.load_digits()
    X…
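The two truncated previews above set up KNN regression on noisy samples of sin(x) and KNN classification on the built-in digits data. As a minimal, self-contained sketch of that kind of experiment with scikit-learn (the variable names, split ratio and neighbour counts below are illustrative choices, not taken from the original articles):

import numpy as np
from sklearn import datasets, neighbors
from sklearn.model_selection import train_test_split

# Regression: noisy samples of sin(x) fitted with a distance-weighted KNN regressor
rng = np.random.RandomState(0)
X = 5 * rng.rand(100, 1)
y = np.sin(X).ravel()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
regr = neighbors.KNeighborsRegressor(n_neighbors=5, weights='distance')
regr.fit(X_train, y_train)
print("KNN regression R^2:", regr.score(X_test, y_test))

# Classification: the built-in handwritten-digits dataset with a plain KNN classifier
digits = datasets.load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, test_size=0.25, random_state=0, stratify=digits.target)
clf = neighbors.KNeighborsClassifier(n_neighbors=5)
clf.fit(X_train, y_train)
print("KNN classification accuracy:", clf.score(X_test, y_test))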
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle

n = 1000  # number of points to create
xcord = np.zeros((n))
ycord = np.zeros((n))
markers = []
colors = []
fw = open('D:\\LearningResource\\mac…

import numpy as np
import operator as op
from os import listdir

def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]
    diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat**2
    sqDistances = sqDiffMat.sum(axis=1)
    distan…
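The classify0 preview above is cut off right after the squared differences are summed. As a hedged sketch of how a hand-rolled KNN classifier of this shape is usually completed, taking the square root, sorting the distances and letting the k nearest labels vote (knn_classify and the toy data below are illustrative names, not the original code):

import numpy as np
import operator as op

def knn_classify(inX, dataSet, labels, k):
    # Euclidean distance from the query point to every training sample
    diffMat = np.tile(inX, (dataSet.shape[0], 1)) - dataSet
    distances = np.sqrt((diffMat ** 2).sum(axis=1))
    # indices of the k nearest neighbours
    nearest = distances.argsort()[:k]
    # majority vote among their labels
    classCount = {}
    for i in nearest:
        classCount[labels[i]] = classCount.get(labels[i], 0) + 1
    return max(classCount.items(), key=op.itemgetter(1))[0]

# toy usage
group = np.array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
labels = ['A', 'A', 'B', 'B']
print(knn_classify(np.array([0.1, 0.2]), group, labels, 3))  # expected: 'B'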
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn import datasets
from sklearn.semi_supervised import LabelSpreading

def load_data():
    '''
    Load the dataset
    '''
    digits = datasets.load_digits()
    ###### shuffle the samples…

import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn import datasets
from sklearn.semi_supervised import LabelPropagation

def load_data():
    '''
    Load the dataset
    '''
    digits = datasets.load_digits()
    ###### shuffle the samples ########
    rng =…
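Both previews above follow the same pattern: shuffle the digits samples, hide most of the labels, and let a graph-based semi-supervised model propagate the known labels to the rest. A rough sketch under those assumptions (the 90% unlabeled split, the knn kernel and n_neighbors=7 are my own illustrative choices):

import numpy as np
from sklearn import datasets, metrics
from sklearn.semi_supervised import LabelSpreading

digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = rng.permutation(len(digits.data))          # shuffle the samples
X, y = digits.data[indices], digits.target[indices]

# hide the labels of 90% of the samples (-1 marks "unlabeled")
n_labeled = len(y) // 10
y_train = np.copy(y)
y_train[n_labeled:] = -1

model = LabelSpreading(kernel='knn', n_neighbors=7, max_iter=100)
model.fit(X, y_train)

# evaluate only on the samples whose labels were hidden
predicted = model.transduction_[n_labeled:]
print("accuracy on unlabeled samples:", metrics.accuracy_score(y[n_labeled:], predicted))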
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor

def load_data():
    '''
    Load the dataset used for the classification problem. The dataset uses scikit-…

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor

def creat_data(n):
    np.random.seed(0)
    X = 5 * np…
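The two decision-tree previews above cover a classification tree on a built-in dataset and a regression tree on synthetic noisy data. A compact sketch of both, with illustrative parameter choices (the iris dataset, max_depth=3 and the noise pattern are assumptions on my part):

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor

# classification tree on the iris dataset
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.25, random_state=0, stratify=iris.target)
clf = DecisionTreeClassifier(criterion='gini', random_state=0)
clf.fit(X_train, y_train)
print("classification accuracy:", clf.score(X_test, y_test))

# regression tree on noisy samples of sin(x)
rng = np.random.RandomState(0)
X = np.sort(5 * rng.rand(100, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 0.5 * rng.rand(20)          # add noise to every 5th target value
regr = DecisionTreeRegressor(max_depth=3, random_state=0)
regr.fit(X, y)
print("regression R^2:", regr.score(X, y))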
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import train_test_split
from sklearn import datasets, linear_model, discriminant_analysis

def load_data()…

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split

def load_data():
    # use the built-in scikit-learn…

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split

def load_data():
    diabetes = datasets.…
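The three previews above combine linear models and discriminant analysis with 3-D matplotlib plots of the iris/diabetes data. As a compact sketch of the modelling part only, LinearDiscriminantAnalysis on iris (the split and the printed quantities are illustrative, not the articles' exact output):

import numpy as np
from sklearn import datasets, discriminant_analysis
from sklearn.model_selection import train_test_split

# linear discriminant analysis on the iris dataset
iris = datasets.load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.25, random_state=0, stratify=iris.target)

lda = discriminant_analysis.LinearDiscriminantAnalysis()
lda.fit(X_train, y_train)
print("coefficient matrix shape:", lda.coef_.shape)   # one weight vector per class
print("test accuracy:", lda.score(X_test, y_test))

# project the data onto the discriminant directions (useful for the 2-D/3-D plots)
X_proj = lda.transform(X_train)
print("projected shape:", X_proj.shape)               # (n_samples, 2) for 3 classes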
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split

def load_data():
    diabetes = datasets.load_diabetes()
    return train_test_split(diabetes.data, diabetes.tar…
import numpy as np
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split

def load_data():
    diabetes = datasets.load_diabetes()
    return train_test_split(diabetes.data, diabetes.target, test_size=0.25, random_state=0…
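These diabetes previews all start from the same load_data helper and then fit different linear models. A minimal end-to-end sketch comparing ordinary least squares with ridge and lasso regularization (the alpha values below are illustrative defaults, not the articles' choices):

import numpy as np
from sklearn import datasets, linear_model
from sklearn.model_selection import train_test_split

diabetes = datasets.load_diabetes()
X_train, X_test, y_train, y_test = train_test_split(
    diabetes.data, diabetes.target, test_size=0.25, random_state=0)

for name, regr in [("LinearRegression", linear_model.LinearRegression()),
                   ("Ridge", linear_model.Ridge(alpha=1.0)),
                   ("Lasso", linear_model.Lasso(alpha=0.1))]:
    regr.fit(X_train, y_train)
    mse = np.mean((regr.predict(X_test) - y_test) ** 2)
    print("%s: test R^2=%.3f, MSE=%.1f" % (name, regr.score(X_test, y_test), mse))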
import numpy as np

def loadDataSet():
    dataMat = []
    labelMat = []
    fr = open('D:\\LearningResource\\machinelearninginaction\\Ch05\\testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        dataMat.append([1.0, float(lineArr[0]), float(li…

import matplotlib
import numpy as np
import matplotlib.pyplot as plt

n = 1000
xcord0 = []
ycord0 = []
xcord1 = []
ycord1 = []
markers = []
colors = []
for i in range(n):
    [r0, r1] = np.random.standard_normal(2)
    myClass = np.random.uniform(0, 1)
    if (myClas…
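The two previews above belong to a from-scratch logistic regression example: one loads a local testSet.txt file (two features per line plus a label, with a bias term of 1.0 prepended), the other generates Gaussian point clouds for plotting. Since the local file is not available here, the hedged sketch below trains the same kind of model by batch gradient ascent on synthetic two-class data (sigmoid, grad_ascent and every constant are illustrative, not the book's exact code):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def grad_ascent(dataMat, labels, alpha=0.001, max_iter=500):
    # batch gradient ascent on the log-likelihood of logistic regression
    X = np.asarray(dataMat, dtype=float)          # shape (m, n); first column is the bias term 1.0
    y = np.asarray(labels, dtype=float).reshape(-1, 1)
    weights = np.ones((X.shape[1], 1))
    for _ in range(max_iter):
        error = y - sigmoid(X @ weights)          # prediction error on every sample
        weights += alpha * X.T @ error            # move along the gradient
    return weights

# synthetic two-class data in place of the local testSet.txt file
rng = np.random.RandomState(0)
class0 = rng.standard_normal((100, 2)) + [-2.0, -2.0]
class1 = rng.standard_normal((100, 2)) + [2.0, 2.0]
X = np.hstack([np.ones((200, 1)), np.vstack([class0, class1])])
y = np.array([0] * 100 + [1] * 100)
w = grad_ascent(X, y)
acc = np.mean((sigmoid(X @ w).ravel() > 0.5) == y)
print("weights:", w.ravel(), "training accuracy:", acc)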
import numpy as np

def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'post…

import matplotlib.pyplot as plt

decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")

def getNumLeafs(myTree):
    numLeafs = 0
    f…

import numpy as np
import operator as op
from math import log

def createDataSet():
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    return dataSet, labels

dataSet, labels = createD…
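The previews above come from the hand-written naive Bayes and ID3 decision tree examples (word lists, tree plotting, and the tiny 'no surfacing'/'flippers' dataset). The heart of the ID3 tree is Shannon entropy and information gain; a short illustrative reimplementation on that same five-row dataset (the function names are mine, not the original code):

from math import log

def calc_shannon_entropy(dataSet):
    # Shannon entropy of the class labels (last column of each row)
    counts = {}
    for row in dataSet:
        counts[row[-1]] = counts.get(row[-1], 0) + 1
    n = len(dataSet)
    return -sum((c / n) * log(c / n, 2) for c in counts.values())

def split_dataset(dataSet, axis, value):
    # rows whose feature `axis` equals `value`, with that feature removed
    return [row[:axis] + row[axis + 1:] for row in dataSet if row[axis] == value]

dataSet = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
base = calc_shannon_entropy(dataSet)
for axis in (0, 1):
    new = sum(len(sub) / len(dataSet) * calc_shannon_entropy(sub)
              for sub in (split_dataset(dataSet, axis, v)
                          for v in {row[axis] for row in dataSet}))
    print("information gain of feature %d: %.3f" % (axis, base - new))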
import matplotlib.pyplot as plt
from sklearn import datasets, naive_bayes
from sklearn.model_selection import train_test_split

# load the digits dataset that ships with scikit-learn
def load_data():
    '''
    Load the dataset used for the classification problem. Here the digits dataset that ships with scikit-learn is used.
    '''
    digits = datasets.load…
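A brief sketch of how the scikit-learn naive Bayes variants are typically compared on the digits data (the three classifiers and their smoothing/binarization values are illustrative choices, not necessarily the article's):

from sklearn import datasets, naive_bayes
from sklearn.model_selection import train_test_split

digits = datasets.load_digits()
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, test_size=0.25, random_state=0, stratify=digits.target)

for name, clf in [("GaussianNB", naive_bayes.GaussianNB()),
                  ("MultinomialNB", naive_bayes.MultinomialNB(alpha=1.0)),
                  ("BernoulliNB", naive_bayes.BernoulliNB(alpha=1.0, binarize=8.0))]:
    clf.fit(X_train, y_train)
    print("%s test accuracy: %.3f" % (name, clf.score(X_test, y_test)))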
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn.metrics import adjusted_rand_score
from sklearn.datasets import make_blobs

def create_data(centers, num=100, std=0.7):
    X, labels_true = make_b…

import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs

def create_data(centers, num=100, std=0.7):
    '''
    Generate a dataset for clustering.
    :param centers: array made up of the cluster centres. If the centres are two-dimensional, every generated sample is two-dimensional as well.
    :param num: number of samples
    :param std: each cluster's…

import numpy as np
import matplotlib.pyplot as plt
from sklearn import mixture
from sklearn.metrics import adjusted_rand_score
from sklearn.datasets import make_blobs

def create_data(centers, num=100, std=0.7):
    X, labels_true = make_b…
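All of the clustering previews above generate isotropic Gaussian blobs with make_blobs and score the resulting partition with the adjusted Rand index (ARI). A condensed sketch comparing several scikit-learn clusterers on such data (the centres, cluster_std and the algorithm parameters are illustrative):

from sklearn import cluster, mixture
from sklearn.datasets import make_blobs
from sklearn.metrics import adjusted_rand_score

centers = [[1, 1], [-1, -1], [1, -1], [-1, 1]]
X, labels_true = make_blobs(n_samples=1000, centers=centers, cluster_std=0.5, random_state=0)

predictions = {
    "KMeans": cluster.KMeans(n_clusters=4, random_state=0).fit_predict(X),
    "DBSCAN": cluster.DBSCAN(eps=0.3, min_samples=10).fit_predict(X),
    "Agglomerative": cluster.AgglomerativeClustering(n_clusters=4).fit_predict(X),
    "GaussianMixture": mixture.GaussianMixture(n_components=4, random_state=0).fit(X).predict(X),
}
for name, labels_pred in predictions.items():
    print("%s ARI: %.3f" % (name, adjusted_rand_score(labels_true, labels_pred)))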
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, manifold

def load_data():
    '''
    Load the data used for dimensionality reduction
    '''
    # use the iris dataset that ships with scikit-learn
    iris = datasets.load_iris()
    return iris.data, iris.target

# Isometric mapping (Isomap) dimensionality-reduction model…
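A short sketch of what an Isomap experiment on the iris data typically looks like (the neighbourhood sizes tried below are illustrative):

from sklearn import datasets, manifold

iris = datasets.load_iris()
X = iris.data

# embed the 4-D iris data into 2-D with Isomap, trying several neighbourhood sizes
for n_neighbors in (5, 10, 30):
    isomap = manifold.Isomap(n_neighbors=n_neighbors, n_components=2)
    X_2d = isomap.fit_transform(X)
    print("n_neighbors=%d -> embedded shape %s, reconstruction error %.4f"
          % (n_neighbors, X_2d.shape, isomap.reconstruction_error()))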
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, linear_model, svm
from sklearn.model_selection import train_test_split

def load_data_regression():
    '''
    Load the dataset used for the regression problem
    '''
    diabetes = datasets.load_diabetes()  # use the scikit-lea…
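The support-vector regression preview above again starts from the diabetes data. A minimal sketch comparing a linear SVR with an RBF-kernel SVR on that task (the C, epsilon and max_iter values are illustrative, not the article's tuned settings):

from sklearn import datasets, svm
from sklearn.model_selection import train_test_split

diabetes = datasets.load_diabetes()
X_train, X_test, y_train, y_test = train_test_split(
    diabetes.data, diabetes.target, test_size=0.25, random_state=0)

# compare a linear SVR with an RBF-kernel SVR on the regression task
for name, regr in [("LinearSVR", svm.LinearSVR(C=1.0, max_iter=10000)),
                   ("SVR(rbf)", svm.SVR(kernel='rbf', C=100.0, epsilon=0.1))]:
    regr.fit(X_train, y_train)
    print("%s test R^2: %.3f" % (name, regr.score(X_test, y_test)))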
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, manifold

def load_data():
    '''
    Load the data used for dimensionality reduction
    '''
    # use the iris dataset that ships with scikit-learn
    iris = datasets.load_iris()
    return iris.data, iris.target

# Multidimensional scaling (MDS) dimensionality-reduction model
def…

# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, decomposition

def load_data():
    '''
    Load the data used for dimensionality reduction
    '''
    # use the iris dataset that ships with scikit-learn
    iris = datasets.load_iris()
    return iris.data, iris.target

# Kernelized PCA: Kerne…

# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, decomposition

def load_data():
    '''
    Load the data used for dimensionality reduction
    '''
    # use the iris dataset that ships with scikit-learn
    iris = datasets.load_iris()
    return iris.data, iris.target

# Dimensionality reduction for extremely large datasets: I…
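The manifold-learning and decomposition previews above (MDS, KernelPCA, IncrementalPCA) all reduce the iris data to a low-dimensional embedding. A compact sketch of the three under illustrative parameter choices (the gamma and batch_size values are assumptions, not taken from the articles):

from sklearn import datasets, decomposition, manifold

iris = datasets.load_iris()
X = iris.data

# MDS: metric multidimensional scaling down to 2 components
mds = manifold.MDS(n_components=2, random_state=0)
X_mds = mds.fit_transform(X)
print("MDS stress: %.1f" % mds.stress_)

# KernelPCA: non-linear PCA with an RBF kernel
kpca = decomposition.KernelPCA(n_components=2, kernel='rbf', gamma=1.0)
print("KernelPCA embedding shape:", kpca.fit_transform(X).shape)

# IncrementalPCA: PCA fitted in mini-batches, intended for datasets too large to hold in memory
ipca = decomposition.IncrementalPCA(n_components=2, batch_size=50)
ipca.fit(X)                       # partial_fit(chunk) can be used instead, chunk by chunk
print("IncrementalPCA explained variance ratio:", ipca.explained_variance_ratio_)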