A few words up front:

All of the data used in the examples can be downloaded from GitHub; just grab the whole repository as one archive.

The address is: http://github.com/pydata/pydata-book

One more thing that needs to be said:

I am using Python 2.7. Some of the code in the book contains errors; I reworked it until it ran under my own 2.7 environment.

# coding: utf-8
from pandas import Series, DataFrame
import pandas as pd
import numpy as np

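# Series: a one-dimensional array of values paired with an index of labels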
obj = Series([4,7,-9,7])
obj
obj.values
obj.index

obj2 = Series([4,5,6,3],index = ['a','b','e','c'])
obj2
obj2.index
obj2['a']
obj2[['a','b','c']]
obj2[obj2>4]
obj2 * 2
np.exp(obj2)
'b' in obj2
'r' in obj2

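# Building a Series from a dict; labels with no data become NaN, and arithmetic aligns on the index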
sdata = {'Ohio':35000, 'Texas':71000, 'Oregon':16000, 'Utah':5000}
obj3 = Series(sdata)
obj3
states = ['California','Ohio','Oregon','Texas']
obj4 = Series(sdata, index=states)
obj4
pd.isnull(obj4)
pd.notnull(obj4)
obj4.isnull()

obj3 + obj4
obj4.name = 'population'
obj4.index.name = 'state'
obj4

obj.index = ['Bob', 'Steve', 'Jeff', 'Ryan']
obj

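# DataFrame: a tabular, spreadsheet-like structure with both row and column labels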
data = {'state':['Ohio','Ohio','Ohio','Nevada','Nevada'],'year':[2000,2001,2002,2001,2002],'pop':[1.5,1.7,3.6,2.4,2.9]}
frame = DataFrame(data)
frame
DataFrame(data,columns=['year','state','pop'])
frame2 = DataFrame(data, columns=['year','state','pop','debt'],index=['one','two','three','four','five'])
frame2
frame2.columns
frame2['state']
frame2.year
frame2.ix['three']
frame2['debt'] = 16.5
frame2
frame2['debt'] = np.arange(5.)
frame2

val = Series([-1.2,-1.5,-1.7],index=['two','four','five'])
frame2['debt'] = val
frame2

frame2['eastern'] = frame2.state == 'Ohio'
frame2
del frame2['eastern']
frame2.columns

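# DataFrame from a nested dict of dicts: outer keys become the columns, inner keys the row index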
pop = {'Nevada':{2001:2.4,2002:2.9},'Ohio':{2000:1.5,2001:1.7,2002:3.6}}
frame3 = DataFrame(pop)
frame3
frame3.T
DataFrame(pop,index=[2001,2002,2003])
pdata = {'Ohio':frame3.Ohio[:-1],'Nevada':frame3.Nevada[:2]}
DataFrame(pdata)

frame3.index.name = 'year';frame3.columns.name = 'state'
frame3
frame3.values
frame2.values

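# Index objects hold the axis labels; they are immutable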
obj = Series(range(3), index=['a','b','c'])
index = obj.index
index
index[1:]
index[1] = 'd'   # raises an exception: Index objects cannot be modified in place
index = pd.Index(np.arange(3))
obj2 = Series([1.5,-2.5,0],index=index)
obj2.index is index

frame3
'Ohio' in frame3.columns
2003 in frame3.index

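# reindex: conform an object to a new index, optionally filling in missing values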
obj = Series([4.5,7.2,-5.3,3.6],index=['d','b','a','c'])
obj
obj2 = obj.reindex(['a','b','c','d','e'])
obj2
obj.reindex(['a','b','c','d','e'],fill_value=0)
obj3 = Series(['blue','purple','yellow'],index=[0,2,4])
obj3.reindex(range(9),method='ffill')

frame = DataFrame(np.arange(9).reshape((3,3)), index=['a','c','d'],columns=['Ohio','Texas','California'])
frame
frame2 = frame.reindex(['a','b','c','d'])
frame2
states = ['Texas','Utah','California']
frame.reindex(columns=states)
frame.reindex(index=['a','b','c','d'],method='ffill',columns=states)
frame.ix[['a','b','c','d'],states]

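# drop: return a new object with the given entries removed from an axis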
obj = Series(np.arange(5.),index=['a','b','c','d','e'])
new_obj = obj.drop('c')
new_obj
obj.drop(['d','c'])

data = DataFrame(np.arange(16).reshape((4,4)),index=['Ohio','Colorado','Utah','New York'],columns=['one','two','three','four'])
data.drop(['Colorado','Ohio'])
data.drop('two',axis=1)
data.drop(['two','four'],axis=1)

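# Indexing, selection, and filtering; note that label-based slicing ('b':'c') includes the endpoint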
obj = Series(np.arange(4.),index=['a','b','c','d'])
obj['b']
obj[1]
obj[2:4]
obj[['b','a','d']]
obj[[1,3]]
obj[obj < 2]
obj['b':'c']
obj['b':'c'] = 5
obj

data = DataFrame(np.arange(16).reshape((4,4)),index=['Ohio','Colorado','Utah','New York'],columns=['one','two','three','four'])
data
data['two']
data[['three','one']]
data[:2]
data[data['three'] > 5]
data < 5
data[data < 5] = 0
data
data.ix['Colorado',['two','three']]
data.ix[['Colorado','Utah'],[3,0,1]]
data.ix[2]
data.ix[:'Utah','two']
data.ix[data.three > 5, :4]

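# Arithmetic between pandas objects aligns on the labels; non-overlapping labels produce NaN unless a fill_value is given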
s1 = Series([7.3,-2.5,3.4,1.5],index=['a','c','d','e'])
s2 = Series([-2.1,3.6,-1.5,4,3.1], index=['a','c','e','f','g'])
s1
s2
s1 + s2

df1 = DataFrame(np.arange(9.).reshape((3,3)),columns=list('bcd'),index=['Ohio','Texas','Colorado'])
df2 = DataFrame(np.arange(12.).reshape((4,3)),columns=list('bde'),index=['Utah','Ohio','Texas','Oregon'])
df1
df2
df1 + df2

df1 = DataFrame(np.arange(12.).reshape((3,4)),columns=list('abcd'))
df2 = DataFrame(np.arange(20.).reshape((4,5)),columns=list('abcde'))
df1
df2
df1 + df2
df1.add(df2, fill_value=0)
df1.reindex(columns=df2.columns,fill_value=0)

arr = np.arange(12.).reshape((3,4))
arr
arr[0]
arr - arr[0]

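# Operations between a DataFrame and a Series broadcast down the rows by default; use sub(..., axis=0) to match on the row index instead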
frame = DataFrame(np.arange(12.).reshape((4,3)),columns=list('bde'),index=['Utah','Ohio','Texas','Oregon'])
series = frame.ix[0]
frame
series
frame - series

series2 = Series(range(3),index=['b','e','f'])
frame + series2

series3 = frame['d']
frame
series3
frame.sub(series3,axis=0)

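# Function application: apply works column- or row-wise, applymap/map work element-wise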
frame = DataFrame(np.random.randn(4,3),columns=list('bde'),index=['Utah','Ohio','Texas','Oregon'])
frame
np.abs(frame)
f = lambda x:x.max() - x.min()
frame.apply(f)
frame.apply(f,axis=1)

def f(x):
    return Series([x.min(),x.max()],index=['min','max'])
frame.apply(f)

format = lambda x: '%.2f' % x
frame.applymap(format)
frame['e'].map(format)

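# Sorting: sort_index sorts by labels; in this older pandas, sorting by values uses order() on a Series and sort_index(by=...) on a DataFrame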
obj = Series(range(4),index=['d','a','b','c'])
obj.sort_index()

frame = DataFrame(np.arange(8).reshape((2,4)),index=['three','one'],columns=['d','a','b','c'])
frame.sort_index()
frame.sort_index(axis=1)
frame.sort_index(axis=1,ascending=False)

obj = Series([4,7,-1,3])
obj.order()
obj = Series([4,np.nan,7,-1,np.nan,3])
obj.order()

frame = DataFrame({'b':[4,7,-3,2],'a':[0,1,0,1]})
frame
frame.sort_index(by='b')
frame.sort_index(by=['a','b'])

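# Ranking: rank assigns ranks from 1 through n, with ties broken according to the method argument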
obj = Series([7,-5,7,4,2,0,4])
obj.rank()
obj.rank(method='first')
obj.rank(ascending=False,method='max')
frame = DataFrame({'b':[4.3,7,-3,2],'a':[0,1,0,1],'c':[-2,5,8,-2.5]})
frame
frame.rank(axis=1)

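# Axis indexes may contain duplicate labels; selecting such a label returns all matching entries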
obj = Series(range(5),index=['a','a','b','b','c'])
obj
obj.index.is_unique
obj['a']
obj['c']
df = DataFrame(np.random.randn(4,3),index=['a','a','b','c'])
df
df.ix['b']

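# Summary statistics: reductions skip NaN by default (skipna=False changes that); describe produces several statistics at once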
df = DataFrame([[1.4,np.nan],[7.1,-4.5],[np.nan,np.nan],[0.75,-1.3]],index=['a','b','c','d'],columns=['one','two'])
df
df.sum()
df.sum(axis=1)
df.mean(axis=1,skipna=False)
df.idxmax()
df.idxmin()
df.cumsum()
df.describe()
obj = Series(['a','a','b','c']*4)
obj.describe()

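# Fetching stock prices with pandas.io.data (part of the old pandas used here; later split out as pandas-datareader); requires network access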
import pandas.io.data as web
all_data = {}
for ticker in ['AAPL','IBM','MSFT','GOOG']:
    all_data[ticker] = web.get_data_yahoo(ticker,'1/1/2000','1/1/2010')
price = DataFrame({tic:data['Adj Close'] for tic,data in all_data.iteritems()})
volume = DataFrame({tic:data['Volume'] for tic,data in all_data.iteritems()})
returns = price.pct_change()
returns.tail()

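# Unique values, value counts, and membership tests with isin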
obj = Series([1,2,3,4,5,63,2,11,34])
uniques = obj.unique()
uniques
obj.value_counts()
pd.value_counts(obj.values,sort=False)
mask = obj.isin([1])
mask
obj[mask]

data = DataFrame({'Qu1':[1,3,4,3,4],'Qu2':[2,3,1,2,3],'Qu3':[1,5,2,4,4]})
data
result = data.apply(pd.value_counts).fillna(0)
result

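# Handling missing data: isnull/notnull, dropna, fillna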
string_data = Series(['aaaaaaa','cccccccc',np.nan,'ddadsds'])
string_data.isnull()
string_data[0] = None
string_data.isnull()

from numpy import nan as NA
data = Series([1,NA,3.5,NA,7])
data.dropna()
data[data.notnull()]

data = DataFrame([[1.,6.5,3.],[1.,NA,NA],[NA,NA,NA],[NA,6.5,3.]])
cleaned = data.dropna()
data
cleaned
data.dropna(how='all')
data[4] = NA
data
data.dropna(axis=1,how='all')
df = DataFrame(np.random.randn(7,3))
df.ix[:4,1] = NA;df.ix[:2,2] = NA
df
df.dropna(thresh=3)
df.fillna(0)
df.fillna({1:0.5,3:-1})

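# Hierarchical indexing (MultiIndex): partial indexing, stack/unstack, swaplevel/sortlevel, and level-wise summaries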
data = Series(np.random.randn(10),index=[['a','a','a','b','b','b','c','c','d','d'],[1,2,3,1,2,3,1,2,2,3]])
data
data.index
data['b']
data[:,2]
data.unstack()
data.unstack().stack()

frame = DataFrame(np.arange(12).reshape((4,3)),index=[['a','a','b','b'],[1,2,1,2]],columns=[['Ohio','Ohio','Colorado'],['Green','Red','Green']])
frame
frame.index.names = ['key1','key2']
frame.columns.names = ['state','color']
frame
frame['Ohio']
frame.swaplevel('key1','key2')
frame.sortlevel(1)
frame.swaplevel(0,1).sortlevel(0)
frame.sum(level='key2')
frame.sum(level='color',axis=1)
