# 》》 (scraped page marker — commented out so the file parses)

from django.shortcuts import render
# Create your views here. from hdfs.client import Client
from django.views import View
from hdfs.client import Client
import os #
# # 关于python操作hdfs的API可以查看官网:
# # https://hdfscli.readthedocs.io/en/latest/api.html
#
# # 读取hdfs文件内容,将每行存入数组返回
# def read_hdfs_file(client, filename):
# lines = []
# with client.read(filename, encoding='utf-8', delimiter='\n') as reader:
# for line in reader:
# # pass
# # print line.strip()
# lines.append(line.strip())
# return lines
#
#
# # 创建目录
# def mkdirs(client, hdfs_path):
# client.makedirs(hdfs_path)
#
#
# # 删除hdfs文件
# def delete_hdfs_file(client, hdfs_path):
# client.delete(hdfs_path)
#
#
# # 上传文件到hdfs
def put_to_hdfs(client, local_path, hdfs_path):
    """Upload the local file at ``local_path`` to ``hdfs_path`` on HDFS.

    ``cleanup=True`` removes any partially uploaded data if the
    transfer fails.
    """
    client.upload(hdfs_path, local_path, cleanup=True)
# # 从hdfs获取文件到本地
# def get_from_hdfs(client, hdfs_path, local_path):
# download(hdfs_path, local_path, overwrite=False)
#
#
# # 追加数据到hdfs文件
# def append_to_hdfs(client, hdfs_path, data):
# client.write(hdfs_path, data, overwrite=False, append=True)
#
#
# # 覆盖数据写到hdfs文件
def write_to_hdfs(client, hdfs_path, data):
    """Write ``data`` to ``hdfs_path``, replacing any existing file.

    ``overwrite=True`` with ``append=False`` makes this a full
    overwrite rather than an append.
    """
    client.write(hdfs_path, data, overwrite=True, append=False)
# # 移动或者修改文件
def move_or_rename(client, hdfs_src_path, hdfs_dst_path):
    """Move (or rename) ``hdfs_src_path`` to ``hdfs_dst_path`` on HDFS."""
    client.rename(hdfs_src_path, hdfs_dst_path)
#
#
# # 返回目录下的文件
# def list(client, hdfs_path):
# return client.list(hdfs_path, status=False)
#
#
# # root:连接的跟目录
# client = Client("http://192.168.88.129:50070",
# root="/", timeout=5 * 1000, session=False)
# # put_to_hdfs(client,'a.csv','/user/root/a.csv')
# # append_to_hdfs(client,'/b.txt','111111111111111'+'\n')
# # write_to_hdfs(client,'/b.txt','222222222222'+'\n')
# # move_or_rename(client,'/b.txt', '/user/b.txt')
# mkdirs(client, '/input1/python1')
# print(list(client,'/input'))
# read_hdfs_file(client,'/')
# client.list("/")
def mkdirs(client, hdfs_path):
    """Create ``hdfs_path`` (including intermediate directories) on HDFS."""
    client.makedirs(hdfs_path)


def get_IS_File(client, hdfs_paht):
    """Return True when ``hdfs_paht`` names a regular file (not a directory).

    Uses the WebHDFS status call, whose ``type`` field is ``'FILE'`` or
    ``'DIRECTORY'``.

    NOTE(review): the parameter name is misspelled ('paht' for 'path');
    it is kept as-is so any keyword-argument callers keep working.
    """
    # The two defs were fused onto one line in the scraped source; this
    # restores valid syntax without changing either function's behavior.
    return client.status(hdfs_paht)['type'] == 'FILE'
from hadoop_hdfs.settings import UPLOAD_ROOT
import os


def put_to_hdfs(client, local_path, hdfs_path):
    """Upload ``local_path`` to ``hdfs_path`` on HDFS.

    ``cleanup=True`` removes partially uploaded data on failure.

    NOTE(review): this duplicates an earlier definition of the same
    name in this file; the later one wins at import time.
    """
    # The scraped source fused `import os` and this def onto one line;
    # they are separated here to restore valid syntax.
    client.upload(hdfs_path, local_path, cleanup=True)
# Module-level WebHDFS client shared by all the view code below.
# root="/" makes relative HDFS paths resolve from the filesystem root.
# NOTE(review): the earlier commented-out copy of this call labels
# 5 * 1000 as milliseconds — confirm the units the hdfs Client expects.
client = Client("http://192.168.88.129:50070",
root="/", timeout=5 * 1000, session=False)
def read_hdfs_file(client, filename):
    """Read an HDFS text file and return its lines, stripped, as a list."""
    lines = []
    with client.read(filename, encoding='utf-8', delimiter='\n') as reader:
        for line in reader:
            lines.append(line.strip())
    return lines


class Index(View):
    """Upload page: GET renders the form, POST stores the uploaded file
    locally and inspects the HDFS root listing.

    NOTE(review): this region was scrape-mangled (statements fused onto
    single lines, indentation lost); the control flow below is a
    best-effort reconstruction and should be confirmed against the
    original project.
    """

    def get(self, request):
        return render(request, "index.html")

    def post(self, request):
        def uploadfile(img):
            # Persist the uploaded file under UPLOAD_ROOT chunk by chunk.
            f = open(os.path.join(UPLOAD_ROOT, '', img.name), 'wb')
            for chunk in img.chunks():
                f.write(chunk)
            f.close()

        file = request.FILES.get("file")
        uploadfile(file)
        all_file = client.list("/")
        for i in all_file:
            # (is_file, name) tuple for each entry directly under "/".
            file_true = get_IS_File(client, "/{}".format(i)), i
            print(file_true)
            # NOTE(review): the guarding `if file_true == "True":` is
            # commented out in the original, so this returns on the
            # first iteration, skipping the mkdirs call below.
            return render(request, "index.html", locals())
        # Only reached when the HDFS root listing is empty.
        mkdirs(client, file.name)
        return render(request, "index.html", locals())

# Views.py — caption from the scraped page for the code above

 # 导入必要模块
import pandas as pd
from sqlalchemy import create_engine
from matplotlib import pylab as plt
from django.views import View
from django.shortcuts import render
import os
from hadoop_hdfs.settings import UPLOAD_ROOT
from web.models import *
def uploadfile(img):
    """Save an uploaded file object to UPLOAD_ROOT, chunk by chunk.

    ``img`` is expected to expose ``.name`` and ``.chunks()`` (Django
    UploadedFile interface).
    """
    # Fix: the original opened the file with open()/close(), leaking the
    # handle if a chunk write raised; `with` guarantees it is closed.
    with open(os.path.join(UPLOAD_ROOT, '', img.name), 'wb') as f:
        for chunk in img.chunks():
            f.write(chunk)
class Upload(View):
    """Image upload view: GET shows the form, POST stores the file on
    disk and records its path in the Image model."""

    def get(self, request):
        # show = Image.objects.all()
        return render(request, "haha.html", locals())

    def post(self, request):
        img = request.FILES.get('img')
        uploadfile(img)
        # Record where the file was stored so templates can reference it.
        record = Image(img="/upload/" + img.name)
        record.save()
        return render(request, "haha.html")
# def post(self,request):
# # 初始化数据库连接,使用pymysql模块
# # MySQL的用户:root, 密码:147369, 端口:3306,数据库:mydb
# engine = create_engine('mysql+pymysql://root:@127.0.0.1/dj')
#
# # 查询语句,选出employee表中的所有数据
# sql = '''select * from haha;'''
#
# # read_sql_query的两个参数: sql语句, 数据库连接
# df = pd.read_sql_query(sql, engine)
#
# # 输出employee表的查询结果
# print(df)
#
# # 新建pandas中的DataFrame, 只有id,num两列
# # df = pd.DataFrame({'id':[1,2],'name': ['111','222'],'image_url':['http://111','http://222']})
# # df = pd.DataFrame({"id":df['sentiment'],"text":df['text'],})
# df.groupby(by='sentiment').count()['text'].plot.pie(autopct="%0.4f%%", subplots=True)
# plt.savefig("../upload/1.jpg")
# plt.show()
# # 将新建的DataFrame储存为MySQL中的数据表,不储存index列
# # df.to_sql(name='lallala', con=engine, index=False)
#
# print('Read from and write to Mysql table successfully!')
# return render(request,"haha.html",locals())


class Uploads(View):
    """Render the haha page with every stored Image record.

    The query result must stay named ``show`` because it reaches the
    template through ``locals()``.
    """

    def get(self, request):
        show = Image.objects.all()
        context = locals()
        return render(request, "haha.html", context)

# haha.py — caption from the scraped page for the code above

hadoop 》》 django 简单操作hdfs 语句的更多相关文章

  1. Java 简单操作hdfs API

    注:图片如果损坏,点击文章链接:https://www.toutiao.com/i6632047118376780295/ 启动Hadoop出现问题:datanode的clusterID 和 name ...

  2. Hadoop Java API操作HDFS文件系统(Mac)

    1.下载Hadoop的压缩包 tar.gz   https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/stable/ 2.关联jar包 在 ...

  3. java 简单操作HDFS

    创建java 项目 package com.yw.hadoop273; import org.apache.hadoop.conf.Configuration; import org.apache.h ...

  4. Django简单操作

    一.静态文件配置 静态文件配置 STATIC_URL = '/static/' STATICFILES_DIRS = [ os.path.join(BASE_DIR,'static') ] # 暴露给 ...

  5. hadoop 使用java操作hdfs

    1.创建目录 import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.ha ...

  6. mysql 首次安装后 简单操作与语句 新手入门

    首先cd到安装目录中bin路径:这是我的安装路径以管理员身份打开cmd(防止权限不足)cd E:\>cd E:\mysql\mysql-5.5.40-winx64\bin 首次安装需要输入 my ...

  7. python django简单操作

    准备: pip3 install  django==1.10.3 cmd django-admin startproject  guest  创建一个guest的项目 cd guest manage. ...

  8. HDFS介绍及简单操作

    目录 1.HDFS是什么? 2.HDFS设计基础与目标 3.HDFS体系结构 3.1 NameNode(NN)3.2 DataNode(DN)3.3 SecondaryNameNode(SNN)3.4 ...

  9. Django简单的数据库操作

    当然,本篇的前提是你已经配置好了相关的环境,这里就不详细介绍. 一. 在settings.py文件中设置数据库属性. 如下: DATABASES = { 'default': { 'ENGINE': ...

随机推荐

  1. 2019-2020 ICPC, NERC, Northern Eurasia Finals (Unrated, Online Mirror, ICPC Rules, Teams Preferred)

    这是一场三人组队赛来的,单人带电子模板不限时单挑试一下.按照难度排序. B - Balls of Buma 题意:玩祖玛,射入任意颜色的球一个,当某段长度变长了且长度变长后>=3则这段就会消除, ...

  2. 【Robot Framework 项目实战 04】基于录制,生成RF关键字及 自动化用例

    背景 因为服务的迁移,Jira版本的更新,很多接口文档的维护变少,导致想要编写部分服务的自动化测试变得尤为麻烦,很多服务,尤其是客户端接口需要通过抓包的方式查询参数来编写自动化用例,但是过程中手工重复 ...

  3. GA函数优化

    一.遗传算法简介         遗传算法(Genetic Algorithms,GA)是1962年美国人提出,模拟自然界遗传和生物进化论而成的一种并行随机搜索最优化方法. 与自然界中“优胜略汰,适者 ...

  4. jvm方法栈

    调用栈 先入后出 栈是一个只有一个口的容器,先进入栈的会落到栈底,出栈的时候最后出.最后进入栈的,在栈顶,出栈时先出. 方法调用时,需要在内存中开辟一块存储空间做为线程栈空间 每个线程都由自己的栈 调 ...

  5. onNewIntent

    当Activity不是Standard模式,并且被复用的时候,会触发onNewIntent(Intent intent) 这个方法,一般用来获取新的Intent传递的数据 我们一般会把MainAcit ...

  6. Qt编写数据可视化大屏界面电子看板4-布局另存

    一.前言 布局另存是数据可视化大屏界面电子看板系统中的额外功能之一,主要用于有时候用户需要在现有布局上做个微调,然后直接将该布局另存为一个布局配置文件使用,可以省略重新新建布局重新来一次大的调整的工作 ...

  7. mac配置maven项目的方法(含导入selenium包)

    1.配置了java的环境变量    文件位置:  /Users/lucax/.bash_profile JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1 ...

  8. 生产订单BADI实例

    转自:https://blog.csdn.net/wbin9752/article/details/7951390 以生产订单收货为例: 1.BADI的查找方法: SE38在程序中搜索关键字CL_EX ...

  9. pop动画库简单使用小记

    - (void)animateInView:(UIView *)view{ UIImageView *imageView = [[UIImageView alloc] initWithImage:[U ...

  10. nginx虚拟机无法访问解决

    .重要:修改配置文件使用虚拟机,否则怎么配置都不生效,添加如下用户 [root@host---- html]# ll /etc/nginx/nginx.conf -rw-r--r-- root roo ...