#!/usr/bin/env python2
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys, os, subprocess
from terminalsize import get_terminal_size
from time import time, sleep
import re
import fnmatch

def load_colors():
    color_script_fn = os.path.join(os.path.dirname(__file__), "color.enabled.sh")
    # Each non-comment line of color.enabled.sh looks like: Name='\e[...m'.
    # Map each color name to its ANSI escape sequence.
    with open(color_script_fn) as f:
        entries = [x.strip().split('=') for x in f
                   if x.strip() and not x.strip().startswith('#')]
    return dict((k, v.split("'")[1].replace('\e[', "\033[")) for k, v in entries)

Color = load_colors()
# HIBENCH_PRINTFULLLOG=1 keeps every log line (a newline after each write);
# otherwise '\r' lets successive status lines overwrite each other in place.
if int(os.environ.get("HIBENCH_PRINTFULLLOG", 0)):
    Color['ret'] = os.linesep
else:
    Color['ret'] = '\r'
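# Illustrative contents only -- the real escape codes come from
# color.enabled.sh. After loading, Color looks roughly like:
#   {'Red': '\033[0;31m', 'On_Yellow': '\033[43m', 'On_Blue': '\033[44m',
#    'Color_Off': '\033[0m', 'ClearEnd': '\033[K', ..., 'ret': '\r'}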

tab_matcher = re.compile("\t")
tabstop = 8
def replace_tab_to_space(s):
    def tab_replacer(match):
        pos = match.start()
        # A tab at column `pos` expands to the next tab stop, i.e.
        # tabstop - pos % tabstop spaces (always 1..tabstop). Match positions
        # are relative to the original string, so lines with several tabs are
        # approximate; str.expandtabs(tabstop) would be exact.
        length = tabstop - pos % tabstop
        return " " * length
    return tab_matcher.sub(tab_replacer, s)
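
# Example: with tabstop=8, replace_tab_to_space("abc\tdef") returns
# "abc     def" -- the tab at column 3 expands to 5 spaces so that "def"
# starts at the next tab stop, column 8.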

class _Matcher:
    # Hadoop 1 style: "... map = 50%,  reduce = 20% ..."
    hadoop = re.compile(r"^.*map\s*=\s*(\d+)%,\s*reduce\s*=\s*(\d+)%.*$")
    # Hadoop 2 style: "... map 50% reduce 20% ..."
    hadoop2 = re.compile(r"^.*map\s+(\d+)%\s+reduce\s+(\d+)%.*$")
    # Spark style: "... finished task ... in stage ... (tid ...) in ... on ... (3/10)"
    spark = re.compile(r"^.*finished task \S+ in stage \S+ \(tid \S+\) in.*on.*\((\d+)/(\d+)\)\s*$")
    def match(self, line):
        for p in [self.hadoop, self.hadoop2]:
            m = p.match(line)
            if m:
                return (float(m.groups()[0]) + float(m.groups()[1]))/2

        for p in [self.spark]:
            m = p.match(line)
            if m:
                return float(m.groups()[0]) / float(m.groups()[1]) * 100

matcher = _Matcher()
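
# Example lines the matcher recognizes (hypothetical log content; the caller
# passes in the lowercased line):
#   "... map = 50%,  reduce = 20% ..."  -> (50 + 20) / 2 = 35.0
#   "finished task 1.0 in stage 2.0 (tid 7) in 30 ms on host1 (3/10)" -> 30.0
# Lines that match neither pattern yield None.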

def show_with_progress_bar(line, progress, line_width):
    """
    Show text with progress bar.

    @progress:0-100
    @line: text to show
    @line_width: width of screen
    """
    pos = int(line_width * progress / 100)
    if len(line) < line_width:
        line = line + " " * (line_width - len(line))
    line = "{On_Yellow}{line_seg1}{On_Blue}{line_seg2}{Color_Off}{ret}".format(
        line_seg1 = line[:pos], line_seg2 = line[pos:], **Color)
    sys.stdout.write(line)
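
# Example: show_with_progress_bar("job 42% done", 42, 80) pads the text to 80
# columns, paints the first int(80 * 42 / 100) = 33 columns with a yellow
# background and the rest with blue, then emits Color['ret'] so the next
# write redraws the same terminal line.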

def execute(workload_result_file, command_lines):
    proc = subprocess.Popen(" ".join(command_lines), shell=True, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    count = 100   # start above the threshold so the first line triggers a size refresh
    last_time = 0
    log_file = open(workload_result_file, 'w')
    # see http://stackoverflow.com/a/4417735/1442961
    lines_iterator = iter(proc.stdout.readline, b"")
    for line in lines_iterator:
        count += 1
        if count > 100 or time() - last_time > 1:  # refresh terminal size every 100 lines or at least once per second
            count, last_time = 0, time()
            width, height = get_terminal_size()
            width -= 1

        try:
            line = line.rstrip()
            log_file.write(line+"\n")
            log_file.flush()
        except KeyboardInterrupt:
            proc.terminate()
            break
        line = line.decode('utf-8')
        line = replace_tab_to_space(line)
        #print "{Red}log=>{Color_Off}".format(**Color), line
        lline = line.lower()

        def table_not_found_in_log(line):
            # Hive reports a missing table as "Table <name> not found".
            table_not_found_pattern = "*Table * not found*"
            regex = fnmatch.translate(table_not_found_pattern)
            return re.compile(regex).match(line) is not None

        def database_default_exist_in_log(line):
            return "Database default already exists" in line

        def uri_with_key_not_found_in_log(line):
            return "Could not find uri with key [dfs.encryption.key.provider.uri]" in line
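
        # Example (hypothetical log line): "ERROR ... Table sales not found"
        # contains 'error' once lowercased, but table_not_found_in_log accepts
        # it, so it is bypassed instead of being highlighted in red below.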

        if ('error' in lline) and lline.lstrip() == lline:
            # Bypass benign Hive 'error's and the KeyProviderCache error.
            # The helper patterns are case-sensitive, so match against the
            # original-case line, not the lowercased copy.
            bypass_error_condition = (table_not_found_in_log(line)
                                      or database_default_exist_in_log(line)
                                      or uri_with_key_not_found_in_log(line))
            if not bypass_error_condition:
                COLOR = "Red"
                sys.stdout.write((u"{%s}{line}{Color_Off}{ClearEnd}\n" % COLOR).format(line=line, **Color).encode('utf-8'))

        else:
            if len(line) >= width:
                line = line[:width-4]+'...'
            progress = matcher.match(lline)
            if progress is not None:
                show_with_progress_bar(line, progress, width)
            else:
                sys.stdout.write(u"{line}{ClearEnd}{ret}".format(line=line, **Color).encode('utf-8'))
        sys.stdout.flush()
    print
    log_file.close()
    try:
        proc.wait()
    except KeyboardInterrupt:
        proc.kill()
        return 1
    return proc.returncode

def test_progress_bar():
    for i in range(101):
        show_with_progress_bar("test progress : %d" % i, i, 80)
        sys.stdout.flush()

        sleep(0.05)

if __name__=="__main__":
    sys.exit(execute(workload_result_file=sys.argv[1],
                     command_lines=sys.argv[2:]))
#    test_progress_bar()
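# Typical invocation (hypothetical paths, for illustration): argv[1] is the
# log file, the remaining arguments form the command to run, e.g.
#   python2 execute_with_log.py /tmp/wordcount.log hadoop jar example.jar args...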
