def Del_Dr(self, htmlpath, dirlist, ret, filetype, *file_args):
    """Rewrite a coverage-report HTML page so it shows incremental coverage only.

    Strips every row that does not belong to a changed (diff) class/file,
    drops the original metric columns, and appends three new columns —
    "Add lines", "Overlay lines" and "Coverage" — computed from the diff data.
    The page is rewritten in place (written to ``<htmlpath>_bat`` first, then
    renamed over the original).

    :param htmlpath: path of the HTML file to rewrite.
    :param dirlist: names (classes/packages/files) to KEEP in the page.
    :param ret: coverage statistics; shape depends on ``filetype``:
                - "package": ``ret[name] -> {'new': int, 'cover': int}``
                - "root":    ``ret[pkg][cls] -> {'new': int, 'cover': int}``
                - "file":    ``ret[pkg][cls] -> {'new_lines': [...], 'cover_lines': [...]}``
    :param filetype: "root" (report root index), "package" (package index)
                     or "file" (single source-file page).
    :param file_args: only used when ``filetype == "file"``; exactly two
                      positional extras are expected: ``(filename, diff_dict)``
                      where ``diff_dict[filename]['diff_voids']`` maps method
                      signatures to their changed line numbers.
    """
    with open(htmlpath, 'r') as e:
        html_doc = "".join(e.readlines())
    soup = BeautifulSoup(html_doc, 'lxml')

    # --- 1. Remove rows for names we do not keep (non-incremental content). ---
    for a_s in soup.select("a"):
        a_s_text = a_s.text.strip()
        if filetype == "file":
            # File pages label methods as "name(signature)"; match on the name only.
            a_s_text = a_s_text.split("(")[0]
        if str(a_s_text) not in dirlist and a_s.parent.parent.name == "tr":
            # Drop the whole <tr> that owns this link.
            a_s.parent.parent.extract()

    # Drop the original metric header cells, keeping only the first (name) column.
    for td in soup.find_all("tr")[0].find_all("td")[1:]:
        td.extract()

    # --- 2. Add the three incremental-coverage header columns. ---
    for title in ("Add lines", "Overlay lines", "Coverage"):
        header_td = soup.new_tag("td")
        header_td.string = title
        header_td.attrs = {'class': 'sortable'}
        soup.thead.tr.append(header_td)

    # --- 3. Strip old metric cells from body and footer rows. ---
    pack_tr_list = soup.find_all("tbody")[0].find_all("tr")
    for tpack in pack_tr_list:
        for pa_td in tpack.find_all("td")[1:]:
            pa_td.extract()
    for tfoot_td in soup.find_all("tfoot")[0].find_all("td")[1:]:
        tfoot_td.extract()

    # --- 4. Compute and append the new metric cells for every remaining row. ---
    for npack in pack_tr_list:
        pack_name = npack.find_all("a")[0].string.strip()
        addlines = 0
        covlines = 0
        if filetype == "package":
            # Package index: ret is keyed directly by class name.
            addlines = ret[pack_name]['new']
            covlines = ret[pack_name]['cover']
        elif filetype == "root":
            # Root index: sum every class inside the package.
            for cls_name in ret[pack_name]:
                addlines += ret[pack_name][cls_name]['new']
                covlines += ret[pack_name][cls_name]['cover']
        elif filetype == "file":
            # Method row: intersect the class's new/covered lines with the
            # lines belonging to this particular method.
            pack_void_name = pack_name.split("(")[0]
            filename, diff_dict = file_args
            filename_new_list = filename.split("src/main/java/")[-1].split("/")
            filename_new = ".".join(filename_new_list[:-1])
            class_name = filename_new_list[-1].split(".")[0]
            if filename in diff_dict and class_name in ret.get(filename_new, {}):
                void_lines_list = diff_dict[filename]['diff_voids'][pack_void_name]
                new_line_list = list(
                    set(ret[filename_new][class_name]['new_lines']).intersection(set(void_lines_list)))
                cover_line_list = list(
                    set(ret[filename_new][class_name]['cover_lines']).intersection(set(void_lines_list)))
                addlines = len(new_line_list)
                covlines = len(cover_line_list)

        if addlines == 0:
            coverage = '{:.2%}'.format(0)
        else:
            # float() guards against Python 2 integer floor division, which
            # would silently round every ratio down to 0% or 100%.
            coverage = '{:.2%}'.format(float(covlines) / addlines)

        if addlines:
            for cell_value in (addlines, covlines, coverage):
                value_td = soup.new_tag("td")
                value_td.string = "%s" % cell_value
                npack.append(value_td)
        else:
            # No incremental lines at all: remove the row entirely.
            npack.extract()

    # --- 5. Atomically replace the original page with the rewritten one. ---
    html_path_new = htmlpath + "_bat"
    with open(html_path_new, 'w+') as f:
        f.write(HTMLParser.HTMLParser().unescape(soup.prettify()))
    os.remove(htmlpath)
    os.rename(html_path_new, htmlpath)
# NOTE(review): stray web-page footer text ("浙公网安备 33010602011771号", a Chinese
# ICP/public-security filing badge) was scraped into this file; it is not valid
# Python and has been commented out.