Python script collection

Send email

from email.mime.text import MIMEText
from email.header import Header
from smtplib import SMTP_SSL


# QQ Mail SMTP server
host_server = 'smtp.qq.com'
# sender_qq is the sender's QQ account number
sender_qq = ''
# pwd is the QQ Mail authorization code (not the account password)
pwd = ''
# sender's email address
sender_qq_mail = ''
# receiver's email address
receiver = ''
# body of the email
mail_content = 'Hello, I am  from Zhihu, and I am running a test of logging into a QQ mailbox and sending email with Python'
# email subject
mail_title = 'Email'

# log in over SSL
smtp = SMTP_SSL(host_server)
# set_debuglevel() is for debugging: 1 turns debug output on, 0 turns it off
smtp.set_debuglevel(1)
smtp.ehlo(host_server)
smtp.login(sender_qq, pwd)

msg = MIMEText(mail_content, "plain", 'utf-8')
msg["Subject"] = Header(mail_title, 'utf-8')
msg["From"] = sender_qq_mail
msg["To"] = receiver
smtp.sendmail(sender_qq_mail, receiver, msg.as_string())
smtp.quit()
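
The same flow wrapped in a small reusable helper, as a minimal sketch that reuses the standard-library imports at the top of this section; the function name send_qq_mail and its parameters are illustrative, not part of the original script:

def send_qq_mail(sender, auth_code, receiver, title, content,
                 host='smtp.qq.com', port=465):
    # build a plain-text message (MIMEText/Header imported above)
    msg = MIMEText(content, 'plain', 'utf-8')
    msg['Subject'] = Header(title, 'utf-8')
    msg['From'] = sender
    msg['To'] = receiver
    # SMTP_SSL defaults to port 465; quit() runs even if sending fails
    smtp = SMTP_SSL(host, port)
    try:
        smtp.login(sender, auth_code)
        smtp.sendmail(sender, receiver, msg.as_string())
    finally:
        smtp.quit()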

COCO to YOLO conversion

# Convert a COCO-format dataset to a YOLO-format dataset
# --json_path  path of the input JSON annotation file
# --save_path  directory where the output label files are saved

import os
import json
from tqdm import tqdm
import argparse

parser = argparse.ArgumentParser()
# change this to the location of your own JSON file
parser.add_argument('--json_path', default='wildlife_instance_test2017.json', type=str, help="input: coco format(json)")
# directory where the .txt label files are saved
parser.add_argument('--save_path', default='Lable/test', type=str, help="specify where to save the output dir of labels")
arg = parser.parse_args()

def convert(size, box):
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x = box[0] + box[2] / 2.0
    y = box[1] + box[3] / 2.0
    w = box[2]
    h = box[3]
    # round() limits the decimal places of the normalized (x_center, y_center, w, h)
    x = round(x * dw, 6)
    w = round(w * dw, 6)
    y = round(y * dh, 6)
    h = round(h * dh, 6)
    return (x, y, w, h)

# coding=utf-8
def check_charset(file_path):
    import chardet
    with open(file_path, "rb") as f:
        data = f.read(4)
        charset = chardet.detect(data)['encoding']
    return charset
 
if __name__ == '__main__':
    json_file = arg.json_path  # COCO Object Instance style annotation file
    ana_txt_save_path = arg.save_path  # directory where the label files are written

    data = json.load(open(file=json_file,encoding='ISO-8859-1', mode='r'))
    if not os.path.exists(ana_txt_save_path):
        os.makedirs(ana_txt_save_path)

    id_map = {}  # COCO category ids are not contiguous, so remap them to 0..n-1 before writing
    with open(os.path.join(ana_txt_save_path, 'classes.txt'), 'w') as f:
        # write one class name per line to classes.txt
        for i, category in enumerate(data['categories']):
            f.write(f"{category['name']}\n")
            id_map[category['id']] = i
    # print(id_map)
    # change the name/location of this image-list file to match your dataset split
    list_file = open(os.path.join(ana_txt_save_path, 'test2017.txt'), 'w')
    for img in tqdm(data['images']):
        filename = img["file_name"]
        img_width = img["width"]
        img_height = img["height"]
        img_id = img["id"]
        head, tail = os.path.splitext(filename)
        ana_txt_name = head + ".txt"  # label file with the same basename as the image
        f_txt = open(os.path.join(ana_txt_save_path, ana_txt_name), 'w')
        for ann in data['annotations']:
            if ann['image_id'] == img_id:
                box = convert((img_width, img_height), ann["bbox"])
                f_txt.write("%s %s %s %s %s\n" % (id_map[ann["category_id"]], box[0], box[1], box[2], box[3]))
        f_txt.close()
        # write the image's relative path into the train2017/val2017/test2017 list file
        list_file.write('./images/test2017/%s.jpg\n' % head)
    list_file.close()
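
The per-image loop above rescans the whole data['annotations'] list for every image, which gets slow on large JSON files. A sketch of a common speed-up under the same COCO structure (not part of the original script): index the annotations by image_id once, then do a dictionary lookup inside the image loop.

from collections import defaultdict

# build the index once, right after loading `data`
anns_by_image = defaultdict(list)
for ann in data['annotations']:
    anns_by_image[ann['image_id']].append(ann)

# inside the image loop, replace the scan over data['annotations'] with:
for ann in anns_by_image.get(img_id, []):
    box = convert((img_width, img_height), ann["bbox"])
    f_txt.write("%s %s %s %s %s\n" % (id_map[ann["category_id"]], box[0], box[1], box[2], box[3]))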

Detect a simple object and crop it

import cv2
import numpy as np

image = cv2.imread("1 (1).JPG")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
# subtract the y-gradient from the x-gradient
gradient = cv2.subtract(gradX, gradY)
gradient = cv2.convertScaleAbs(gradient)
# blur and threshold the image
blurred = cv2.blur(gradient, (9, 9))
(_, thresh) = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# perform a series of erosions and dilations
closed = cv2.erode(closed, None, iterations=4)
closed = cv2.dilate(closed, None, iterations=4)
(cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
# compute the rotated bounding box of the largest contour
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect).astype(int)  # integer corner points; avoids np.int0, which newer NumPy removes
# draw a bounding box around the detected object and display the image
# cv2.drawContours(image, [box], -1, (0, 255, 0), 3)
# cv2.imshow("Image", image)
# cv2.imwrite("contoursImage2.jpg", image)
# cv2.waitKey(0)
Xs = [i[0] for i in box]
Ys = [i[1] for i in box]
x1 = min(Xs)
x2 = max(Xs)
y1 = min(Ys)
y2 = max(Ys)
height = y2 - y1
width = x2 - x1
cropImg = image[y1-10:y1+height+10, x1-10:x1+width+10]
cv2.imwrite("contoursImage3.jpg", cropImg)
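
The fixed 10-pixel margin can push the slice indices below zero near the image border, which silently yields an empty or wrong crop. A small sketch of the clamping you may want, reusing the image and box coordinates computed above:

pad = 10
img_h, img_w = image.shape[:2]
# keep the padded crop window inside the image bounds
y_top, y_bot = max(y1 - pad, 0), min(y2 + pad, img_h)
x_left, x_right = max(x1 - pad, 0), min(x2 + pad, img_w)
cropImg = image[y_top:y_bot, x_left:x_right]
cv2.imwrite("contoursImage3.jpg", cropImg)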

List files in a folder

import os

def get_file_name(path, filetype):
    pathList = []
    for root,dirs,files in os.walk(path):
        for file in files:
            if file.endswith(filetype):
                pathList.append(file)
    return pathList
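
Note that the function returns bare file names, not full paths, so callers have to join them with the folder themselves. A quick usage example (the folder name and extension are placeholders):

jpg_names = get_file_name("2A", ".JPG")                      # names only, e.g. "1 (1).JPG"
jpg_paths = [os.path.join("2A", n) for n in jpg_names]       # build full paths for reading
print(len(jpg_paths), "images found")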

Data acquisition (batch cropping)

import cv2
import numpy as np

import os

def get_file_name(path,filetype):
    pathList=[]
    for root,dirs,files in os.walk(path):
        for file in files:
            if file.endswith(filetype):
                pathList.append(file)
    return pathList

def control_photo(ori_path,save_path):
    image = cv2.imread(ori_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
    gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
    # subtract the y-gradient from the x-gradient
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)
    # blur and threshold the image
    blurred = cv2.blur(gradient, (9, 9))
    (_, thresh) = cv2.threshold(blurred, 90, 255, cv2.THRESH_BINARY)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    # perform a series of erosions and dilations
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)
    (cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
    # compute the rotated bounding box of the largest contour
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect).astype(int)  # integer corner points; avoids np.int0, which newer NumPy removes
    # draw a bounding box around the detected object and display the image
    # cv2.drawContours(image, [box], -1, (0, 255, 0), 3)
    # cv2.imshow("Image", image)
    # cv2.imwrite("contoursImage2.jpg", image)
    # cv2.waitKey(0)
    Xs = [i[0] for i in box]
    Ys = [i[1] for i in box]
    x1 = min(Xs)
    x2 = max(Xs)
    y1 = min(Ys)
    y2 = max(Ys)
    height = y2 - y1
    width = x2 - x1
    cropImg = image[y1-10:y1+height+10, x1-10:x1+width+10]
    cv2.imwrite(save_path, cropImg)


if __name__ == '__main__':
    for img_name in get_file_name("2A",".JPG"):
        control_photo("2A/"+img_name,"cut_"+img_name)
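
control_photo assumes findContours always returns at least one contour and writes every crop into the working directory. A defensive variant of the driver loop, as a sketch; the output folder name cut/ is an assumption:

if __name__ == '__main__':
    out_dir = "cut"                       # hypothetical output folder
    os.makedirs(out_dir, exist_ok=True)   # create it before writing crops
    for img_name in get_file_name("2A", ".JPG"):
        try:
            control_photo(os.path.join("2A", img_name),
                          os.path.join(out_dir, img_name))
        except (IndexError, cv2.error) as e:
            # IndexError: no contour found; cv2.error: unreadable image, etc.
            print(f"skip {img_name}: {e}")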

YOLO dataset configuration (.yaml)

# COCO 2017 dataset http://cocodataset.org

# download command/URL (optional)
download: bash ./scripts/get_coco.sh

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ./data/lote/images/train
val: ./data/lote/images/val
test: ./data/lote/images/test

# number of classes
nc: 11

# class names
names: ['Pandas大熊猫','RedPandas小熊猫','MartesFlavigula黄喉鸬','MacacaThibetana藏猕猴','RhinopithecusRoxellana金丝猴','HystrixBrachyura豪猪','SusScrofa野猪','RusaUnicolor水鹿','ElaphodusCephalophus簇绒鹿','CapricornisMilneedwardsii中华鬣羚','PseudoisNayaur青羊']
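
A tiny sanity check for the config above, assuming it is saved as data/lote.yaml (the filename is an assumption) and that PyYAML is installed: nc must equal the number of entries in names, otherwise the classes will be mis-indexed during training.

import yaml  # PyYAML

with open("data/lote.yaml", "r", encoding="utf-8") as f:   # path is an assumption
    cfg = yaml.safe_load(f)

# verify the declared class count matches the names list
assert cfg["nc"] == len(cfg["names"]), (
    "nc=%d but %d class names are listed" % (cfg["nc"], len(cfg["names"])))
print("ok:", cfg["nc"], "classes")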

Save evaluation results to a txt file

import os

import torch

from model.model import parsingNet
from utils.common import merge_config
from utils.dist_utils import dist_print
from evaluation.eval_wrapper import eval_lane

def get_file_name(path,filetype):
    pathList=[]
    for root,dirs,files in os.walk(path):
        for file in files:
            if file.endswith(filetype):
                pathList.append(file)
    return pathList

def save_txt(save_dir,str_info):
    path=save_dir+"/tusimple_eval.txt"
    os.makedirs(os.path.dirname(path),exist_ok=True)
    with open(path,"a") as f:
        f.write(str_info)
        

if __name__ == "__main__":
    torch.backends.cudnn.benchmark = True

    cfg_test_model_path= "1115_1422_b32_zbt_mas_cssc_b_test_2"
    cfg_backbone='18p'
    cfg_dataset = 'Tusimple'
    cls_num_per_lane = 56
    cfg_griding_num=100
    cfg_num_lanes=4
    cfg_test_work_dir="./tmp"
    cfg_data_root = r"D:\pythoncode\dataset\Tusimple"  # raw string so the backslashes are not treated as escapes

    distributed = False
    if 'WORLD_SIZE' in os.environ:
        distributed = int(os.environ['WORLD_SIZE']) > 1

    net = parsingNet(pretrained=False, backbone=cfg_backbone, cls_dim=(cfg_griding_num+1, cls_num_per_lane, cfg_num_lanes),
                     use_aux=False).cuda()  # the auxiliary segmentation branch is not needed at test time

    model_strlist=get_file_name(cfg_test_model_path,"pth")

    for model_str in model_strlist:
        # evaluate every checkpoint (.pth) found in the model directory
        cfg_test_model_temp = cfg_test_model_path + "/" + model_str

        state_dict = torch.load(cfg_test_model_temp, map_location = 'cpu')['model']
        compatible_state_dict = {}
        for k, v in state_dict.items():
            if 'module.' in k:
                compatible_state_dict[k[7:]] = v
            else:
                compatible_state_dict[k] = v

        net.load_state_dict(compatible_state_dict, strict = False)

        if not os.path.exists(cfg_test_work_dir):
            os.mkdir(cfg_test_work_dir)

        strlist=eval_lane(net, cfg_dataset, cfg_data_root, cfg_test_work_dir, cfg_griding_num, False, distributed)
        model_savestr=model_str+"\n"+strlist
        save_txt(cfg_test_model_path,model_savestr)
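
The prefix handling above drops the first seven characters of any key that merely contains 'module.'. A slightly safer variant (a sketch, not the repository's code) strips only a leading 'module.' prefix, which is what nn.DataParallel adds:

def strip_module_prefix(state_dict):
    # remove a leading 'module.' (added by nn.DataParallel) from each key, if present
    return {k[len('module.'):] if k.startswith('module.') else k: v
            for k, v in state_dict.items()}

# usage inside the loop above:
# net.load_state_dict(strip_module_prefix(state_dict), strict=False)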

