Using Python to walk through HTML files and extract each article's text and images

I spent yesterday afternoon tinkering with this and just barely got it debugged. It produces results, so OK!
Prerequisite: install a few required libraries from the Python terminal (an install command follows the package list below).

(.venv) PS C:\Users\灵应\PyCharmMiscProject.venv> pip list
Package            Version
------------------ ---------
beautifulsoup4     4.13.3
certifi            2025.1.31
charset-normalizer 3.4.1
colorama           0.4.6
idna               3.10
lxml               5.3.1
pip                25.0.1
python-docx        1.1.2
requests           2.32.3
soupsieve          2.6
tqdm               4.67.1
typing_extensions  4.13.0
urllib3            2.3.0
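
If you are starting from a clean virtual environment, the packages the script actually imports (requests, tqdm, beautifulsoup4, python-docx) can be installed in one command; the other entries above are dependencies pulled in automatically, and lxml is optional since the script uses the built-in html.parser:

pip install beautifulsoup4 python-docx requests tqdm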

The script does not print its progress, but you can watch images appear in the downloaded_images folder as it runs.
Just keep coaching the AI until it works.
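
The script only shows a tqdm progress bar while the Word document is being assembled. If you also want visible per-image download progress, a minimal sketch (my addition, not part of the original script, reusing the same chunked streaming approach) looks like this:

import os
from tqdm import tqdm

# Sketch only: a drop-in variant of download_image that shows a byte-level progress bar.
def download_image_verbose(session, url, save_path, timeout=10):
    response = session.get(url, stream=True, timeout=timeout)
    response.raise_for_status()
    total = int(response.headers.get('Content-Length', 0))
    with open(save_path, 'wb') as f, tqdm(total=total, unit='B', unit_scale=True,
                                          desc=os.path.basename(save_path)) as bar:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
            bar.update(len(chunk))

The complete script follows.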

import os
import json
import requests
import re
import hashlib
from tqdm import tqdm
from bs4 import BeautifulSoup
from docx import Document
from docx.shared import Inches, Pt
from docx.oxml.ns import qn
from docx.enum.text import WD_BREAK

# Configuration parameters

MAX_RETRIES = 3
TIMEOUT = 10
DOWNLOAD_DIR = "downloaded_images"

def set_chinese_font(doc):
    """Set SimSun as the default font so Chinese text renders correctly in Word."""
    doc.styles['Normal'].font.name = 'SimSun'
    doc.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), 'SimSun')

def extract_content(soup):
    """Return the article body text, preferring common content containers."""
    for tag in ['article', 'div', 'section']:
        for class_name in ['content', 'article', 'main', 'post']:
            element = soup.find(tag, class_=class_name)
            if element:
                return element.get_text(separator='\n', strip=True)
    return soup.body.get_text(separator='\n', strip=True) if soup.body else ''

def generate_unique_filename(url):
    """Generate a unique, stable filename from the image URL."""
    hasher = hashlib.sha256()
    hasher.update(url.encode('utf-8'))
    # Strip any query string before taking the extension, so the filename stays valid on Windows.
    ext = os.path.splitext(url.split('?')[0])[1]
    return hasher.hexdigest()[:8] + ext

def download_image(session, url, save_path):
    """Download an image with retries."""
    for attempt in range(MAX_RETRIES):
        try:
            response = session.get(url, stream=True, timeout=TIMEOUT)
            response.raise_for_status()
            with open(save_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
            return True
        except Exception as e:
            print(f"第{attempt+1}次尝试下载图片 {url} 失败: {str(e)}")
    return False

def process_html(file_path):
    """Parse one HTML file and return its title, body text, and image list."""
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            soup = BeautifulSoup(f.read(), 'html.parser')

        title = soup.find('h1').get_text(strip=True) if soup.find('h1') else ''

        images = []
        for img in soup.find_all('img'):
            src = img.get('src')
            if src:
                if src.startswith('http'):
                    # Remote image: download it (uses the module-level session).
                    filename = generate_unique_filename(src)
                    save_path = os.path.join(DOWNLOAD_DIR, filename)
                    if not os.path.exists(save_path):
                        success = download_image(session, src, save_path)
                    else:
                        success = True
                    images.append({
                        'src': src,
                        'alt': img.get('alt', ''),
                        'path': save_path if success else None,
                        'attempts': MAX_RETRIES
                    })
                else:
                    # Local image: resolve the path relative to the HTML file.
                    abs_path = os.path.abspath(os.path.join(os.path.dirname(file_path), src))
                    images.append({
                        'src': src,
                        'alt': img.get('alt', ''),
                        'path': abs_path if os.path.exists(abs_path) else None,
                        'attempts': 0
                    })

        content = extract_content(soup)
        return {
            'title': title,
            'content': content,
            'images': images
        }
    except Exception as e:
        print(f"处理文件 {file_path} 出错: {str(e)}")
        return None

def create_single_word_doc(all_data, output_path):
    """Write every parsed article, with its images, into a single Word document."""
    doc = Document()
    set_chinese_font(doc)
    doc.add_heading('微信公众号文章合集', 0).alignment = 1  # 1 = centered

    for idx, article in tqdm(enumerate(all_data), total=len(all_data), desc="生成文档"):
        doc.add_heading(f"文章{idx+1}: {article['title']}", level=1)
        body = doc.add_paragraph(article['content'])
        if body.runs:  # guard against empty content, which produces a run-less paragraph
            body.runs[0].font.size = Pt(10.5)

        if article['images']:
            doc.add_heading('相关图片', level=2)
            for img in article['images']:
                if img['alt']:
                    doc.add_paragraph(f"图片描述:{img['alt']}", style='Intense Quote')
                if img['path'] and os.path.exists(img['path']):
                    try:
                        doc.add_picture(img['path'], width=Inches(5))
                    except Exception:
                        doc.add_paragraph(f"[图片加载失败:{img['src']}]")
                else:
                    doc.add_paragraph(f"[图片缺失:{img['src']}]")
                doc.add_paragraph()

        if idx < len(all_data) - 1:
            doc.add_page_break()

    doc.save(output_path)

def scan_directories(directories):
    all_data = []
    for dir_path in directories:
        if not os.path.exists(dir_path):
            print(f"目录不存在: {dir_path}")
            continue
        for root, _, files in os.walk(dir_path):
            for file in files:
                if file.lower().endswith('.html'):
                    file_path = os.path.join(root, file)
                    result = process_html(file_path)
                    if result:
                        all_data.append(result)
    return all_data

if __name__ == "__main__":
    # Initialization: image directory plus an HTTP session with automatic retries.
    os.makedirs(DOWNLOAD_DIR, exist_ok=True)
    session = requests.Session()
    retry = requests.packages.urllib3.util.Retry(
        total=MAX_RETRIES,
        backoff_factor=1,
        status_forcelist=[500, 502, 503, 504]
    )
    session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retry))
    session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retry))

    directories = [
        r'D:\玄学有关\贺老师公众号\20250327\京管易学研究\html',
        r'D:\玄学有关\贺老师公众号\20250327\干支易象学研究\html'
    ]

    # Process all files
    print("开始扫描所有目录...")
    all_articles = scan_directories(directories)

    if all_articles:
        output_doc = os.path.join(os.getcwd(), 'combined_articles.docx')
        create_single_word_doc(all_articles, output_doc)
        print(f"\n成功生成文档:{output_doc}")
        print("图片保存在:", os.path.abspath(DOWNLOAD_DIR))
    else:
        print("未找到可处理的HTML文件")
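
To run it, point the directories list at your own folders of saved HTML files, then launch the script from the same virtual environment (the file name here is only an example; the post does not give one):

python extract_articles.py

The combined document is written to combined_articles.docx in the working directory, and every downloaded image ends up under downloaded_images.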
posted @ 2025-04-01 14:12  三要靈应