Scraping web page data with UiBot

Dim arrayData = ""
Dim objExcelWorkBook = ""
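// Scrape every TABLE element on the Chrome page; keep following the pager link (objNextLinkElement) for up to 23 pages, with a 1000 ms delay between pages and errors ignored (bContinueOnError)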
arrayData = UiElement.DataScrap({"wnd":[{"cls":"Chrome_WidgetWin_1","title":"*","app":"chrome"},{"cls":"Chrome_RenderWidgetHostHWND","title":"Chrome Legacy Window"}],"html":[{"tag":"TABLE"}]},{"ExtractTable":1,"Columns":[]},{"objNextLinkElement":{"wnd":[{"cls":"Chrome_WidgetWin_1","title":"*","app":"chrome"},{"cls":"Chrome_RenderWidgetHostHWND","title":"Chrome Legacy Window"}],"html":[{"tag":"A","parentid":"dwData","aaname":""}]},"iMaxNumberOfPage":23,"iMaxNumberOfResult":-1,"iDelayBetweenMS":1000,"bContinueOnError":True})
list = Set.ToArray(arrayData)
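// Open the workbook with the WPS engine and write each scraped row's value[4] field into column A of Sheet1; the leading apostrophe makes Excel store the cell as text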
objExcelWorkBook = Excel.OpenExcel('''C:\Users\Asus\Desktop\爬取数据.xlsx''',False,"WPS","","")
Dim i = 0
For Each value In list
    i = i+1
    Excel.InsertRow(objExcelWorkBook,"Sheet1","A"&i,"'"&value[4],False)
    TracePrint(value[4])
Next
Excel.CloseExcel(objExcelWorkBook,True)
CollectGarbage()

//Log.Info(arrayData)
//File.WriteFile('''E:\爬取数据.txt''',arrayData,"utf-8")

Honestly, even after learning Python, it still isn't as convenient as UiBot's foolproof point-and-click operation. UiBot really is clever. For comparison, here is the Python way of pulling the links out of a page:

import requests
from bs4 import BeautifulSoup


base_url = "http://172.17.0.10/wzjy/wzjyx218.aspx?t=usFictaHWuU="
response = requests.get(base_url)
base_data = response.content
base_soup = BeautifulSoup(base_data, "html.parser")
links = []
# collect the href of every link inside the <td scope="row"> cells
for tag in base_soup.find_all('td', {'scope': 'row'}):
    for anchor in tag.find_all('a'):
        links.append(anchor['href'])

print(links) 
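
A possible next step, to bring the Python version up to what the UiBot flow above delivers, is to follow each collected link and drop the table rows into an Excel file. The sketch below is my own rough illustration, not part of the original script: it assumes the hrefs are relative to base_url, that each linked page keeps its data in plain <tr>/<td> rows, and that openpyxl is installed; the output filename is made up.

from urllib.parse import urljoin
from openpyxl import Workbook

wb = Workbook()
ws = wb.active

for href in links:                        # 'links' was filled by the loop above
    page_url = urljoin(base_url, href)    # assumption: hrefs are relative to base_url
    page_soup = BeautifulSoup(requests.get(page_url).content, "html.parser")
    for row in page_soup.find_all("tr"):  # assumption: data lives in an ordinary HTML table
        cells = [td.get_text(strip=True) for td in row.find_all("td")]
        if cells:
            ws.append(cells)              # one table row per Excel row

wb.save(r"C:\Users\Asus\Desktop\scraped_data.xlsx")  # made-up output path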

 
