- When reverse engineering a site, some login pages require an image challenge such as a slider or click-to-select captcha.
- In this post we focus on using OpenCV to locate the slider gap.
- 1. Fetch the captcha images and save them locally
import base64

import requests

# Request a captcha session id (SId) for the given CaptchaAppId
res = requests.post(
    url='https://m.captcha.qq.com/',
    data='{"Action":"CheckCaptchaAppId_v1.0.1","CaptchaAppId":2075530305}',
)
esid = res.json()['Response']['SId']

# Fetch the captcha image data for this session
res = requests.post(
    url='https://m.captcha.qq.com/',
    data='{"Action":"GetImageData_v1.0.1","ESId":"%s"}' % esid,
)
data_dict = res.json()

# Background image, base64 encoded
l_image = data_dict['Response']['ImageDataL']
l_image_content = base64.b64decode(bytes(l_image, encoding="ascii"))
with open('l_image.png', mode='wb') as f:
    f.write(l_image_content)

# Puzzle-piece (gap) image, base64 encoded
s_image = data_dict['Response']['ImageDataS']
s_image_content = base64.b64decode(bytes(s_image, encoding="ascii"))
with open('s_image.png', mode='wb') as f:
    f.write(s_image_content)
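- To sanity-check the download, you can load the two saved files with OpenCV and print their dimensions; the background (with the gap) should be noticeably larger than the puzzle piece. A minimal, optional sketch, assuming the two files above were written successfully:

import cv2

# cv2.imread returns None if a file is missing or cannot be decoded
bg = cv2.imread('l_image.png')
piece = cv2.imread('s_image.png')
if bg is None or piece is None:
    raise SystemExit('captcha images were not saved correctly')

print('background:', bg.shape)  # (height, width, channels)
print('piece:', piece.shape)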
- 2. Use OpenCV edge detection plus template matching to compute the gap offset
import base64
import json

import cv2
import numpy as np
import requests


def get_image_object(bs_image):
    obj = base64.b64decode(bytes(bs_image, encoding="ascii"))
    img_buffer_np = np.frombuffer(obj, dtype=np.uint8)
    # Decode the 1-D numpy buffer in memory into an image matrix
    img_np = cv2.imdecode(img_buffer_np, 1)
    bg_img = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
    return bg_img


def get_distance(l_image_object, s_image_object):
    # Edge detection
    bg_edge = cv2.Canny(l_image_object, 255, 255)
    tp_edge = cv2.Canny(s_image_object, 255, 255)
    bg_pic = cv2.cvtColor(bg_edge, cv2.COLOR_GRAY2RGB)
    tp_pic = cv2.cvtColor(tp_edge, cv2.COLOR_GRAY2RGB)
    # Slide the puzzle-piece edges over the background edges and score every position
    res = cv2.matchTemplate(bg_pic, tp_pic, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)  # find the best match
    # The x coordinate of the best match is the horizontal offset of the gap
    x = max_loc[0]
    return x


res = requests.post(
    url='https://m.captcha.qq.com/',
    data='{"Action":"CheckCaptchaAppId_v1.0.1","CaptchaAppId":2075530305}',
)
esid = res.json()['Response']['SId']

res = requests.post(
    url='https://m.captcha.qq.com/',
    data='{"Action":"GetImageData_v1.0.1","ESId":"%s"}' % esid,
)
data_dict = res.json()

# Background image
l_image = data_dict['Response']['ImageDataL']
l_image_object = get_image_object(l_image)
# Puzzle-piece image
s_image = data_dict['Response']['ImageDataS']
s_image_object = get_image_object(s_image)

# Compute the distance
distance = get_distance(l_image_object, s_image_object)
print(distance)
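- To verify the offset visually, one option is to mark the detected x position on the background image and inspect the result by eye. A minimal sketch reusing l_image_object, s_image_object and distance from the snippet above (the output filename match_debug.png and the red color are arbitrary choices here):

# Draw a band of the puzzle piece's width at the detected x offset, spanning the full image height
bg_h = l_image_object.shape[0]
piece_w = s_image_object.shape[1]
debug_img = cv2.cvtColor(l_image_object, cv2.COLOR_GRAY2BGR)
cv2.rectangle(debug_img, (distance, 0), (distance + piece_w, bg_h - 1), (0, 0, 255), 2)
cv2.imwrite('match_debug.png', debug_img)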
- 3. Submit the slider position to verify
import base64
import json
import random
import time

import cv2
import numpy as np
import requests


def get_image_object(bs_image):
    obj = base64.b64decode(bytes(bs_image, encoding="ascii"))
    img_buffer_np = np.frombuffer(obj, dtype=np.uint8)
    # Decode the 1-D numpy buffer in memory into an image matrix
    img_np = cv2.imdecode(img_buffer_np, 1)
    bg_img = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
    return bg_img


def get_distance(l_image_object, s_image_object):
    # Edge detection
    bg_edge = cv2.Canny(l_image_object, 255, 255)
    tp_edge = cv2.Canny(s_image_object, 255, 255)
    bg_pic = cv2.cvtColor(bg_edge, cv2.COLOR_GRAY2RGB)
    tp_pic = cv2.cvtColor(tp_edge, cv2.COLOR_GRAY2RGB)
    res = cv2.matchTemplate(bg_pic, tp_pic, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)  # find the best match
    x = max_loc[0]
    return x


res = requests.post(
    url='https://m.captcha.qq.com/',
    data='{"Action":"CheckCaptchaAppId_v1.0.1","CaptchaAppId":2075530305}',
)
esid = res.json()['Response']['SId']

res = requests.post(
    url='https://m.captcha.qq.com/',
    data='{"Action":"GetImageData_v1.0.1","ESId":"%s"}' % esid,
)
data_dict = res.json()

l_image = data_dict['Response']['ImageDataL']
l_image_object = get_image_object(l_image)
s_image = data_dict['Response']['ImageDataS']
s_image_object = get_image_object(s_image)
distance = get_distance(l_image_object, s_image_object)

sid = data_dict['Response']['UniqueSId']

# Submit the slider answer; wait a few seconds first so the solve time looks human
time.sleep(random.randint(3, 5))
data = '{"Action":"VerificationCaptchaImageAnswer_v1.0.1","UniqueSId":"%s","LeftTopX":"%s","ranNum":0,"Frequency":1}' % (sid, distance)
res = requests.post(
    url='https://m.captcha.qq.com/',
    data=data,
)
print(res.text)
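- A side note: the request bodies above are hand-assembled JSON strings filled in with % formatting, even though json is already imported. Building each payload as a dict and serializing it with json.dumps is less error-prone, since quoting and escaping are handled for you. A minimal sketch of the final verification request rewritten this way, using the same fields as above:

# Same verification payload as above, but serialized with json.dumps instead of % formatting
payload = {
    "Action": "VerificationCaptchaImageAnswer_v1.0.1",
    "UniqueSId": sid,
    "LeftTopX": str(distance),  # the original request sends LeftTopX as a string
    "ranNum": 0,
    "Frequency": 1,
}
res = requests.post(
    url='https://m.captcha.qq.com/',
    data=json.dumps(payload),
)
print(res.text)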