import scrapy
class CookiedemoSpider(scrapy.Spider):
    """Demo spider showing that Scrapy carries session cookies across requests.

    Flow: ``start_requests`` POSTs login credentials to the Douban login
    page; ``parse`` saves the post-login page and then requests a profile
    page that needs the login session; ``pagebyscrapy`` saves that page.
    """

    name = 'cookiedemo'
    # allowed_domains = ['www.douban.com']
    start_urls = ['https://www.douban.com/accounts/login/']

    def start_requests(self):
        """Replace the default GET with a login form POST for each start URL."""
        # Login form fields (credentials are masked placeholders).
        # NOTE(review): hard-coded credentials should live in settings or
        # environment variables, not in the source file.
        data = {
            'source': 'index_nav',
            'form_email': '473450**@qq.com',
            'form_password': '****'
        }
        for url in self.start_urls:
            print(1)
            yield scrapy.FormRequest(url=url, formdata=data, callback=self.parse)

    def parse(self, response):
        """Save the post-login page, then request a login-protected page."""
        # 'with' guarantees the handle is closed; the original opened the
        # file and never closed it (fp.close() was commented out).
        with open("main.html", "w", encoding="utf-8") as fp:
            fp.write(response.text)
        url = "https://www.douban.com/people/188796857/"
        print(2)
        # Scrapy's cookie middleware reuses the session cookie from the
        # login response automatically, so this request is authenticated.
        yield scrapy.Request(url=url, callback=self.pagebyscrapy)

    def pagebyscrapy(self, response):
        """Save the profile page fetched with the logged-in session."""
        print(3)
        # Same leak fix as parse(): use a context manager to close the file.
        with open("loginwin.html", "w", encoding="utf-8") as fp:
            fp.write(response.text)