# Workflow:
# 1. Understand the requirements
# 2. Find the target website for those requirements
# 3. Send the request
# 4. Fetch the response
# 5. Store the result
from urllib import request, parse
from urllib.error import HTTPError, URLError
def get(url, headers=None):
    """Send a GET request to *url* and return the response body as bytes."""
    # Explicitly leave form=None so this stays a GET; headers is passed
    # by keyword so it cannot be mistaken for the form argument.
    return urlrequests(url, form=None, headers=headers)
def post(url, form, headers=None):
    """Send a POST request to *url* with *form* data and return the body as bytes."""
    return urlrequests(url, form=form, headers=headers)
def urlrequests(url, form=None, headers=None):
    """Fetch *url* and return the response body as bytes.

    Sends a POST request when *form* (a dict of form fields) is given,
    otherwise a GET request.  On an HTTP or URL error the exception is
    printed and an empty ``bytes`` object is returned.

    :param url: target URL.
    :param form: optional dict of form fields; a truthy value triggers a POST.
    :param headers: optional dict of HTTP headers; if omitted, a default
        browser ``User-Agent`` header is supplied.
    :return: response body as ``bytes`` (``b''`` on error).
    """
    # Supply a browser User-Agent only when the caller did not pass headers,
    # so user-provided headers take precedence.
    if headers is None:
        headers = {
            'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; WOW64) '
                           'AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/67.0.3396.99 Safari/537.36'),
        }
    html_bytes = b''
    try:
        if form:
            # POST: urlencode the form dict to a str, then encode to the
            # UTF-8 bytes required by Request's data parameter.
            form_bytes = parse.urlencode(form).encode('utf-8')
            req = request.Request(url, data=form_bytes, headers=headers)
        else:
            # GET: no request body.
            req = request.Request(url, headers=headers)
        # Context manager guarantees the response is closed even if read() fails
        # (the original leaked the connection).
        with request.urlopen(req) as response:
            html_bytes = response.read()
    except URLError as e:
        # HTTPError is a subclass of URLError, so one handler covers both;
        # best-effort behavior is kept: report and return b''.
        print(e)
    return html_bytes
if __name__ == '__main__':
    # Demo: POST a keyword to Baidu Translate's suggestion endpoint
    # and print the raw response bytes.
    url = 'http://fanyi.baidu.com/sug'
    # 1. Prepare the form payload.
    form = {
        'kw': '呵呵'
    }
    # 2. Call the request helper.
    html_bytes = post(url, form=form)
    # 3. Print the result.
    print(html_bytes)
    # Example GET usage:
    # url = 'http://www.baidu.com'
    # html_byte = get(url)
    # print(html_byte)