import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3
url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
def getclick(url):
    # The click count comes from a separate counter API; the news id is the
    # trailing number of the article URL (e.g. ..._1017/8338.html -> 8338).
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    # The API replies with a jQuery snippet like "$('#hits').html('1234');",
    # so trim the surrounding characters and keep the number.
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)
print(getclick('http://news.gzcc.cn/html/2017/xiaoyuanxinwen_1017/8338.html'))
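
# A minimal sketch of a stricter way to pull the number out of the counter
# response. It assumes the reply is a jQuery-style snippet such as
# "$('#hits').html('1234');", which is what the lstrip/rstrip calls above imply;
# sample_response below is only an illustration, not captured output.
import re

def parse_hits(resc):
    # Capture the digits inside .html('...') instead of trimming characters by hand.
    m = re.search(r"\.html\('(\d+)'\)", resc)
    return int(m.group(1)) if m else 0

sample_response = "$('#hits').html('1234');"
print(parse_hits(sample_response))  # 1234
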
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3
url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
def getclick(url):
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)
def getdetail(url):
    # Fetch one article page and collect its metadata into a dict.
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['url'] = url
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    # info starts with "发布时间:YYYY-MM-DD HH:MM:SS"; keep the 19-character timestamp.
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    news['source'] = re.search('来源:(.*)点击', info).group(1).strip()
    # news['content'] = soupd.select('.show-content')[0].text.strip()
    news['click'] = getclick(url)
    return news
print(getdetail('http://news.gzcc.cn/html/2017/xiaoyuanxinwen_1017/8338.html'))
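
# A small defensive variant of the detail parser, as a sketch: it guards against
# article pages missing .show-title or .show-info (selector names taken from the
# code above) by using select_one and regex searches instead of fixed slicing.
# Not a drop-in replacement, just an illustration of the same idea.
def getdetail_safe(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {'url': url}
    title = soupd.select_one('.show-title')
    news['title'] = title.text.strip() if title else ''
    info = soupd.select_one('.show-info')
    if info:
        m = re.search(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', info.text)
        if m:
            news['dt'] = datetime.strptime(m.group(0), '%Y-%m-%d %H:%M:%S')
        src = re.search('来源:(.*)点击', info.text)
        if src:
            news['source'] = src.group(1).strip()
    news['click'] = getclick(url)
    return news
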
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3
url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
def getclick(url):
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)
def getdetail(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['url'] = url
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    news['source'] = re.search('来源:(.*)点击', info).group(1).strip()
    # news['content'] = soupd.select('.show-content')[0].text.strip()
    news['click'] = getclick(url)
    return news
def onepage(pageurl):
    # Parse one list page and return the detail dict of every news item on it.
    res = requests.get(pageurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsls = []
    for news in soup.select('li'):
        # Only <li> elements containing a .news-list-title are real news entries.
        if len(news.select('.news-list-title')) > 0:
            newsls.append(getdetail(news.select('a')[0]['href']))
    return newsls
print(onepage('http://news.gzcc.cn/html/xiaoyuanxinwen/'))
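
# onepage() fires one request per article plus one per click counter, so a crawl
# can hit the server quickly. A minimal sketch of a politer variant, assuming a
# short fixed delay is acceptable; the 0.5 s value is arbitrary.
import time

def onepage_slow(pageurl, delay=0.5):
    res = requests.get(pageurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsls = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsls.append(getdetail(news.select('a')[0]['href']))
            time.sleep(delay)  # pause between article fetches
    return newsls
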
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3
url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
def getclick(url):
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)
def getdetail(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['url'] = url
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    news['source'] = re.search('来源:(.*)点击', info).group(1).strip()
    # news['content'] = soupd.select('.show-content')[0].text.strip()
    news['click'] = getclick(url)
    return news
def onepage(pageurl):
    res = requests.get(pageurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsls = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsls.append(getdetail(news.select('a')[0]['href']))
    return newsls
newstotal=[]
gzccurl='http://news.gzcc.cn/html/xiaoyuanxinwen/'
newstotal.extend(onepage(gzccurl))
res=requests.get(gzccurl)
res.encoding='utf-8'
soup=BeautifulSoup(res.text,'html.parser')
# The .a1 element's text is the total article count ending in '条'; each list page holds 10.
n = int(soup.select('.a1')[0].text.rstrip('条'))
pages = n // 10 + 1
# Only page 2 is fetched here as a test run; use range(2, pages + 1) to crawl every list page.
for i in range(2, 3):
    listurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newstotal.extend(onepage(listurl))
#print(len(newstotal)) #20
df = pandas.DataFrame(newstotal)
print(df.head())
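
# A few illustrative ways to inspect the collected frame; the column names
# (title, dt, source, click, url) come from the dict built in getdetail above.
print(df.dtypes)                                        # parsed column types
print(df.sort_values('click', ascending=False).head())  # most-clicked articles
print(df['dt'].min(), df['dt'].max())                   # date range of the crawl
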
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3
url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
def getclick(url):
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)
def getdetail(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['url'] = url
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    news['source'] = re.search('来源:(.*)点击', info).group(1).strip()
    # news['content'] = soupd.select('.show-content')[0].text.strip()
    news['click'] = getclick(url)
    return news
def onepage(pageurl):
    res = requests.get(pageurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsls = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsls.append(getdetail(news.select('a')[0]['href']))
    return newsls
newstotal=[]
gzccurl='http://news.gzcc.cn/html/xiaoyuanxinwen/'
newstotal.extend(onepage(gzccurl))
res=requests.get(gzccurl)
res.encoding='utf-8'
soup=BeautifulSoup(res.text,'html.parser')
n=int(soup.select('.a1')[0].text.rstrip('条'))
pages=n//10+1
for i in range(2, 3):
    listurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newstotal.extend(onepage(listurl))
#print(len(newstotal)) #20
df = pandas.DataFrame(newstotal)
print(df.head())
print(df['title'])
print(df[df.click>5000])
df.to_excel('gzccnews.xlsx')
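
# df.to_excel needs an Excel writer backend installed (openpyxl for .xlsx).
# A quick round-trip check, assuming gzccnews.xlsx was just written by the
# line above:
check = pandas.read_excel('gzccnews.xlsx')
print(check.shape)
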
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3
url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
def getclick(url):
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)
def getdetail(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['url'] = url
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    news['source'] = re.search('来源:(.*)点击', info).group(1).strip()
    # news['content'] = soupd.select('.show-content')[0].text.strip()
    news['click'] = getclick(url)
    return news
def onepage(pageurl):
    res = requests.get(pageurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsls = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsls.append(getdetail(news.select('a')[0]['href']))
    return newsls
newstotal=[]
gzccurl='http://news.gzcc.cn/html/xiaoyuanxinwen/'
newstotal.extend(onepage(gzccurl))
res=requests.get(gzccurl)
res.encoding='utf-8'
soup=BeautifulSoup(res.text,'html.parser')
n=int(soup.select('.a1')[0].text.rstrip('条'))
pages=n//10+1
for i in range(2, 3):
    listurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newstotal.extend(onepage(listurl))
#print(len(newstotal)) #20
df = pandas.DataFrame(newstotal)
print(df.head())
print(df['title'])
print(df[df.click>5000])
with sqlite3.connect('gzccnews_db.sqlite') as db:
    df.to_sql('news_table', con=db)
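
# to_sql fails if news_table already exists, so reruns need if_exists='replace'
# (or 'append'). A quick read-back sketch against the same database file:
with sqlite3.connect('gzccnews_db.sqlite') as db:
    back = pandas.read_sql_query('SELECT * FROM news_table', con=db)
print(back.head())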