A Python crawler for scraping all movies from Tencent Video
# -*- coding: utf-8 -*-
# Python 2 crawler: walks the Tencent Video (v.qq.com) movie list pages
# category by category and stores every movie title/URL into MongoDB.
import re
import urllib2
import time
import pymongo
from bs4 import BeautifulSoup

NUM = 0          # global: number of movies collected so far
m_type = u''     # global: current movie category
m_site = u'qq'   # global: source-site tag stored with each record

conn = pymongo.Connection('localhost', 27017)  # legacy pymongo (<3.0) API

# Fetch the page content for the given URL
def gethtml(url):
    req = urllib2.Request(url)
    response = urllib2.urlopen(req)
    html = response.read()
    return html

# Extract the movie categories from the category list page
def gettags(html):
    global m_type
    soup = BeautifulSoup(html)
    # <ul class="clearfix _group" gname="mi_type" gtype="1">
    tags_all = soup.find_all('ul', {'class': 'clearfix _group', 'gname': 'mi_type'})

    # <a _hot="tag.sub" class="_gtag _hotkey" href="http://v.qq.com/list/1_0_-1_-1_1_0_0_20_0_-1_0.html" title="动作" tvalue="0">动作</a>
    re_tags = r'<a _hot=\"tag\.sub\" class=\"_gtag _hotkey\" href=\"(.+?)\" title=\"(.+?)\" tvalue=\"(.+?)\">.+?</a>'
    p = re.compile(re_tags, re.DOTALL)
    tags = p.findall(str(tags_all[0]))

    tags_url = {}
    if tags:
        for tag in tags:
            tag_url = tag[0].decode('utf-8')
            m_type = tag[1].decode('utf-8')
            tags_url[m_type] = tag_url   # map category name -> list URL
    else:
        print "Not Find"
    return tags_url

# Get the number of pages in one category
def get_pages(tag_url):
    tag_html = gethtml(tag_url)
    soup = BeautifulSoup(tag_html)
    # <div class="mod_pagenav" id="pager">
    div_page = soup.find_all('div', {'class': 'mod_pagenav', 'id': 'pager'})

    # <a class="c_txt6" href="http://v.qq.com/list/1_2_-1_-1_1_0_24_20_0_-1_0.html" title="25"><span>25</span></a>
    re_pages = r'<a class=.+?><span>(.+?)</span></a>'
    p = re.compile(re_pages, re.DOTALL)
    pages = p.findall(str(div_page[0]))
    if len(pages) > 1:
        return pages[-2]   # last numbered link; the final one is the "next" arrow
    else:
        return 1

# Collect the movie blocks on one list page
def getmovielist(html):
    soup = BeautifulSoup(html)
    # <ul class="mod_list_pic_130">
    divs = soup.find_all('ul', {'class': 'mod_list_pic_130'})
    for div_html in divs:
        div_html = str(div_html).replace('\n', '')
        getmovie(div_html)

# Parse one movie block and store title/URL/site/type into MongoDB
def getmovie(html):
    global NUM

    re_movie = r'<li><a class=\"mod_poster_130\" href=\"(.+?)\" target=\"_blank\" title=\"(.+?)\"><img.+?</li>'
    p = re.compile(re_movie, re.DOTALL)
    movies = p.findall(html)
    if movies:
        playlinks = conn.dianying.playlinks
        for movie in movies:
            values = dict(
                movie_title=movie[1],
                movie_url=movie[0],
                movie_site=m_site,
                movie_type=m_type
            )
            print values
            playlinks.insert(values)
            NUM += 1
            print "%s : %d" % ("=" * 70, NUM)

# Fetch the play links from a movie detail page
def getmovieinfo(url):
    html = gethtml(url)
    soup = BeautifulSoup(html)
    divs = soup.find_all('div', {'class': 'pack pack_album album_cover'})

    # <a href="http://www.tudou.com/albumplay/9NyofXc_lHI/32JqhiKJykI.html" target="new" title="《血滴子》独家纪录片" wl="1"> </a>
    re_info = r'<a href=\"(.+?)\" target=\"new\" title=\"(.+?)\" wl=\".+?\"> </a>'
    p_info = re.compile(re_info, re.DOTALL)
    m_info = p_info.findall(str(divs[0]))
    if not m_info:
        print "Not find movie info"
    return m_info

# Store a full movie-info record in a separate collection
def insertdb(movieinfo):
    movie_db = conn.dianying_at
    movie_db.movies.insert(movieinfo)

if __name__ == "__main__":
    tags_url = "http://v.qq.com/list/1_-1_-1_-1_1_0_0_20_0_-1_0.html"
    tags_html = gethtml(tags_url)
    tag_urls = gettags(tags_html)

    for mtype, url in tag_urls.items():
        m_type = mtype   # remember the category for the records we insert
        print url.encode('utf-8')
        maxpage = int(get_pages(url.encode('utf-8')))
        print maxpage

        for x in range(0, maxpage):
            # e.g. http://v.qq.com/list/1_0_-1_-1_1_0_0_20_0_-1_0.html
            m_url = url.replace('0_20_0_-1_0.html', '')
            movie_url = "%s%d_20_0_-1_0.html" % (m_url, x)
            print movie_url.encode('utf-8')
            movie_html = gethtml(movie_url.encode('utf-8'))
            getmovielist(movie_html)
            time.sleep(0.1)
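The script above targets Python 2 (urllib2, print statements, the legacy pymongo Connection class). As a minimal sketch of how the same fetch-and-parse step looks on Python 3 — assuming the requests and beautifulsoup4 packages are installed, and keeping the article's URL and class names even though the live site has almost certainly changed them since — you could write:

# -*- coding: utf-8 -*-
# Minimal Python 3 sketch of the fetch-and-parse step above, using requests
# instead of urllib2. The URL and class names are copied from the article;
# treat them as assumptions about the old page layout, not working values.
import time

import requests
from bs4 import BeautifulSoup

LIST_URL = "http://v.qq.com/list/1_-1_-1_-1_1_0_0_20_0_-1_0.html"

def get_html(url):
    # requests handles encoding detection and redirects for us
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    return resp.text

def get_tags(html):
    # Same filter as gettags() above: the <ul> that holds the category links
    soup = BeautifulSoup(html, "html.parser")
    tags_url = {}
    for ul in soup.find_all("ul", {"class": "clearfix _group", "gname": "mi_type"}):
        # Walk the <a> tags directly instead of regex-matching raw HTML
        for a in ul.find_all("a", class_="_gtag _hotkey"):
            tags_url[a.get("title", "")] = a.get("href", "")
    return tags_url

if __name__ == "__main__":
    for title, url in get_tags(get_html(LIST_URL)).items():
        print(title, url)
        time.sleep(0.1)  # same polite throttle as the original

The paging and storage steps carry over the same way; under pymongo 3+ the legacy Connection and insert calls become MongoClient and insert_one.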
Summary
The above is a Python crawler for scraping all movies from Tencent Video. I hope it is helpful; if you have any questions, leave me a comment and I will reply as soon as possible!