In my previous post I covered how to write a crawler for Baidu Pan (Baidu Netdisk). As a quick refresher, here is the link:
http://www.cnblogs.com/huangx...
In this post I want to walk through a crawler for Baidu Images. This is also the crawler code behind 搜搜gif (which also offers an online GIF-making feature). The overall crawler framework is much the same as before, but it adds some image handling, which cost me quite a bit of time, so please read it carefully; being a programmer isn't easy. That's all the preamble: I will share the crawler code on 去轉盤網. If you want to download it, use the download link there; if that doesn't work, use the backup link.
Here is the code:
PS: if you don't know Python yet, go brush up and get the basics down first.
#coding:utf-8
"""
Created on 2015-9-17

@author: huangxie
"""
import time,math,os,re,urllib,urllib2,cookielib
from bs4 import BeautifulSoup
import time
import re
import uuid
import json
from threading import Thread
from Queue import Queue
import MySQLdb as mdb
import sys
import threading
import utils
import imitate_browser
from MySQLdb.constants.REFRESH import STATUS

reload(sys)
sys.setdefaultencoding("utf-8")

DB_HOST = "127.0.0.1"
DB_USER = "root"
DB_PASS = "root"

proxy = {u"http":u"222.39.64.13:8118"}
TOP_URL = "http://image.baidu.com/i?tn=resultjsonavatarnew&ie=utf-8&word={word}&pn={pn}&rn={rn}"
KEYWORD_URL = "https://www.baidu.com/s?ie=utf-8&f=8&tn=baidu&wd={wd}"

"""
i_headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
             "Accept":"json;q=0.9,*/*;q=0.8",
             "Accept-Charset":"utf-8;q=0.7,*;q=0.3",
             "Accept-Encoding":"gzip",
             "Connection":"close",
             "Referer":None  # note: if fetching still fails, set the target site's host/Referer here
             }
"""
i_headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.48"}

def GetDateString():
    x = time.localtime(time.time())
    foldername = str(x.__getattribute__("tm_year"))+"-"+str(x.__getattribute__("tm_mon"))+"-"+str(x.__getattribute__("tm_mday"))
    return foldername

class BaiduImage(threading.Thread):

    def __init__(self):
        Thread.__init__(self)
        self.browser = imitate_browser.BrowserBase()
        self.chance = 0
        self.chance1 = 0
        self.request_queue = Queue()
        self.wait_ana_queue = Queue()
        #self.key_word_queue.put((("動態圖", 0, 24)))
        self.count = 0
        self.mutex = threading.RLock()  # re-entrant lock: the same thread can acquire it again
        self.commit_count = 0
        self.ID = 500
        self.next_proxy_set = set()
        self.dbconn = mdb.connect(DB_HOST, DB_USER, DB_PASS, "sosogif", charset="utf8")
        self.dbconn.autocommit(False)
        self.dbcurr = self.dbconn.cursor()
        self.dbcurr.execute("SET NAMES utf8")

    """
    def run(self):
        while True:
            self.get_pic()
    """

    def work(self, item):
        print "start thread", item
        while True:  # wait once there are MAX_REQUEST or more pending requests
            self.get_pic()
            self.prepare_request()

    def format_keyword_url(self, keyword):
        return KEYWORD_URL.format(wd=keyword).encode("utf-8")

    def generateSeed(self, url):
        html = self.browser.openurl(url).read()
        if html:
            try:
                soup = BeautifulSoup(html)
                trs = soup.find("div", id="rs").find("table").find_all("tr")  # all rows of the related-searches table
                for tr in trs:
                    ths = tr.find_all("th")
                    for th in ths:
                        a = th.find_all("a")[0]
                        keyword = a.text.strip()
                        if "動態圖" in keyword or "gif" in keyword:
                            print "keyword", keyword
                            self.dbcurr.execute("select id from info where word=%s", (keyword))
                            y = self.dbcurr.fetchone()
                            if not y:
                                self.dbcurr.execute("INSERT INTO info(word,status,page_num,left_num,how_many) VALUES(%s,0,0,0,0)", (keyword))
                                self.dbconn.commit()
            except:
                pass

    def prepare_request(self):
        self.lock()
        self.dbcurr.execute("select * from info where status=0")
        result = self.dbcurr.fetchone()
        if result:
            id, word, status, page_num, left_num, how_many = result
            self.request_queue.put((id, word, page_num))
            if page_num == 0 and left_num == 0 and how_many == 0:
                url = self.format_keyword_url(word)
                self.generateSeed(url)
                html = ""
                try:
                    url = self.format_top_url(word, page_num, 24)
                    html = self.browser.openurl(url).read()
                except Exception as err:
                    print "err", err
                if html != "":
                    how_many = self.how_many(html)
                    print "how_many", how_many
                    if how_many == None:
                        how_many = 0
                    t = math.ceil(how_many/24*100)  # only the first 1/100 of the result pages is needed
                    num = int(t)
                    for i in xrange(0, num-1):
                        self.dbcurr.execute("INSERT INTO info(word,status,page_num,left_num,how_many) VALUES(%s,%s,%s,%s,%s)", (word, 0, i*24, num-i, how_many))
                    self.dbcurr.execute("update info SET status=1 WHERE id=%s", (id))  # mark this seed row as visited
                    self.dbconn.commit()
        self.unlock()

    def start_work(self, req_max):
        for item in xrange(req_max):
            t = threading.Thread(target=self.work, args=(item,))
            t.setDaemon(True)
            t.start()

    def lock(self):  # acquire the lock
        self.mutex.acquire()

    def unlock(self):  # release the lock
        self.mutex.release()

    def get_para(self, url, key):
        values = url.split("?")[-1]
        for key_value in values.split("&"):
            value = key_value.split("=")
            if value[0] == key:
                return value[1]
        return None

    def makeDateFolder(self, par, child):
        #self.lock()
        if os.path.isdir(par):
            path = par + "//" + GetDateString()
            newFolderName = path + "//" + child
            if not os.path.isdir(path):
                os.mkdir(path)
            if not os.path.isdir(newFolderName):
                os.mkdir(newFolderName)
            return newFolderName
        else:
            return par
        #self.unlock()

    def parse_json(self, data):
        ipdata = json.loads(data)
        try:
            if ipdata["imgs"]:
                for n in ipdata["imgs"]:  # each entry under "imgs"
                    if n["objURL"]:
                        try:
                            proxy_support = urllib2.ProxyHandler(proxy)
                            opener = urllib2.build_opener(proxy_support)
                            urllib2.install_opener(opener)
                            #print "proxy", proxy
                            self.lock()
                            self.dbcurr.execute("select ID from pic_info where objURL=%s", (n["objURL"]))
                            y = self.dbcurr.fetchone()
                            #print "y=", y
                            if y:
                                print "database exist"
                                self.unlock()  # unlock before continue
                                continue
                            else:
                                real_extension = utils.get_extension(n["objURL"])
                                req = urllib2.Request(n["objURL"], headers=i_headers)
                                resp = urllib2.urlopen(req, None, 5)
                                dataimg = resp.read()
                                name = str(uuid.uuid1())
                                filename = ""
                                if len(real_extension) > 4:
                                    real_extension = ".gif"
                                real_extension = real_extension.lower()
                                if real_extension == ".gif":
                                    filename = self.makeDateFolder("E://sosogif", "d"+str(self.count % 60))+"//"+name+"-www.sosogif.com-搜搜gif貢獻"+real_extension
                                    self.count += 1
                                else:
                                    filename = self.makeDateFolder("E://sosogif", "o"+str(self.count % 20))+"//"+name+"-www.sosogif.com-搜搜gif貢獻"+real_extension
                                    self.count += 1
                                """
                                name=str(uuid.uuid1())
                                filename=""
                                if len(real_extension)>4:
                                    real_extension=".gif"
                                filename =self.makeDateFolder("E://sosogif", "d"+str(self.count % 60))+"//"+name+"-www.sosogif.com-搜搜gif貢獻"+real_extension
                                self.count+=1
                                """
                                try:
                                    if not os.path.exists(filename):
                                        file_object = open(filename, "w+b")
                                        file_object.write(dataimg)
                                        file_object.close()
                                        self.anaylis_info(n, filename, real_extension)  # write the record to the database
                                    else:
                                        print "file exist"
                                except IOError as e1:
                                    print "e1=", e1
                                    pass
                            self.unlock()
                        except IOError as e2:
                            #print "e2=", e2
                            pass
                            self.chance1 += 1
        except Exception as parse_error:
            print "parse_error", parse_error
            pass

    def title_dealwith(self, title):
        # strip the <strong>…</strong> markup Baidu puts around the matched keyword
        # (the literal tag strings here are inferred from the a+8 / b+9 offsets below)
        #print "title", title
        a = title.find("<strong>")
        temp1 = title[0:a]
        b = title.find("</strong>")
        temp2 = title[a+8:b]
        temp3 = title[b+9:len(title)]
        return (temp1+temp2+temp3).strip()

    def anaylis_info(self, n, filename, real_extension):
        print "success."
        #if self.wait_ana_queue.qsize()!=0:
            #n,filename,real_extension=self.wait_ana_queue.get()
        #self.lock()
        objURL = n["objURL"]  # image URL
        fromURLHost = n["fromURLHost"]  # source site
        width = n["width"]  # width
        height = n["height"]  # height
        di = n["di"]  # unique identifier
        type = n["type"]  # format
        fromPageTitle = n["fromPageTitle"]  # title of the source page
        keyword = self.title_dealwith(fromPageTitle)
        cs = n["cs"]  # unknown field
        os = n["os"]  # unknown field
        temp = time.time()
        x = time.localtime(float(temp))
        acTime = time.strftime("%Y-%m-%d %H:%M:%S", x)  # crawl time
        self.dbcurr.execute("select ID from pic_info where cs=%s", (cs))
        y = self.dbcurr.fetchone()
        if not y:
            print "add pic", filename
            self.commit_count += 1
            self.dbcurr.execute("INSERT INTO pic_info(objURL,fromURLHost,width,height,di,type,keyword,cs,os,acTime,filename,real_extension) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", (objURL, fromURLHost, width, height, di, type, keyword, cs, os, acTime, filename, real_extension))
            if self.commit_count == 10:
                self.dbconn.commit()
                self.commit_count = 0
        #self.unlock()

    def format_top_url(self, word, pn, rn):
        url = TOP_URL.format(word=word, pn=pn, rn=rn).encode("utf-8")
        return url

    def how_many(self, data):
        try:
            ipdata = json.loads(data)
            if ipdata["displayNum"] > 0:
                how_many = ipdata["displayNum"]
                return int(how_many)
            else:
                return 0
        except Exception as e:
            pass

    def get_pic(self):
        """
        word="gif"
        pn=0
        rn=24
        if self.key_word_queue.qsize()!=0:
            word,pn,rn=self.key_word_queue.get()
        url=self.format_top_url(word,pn,rn)
        global proxy
        if url:
            try:
                html=""
                try:
                    req = urllib2.Request(url,headers=i_headers)
                    response = urllib2.urlopen(req, None,5)
                    #print "url",url
                    html = self.browser.openurl(url).read()
                except Exception as err:
                    print "err",err
                    #pass
                if html:
                    how_many=self.how_many(html)
                    #how_many=10000
                    print "how_many",how_many
                    word=self.get_para(url,"word")
                    rn=int(self.get_para(url,"rn"))
                    t=math.ceil(how_many/rn)
                    num = int(t)
                    for item in xrange(0,num-1):
        """
        try:
            global proxy
            print "size of queue", self.request_queue.qsize()
            if self.request_queue.qsize() != 0:
                id, word, page_num = self.request_queue.get()
                u = self.format_top_url(word, page_num, 24)
                self.lock()
                self.dbcurr.execute("update info SET status=1 WHERE id=%s", (id))
                self.dbconn.commit()
                if self.chance > 0 or self.chance1 > 1:  # if either counter reports trouble, line up a new proxy
                    if self.ID % 100 == 0:
                        self.dbcurr.execute("select count(*) from proxy")
                        for r in self.dbcurr:
                            count = r[0]
                        if self.ID > count:
                            self.ID = 50
                    self.dbcurr.execute("select * from proxy where ID=%s", (self.ID))
                    results = self.dbcurr.fetchall()
                    for r in results:
                        protocol = r[1]
                        ip = r[2]
                        port = r[3]
                        pro = (protocol, ip+":"+port)
                        if pro not in self.next_proxy_set:
                            self.next_proxy_set.add(pro)
                    self.chance = 0
                    self.chance1 = 0
                    self.ID += 1
                self.unlock()
                proxy_support = urllib2.ProxyHandler(proxy)
                opener = urllib2.build_opener(proxy_support)
                urllib2.install_opener(opener)
                html = ""
                try:
                    req = urllib2.Request(u, headers=i_headers)
                    #print "u=", u
                    response = urllib2.urlopen(req, None, 5)
                    html = response.read()
                    if html:
                        #print "html", type(html)
                        self.parse_json(html)
                except Exception as ex1:
                    #print "error=", ex1
                    pass
                    self.chance += 1
                    if self.chance > 0 or self.chance1 > 1:
                        if len(self.next_proxy_set) > 0:
                            protocol, socket = self.next_proxy_set.pop()
                            proxy = {protocol: socket}
                            print "change proxy finished<<", proxy, self.ID
        except Exception as e:
            print "error1", e
            pass

if __name__ == "__main__":
    app = BaiduImage()
    app.start_work(80)
    #app.generateSeed()
    while 1:
        pass
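To make the core flow easier to follow without the threading, proxy rotation, and MySQL bookkeeping, here is a minimal single-threaded sketch of the same idea: build the result-JSON URL for one keyword and page, read the imgs list, and save each objURL to disk. It reuses the endpoint, User-Agent header, and JSON field names from the code above; the fetch_page/save_images helpers and the "down" output folder are just illustrative names I made up, and the interface may well have changed since 2015, so treat this as a sketch rather than a drop-in replacement.

# -*- coding: utf-8 -*-
# Minimal sketch: fetch one page of Baidu image-search JSON and save the images.
# Endpoint, header and field names ("imgs", "objURL") come from the crawler above;
# they may have changed, so this only illustrates the approach.
import json
import os
import urllib
import urllib2
import uuid

TOP_URL = "http://image.baidu.com/i?tn=resultjsonavatarnew&ie=utf-8&word={word}&pn={pn}&rn={rn}"
HEADERS = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.48"}

def fetch_page(word, pn, rn=24):
    # Build the search URL for one page and return the parsed JSON, or None on failure.
    url = TOP_URL.format(word=urllib.quote(word), pn=pn, rn=rn)
    try:
        req = urllib2.Request(url, headers=HEADERS)
        return json.loads(urllib2.urlopen(req, None, 5).read())
    except Exception as err:
        print "fetch failed:", err
        return None

def save_images(page, out_dir="down"):
    # Download every objURL in one page of results into out_dir, one UUID name per file.
    if not page or not page.get("imgs"):
        return
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)
    for item in page["imgs"]:
        obj_url = item.get("objURL")
        if not obj_url:
            continue
        ext = os.path.splitext(obj_url.split("?")[0])[1] or ".gif"  # crude extension guess
        filename = os.path.join(out_dir, str(uuid.uuid1()) + ext)
        try:
            req = urllib2.Request(obj_url, headers=HEADERS)
            with open(filename, "wb") as f:
                f.write(urllib2.urlopen(req, None, 5).read())
            print "saved", filename
        except Exception as err:
            print "skip", obj_url, err

if __name__ == "__main__":
    save_images(fetch_page(u"動態圖".encode("utf-8"), pn=0))

Paging works the same way as in the full crawler: keep rn at 24 and step pn forward by 24 to walk through the result pages; the keyword seeding, proxy switching, and deduplication by objURL/cs in the full version all layer on top of this loop.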
I have set up a QQ group where everyone is welcome to discuss this kind of technology: group number 512245829. If you prefer Weibo, just follow 轉盤娛樂.