這個程序其實很早之前就完成了,一直沒有發(fā)出來,趁著最近不是很忙就分享給大家.
使用BeautifulSoup模塊和urllib2模塊實現(xiàn),然后保存成word是使用python docx模塊的,安裝方式網(wǎng)上一搜一大堆,我就不再贅述了.
主要實現(xiàn)的功能是登陸知乎,然后將個人收藏的問題和答案獲取到之后保存為word文檔,以便沒有網(wǎng)絡的時候可以查閱.當然,答案中如果有圖片的話也是可以獲取到的.不過這塊還是有點問題的.等以后有時間了再修改修改吧.
還有就是正則,用的簡直不要太爛…鄙視下自己…
還有,現(xiàn)在是問題的話所有的答案都會保存下來的.看看有時間修改成只保存第一個答案或者收藏頁問題的答案吧.要不然如果收藏的太多了的話保存下來的word會嚇你一跳的哦.O(∩_∩)O哈哈~
在登陸的時候可能會需要驗證碼,如果提示輸入驗證碼的話在程序的文件夾下面就可以看到驗證碼的圖片,照著輸入就ok了.
- # -*- coding: utf-8 -*-
- #登陸知乎抓取個人收藏 然后保存為word
- import sys
- reload(sys)
- sys.setdefaultencoding('utf-8')
- import urllib
- import urllib2
- import cookielib
- import string
- import re
- from bs4 import BeautifulSoup
- from docx import Document
- from docx import *
- from docx.shared import Inches
- from sys import exit
- import os
- #這兒是因為在公司上網(wǎng)的話需要使用socket代理
- #import socks
- #import socket
- #socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5,"127.0.0.1",8088)
- #socket.socket =socks.socksocket
# Login endpoint and a browser-like User-Agent so zhihu accepts the request.
loginurl = 'http://www.zhihu.com/login'
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36',}

# Login form fields; email/password are filled in at startup.
# NOTE(review): the _xsrf token is hard-coded here, but zhihu rotates it --
# it should presumably be scraped from the login page; confirm before relying on it.
postdata = {
    '_xsrf': 'acab9d276ea217226d9cc94a84a231f7',
    'email': '',
    'password': '',
    'rememberme':'y'
}

# Start from a clean slate: an image folder for downloaded pictures,
# and no stale output document or captcha image from a previous run.
if not os.path.exists('myimg'):
    os.mkdir('myimg')
if os.path.exists('123.docx'):
    os.remove('123.docx')
if os.path.exists('checkcode.gif'):
    os.remove('checkcode.gif')

mydoc = Document()   # the Word document every question/answer is appended to
questiontitle = ''
- #----------------------------------------------------------------------
- def dealimg(imgcontent):
- soup=BeautifulSoup(imgcontent)
- try:
- for imglink in soup.findAll('img'):
- if imglink is not None :
- myimg= imglink.get('src')
- #print myimg
- if myimg.find('http')>=0:
- imgsrc=urllib2.urlopen(myimg).read()
- imgnamere=re.compile(r'http\S*/')
- imgname=imgnamere.sub('',myimg)
- #print imgname
- with open(u'myimg'+'/'+imgname,'wb') as code:
- code.write(imgsrc)
- mydoc.add_picture(u'myimg/'+imgname,width=Inches(1.25))
- except:
- pass
- strinfo=re.compile(r'<noscript>[\s\S]*</noscript>')
- imgcontent=strinfo.sub('',imgcontent)
- strinfo=re.compile(r'<img class[\s\S]*</>')
- imgcontent=strinfo.sub('',imgcontent)
- #show all
- strinfo=re.compile(r'<a class="toggle-expand[\s\S]*</a>')
- imgcontent=strinfo.sub('',imgcontent)
- strinfo=re.compile(r'<a class=" wrap external"[\s\S]*rel="nofollow noreferrer" target="_blank">')
- imgcontent=strinfo.sub('',imgcontent)
- imgcontent=imgcontent.replace('<i class="icon-external"></i></a>','')
- imgcontent=imgcontent.replace('</b>','').replace('</p>','').replace('<p>','').replace('<p>','').replace('<br>','')
- return imgcontent
def enterquestionpage(pageurl):
    """Fetch one question page and append it to the global word document:
    the page title as a level-3 heading, then every answer summary as a
    body paragraph (images handled by dealimg).

    pageurl -- absolute url of a zhihu question page.
    """
    html = urllib2.urlopen(pageurl).read()
    soup = BeautifulSoup(html)
    questiontitle = soup.title.string
    mydoc.add_heading(questiontitle, level=3)
    for div in soup.findAll('div', {'class': 'fixed-summary zm-editable-content clearfix'}):
        conent = str(div).replace('<div class="fixed-summary zm-editable-content clearfix">', '').replace('</div>', '')
        conent = conent.decode('utf-8')
        conent = conent.replace('<br/>', '\n')
        conent = dealimg(conent)
        # TODO: this manual tag stripping is far too fragile -- look for a
        # proper html-to-text module when there is time.
        conent = conent.replace('<div class="fixed-summary-mask">', '').replace('<blockquote>', '').replace('<b>', '').replace('<strong>', '').replace('</strong>', '').replace('<em>', '').replace('</em>', '').replace('</blockquote>', '')
        mydoc.add_paragraph(conent, style='BodyText3')
- def entercollectpage(pageurl):
- html=urllib2.urlopen(pageurl).read()
- soup=BeautifulSoup(html)
- for div in soup.findAll('div',{'class':'zm-item'}):
- h2content=div.find('h2',{'class':'zm-item-title'})
- #print h2content
- if h2content is not None:
- link=h2content.find('a')
- mylink=link.get('href')
- quectionlink='http://www.zhihu.com'+mylink
- enterquestionpage(quectionlink)
- print quectionlink
- def loginzhihu():
- postdatastr=urllib.urlencode(postdata)
- '''
- cj = cookielib.LWPCookieJar()
- cookie_support = urllib2.HTTPCookieProcessor(cj)
- opener = urllib2.build_opener(cookie_support,urllib2.HTTPHandler)
- urllib2.install_opener(opener)
- '''
- h = urllib2.urlopen(loginurl)
- request = urllib2.Request(loginurl,postdatastr,headers)
- request.get_origin_req_host
- response = urllib2.urlopen(request)
- #print response.geturl()
- text = response.read()
- collecturl='http://www.zhihu.com/collections'
- req=urllib2.urlopen(collecturl)
- if str(req.geturl())=='http://www.zhihu.com/?next=%2Fcollections':
- print 'login fail!'
- return
- txt=req.read()
- soup=BeautifulSoup(txt)
- count=0
- divs =soup.findAll('div',{'class':'zm-item'})
- if divs is None:
- print 'login fail!'
- return
- print 'login ok!\n'
- for div in divs:
- link=div.find('a')
- mylink=link.get('href')
- collectlink='http://www.zhihu.com'+mylink
- entercollectpage(collectlink)
- print collectlink
- #這兒是當時做測試用的,值獲取一個收藏
- #count+=1
- #if count==1:
- # return
def getcheckcode(thehtml):
    """Look for a captcha on the login page *thehtml*.  When one is present,
    download it to checkcode.gif and return True; otherwise return False."""
    soup = BeautifulSoup(thehtml)
    captcha_div = soup.find('div', {'class': 'js-captcha captcha-wrap'})
    if captcha_div is None:
        return False
    img = captcha_div.find('img')
    imglink = img.get('src')
    if imglink is None:
        return False
    imgcontent = urllib2.urlopen('http://www.zhihu.com' + imglink).read()
    with open('checkcode.gif', 'wb') as code:
        code.write(imgcontent)
    return True
- if __name__=='__main__':
- import getpass
- username=raw_input('input username:')
- password=getpass.getpass('Enter password: ')
- postdata['email']=username
- postdata['password']=password
- postdatastr=urllib.urlencode(postdata)
- cj = cookielib.LWPCookieJar()
- cookie_support = urllib2.HTTPCookieProcessor(cj)
- opener = urllib2.build_opener(cookie_support,urllib2.HTTPHandler)
- urllib2.install_opener(opener)
- h = urllib2.urlopen(loginurl)
- request = urllib2.Request(loginurl,postdatastr,headers)
- response = urllib2.urlopen(request)
- txt = response.read()
- if getcheckcode(txt):
- checkcode=raw_input('input checkcode:')
- postdata['captcha']=checkcode
- loginzhihu()
- mydoc.save('123.docx')
- else:
- loginzhihu()
- mydoc.save('123.docx')
- print 'the end'
- raw_input()
好了,大概就是這樣,大家如果有什么好的建議或者什么的可以再下面留言,我會盡快回復的.或者在小站的關于頁面有我的聯(lián)系方式,直接聯(lián)系我就ok.