Let's first look at the example code:
from time import sleep

import faker
import requests
from lxml import etree

fake = faker.Faker()

base_url = "http://angelimg.spbeen.com"


def downloadHtml(url):
    # Fetch a page with a random User-Agent and a fixed Referer
    user_agent = fake.user_agent()
    headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen.com/"}
    response = requests.get(url, headers=headers)
    return response.text


def get_next_link(url):
    # Return the absolute URL of the "next" link, or False when there is none
    content = downloadHtml(url)
    html = etree.HTML(content)
    next_url = html.xpath("//a[@class='ch next']/@href")
    if next_url:
        return base_url + next_url[0]
    else:
        return False


def getImgUrl(content):
    # Extract the image URL and the page title from the article page
    html = etree.HTML(content)
    img_url = html.xpath('//*[@id="content"]/a/img/@src')
    title = html.xpath(".//div[@class='article']/h3/text()")
    return img_url[0], title[0]


def saveImg(title, img_url):
    if img_url is not None and title is not None:
        with open("txt/" + str(title) + ".jpg", 'wb') as f:
            user_agent = fake.user_agent()
            headers = {'User-Agent': user_agent, "Referer": "http://angelimg.spbeen.com/"}
            response = requests.get(img_url, headers=headers)
            # request_view(response)
            f.write(response.content)


def request_view(response):
    # Debug helper: dump the response to a temp file and open it in a browser
    import webbrowser
    request_url = response.url
    base_tag = ('<head><base href="%s">' % request_url).encode()
    content = response.content.replace(b"<head>", base_tag)
    with open('tmp.html', 'wb') as tem_html:
        tem_html.write(content)
    webbrowser.open_new_tab('tmp.html')


def crawl_img(url):
    content = downloadHtml(url)
    img_url, title = getImgUrl(content)
    saveImg(title, img_url)


if __name__ == "__main__":
    url = "http://angelimg.spbeen.com/ang/4968/1"
    while url:
        print(url)
        crawl_img(url)
        sleep(1)  # be polite: pause between page requests
        url = get_next_link(url)
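One weakness of the code above: a single timeout or connection error anywhere in the while loop kills the whole crawl. Here is a minimal hardening sketch (not from the original article; the retry count, backoff, and timeout values are arbitrary assumptions) that wraps the fetch in retries:

import requests
from time import sleep


def download_html_with_retry(url, headers, retries=3, timeout=10):
    """Fetch a page, retrying a few times on network errors.

    retries and timeout are illustrative values, not from the article.
    """
    for attempt in range(retries):
        try:
            response = requests.get(url, headers=headers, timeout=timeout)
            response.raise_for_status()
            return response.text
        except requests.RequestException:
            if attempt == retries - 1:
                raise
            sleep(2 ** attempt)  # simple exponential backoff before retrying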
How a Python crawler automatically loops through next pages to load text
from bs4 import BeautifulSoup
import requests
import time
from lxml import etree


# This demo shows how to scrape some text with BeautifulSoup
def start():
    # Make the HTTP request
    html = requests.get('http://www.baidu.com')
    # Fix the encoding
    html.encoding = html.apparent_encoding
    # Build the soup
    soup = BeautifulSoup(html.text, 'html.parser')
    print(type(soup))
    print('Printing the document')
    print(soup.prettify())
    # Grab the title; attribute navigation offers no IDE hints, so it is shown directly
    title = soup.head.title.string
    print(title)
    # Write the title to a text file
    with open(r'C:/Users/a/Desktop/a.txt', 'w') as f:
        f.write(title)
    print(time.localtime())


url_2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/sz_4.shtml'


def get_html_from_bs4(url):
    # response = requests.get(url, headers=data, proxies=ip).content.decode('utf-8')
    response = requests.get(url).content.decode('utf-8')
    soup = BeautifulSoup(response, 'html.parser')
    # The 9th link in the pager is used as the "next page" link here
    next_page = soup.select('#displaypagenum a:nth-of-type(9)')[0].get('href')
    print(next_page)
    next2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/' + next_page


def get_html_from_etree(url):
    response = requests.get(url).content.decode('utf-8')
    html = etree.HTML(response)
    # The 8th a.PageNum element holds the "next page" href
    next_page = html.xpath('.//a[@class="PageNum"][8]/@href')[0]
    print(next_page)
    # next2 = 'http://news.gdzjdaily.com.cn/zjxw/politics/' + next_page


get_html_from_etree(url_2)

if __name__ == '__main__':
    start()
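Note that this second example only extracts and prints the next-page link; the loop itself is never shown. A minimal sketch of how the pieces could be chained into an actual next-page loop, reusing the XPath from get_html_from_etree (the urljoin, the visited-set stop condition, and the max_pages cap are assumptions added here, not part of the original):

import requests
from lxml import etree
from urllib.parse import urljoin


def crawl_all_pages(start_url, max_pages=50):
    """Follow the 'PageNum' pagination links until none are left.

    max_pages is a safety cap added for this sketch; the original
    article never shows the loop itself.
    """
    url = start_url
    seen = set()
    while url and url not in seen and len(seen) < max_pages:
        seen.add(url)
        print(url)
        content = requests.get(url).content.decode('utf-8')
        html = etree.HTML(content)
        # same XPath as get_html_from_etree above
        next_page = html.xpath('.//a[@class="PageNum"][8]/@href')
        url = urljoin(url, next_page[0]) if next_page else None


crawl_all_pages('http://news.gdzjdaily.com.cn/zjxw/politics/sz_4.shtml')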
This concludes the article on implementing "fetch the next page" in a Python crawler. For more on getting the next page with Python crawlers, search 億速云's earlier articles or browse the related articles below. We hope you will continue to support 億速云!