91超碰碰碰碰久久久久久综合_超碰av人澡人澡人澡人澡人掠_国产黄大片在线观看画质优化_txt小说免费全本

溫馨提示×

溫馨提示×

您好,登錄后才能下訂單哦!

密碼登錄×
登錄注冊×
其他方式登錄
點擊 登錄注冊 即表示同意《億速云用戶服務條款》

python 知識星球文件下載

發布時間:2020-07-18 10:04:41 來源:網絡 閱讀:2535 作者:xuwu147220591 欄目:編程語言
#!/usr/bin/python3
# -*- coding: UTF-8 -*-

import requests
import json
from urllib.parse import quote
import os
from pyquery import PyQuery as pq
import datetime

# HTTP headers for the zsxq (知識星球) API.
# NOTE(review): 'Authorization' and 'x-request-id' look like account-specific
# credentials captured from a browser session — they will expire; replace
# them with your own before running.
headers = {
    'Authorization': '37923FBC-C87D-454C-902D-A81DB0834605',
    'x-request-id': "73e67a6f-cf88-4c10-26da-a30441464ed5",
    'accept': "application/json, text/plain, */*",
    'host': "api.zsxq.com",
    'connection': "keep-alive",
    'referer': "https://wx.zsxq.com/dweb/",
    'user-agent': "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
}

def readtopicurl(filename):
    """Return the topics URL previously saved in *filename*."""
    with open(filename, 'r') as handle:
        return handle.read()

def writetopicurl(filename, url):
    """Persist *url* to *filename*, overwriting any existing content.

    Returns True on success, False if the file could not be written.
    The original used a bare ``except:``, which would also swallow
    KeyboardInterrupt/SystemExit; only file-system errors are caught now.
    """
    try:
        with open(filename, 'w') as handle:
            handle.write(url)
        return True
    except OSError:
        return False

def getDownloadURL(file_id):
    """Build the zsxq API endpoint that resolves *file_id* to a download URL."""
    return 'https://api.zsxq.com/v1.10/files/{0}/download_url'.format(file_id)

def get_topic_list(topics_url, end_time=None):
    """Append an ``end_time`` pagination cursor to *topics_url*.

    Returns *topics_url* unchanged when *end_time* is falsy.  The original
    raised UnboundLocalError in that case because ``url`` was only assigned
    inside the ``if`` branch.
    """
    if end_time:
        return topics_url + '&end_time=' + quote(end_time)
    return topics_url

def get_endtime(create_time):
    """Return *create_time* minus one millisecond, as a pagination cursor.

    *create_time* is a timestamp string of the form
    ``2020-07-18T10:04:41.123+0800``.  Returns None (after printing
    "error") when the string cannot be parsed.

    The original hand-sliced the millisecond digits and special-cased
    '.000'; plain datetime arithmetic covers both paths.  It also caught
    ``IndexError``, which string slicing never raises — a malformed input
    actually raised ``ValueError`` and crashed.
    """
    try:
        moment = datetime.datetime.strptime(create_time, "%Y-%m-%dT%H:%M:%S.%f+0800")
        moment -= datetime.timedelta(milliseconds=1)
        # %f emits microseconds; keep only the millisecond digits to match
        # the API's timestamp format.
        return moment.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + '+0800'
    except ValueError:
        print("error")
        return None

def request_topics_url(topics_url, headers):
    """GET *topics_url* with *headers*; return the decoded JSON body,
    or None on any non-200 response."""
    response = requests.get(topics_url, headers=headers)
    if response.status_code != 200:
        return None
    return json.loads(response.text)

def download_file(index, url, file_name, filedir):
    """Download *url* into ``<cwd>/<filedir>/<file_name>``.

    Skips the download (returning True) when the target file already
    exists.  Returns True on a successful download, False on a non-200
    HTTP response.

    The original built the path with hard-coded ``'\\'`` separators
    ('{0}\\{1}\\{2}'), which only works on Windows; ``os.path.join`` is
    portable.
    """
    if not os.path.exists(filedir):
        os.mkdir(filedir)
    file_fullpath = os.path.join(os.getcwd(), filedir, file_name)
    if os.path.exists(file_fullpath):
        return True
    file_res = requests.get(url)
    if file_res.status_code == 200:
        with open(file_fullpath, 'wb') as f:
            f.write(file_res.content)
        print('----第 {0}個文件:{1}下載成功!'.format(index + 1, file_name))
        return True
    return False

def download_file_url(url):
    # Resolve a zsxq ``/download_url`` API endpoint to the actual file URL.
    # The response text is loaded through PyQuery and the JSON is read back
    # from the <p> element text — presumably to strip HTML wrapping around
    # the JSON body; NOTE(review): requests' own .json() may work directly
    # on this endpoint — confirm before simplifying.
    doc = pq(requests.get(url,headers=headers).text)
    res_data = json.loads(doc('p').text()) 
    fileurl = res_data['resp_data']['download_url'] 
    return fileurl     

if __name__ == "__main__":
    # Example: download every file from "老齊的讀書圈" (group 454548818428),
    # 20 files per API page.
    init_topics_url = 'https://api.zsxq.com/v1.10/groups/454548818428/files?count=20'
    urlfile = 'temp_topics_url.txt'
    filedir = '讀書圈文件'
    # Resume from the last saved pagination URL when one exists.
    if not os.path.exists(urlfile):
        writetopicurl(urlfile, init_topics_url)
        topic_urls = init_topics_url
    else:
        topic_urls = readtopicurl(urlfile)

    print('file_urls:{0}'.format(topic_urls))

    while True:
        resp_data = request_topics_url(topic_urls, headers)
        # request_topics_url returns None on a non-200 response; the
        # original crashed with a TypeError when that happened.
        if resp_data is None:
            print('request failed, try again later')
            break
        filelist = resp_data['resp_data']['files']
        for index, urlinfo in enumerate(filelist):
            file_id = urlinfo['file']['file_id']
            file_name = urlinfo['file']['name']
            create_time = urlinfo['file']['create_time']
            downloadurl = getDownloadURL(file_id)
            file_url = download_file_url(downloadurl)
            download_file(index, file_url, file_name, filedir)
            # A full page holds 20 entries; after the last one, derive the
            # next page's end_time cursor and persist it for resuming.
            if index == 19:
                end_time = get_endtime(create_time)
                topic_urls = get_topic_list(init_topics_url, end_time)
                writetopicurl(urlfile, topic_urls)
                print('topic_urls:{0}'.format(topic_urls))
        # A short page means the final batch has been fetched.
        if len(filelist) < 20:
            print('全部文件下載完成!!!')
            break
向AI問一下細節

免責聲明:本站發布的內容(圖片、視頻和文字)以原創、轉載和分享為主,文章觀點不代表本網站立場,如果涉及侵權請聯系站長郵箱:is@yisu.com進行舉報,并提供相關證據,一經查實,將立刻刪除涉嫌侵權內容。

AI

敦煌市| 沛县| 株洲县| 沙河市| 潢川县| 武城县| 贺州市| 通许县| 怀宁县| 三原县| 长顺县| 宜阳县| 大埔区| 西峡县| 雷山县| 怀化市| 吕梁市| 长治市| 德钦县| 马鞍山市| 凉山| 清河县| 长宁区| 盐津县| 台东县| 临武县| 固原市| 绥滨县| 永宁县| 洛川县| 横峰县| 老河口市| 梨树县| 嵊泗县| 承德县| 四川省| 合作市| 高密市| 沁水县| 汝城县| 达日县|