Python multi-threaded download example with resume support
2020-11-27 14:29:56  Editor: Xiaocai

The script splits the target file into fixed-size blocks, downloads each block in its own worker thread via an HTTP Range request, and every couple of seconds persists the per-block offsets to a companion .inf file, so an interrupted download can be resumed from where it stopped. Note that the code targets Python 2 (it uses urllib2, urlparse and cPickle).

The code is as follows:


#! /usr/bin/env python
#coding=utf-8

from __future__ import unicode_literals

from multiprocessing.dummy import Pool as ThreadPool
import threading

import os
import sys
import cPickle
from collections import namedtuple
import urllib2
from urlparse import urlsplit

import time


# global lock
lock = threading.Lock()


# default parameters
defaults = dict(thread_count=10,
                buffer_size=10*1024,
                block_size=1000*1024)


def progress(percent, width=50):
    print "%s %d%%\r" % (('%%-%ds' % width) % (width * percent / 100 * '='), percent),
    if percent >= 100:
        print
    sys.stdout.flush()


def write_data(filepath, data):
    with open(filepath, 'wb') as output:
        cPickle.dump(data, output)


def read_data(filepath):
    with open(filepath, 'rb') as output:
        return cPickle.load(output)


FileInfo = namedtuple('FileInfo', 'url name size lastmodified')


def get_file_info(url):
    class HeadRequest(urllib2.Request):
        def get_method(self):
            return "HEAD"
    res = urllib2.urlopen(HeadRequest(url))
    res.read()
    headers = dict(res.headers)
    size = int(headers.get('content-length', 0))
    lastmodified = headers.get('last-modified', '')
    name = None
    if headers.has_key('content-disposition'):
        name = headers['content-disposition'].split('filename=')[1]
        if name[0] == '"' or name[0] == "'":
            name = name[1:-1]
    else:
        name = os.path.basename(urlsplit(url)[2])

    return FileInfo(url, name, size, lastmodified)


def download(url, output,
             thread_count=defaults['thread_count'],
             buffer_size=defaults['buffer_size'],
             block_size=defaults['block_size']):
    # get latest file info
    file_info = get_file_info(url)

    # init paths
    if output is None:
        output = file_info.name
    workpath = '%s.ing' % output
    infopath = '%s.inf' % output

    # split the file into blocks. every block is a list [start, offset, end];
    # each worker thread downloads the file part described by one block and
    # updates the block's offset as it goes.
    blocks = []

    if os.path.exists(infopath):
        # load blocks saved by a previous (interrupted) run
        _x, blocks = read_data(infopath)
        if (_x.url != url or
                _x.name != file_info.name or
                _x.lastmodified != file_info.lastmodified):
            blocks = []

    if len(blocks) == 0:
        # set blocks
        if block_size > file_info.size:
            blocks = [[0, 0, file_info.size]]
        else:
            block_count, remain = divmod(file_info.size, block_size)
            blocks = [[i*block_size, i*block_size, (i+1)*block_size-1] for i in range(block_count)]
            blocks[-1][-1] += remain
        # create a new blank workpath
        with open(workpath, 'wb') as fobj:
            fobj.write('')

    print 'Downloading %s' % url
    # start monitor
    threading.Thread(target=_monitor, args=(infopath, file_info, blocks)).start()

    # start downloading
    with open(workpath, 'rb+') as fobj:
        args = [(url, blocks[i], fobj, buffer_size) for i in range(len(blocks)) if blocks[i][1] < blocks[i][2]]

        if thread_count > len(args):
            thread_count = len(args)

        pool = ThreadPool(thread_count)
        pool.map(_worker, args)
        pool.close()
        pool.join()

    # rename workpath to output
    if os.path.exists(output):
        os.remove(output)
    os.rename(workpath, output)

    # delete infopath
    if os.path.exists(infopath):
        os.remove(infopath)

    assert all([block[1] >= block[2] for block in blocks]) is True


def _worker((url, block, fobj, buffer_size)):
    # request only this block's byte range; block[1] is the current offset,
    # so a resumed run re-requests only the bytes that are still missing
    req = urllib2.Request(url)
    req.headers['Range'] = 'bytes=%s-%s' % (block[1], block[2])
    res = urllib2.urlopen(req)

    while 1:
        chunk = res.read(buffer_size)
        if not chunk:
            break
        with lock:
            # write the chunk at the block's current offset and advance it
            fobj.seek(block[1])
            fobj.write(chunk)
            block[1] += len(chunk)


def _monitor(infopath, file_info, blocks):
    while 1:
        with lock:
            percent = sum([block[1] - block[0] for block in blocks]) * 100 / file_info.size
            progress(percent)
            if percent >= 100:
                break
            # persist block offsets so the download can be resumed later
            write_data(infopath, (file_info, blocks))
        time.sleep(2)


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Download a file using multiple threads.')
    parser.add_argument('url', type=str, help='url of the file to download')
    parser.add_argument('-o', type=str, default=None, dest="output", help='output file')
    parser.add_argument('-t', type=int, default=defaults['thread_count'], dest="thread_count", help='number of download threads')
    parser.add_argument('-b', type=int, default=defaults['buffer_size'], dest="buffer_size", help='buffer size in bytes')
    parser.add_argument('-s', type=int, default=defaults['block_size'], dest="block_size", help='block size in bytes')

    argv = sys.argv[1:]

    if len(argv) == 0:
        argv = ['https://eyes.nasa.gov/eyesproduct/EYES/os/win']

    args = parser.parse_args(argv)

    start_time = time.time()
    download(args.url, args.output, args.thread_count, args.buffer_size, args.block_size)
    print 'time: %ds' % int(time.time() - start_time)
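
To use it, save the script as a standalone file (download.py below is only an example name) and run it from the command line under Python 2, for instance:

python download.py http://example.com/file.zip -o file.zip -t 10

Here -t sets the number of worker threads, -b the read buffer size and -s the block size (both in bytes). If the download is interrupted, re-running the same command resumes it: the partial data stays in file.zip.ing, the per-block offsets stay in file.zip.inf, and only the missing byte ranges are requested again. This only works against servers that honor the HTTP Range header.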
