Xem trên Github
single_thread.py
import requests
from time import perf_counter

# number of bytes to read from the response per iteration
buffer_size = 1024


def download(url):
    """Download the file at *url* and save it under its basename.

    The response body is streamed chunk by chunk via ``iter_content``
    instead of being loaded into memory all at once.
    """
    # stream=True defers downloading the body until iter_content() is consumed
    response = requests.get(url, stream=True)
    # derive the local file name from the last path segment of the URL
    filename = url.split("/")[-1]
    with open(filename, "wb") as f:
        for data in response.iter_content(buffer_size):
            # write the chunk just read to the file
            f.write(data)


if __name__ == "__main__":
    # URLs must carry an explicit scheme; requests rejects protocol-relative
    # "//host/..." URLs with MissingSchema.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    t = perf_counter()
    for url in urls:
        download(url)
    print(f"Time took: {perf_counter() - t:.2f}s")
multiple_threads.py
import requests
from concurrent.futures import ThreadPoolExecutor
from time import perf_counter

# number of worker threads in the pool
n_threads = 5
# number of bytes to read from the response per iteration
buffer_size = 1024


def download(url):
    """Download the file at *url* and save it under its basename.

    The response body is streamed chunk by chunk via ``iter_content``
    instead of being loaded into memory all at once.
    """
    # stream=True defers downloading the body until iter_content() is consumed
    response = requests.get(url, stream=True)
    # derive the local file name from the last path segment of the URL
    filename = url.split("/")[-1]
    with open(filename, "wb") as f:
        for data in response.iter_content(buffer_size):
            # write the chunk just read to the file
            f.write(data)


if __name__ == "__main__":
    # URLs must carry an explicit scheme; requests rejects protocol-relative
    # "//host/..." URLs with MissingSchema.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    t = perf_counter()
    # exiting the with-block waits for every submitted download to finish
    with ThreadPoolExecutor(max_workers=n_threads) as pool:
        pool.map(download, urls)
    print(f"Time took: {perf_counter() - t:.2f}s")
multiple_threads_using_threading.py
import requests
from threading import Thread
from queue import Queue

# thread-safe queue holding the URLs still to be downloaded
q = Queue()
# number of worker threads to spawn
n_threads = 5
# number of bytes to read from the response per iteration
buffer_size = 1024


def download():
    """Worker loop: pull URLs from the shared queue and download each one.

    Runs forever; the threads are daemons, so they die with the main
    thread once ``q.join()`` returns.
    """
    while True:
        # block until a url is available in the queue
        url = q.get()
        try:
            # stream=True defers downloading the body until iter_content() runs
            response = requests.get(url, stream=True)
            # derive the local file name from the last path segment of the URL
            filename = url.split("/")[-1]
            with open(filename, "wb") as f:
                for data in response.iter_content(buffer_size):
                    # write the chunk just read to the file
                    f.write(data)
        finally:
            # mark the item done even if the download raised, otherwise
            # q.join() in the main thread would block forever
            q.task_done()


if __name__ == "__main__":
    # URLs must carry an explicit scheme; requests rejects protocol-relative
    # "//host/..." URLs with MissingSchema.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    # fill the queue with all the urls
    for url in urls:
        q.put(url)
    # start the worker threads; daemon threads end when the main thread ends
    for _ in range(n_threads):
        worker = Thread(target=download, daemon=True)
        worker.start()
    # block until every queued url has been processed
    q.join()
Luồng cho phép song song hóa mã, và Python có hai cách để đạt được điều đó: thứ nhất là thông qua mô-đun đa xử lý (multiprocessing) và thứ hai là thông qua mô-đun đa luồng (threading). Đa luồng rất phù hợp để tăng tốc các tác vụ liên quan đến I/O như thực hiện yêu cầu web, thao tác cơ sở dữ liệu hoặc đọc/ghi tệp. Ngược lại, các tác vụ chuyên sâu về CPU như tính toán toán học được hưởng lợi nhiều nhất khi sử dụng đa xử lý. Điều này xảy ra do GIL (Global Interpreter Lock — Khóa phiên dịch toàn cục).
Từ Python 3.2 trở đi, một lớp mới có tên ThreadPoolExecutor đã được giới thiệu trong mô-đun concurrent.futures của Python để quản lý và tạo luồng hiệu quả. Nhưng nếu Python đã có sẵn mô-đun threading thì tại sao một mô-đun mới lại được giới thiệu? Hãy để tôi trả lời điều này trước.
- Tạo các luồng mới một cách nhanh chóng không phải là vấn đề khi số lượng luồng ít hơn, nhưng việc quản lý luồng sẽ trở nên thực sự cồng kềnh nếu chúng ta đang xử lý nhiều luồng. Ngoài ra, việc tạo ra quá nhiều luồng sẽ không hiệu quả về mặt tính toán, dẫn đến giảm thông lượng. Một cách tiếp cận để duy trì thông lượng là tạo và khởi tạo trước một nhóm các luồng nhàn rỗi và sử dụng lại các luồng từ nhóm này cho đến khi tất cả các luồng đều cạn kiệt. Bằng cách này, chi phí tạo chủ đề mới được giảm bớt
- Ngoài ra, nhóm theo dõi và quản lý vòng đời của các luồng và lên lịch cho chúng thay mặt cho lập trình viên, do đó làm cho mã đơn giản hơn và ít lỗi hơn
Cú pháp: concurrent.futures.ThreadPoolExecutor(max_workers=None, thread_name_prefix='', initializer=None, initargs=())
Thông số
- max_workers: số luồng, hay còn gọi là kích thước của nhóm (pool). Từ Python 3.8 trở đi, giá trị mặc định là min(32, os.cpu_count() + 4), trong đó ít nhất 5 luồng được dành cho các tác vụ ràng buộc I/O.
- thread_name_prefix. thread_name_prefix đã được thêm từ python 3. 6 trở đi để đặt tên cho luồng nhằm mục đích gỡ lỗi dễ dàng hơn
- initializer: một callable được gọi khi bắt đầu mỗi luồng công nhân.
- initargs: một bộ (tuple) đối số được truyền cho initializer.
Phương thức ThreadPoolExecutor
Lớp ThreadPoolExecutor hiển thị ba phương thức để thực thi các luồng không đồng bộ. Một lời giải thích chi tiết được đưa ra dưới đây
- submit(fn, *args, **kwargs): chạy một callable hoặc một phương thức và trả về một đối tượng Future biểu thị trạng thái thực thi của nó.
- map(fn, *iterables, timeout=None, chunksize=1):
- Nó ánh xạ phương thức lên các iterable một cách đồng thời và sẽ đưa ra ngoại lệ concurrent.futures.TimeoutError nếu không hoàn thành trong giới hạn timeout.
- Nếu các lần lặp rất lớn, thì việc có kích thước khối lớn hơn 1 có thể cải thiện hiệu suất khi sử dụng ProcessPoolExecutor nhưng với ThreadPoolExecutor thì không có lợi thế như vậy, tức là có thể để giá trị mặc định của nó
- shutdown(wait=True, *, cancel_futures=False):
- Nó báo hiệu cho executor giải phóng tất cả tài nguyên khi các future thực thi xong.
- Sau khi gọi shutdown(), mọi lời gọi executor.submit() hoặc executor.map() tiếp theo sẽ ném RuntimeError.
- wait=True làm cho phương thức không trả về cho đến khi tất cả các luồng thực thi xong và tài nguyên được giải phóng.
- cancel_futures=True thì executor sẽ hủy tất cả các future chưa bắt đầu chạy.
ví dụ 1
Đoạn mã dưới đây minh họa việc sử dụng ThreadPoolExecutor, lưu ý rằng không giống như mô-đun luồng, chúng tôi không phải gọi rõ ràng bằng cách sử dụng vòng lặp, theo dõi luồng bằng cách sử dụng danh sách hoặc đợi các luồng bằng cách sử dụng nối để đồng bộ hóa hoặc giải phóng tài nguyên sau các luồng
Python3
import requests
from threading import Thread
from queue import Queue

# thread-safe queue holding the URLs still to be downloaded
q = Queue()
# number of worker threads to spawn
n_threads = 5
# number of bytes to read from the response per iteration
buffer_size = 1024


def download():
    """Worker loop: pull URLs from the shared queue and download each one.

    Runs forever; the threads are daemons, so they die with the main
    thread once ``q.join()`` returns.
    """
    while True:
        # block until a url is available in the queue
        url = q.get()
        try:
            # stream=True defers downloading the body until iter_content() runs
            response = requests.get(url, stream=True)
            # derive the local file name from the last path segment of the URL
            filename = url.split("/")[-1]
            with open(filename, "wb") as f:
                for data in response.iter_content(buffer_size):
                    # write the chunk just read to the file
                    f.write(data)
        finally:
            # mark the item done even if the download raised, otherwise
            # q.join() in the main thread would block forever
            q.task_done()


if __name__ == "__main__":
    # URLs must carry an explicit scheme; requests rejects protocol-relative
    # "//host/..." URLs with MissingSchema.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    # fill the queue with all the urls
    for url in urls:
        q.put(url)
    # start the worker threads; daemon threads end when the main thread ends
    for _ in range(n_threads):
        worker = Thread(target=download, daemon=True)
        worker.start()
    # block until every queued url has been processed
    q.join()
0 import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
1import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
2 import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
3import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
0 import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
5import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
2 import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
7
Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds0
Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds1
Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds2
Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds3
Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds4
Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds5
Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds4
Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds7
Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds4
Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds9
import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
00
import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
01 import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
02import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
03import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
04import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
05import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
06import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
07
import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
import requests
from threading import Thread
from queue import Queue

# thread-safe queue of URLs still to be downloaded
q = Queue()
# number of worker threads to spawn
n_threads = 5
# stream the response body in 1024-byte chunks
buffer_size = 1024


def download():
    """Worker loop: take URLs off the shared queue and save each to disk.

    Runs forever; intended to be started as a daemon thread so it dies
    with the main thread once the queue has been drained.
    """
    while True:
        # block until a URL is available
        url = q.get()
        # stream the body chunk by chunk instead of loading it all at once
        response = requests.get(url, stream=True)
        # use the last path segment as the local file name
        filename = url.split("/")[-1]
        with open(filename, "wb") as f:
            for data in response.iter_content(buffer_size):
                f.write(data)
        # mark this queue item as fully processed (unblocks q.join())
        q.task_done()


if __name__ == "__main__":
    # NOTE(review): the scraped page showed protocol-relative "//cdn..." URLs;
    # requests requires an explicit scheme, so "https:" is restored here.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    # fill the queue with all the urls before starting the workers
    for url in urls:
        q.put(url)
    # start the daemon worker threads
    for _ in range(n_threads):
        worker = Thread(target=download, daemon=True)
        worker.start()
    # block until every queued url has been processed
    q.join()
Sample output:

    Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading..
    Single Threaded Code Took: 2.5529379630024778 seconds
    **************************************************
    Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading..
    MultiThreaded Code Took: 0.5221083430078579 seconds
import requests
from threading import Thread
from queue import Queue

# thread-safe queue of URLs still to be downloaded
q = Queue()
# number of worker threads to spawn
n_threads = 5
# stream the response body in 1024-byte chunks
buffer_size = 1024


def download():
    """Worker loop: take URLs off the shared queue and save each to disk.

    Runs forever; intended to be started as a daemon thread so it dies
    with the main thread once the queue has been drained.
    """
    while True:
        # block until a URL is available
        url = q.get()
        # stream the body chunk by chunk instead of loading it all at once
        response = requests.get(url, stream=True)
        # use the last path segment as the local file name
        filename = url.split("/")[-1]
        with open(filename, "wb") as f:
            for data in response.iter_content(buffer_size):
                f.write(data)
        # mark this queue item as fully processed (unblocks q.join())
        q.task_done()


if __name__ == "__main__":
    # NOTE(review): the scraped page showed protocol-relative "//cdn..." URLs;
    # requests requires an explicit scheme, so "https:" is restored here.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    # fill the queue with all the urls before starting the workers
    for url in urls:
        q.put(url)
    # start the daemon worker threads
    for _ in range(n_threads):
        worker = Thread(target=download, daemon=True)
        worker.start()
    # block until every queued url has been processed
    q.join()
import requests
from threading import Thread
from queue import Queue

# thread-safe queue of URLs still to be downloaded
q = Queue()
# number of worker threads to spawn
n_threads = 5
# stream the response body in 1024-byte chunks
buffer_size = 1024


def download():
    """Worker loop: take URLs off the shared queue and save each to disk.

    Runs forever; intended to be started as a daemon thread so it dies
    with the main thread once the queue has been drained.
    """
    while True:
        # block until a URL is available
        url = q.get()
        # stream the body chunk by chunk instead of loading it all at once
        response = requests.get(url, stream=True)
        # use the last path segment as the local file name
        filename = url.split("/")[-1]
        with open(filename, "wb") as f:
            for data in response.iter_content(buffer_size):
                f.write(data)
        # mark this queue item as fully processed (unblocks q.join())
        q.task_done()


if __name__ == "__main__":
    # NOTE(review): the scraped page showed protocol-relative "//cdn..." URLs;
    # requests requires an explicit scheme, so "https:" is restored here.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    # fill the queue with all the urls before starting the workers
    for url in urls:
        q.put(url)
    # start the daemon worker threads
    for _ in range(n_threads):
        worker = Thread(target=download, daemon=True)
        worker.start()
    # block until every queued url has been processed
    q.join()
13import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
03import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
Sample output:

    Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading..
    Single Threaded Code Took: 2.5529379630024778 seconds
    **************************************************
    Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading..
    MultiThreaded Code Took: 0.5221083430078579 seconds
import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
17import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
03import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
19____31Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds7
import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
22import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
23import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
24____225import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
07import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
23import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
23import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
29import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
23import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
15Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Single Threaded Code Took :2.5529379630024778 seconds ************************************************** Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. Downloading.. MultiThreaded Code Took:0.5221083430078579 seconds1
import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
33import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
34import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
35import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
03import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
03import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
38 import requests
from threading import Thread
from queue import Queue
# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024
def download[]:
global q
while True:
# get the url from the queue
url = q.get[]
# download the body of response by chunk, not immediately
response = requests.get[url, stream=True]
# get the file name
filename = url.split["/"][-1]
with open[filename, "wb"] as f:
for data in response.iter_content[buffer_size]:
# write data read to the file
f.write[data]
# we're done downloading the file
q.task_done[]
if __name__ == "__main__":
urls = [
"//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
"//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
"//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
"//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
"//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
] * 5
# fill the queue with all the urls
for url in urls:
q.put[url]
# start the threads
for t in range[n_threads]:
worker = Thread[target=download]
# daemon thread means a thread that will end when the main thread ends
worker.daemon = True
worker.start[]
# wait until the queue is empty
q.join[]
import requests
from threading import Thread
from queue import Queue

# thread-safe queue of URLs still to be downloaded
q = Queue()
# number of worker threads to spawn
n_threads = 5
# stream the response body in 1024-byte chunks
buffer_size = 1024


def download():
    """Worker loop: take URLs off the shared queue and save each to disk.

    Runs forever; intended to be started as a daemon thread so it dies
    with the main thread once the queue has been drained.
    """
    while True:
        # block until a URL is available
        url = q.get()
        # stream the body chunk by chunk instead of loading it all at once
        response = requests.get(url, stream=True)
        # use the last path segment as the local file name
        filename = url.split("/")[-1]
        with open(filename, "wb") as f:
            for data in response.iter_content(buffer_size):
                f.write(data)
        # mark this queue item as fully processed (unblocks q.join())
        q.task_done()


if __name__ == "__main__":
    # NOTE(review): the scraped page showed protocol-relative "//cdn..." URLs;
    # requests requires an explicit scheme, so "https:" is restored here.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    # fill the queue with all the urls before starting the workers
    for url in urls:
        q.put(url)
    # start the daemon worker threads
    for _ in range(n_threads):
        worker = Thread(target=download, daemon=True)
        worker.start()
    # block until every queued url has been processed
    q.join()
import requests
from threading import Thread
from queue import Queue

# thread-safe queue holding the urls still to be downloaded
q = Queue()
# number of worker threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024


def download():
    """Worker loop: pull urls from the shared queue and save each to a local file.

    Runs forever; intended to be started as a daemon thread so it dies
    with the main thread once q.join() returns.
    """
    while True:
        # blocks until a url is available
        url = q.get()
        try:
            # stream the body in chunks instead of loading it all at once
            response = requests.get(url, stream=True)
            # use the last path segment as the local file name
            filename = url.split("/")[-1]
            with open(filename, "wb") as f:
                for data in response.iter_content(buffer_size):
                    # write the chunk just read to the file
                    f.write(data)
        finally:
            # always mark the task done, even on error — otherwise a single
            # failed download would make q.join() hang forever
            q.task_done()


if __name__ == "__main__":
    # NOTE: "https:" restored — requests raises MissingSchemaError on
    # scheme-relative ("//host/...") URLs.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    # fill the queue with all the urls
    for url in urls:
        q.put(url)
    # start the worker threads
    for _ in range(n_threads):
        worker = Thread(target=download)
        # daemon thread: it is killed automatically when the main thread ends
        worker.daemon = True
        worker.start()
    # block until task_done() has been called once per queued url
    q.join()
import requests
from threading import Thread
from queue import Queue

# thread-safe queue holding the urls still to be downloaded
q = Queue()
# number of worker threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024


def download():
    """Worker loop: pull urls from the shared queue and save each to a local file.

    Runs forever; intended to be started as a daemon thread so it dies
    with the main thread once q.join() returns.
    """
    while True:
        # blocks until a url is available
        url = q.get()
        try:
            # stream the body in chunks instead of loading it all at once
            response = requests.get(url, stream=True)
            # use the last path segment as the local file name
            filename = url.split("/")[-1]
            with open(filename, "wb") as f:
                for data in response.iter_content(buffer_size):
                    # write the chunk just read to the file
                    f.write(data)
        finally:
            # always mark the task done, even on error — otherwise a single
            # failed download would make q.join() hang forever
            q.task_done()


if __name__ == "__main__":
    # NOTE: "https:" restored — requests raises MissingSchemaError on
    # scheme-relative ("//host/...") URLs.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    # fill the queue with all the urls
    for url in urls:
        q.put(url)
    # start the worker threads
    for _ in range(n_threads):
        worker = Thread(target=download)
        # daemon thread: it is killed automatically when the main thread ends
        worker.daemon = True
        worker.start()
    # block until task_done() has been called once per queued url
    q.join()
import requests
from threading import Thread
from queue import Queue

# thread-safe queue holding the urls still to be downloaded
q = Queue()
# number of worker threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024


def download():
    """Worker loop: pull urls from the shared queue and save each to a local file.

    Runs forever; intended to be started as a daemon thread so it dies
    with the main thread once q.join() returns.
    """
    while True:
        # blocks until a url is available
        url = q.get()
        try:
            # stream the body in chunks instead of loading it all at once
            response = requests.get(url, stream=True)
            # use the last path segment as the local file name
            filename = url.split("/")[-1]
            with open(filename, "wb") as f:
                for data in response.iter_content(buffer_size):
                    # write the chunk just read to the file
                    f.write(data)
        finally:
            # always mark the task done, even on error — otherwise a single
            # failed download would make q.join() hang forever
            q.task_done()


if __name__ == "__main__":
    # NOTE: "https:" restored — requests raises MissingSchemaError on
    # scheme-relative ("//host/...") URLs.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    # fill the queue with all the urls
    for url in urls:
        q.put(url)
    # start the worker threads
    for _ in range(n_threads):
        worker = Thread(target=download)
        # daemon thread: it is killed automatically when the main thread ends
        worker.daemon = True
        worker.start()
    # block until task_done() has been called once per queued url
    q.join()
Đầu ra
import requests
from concurrent.futures import ThreadPoolExecutor
from time import perf_counter

# number of threads to spawn
n_threads = 5
# read 1024 bytes every time
buffer_size = 1024


def download(url):
    """Download the file at *url* into the current directory, streaming in chunks.

    The local file name is taken from the last path segment of the url.
    """
    # stream the body in chunks instead of loading it all into memory
    response = requests.get(url, stream=True)
    # use the last path segment as the local file name
    filename = url.split("/")[-1]
    with open(filename, "wb") as f:
        for data in response.iter_content(buffer_size):
            # write the chunk just read to the file
            f.write(data)


if __name__ == "__main__":
    # NOTE: "https:" restored — requests raises MissingSchemaError on
    # scheme-relative ("//host/...") URLs.
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5
    t = perf_counter()
    # the with-block waits for all submitted downloads before exiting
    with ThreadPoolExecutor(max_workers=n_threads) as pool:
        pool.map(download, urls)
    print(f"Time took: {perf_counter() - t:.2f}s")
Ví dụ 2
Đoạn mã dưới đây tìm nạp hình ảnh qua internet bằng cách thực hiện các yêu cầu HTTP; tôi sử dụng thư viện requests cho việc này. Phần đầu tiên của mã thực hiện lệnh gọi tuần tự, từng yêu cầu một tới API — tức là quá trình tải xuống chậm — trong khi phần thứ hai của mã đưa ra các yêu cầu song song bằng cách sử dụng các luồng để tìm nạp API.
Bạn có thể thử tất cả các tham số khác nhau được thảo luận ở trên để xem cách nó điều chỉnh tốc độ tăng tốc, ví dụ: nếu tôi tạo nhóm luồng gồm 6 thay vì 3 thì việc tăng tốc sẽ quan trọng hơn