Python ThreadPoolExecutor: Writing to Files

View on GitHub

single_thread.py

import requests
from time import perf_counter

# read 1024 bytes every time 
buffer_size = 1024

def download(url):
    # download the body of response by chunk, not immediately
    response = requests.get(url, stream=True)
    # get the file name
    filename = url.split("/")[-1]
    with open(filename, "wb") as f:
        for data in response.iter_content(buffer_size):
            # write data read to the file
            f.write(data)


if __name__ == "__main__":
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    t = perf_counter()
    for url in urls:
        download(url)
    print(f"Time took: {perf_counter() - t:.2f}s")

multiple_threads.py

import requests
from concurrent.futures import ThreadPoolExecutor
from time import perf_counter

# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024


def download(url):
    # download the body of response by chunk, not immediately
    response = requests.get(url, stream=True)
    # get the file name
    filename = url.split("/")[-1]
    with open(filename, "wb") as f:
        for data in response.iter_content(buffer_size):
            # write data read to the file
            f.write(data)


if __name__ == "__main__":
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    t = perf_counter()
    with ThreadPoolExecutor(max_workers=n_threads) as pool:
        pool.map(download, urls)
    print(f"Time took: {perf_counter() - t:.2f}s")

multiple_threads_using_threading.py

import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue()
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download():
    global q
    while True:
        # get the url from the queue
        url = q.get()
        # download the body of response by chunk, not immediately
        response = requests.get(url, stream=True)
        # get the file name
        filename = url.split("/")[-1]
        with open(filename, "wb") as f:
            for data in response.iter_content(buffer_size):
                # write data read to the file
                f.write(data)
        # we're done downloading the file
        q.task_done()


if __name__ == "__main__":
    urls = [
        "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put(url)

    # start the threads
    for t in range(n_threads):
        worker = Thread(target=download)
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start()

    # wait until the queue is empty
    q.join()

Threads allow code to run in parallel, and Python provides two ways to achieve this: the first is through the multiprocessing module and the second is through the threading module. Multithreading is well suited to speeding up I/O-bound tasks such as making web requests, performing database operations, or reading from and writing to files. In contrast, CPU-intensive tasks such as heavy mathematical computation benefit most from multiprocessing. This is because of the GIL (Global Interpreter Lock).
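
To make the I/O-bound case concrete, here is a minimal sketch (not from the article; the 0.5-second sleep merely stands in for a network or disk wait) comparing a sequential run with a thread pool:

from concurrent.futures import ThreadPoolExecutor
from time import perf_counter, sleep

def io_task(i):
    # stand-in for an I/O wait (network request, disk read, database query)
    sleep(0.5)
    return i

if __name__ == "__main__":
    t = perf_counter()
    for i in range(10):
        io_task(i)
    print(f"sequential: {perf_counter() - t:.2f}s")   # ~5s: the waits add up

    t = perf_counter()
    with ThreadPoolExecutor(max_workers=10) as pool:
        list(pool.map(io_task, range(10)))
    print(f"threaded:   {perf_counter() - t:.2f}s")   # ~0.5s: the waits overlap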

From Python 3.2 onwards, a new class called ThreadPoolExecutor was introduced in Python's concurrent.futures module to create and manage threads efficiently. But wait: if Python already had a built-in threading module, why was a new module introduced? Let me answer that first.

  • Spawning new threads on the fly is not a problem when the number of threads is small, but managing them becomes really cumbersome once we are dealing with many threads. Moreover, creating too many threads is computationally inefficient and leads to reduced throughput. One approach to sustaining throughput is to create and initialize a pool of idle threads in advance and reuse threads from this pool until all of them are exhausted; this way the cost of creating new threads is reduced (a short sketch after this list illustrates the reuse).
  • In addition, the pool tracks and manages the lifecycle of the threads and schedules them on behalf of the programmer, which makes the code simpler and less error-prone.
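
A small illustrative sketch of that reuse, using nothing beyond the standard library: nine tasks are served by only three worker threads, so no new thread is created per task.

from concurrent.futures import ThreadPoolExecutor
from threading import current_thread
from time import sleep

def task(i):
    sleep(0.1)               # give every worker a chance to pick up work
    return current_thread().name

with ThreadPoolExecutor(max_workers=3) as pool:
    names = list(pool.map(task, range(9)))

# nine tasks, but only three distinct worker threads ever existed
print(sorted(set(names)))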

Syntax: concurrent.futures.ThreadPoolExecutor(max_workers=None, thread_name_prefix='', initializer=None, initargs=())

Parameters

  • max_workers: the number of threads, i.e. the size of the pool. From Python 3.8 onwards its default value is min(32, os.cpu_count() + 4); out of these, at least 5 workers are reserved for I/O-bound tasks.
  • thread_name_prefix: added in Python 3.6 to give the worker threads a name, which makes debugging easier.
  • initializer: a callable that is invoked at the start of each worker thread.
  • initargs: a tuple of arguments passed to the initializer. A short sketch after this list shows these keyword arguments in use.
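
A brief sketch of these constructor arguments; the names init_worker, work and the "downloader" prefix are illustrative choices, not part of the API.

from concurrent.futures import ThreadPoolExecutor
from threading import current_thread

def init_worker(greeting):
    # runs once in each worker thread, right after the thread is created
    print(f"{greeting} from {current_thread().name}")

def work(x):
    return x * x

with ThreadPoolExecutor(max_workers=2,
                        thread_name_prefix="downloader",
                        initializer=init_worker,
                        initargs=("hello",)) as pool:
    print(list(pool.map(work, range(4))))   # [0, 1, 4, 9]
# worker thread names look like "downloader_0", "downloader_1"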

ThreadPoolExecutor methods

The ThreadPoolExecutor class exposes three methods for executing threads asynchronously. A detailed explanation of each is given below.

  1. submit(fn, *args, **kwargs): runs a callable or a method and returns a Future object representing the execution state of that call.
  2. map(fn, *iterables, timeout=None, chunksize=1):
    • It maps the callable over the iterables, submitting the calls immediately, and raises concurrent.futures.TimeoutError if the results cannot be retrieved within the timeout limit.
    • If the iterables are very large, a chunksize greater than 1 can improve performance with ProcessPoolExecutor, but with ThreadPoolExecutor there is no such advantage, i.e. it can be left at its default value.
  3. shutdown(wait=True, *, cancel_futures=False):
    • It signals the executor to free up all resources once the pending futures are done executing.
    • Calls to executor.submit() and executor.map() made after shutdown() raise a RuntimeError.
    • wait=True makes the method block until all running threads have finished and the resources are freed up.
    • cancel_futures=True makes the executor cancel all futures that have not started running yet. A short sketch of all three methods follows this list.
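
A short, self-contained sketch of the three methods; the square helper is just an example callable.

from concurrent.futures import ThreadPoolExecutor

def square(x):
    return x * x

executor = ThreadPoolExecutor(max_workers=2)

# submit() schedules a single call and immediately returns a Future
future = executor.submit(square, 7)
print(future.result())                                    # 49, blocks until done

# map() applies the callable to every item of the iterable(s)
print(list(executor.map(square, [1, 2, 3], timeout=5)))   # [1, 4, 9]

# shutdown() releases the workers; any submit()/map() after this raises RuntimeError
executor.shutdown(wait=True)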

Example 1

The snippet below demonstrates the use of ThreadPoolExecutor. Note that, unlike with the threading module, we do not have to explicitly start the threads in a loop, keep track of them in a list, or wait on them with join() to synchronize and release resources once the threads are finished.
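
A minimal sketch along those lines, reusing the pixabay URL list from the scripts above; the download helper, the printed messages, and the worker count are illustrative assumptions rather than exact code from the article.

import requests
from concurrent.futures import ThreadPoolExecutor
from time import perf_counter

urls = [
    "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
    "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
    "https://cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
    "https://cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
    "https://cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
]

def download(url):
    print("Downloading..")
    response = requests.get(url)
    filename = url.split("/")[-1]
    with open(filename, "wb") as f:
        f.write(response.content)

if __name__ == "__main__":
    t = perf_counter()
    for url in urls:
        download(url)
    print(f"Single Threaded Code Took :{perf_counter() - t} seconds")

    print("*" * 50)

    t = perf_counter()
    # no explicit start(), no list of threads, no join(): the pool handles all of it
    with ThreadPoolExecutor(max_workers=5) as pool:
        pool.map(download, urls)
    print(f"MultiThreaded Code Took:{perf_counter() - t} seconds")

Output of a sample run (the multithreaded version is roughly five times faster than the single-threaded one):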

Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
0
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
1
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
2
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
3
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
4
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
5
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
4
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
7
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
4
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
9
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
00

 

import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
01
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
02

import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
03
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
04
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
05
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
06
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
07

 

 

import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
08
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
09____31
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
1
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
12
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
13

import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
03
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
15
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
1
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
17

import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
03
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
19____31
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds
7
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
22

import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
23
import requests

from threading import Thread
from queue import Queue

# thread-safe queue initialization
q = Queue[]
# number of threads to spawn
n_threads = 5

# read 1024 bytes every time 
buffer_size = 1024

def download[]:
    global q
    while True:
        # get the url from the queue
        url = q.get[]
        # download the body of response by chunk, not immediately
        response = requests.get[url, stream=True]
        # get the file name
        filename = url.split["/"][-1]
        with open[filename, "wb"] as f:
            for data in response.iter_content[buffer_size]:
                # write data read to the file
                f.write[data]
        # we're done downloading the file
        q.task_done[]


if __name__ == "__main__":
    urls = [
        "//cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
        "//cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
        "//cdn.pixabay.com/photo/2016/10/21/14/50/plouzane-1758197__340.jpg",
        "//cdn.pixabay.com/photo/2016/11/29/05/45/astronomy-1867616__340.jpg",
        "//cdn.pixabay.com/photo/2014/07/28/20/39/landscape-404072__340.jpg",
    ] * 5

    # fill the queue with all the urls
    for url in urls:
        q.put[url]

    # start the threads
    for t in range[n_threads]:
        worker = Thread[target=download]
        # daemon thread means a thread that will end when the main thread ends
        worker.daemon = True
        worker.start[]

    # wait until the queue is empty
    q.join[]
The timing output below contrasts the single-threaded and multithreaded versions of the image download (see Example 2 further down):

Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Single Threaded Code Took :2.5529379630024778 seconds
**************************************************
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
Downloading..
MultiThreaded Code Took:0.5221083430078579 seconds

Example 2

The code below fetches images over the internet by making HTTP requests; I am using the requests library for this. The first part of the code calls the API one request at a time, i.e. the downloads are slow, while the second part issues the requests in parallel, using threads to fetch from the API.

You can try all the different parameters discussed above to see how they affect the speedup; for example, if I create a thread pool of 6 threads instead of 3, the speedup becomes more significant.
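
The article's code for this example is not reproduced here, so the following is only a minimal sketch of the pattern described above: the same downloads done first sequentially and then through a thread pool. It reuses the Pixabay URLs from the scripts above (with an explicit https: scheme added) and a worker count of 6; both are assumptions you can change.

import requests
from concurrent.futures import ThreadPoolExecutor
from time import perf_counter

# a small, repeated list of image urls (placeholders taken from the scripts above)
image_urls = [
    "https://cdn.pixabay.com/photo/2018/01/14/23/12/nature-3082832__340.jpg",
    "https://cdn.pixabay.com/photo/2013/10/02/23/03/dawn-190055__340.jpg",
] * 3

def fetch(url):
    print("Downloading..")
    # fetch the whole image body in a single request
    return requests.get(url).content

# first part: one request at a time
t = perf_counter()
for url in image_urls:
    fetch(url)
print(f"Single threaded code took: {perf_counter() - t:.2f}s")

# second part: the same requests issued in parallel by a thread pool
t = perf_counter()
with ThreadPoolExecutor(max_workers=6) as pool:  # try 3 vs 6 workers
    list(pool.map(fetch, image_urls))
print(f"Multithreaded code took: {perf_counter() - t:.2f}s")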

Writing to a file from threads

Writing to a file is not thread-safe: writing to the same file from multiple threads at the same time is not thread-safe and can lead to a race condition.
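
One straightforward way to avoid that race condition, sketched below, is to serialize the writes with a threading.Lock; this is not code from the article, and results.txt is just a placeholder filename.

from concurrent.futures import ThreadPoolExecutor
from threading import Lock

write_lock = Lock()

def log_result(line, filename="results.txt"):
    # only one thread at a time may hold the lock and touch the file
    with write_lock:
        with open(filename, "a") as f:
            f.write(line + "\n")

with ThreadPoolExecutor(max_workers=4) as pool:
    for i in range(10):
        pool.submit(log_result, f"task {i} finished")
    # leaving the with-block waits for all submitted writes to finish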

How does ThreadPoolExecutor work in Python?

ThreadPoolExecutor is an Executor subclass that uses a pool of threads to execute calls asynchronously: at most max_workers threads run the submitted calls. All threads enqueued to a ThreadPoolExecutor are joined before the interpreter can exit.
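
Besides map(), the Executor interface also provides submit(), which returns a Future for each call; the short sketch below (not from the article) shows results coming back as the pooled threads finish.

from concurrent.futures import ThreadPoolExecutor, as_completed

def square(n):
    return n * n

# at most max_workers threads run the submitted calls asynchronously
with ThreadPoolExecutor(max_workers=3) as pool:
    futures = [pool.submit(square, n) for n in range(6)]
    for future in as_completed(futures):
        print(future.result())
# leaving the with-block joins the worker threads before the program continues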

Appending to a file from threads

Appending to the same file from multiple threads is not thread-safe and will result in overwritten data and file corruption. In this tutorial, you will discover how to append to a file from multiple threads.
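
A pattern that sidesteps the problem entirely is to let a single thread own the file and feed it lines through a thread-safe Queue, much like the download queue used earlier. The sketch below illustrates that idea; log.txt and the message strings are placeholders, not values from the article.

from queue import Queue
from threading import Thread

line_queue = Queue()

def writer(filename="log.txt"):
    # the single writer thread is the only one that appends to the file
    while True:
        line = line_queue.get()
        with open(filename, "a") as f:
            f.write(line + "\n")
        line_queue.task_done()

writer_thread = Thread(target=writer, daemon=True)
writer_thread.start()

# any number of producer threads (or the main thread) can safely enqueue lines
for i in range(20):
    line_queue.put(f"message {i}")

# wait until every queued line has been appended
line_queue.join()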

Is ThreadPoolExecutor thread-safe?

ThreadPoolExecutor itself is thread-safe. However, when the submitted tasks access shared resources or critical sections, thread safety can still be a concern.
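
For instance, a counter shared by all tasks still needs its own lock even though the executor itself is thread-safe; here is a minimal sketch of such a critical section (not from the article).

from concurrent.futures import ThreadPoolExecutor
from threading import Lock

counter = 0
counter_lock = Lock()

def count_bytes(data):
    global counter
    # the critical section: updating shared state from many worker threads
    with counter_lock:
        counter += len(data)

chunks = [b"abc", b"de", b"fghij"] * 100

with ThreadPoolExecutor(max_workers=4) as pool:
    pool.map(count_bytes, chunks)

print(counter)  # 1000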
