Skip to content
This repository was archived by the owner on Mar 31, 2026. It is now read-only.

Commit d09c408

Browse files
committed
feat(samples): make bucket_name required and clarify description in download_many snippet
1 parent e6c7b54 commit d09c408

1 file changed

Lines changed: 22 additions & 5 deletions

File tree

samples/snippets/storage_transfer_manager_download_many.py

Lines changed: 22 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,15 +12,22 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15+
# Example usage:
16+
# python samples/snippets/storage_transfer_manager_download_many.py \
17+
# --bucket_name <your-bucket-name> \
18+
# --blobs <blob_name_1> <blob_name_2> \
19+
# --destination_directory <destination_directory> \
20+
# --blob_name_prefix <prefix>
21+
1522
# [START storage_transfer_manager_download_many]
1623
def download_many_blobs_with_transfer_manager(
17-
bucket_name, blob_names, destination_directory="", workers=8
24+
bucket_name, blob_names, destination_directory="", blob_name_prefix="", workers=8
1825
):
1926
"""Download blobs in a list by name, concurrently in a process pool.
2027
2128
The filename of each blob once downloaded is derived from the blob name and
2229
the `destination_directory` parameter. For complete control of the filename
23-
of each blob, use transfer_manager.download_many() instead.
30+
of each blob, use transfer_manager.download_many() instead.
2431
2532
Directories will be created automatically as needed to accommodate blob
2633
names that include slashes.
@@ -56,7 +63,11 @@ def download_many_blobs_with_transfer_manager(
5663
bucket = storage_client.bucket(bucket_name)
5764

5865
results = transfer_manager.download_many_to_path(
59-
bucket, blob_names, destination_directory=destination_directory, max_workers=workers
66+
bucket,
67+
blob_names,
68+
destination_directory=destination_directory,
69+
blob_name_prefix=blob_name_prefix,
70+
max_workers=workers,
6071
)
6172

6273
for name, result in zip(blob_names, results):
@@ -68,7 +79,7 @@ def download_many_blobs_with_transfer_manager(
6879
elif isinstance(result, Warning):
6980
print("Skipped download for {} due to warning: {}".format(name, result))
7081
else:
71-
print("Downloaded {} to {}.".format(name, destination_directory + name))
82+
print("Downloaded {} inside {} directory.".format(name, destination_directory))
7283
# [END storage_transfer_manager_download_many]
7384

7485
if __name__ == "__main__":
@@ -77,7 +88,7 @@ def download_many_blobs_with_transfer_manager(
7788
parser = argparse.ArgumentParser(
7889
description="Download blobs in a list by name, concurrently in a process pool."
7990
)
80-
parser.add_argument("--bucket_name", help="The ID of your GCS bucket")
91+
parser.add_argument("--bucket_name", required=True, help="The name of your GCS bucket")
8192
parser.add_argument(
8293
"--blobs",
8394
nargs="+",
@@ -89,6 +100,11 @@ def download_many_blobs_with_transfer_manager(
89100
default="",
90101
help="The directory on your computer to which to download all of the files",
91102
)
103+
parser.add_argument(
104+
"--blob_name_prefix",
105+
default="",
106+
help="A string that will be prepended to each blob_name to determine the source blob name",
107+
)
92108
parser.add_argument(
93109
"--workers", type=int, default=8, help="The maximum number of processes to use"
94110
)
@@ -99,5 +115,6 @@ def download_many_blobs_with_transfer_manager(
99115
bucket_name=args.bucket_name,
100116
blob_names=args.blobs,
101117
destination_directory=args.destination_directory,
118+
blob_name_prefix=args.blob_name_prefix,
102119
workers=args.workers,
103120
)

0 commit comments

Comments
 (0)