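# Downloads images from the Pixabay API (https://pixabay.com/api/) for a
# user-supplied search keyword. Expects a .env file (loaded via python-dotenv)
# that defines the Pixabay key as API_KEY, e.g.:
#   API_KEY=your_pixabay_api_key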
import requests
import os
import math
from dotenv import load_dotenv

load_dotenv()

# Read the API key from the environment and ask the user what to search for
search_query = input("Enter the type of image to search for: ")
total_images = int(input("Enter the number of images to download: "))

# Base request parameters
params = {
    'key': os.getenv('API_KEY'),
    'q': search_query,
    'image_type': 'photo',
    'pretty': 'true',
    'per_page': 200  # Pixabay returns at most 200 results per page
}

# Work out how many pages are needed to cover the requested number of images
images_per_page = params['per_page']
num_pages = math.ceil(total_images / images_per_page)

# Create the directory the images will be saved into
save_dir = f'./Pixabay_{search_query}_Images'
if not os.path.exists(save_dir):
    os.makedirs(save_dir)

# Download one page of results and return how many images are still needed
def download_images(page_number, remaining_images):
    params['page'] = page_number
    response = requests.get('https://pixabay.com/api/', params=params)
    data = response.json()

    if 'hits' in data:
        for i, image in enumerate(data['hits']):
            if remaining_images <= 0:
                break
            # 'webformatURL' points at a medium-resolution version of the image
            img_url = image['webformatURL']
            img_data = requests.get(img_url).content
            img_path = os.path.join(save_dir, f"{search_query}_page_{page_number}_img_{i + 1}.jpg")
            with open(img_path, 'wb') as file:
                file.write(img_data)
            remaining_images -= 1
            print(f"Downloaded: {img_path}")
    else:
        print(f"No matching images found on page {page_number}.")

    return remaining_images

# Fetch page after page until the requested number of images has been downloaded
remaining_images = total_images
for page in range(1, num_pages + 1):
    remaining_images = download_images(page, remaining_images)
    if remaining_images <= 0:
        break