import os
import re
from io import BytesIO

import requests
from bs4 import BeautifulSoup
from PIL import Image
|
|
def fetch_images(keyword, save_path):
    """Download Baidu image-search results for *keyword* into *save_path*.

    Fetches the Baidu "flip" search results page, extracts the original
    image URLs embedded in its inline JavaScript, and saves each image as
    ``{keyword}_{i}.jpg`` under *save_path*. Failures on individual images
    are reported and skipped (best-effort).

    Args:
        keyword: Search term (may contain non-ASCII text).
        save_path: Directory for downloaded images; created if missing.

    Raises:
        requests.RequestException: if the search results page itself
            cannot be fetched.
    """
    # exist_ok avoids the check-then-create race of os.path.exists().
    os.makedirs(save_path, exist_ok=True)

    url = f"https://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word={keyword}"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
    }

    response = requests.get(url, headers=headers, timeout=10)
    response.raise_for_status()

    # The flip results page carries image URLs in inline JavaScript as
    # "objURL":"..." pairs — they are NOT HTML tags, so the original
    # soup.find_all('ObjURL', class_='obj') always matched nothing.
    # Extract them with a regex instead.
    img_urls = re.findall(r'"objURL":"(.*?)"', response.text)

    for i, img_url in enumerate(img_urls):
        try:
            img_data = requests.get(img_url, timeout=10).content
            img = Image.open(BytesIO(img_data))
            # JPEG cannot store alpha or palette data; normalize so
            # img.save(...) does not raise on RGBA/P-mode sources.
            if img.mode != "RGB":
                img = img.convert("RGB")
            img.save(os.path.join(save_path, f"{keyword}_{i}.jpg"))
            print(f"Downloaded {keyword}_{i}.jpg")
        except Exception as e:
            # Best-effort: one bad URL/image must not abort the crawl.
            print(f"Failed to download image: {e}")
|
|
if __name__ == "__main__":
    # Script entry point: crawl Baidu image search for the hard-coded
    # term "人参" (ginseng) and store the results under ./images.
    search_keyword = "人参"
    output_dir = "images"
    fetch_images(search_keyword, output_dir)