Python爬取4k高清动漫壁纸
某天我想下载些动漫高清图片,于是找到了这个网站,站点为 https://wall.alphacoders.com/by_sub_category.php?id=239594&name=Fate%2FGrand+Order+Wallpapers 。于是用 Python 写了个爬虫,下载了 2000 张 FGO 的 4K 壁纸,贴上代码:
# encoding = 'utf-8'
import requests
import time
from lxml import etree
import os

# BUG FIX: removed `from soupsieve.util import string` -- soupsieve.util does
# not export `string` (it raised ImportError, as reported further down the
# thread), and input() already returns str, so no helper is needed.

# Request headers sent with every page fetch: a browser User-Agent plus a
# Referer from the same site, so the server treats the scraper like a browser.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36 ',
    'Referer': 'https://wall.alphacoders.com/by_sub_category.php?id=181807&name=%E5%88%80%E5%89%91%E7%A5%9E%E5%9F%9F+%E5%A3%81%E7%BA%B8&lang=Chinese '
}
def get_url(input_url, dir, page1, page2):
    """Fetch listing pages page1..page2 (inclusive) and download every wallpaper.

    Args:
        input_url: base listing URL (a '&page=N' query is appended per page).
        dir: sub-folder name under E:\图片 where images are saved.
        page1, page2: first and last page numbers, inclusive.
    """
    for page in range(page1, page2 + 1):
        url = input_url + '&page={}'.format(page)
        html = requests.get(url, headers=headers).text
        tree = etree.HTML(html)
        # Thumbnail URLs carry a 'thumb-350-' prefix; stripping it yields the
        # full-resolution image URL. (The original line was an incomplete
        # statement -- `src = # comment` -- which is a SyntaxError.)
        src = [link.replace('thumb-350-', '')
               for link in tree.xpath('//div[@class="thumb-container-big "]//a/img/@data-src')]
        print("开始下载第%s页的图片" % page)
        dirpath = 'E:\图片\%s' % dir
        # makedirs with exist_ok avoids a race between the exists-check and
        # mkdir, and also creates missing parent directories.
        os.makedirs(dirpath, exist_ok=True)
        download_full_page(src, dirpath)
def download_image(src, index, dirpath):
    """Download a single image to dirpath.

    Args:
        src: direct URL of the image; the last path segment is the file name.
        index: position within the page (unused; kept for interface compatibility).
        dirpath: destination directory (assumed to exist).
    """
    filename = os.path.basename(src.split('/')[-1])
    filepath = os.path.join(dirpath, filename)
    # BUG FIX: the original request sent no headers (easily blocked) and had
    # no timeout, so a stalled connection would hang the script forever.
    down_re = requests.get(url=src, headers=headers, timeout=60)
    with open(filepath, 'wb') as fp:
        fp.write(down_re.content)
def download_full_page(src, dirpath):
    """Download every image URL in src into dirpath, pausing 1s between files.

    Args:
        src: list of direct image URLs for one listing page.
        dirpath: destination directory.
    """
    for i, url in enumerate(src):
        # BUG FIX: the original passed the whole list `src` to download_image
        # instead of the i-th URL, which crashes on `src.split('/')`.
        download_image(url, i, dirpath)
        print('图片{}下载完成'.format(i + 1))
        time.sleep(1)  # be polite to the server
def main():
    """Interactive entry point: prompt for URL, folder and page range, then crawl.

    BUG FIX: the original wrapped input() in `string()` imported from
    soupsieve.util, which no longer exists (ImportError); input() already
    returns str, so no conversion is needed.
    """
    input_url = input("输入链接:")
    dir = input("输入保存的文件夹:")
    start_page = int(input("起始页数:"))
    end_page = int(input("终止页数:"))
    get_url(input_url, dir, start_page, end_page)
if __name__ == '__main__':
main()
下载结果如图:
简单写了一个Go版本,没做速度限制,也没做优化。
估计用这个爬,过两天,这网站都要关闭了,还是按需使用哈。 {:1_925:}
package main
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/PuerkitoBio/goquery"
"github.com/gocolly/colly"
)
// PathExists reports whether path exists on disk (shared helper).
// (false, nil) means the path is definitely absent; a non-nil error means
// Stat failed for some other reason and existence is unknown.
func PathExists(path string) (bool, error) {
	switch _, statErr := os.Stat(path); {
	case statErr == nil:
		return true, nil
	case os.IsNotExist(statErr):
		return false, nil
	default:
		return false, statErr
	}
}
// IsDir reports whether path exists and refers to a directory.
func IsDir(path string) bool {
	info, statErr := os.Stat(path)
	return statErr == nil && info.IsDir()
}
// IsFile reports whether path exists and is NOT a directory.
// BUG FIX: the original returned !IsDir(path), which wrongly reported true
// for paths that do not exist at all; now a missing path yields false.
func IsFile(path string) bool {
	info, statErr := os.Stat(path)
	return statErr == nil && !info.IsDir()
}
// Exists reports whether the file or directory at path exists.
func Exists(path string) bool {
	if _, statErr := os.Stat(path); statErr != nil {
		// os.IsExist covers the rare errors that still imply presence
		// (e.g. "already exists" surfaced by a failed create).
		return os.IsExist(statErr)
	}
	return true
}
// waitGroup counts one unit per spawned page-scrape or image-download
// goroutine; main blocks on it until every download has finished.
var waitGroup = new(sync.WaitGroup)
//下载图片(存在则跳过)
func download(name string, imgurl string, path string, imgtype string) {
isExi := Exists(path + "//" + name + "." + imgtype)
// fmt.Printf(strconv.FormatBool(isExi))
if isExi {
fmt.Printf("开始下载:文件已存在!\n")
waitGroup.Done()
return
}
fmt.Printf("开始下载:%s\n", imgurl)
res, err := http.Get(imgurl)
if err != nil || res.StatusCode != 200 {
fmt.Printf("下载失败:%s", res.Request.URL)
}
fmt.Printf("开始读取文件内容,url=%s\n", imgurl)
data, err2 := ioutil.ReadAll(res.Body)
if err2 != nil {
fmt.Printf("读取数据失败")
}
ioutil.WriteFile(fmt.Sprintf(path+"//%s."+imgtype, name), data, 0644)
//if failed, sudo chmod 777 pic2016/
//计数器-1
waitGroup.Done()
}
// OpenUrl parses urlstr and returns a colly collector pre-configured for it:
// a 100-second request timeout, a desktop Firefox User-Agent, and per-request
// headers (Host/Origin derived from the URL, Referer set to the URL itself).
func OpenUrl(urlstr string) *colly.Collector {
	parsed, parseErr := url.Parse(urlstr)
	if parseErr != nil {
		log.Fatal(parseErr)
	}

	collector := colly.NewCollector()
	collector.SetRequestTimeout(100 * time.Second)
	collector.UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:71.0) Gecko/20110101 Firefox/71.0"

	collector.OnRequest(func(req *colly.Request) {
		// Make the request look like it originates from the site itself.
		req.Headers.Set("Host", parsed.Host)
		req.Headers.Set("Connection", "keep-alive")
		req.Headers.Set("Accept", "*/*")
		req.Headers.Set("Origin", parsed.Host)
		req.Headers.Set("Referer", urlstr)
		req.Headers.Set("Accept-Encoding", "gzip, deflate")
		req.Headers.Set("Accept-Language", "zh-CN, zh;q=0.9")
	})
	return collector
}
// UrlDow visits one full-size image URL (band) with a fresh collector. On a
// successful response it adds one waitGroup unit and spawns download() to
// save the image as <_dir>/<urlpath>.png; OnScraped releases the unit the
// CALLER added before launching UrlDow, so callers must waitGroup.Add(1)
// first. prefix is currently unused. NOTE(review): err is passed by value,
// so assignments to it inside the callbacks are never seen by the caller --
// looks like a latent bug, deliberately left as-is here.
func UrlDow(band string, prefix string, urlpath string, s *goquery.Selection, i int, _dir string, err error) {
	c := OpenUrl(band)
	// c.OnRequest(func(r *colly.Request) {
	// // waitGroup.Done()
	// })
	// c.OnHTML("title", func(e *colly.HTMLElement) {
	// })
	c.OnResponse(func(resp *colly.Response) {
		fmt.Println("response received 1", resp.StatusCode)
		fmt.Printf(band + "\n")
		title := s.Text()
		fmt.Printf("链接 %d: %s - %s\n", i, title, band)
		// One unit per spawned download; download() calls waitGroup.Done().
		waitGroup.Add(1)
		go download(urlpath, band, _dir, "png")
	})
	c.OnError(func(resp *colly.Response, errHttp error) {
		err = errHttp
	})
	c.OnScraped(func(r *colly.Response) {
		// Balances the Add(1) the caller performed before `go UrlDow(...)`.
		waitGroup.Done()
	})
	err = c.Visit(band)
}
// PageUrlDow scrapes one listing page: it collects every <img data-src>
// thumbnail URL, rewrites it to the full-size URL by stripping the
// "thumb-350-" prefix, and spawns UrlDow for each image. The caller must
// waitGroup.Add(1) before invoking; OnScraped releases that unit.
// sec is accepted for interface compatibility but unused.
func PageUrlDow(urlstr string, _dir string, err error, sec int) {
	// urlstr := "https://wallhaven.cc/toplist?page=1"
	c := OpenUrl(urlstr)
	c.OnHTML("title", func(e *colly.HTMLElement) {
		fmt.Println("title:", e.Text)
	})
	c.OnScraped(func(r *colly.Response) {
		waitGroup.Done()
	})
	// Collect the image list from the page body.
	c.OnResponse(func(resp *colly.Response) {
		fmt.Println("response received", resp.StatusCode)
		// goquery parses resp.Body directly.
		htmlDoc, docErr := goquery.NewDocumentFromReader(bytes.NewReader(resp.Body))
		if docErr != nil {
			log.Fatal(docErr)
		}
		htmlDoc.Find("div img").Each(func(i int, s *goquery.Selection) {
			temp, ok := s.Attr("data-src")
			if !ok {
				return
			}
			imgurl := strings.Replace(temp, "thumb-350-", "", -1)
			fmt.Printf(imgurl + "\n")
			// BUG FIX: the original called strings.Split on a []string and
			// printed/concatenated slices as strings -- compile errors.
			// Derive the bare file name and extension from the URL instead.
			segments := strings.Split(imgurl, "/")
			fileName := segments[len(segments)-1]
			baseName := strings.Split(fileName, ".")[0]
			ext := ""
			if dot := strings.LastIndex(fileName, "."); dot >= 0 {
				ext = fileName[dot+1:]
			}
			fmt.Println("\n文件名:" + baseName)
			waitGroup.Add(1)
			go UrlDow(imgurl, ext, baseName, s, i, _dir, err)
		})
	})
	c.OnError(func(resp *colly.Response, errHttp error) {
		err = errHttp
	})
	err = c.Visit(urlstr)
}
// main is the entry point: it ensures the output directory exists, reads the
// page selection ("n" for pages 1..n, or "n,m" for pages n..m) and the
// listing URL from stdin, then launches one PageUrlDow goroutine per page and
// waits for all downloads to finish.
func main() {
	// Create the output folder if needed.
	_dir := "./Wnacg"
	exist, err := PathExists(_dir)
	if err != nil {
		fmt.Printf("get dir error![%v]\n", err)
		return
	}
	if exist {
		fmt.Printf("has dir![%v]\n", _dir)
	} else {
		fmt.Printf("no dir![%v]\n", _dir)
		if mkErr := os.Mkdir(_dir, os.ModePerm); mkErr != nil {
			fmt.Printf("mkdir failed![%v]\n", mkErr)
		} else {
			fmt.Printf("mkdir success!\n")
		}
	}
	now := time.Now()
	fmt.Printf("多页模式:下载0-n页\n")
	fmt.Printf("中断模式:下载n-m页\n")
	isFirst := "10"
	fmt.Printf("请输入n或n,m选择-多页模式或者中断模式(默认为多页模式n=10): ")
	fmt.Scanln(&isFirst)
	urlstr := "https://wall.alphacoders.com/by_sub_category.php?id=239594&name=Fate%2FGrand+Order+Wallpapers"
	fmt.Printf("请输入页面地址: ")
	fmt.Scanln(&urlstr)

	// BUG FIX: the original called strconv.Atoi on the whole []string (and
	// shadowed the predeclared identifier `int`) -- compile errors. Convert
	// the individual elements instead.
	num := strings.Split(isFirst, ",")
	startPage, endPage := 1, 0
	if len(num) == 1 {
		pages, convErr := strconv.Atoi(num[0])
		if convErr != nil {
			fmt.Printf("get dir error![%v]\n", convErr)
			return
		}
		endPage = pages
	} else {
		first, convErr := strconv.Atoi(num[0])
		if convErr != nil {
			fmt.Printf("get dir error![%v]\n", convErr)
			return
		}
		second, convErr2 := strconv.Atoi(num[1])
		if convErr2 != nil {
			fmt.Printf("get dir error![%v]\n", convErr2)
			return
		}
		startPage, endPage = first, second
	}
	for i := startPage; i <= endPage; i++ {
		waitGroup.Add(1)
		if i == 1 {
			// Page 1 is the bare URL; later pages append &page=N.
			go PageUrlDow(urlstr, _dir, err, 2)
		} else {
			go PageUrlDow(urlstr+"&page="+strconv.Itoa(i), _dir, err, 2)
		}
	}
	// Wait for every scrape/download goroutine to finish.
	waitGroup.Wait()
	fmt.Printf("下载总时间:%v\n", time.Now().Sub(now))
	time.Sleep(60 * time.Second)
}
这回不愁没壁纸了,感谢楼主分享,隔天随便换 白晓生 发表于 2020-5-30 19:33
这回不愁没壁纸了,感谢楼主分享,隔天随便换
{:301_986:}这么多小姐姐,每天换一个,你身体吃得消吗? 壁纸挺不错 谢谢分享 一下子搞忘py的包管理器了 哈哈{:1_918:} 楼主运行后为什么会报出这个问题:
File "C:\Users\Administrator.Honkai-2020BGUM\Desktop\Minecraft\download py\4k-picture.py", line 8, in <module>
from soupsieve.util import string
ImportError: cannot import name 'string' from 'soupsieve.util' (C:\Users\Administrator.Honkai-2020BGUM\AppData\Local\Programs\Python\Python38\lib\site-packages\soupsieve\util.py)
能帮忙分析一下吗? mol7732 发表于 2020-8-8 21:50
楼主运行后为什么会报出这个问题:
File "C:\Users\Administrator.Honkai-2020BGUM\Desktop\Minecraft\dow ...
from soupsieve.util import string
这个包有问题,不知道是楼主写错了,还是官方更新了'soupsieve',找不到'string' 星辰物语 发表于 2020-11-17 22:02
from soupsieve.util import string
这个包有问题,不知道是楼主写错了,还是官方更新了'soupsieve',找不 ...
我也是这样的,估计版本有点对不上 感谢分享
页:
[1]
2