Puppeteer Usage Examples in Detail
PhantomJS was once the king of headless browsers, used everywhere for testing and web scraping. With the arrival of Google Chrome Headless, the author of PhantomJS announced that it would no longer be updated. Headless Chrome is where web scraping is heading, while testing will probably stay with the WebDriver approach. Headless Chrome can be driven through WebDriver, or through its integrated API, Puppeteer (literally "the puppet master"). It is as powerful as its name suggests and can control Chrome or Chromium at will; its one drawback is that it only offers a Node API.
Puppeteer is a Node library that controls headless Chrome over the DevTools Protocol and requires Node 6.4 or later. I only started learning Node when I picked up this tool, and its async/await support struck me as extremely powerful; Puppeteer itself relies heavily on async code to get its work done.
Puppeteer can be installed with npm, Node's package manager:
npm i puppeteer
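Installing Puppeteer also downloads a bundled Chromium by default. If you would rather skip that download (for instance because you plan to point Puppeteer at an existing Chrome), an environment variable is commonly used; a minimal sketch, assuming a Puppeteer version that honours PUPPETEER_SKIP_CHROMIUM_DOWNLOAD:
PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=true npm i puppeteer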
As noted above, the install bundles Chromium unless you skip the download through npm configuration. As a crawler engineer I won't go into the testing-related usage; let's look instead at how to use it for scraping. As with WebDriver, the first step is to instantiate a browser, as in the following code:
const puppeteer = require('puppeteer');
(async () => {
const browser = await puppeteer.launch();
const page = await browser.newPage();
await page.goto('https://www.baidu.com');
await browser.close();
})();
When this code finishes running you may notice nothing at all: it launched a Chromium process in the background, opened the Baidu homepage, and then closed it. Of course we can also run Chromium in the foreground; that just needs some configuration, and the options are simply passed to launch(). Commonly used options are:
headless: whether to run in headless mode (no visible browser window); default true
ignoreHTTPSErrors: whether to ignore HTTPS certificate errors; default false
executablePath: path to the browser executable to launch; defaults to the Chromium bundled with Puppeteer
slowMo: slows Puppeteer operations down by the given number of milliseconds
args: extra arguments passed to the browser, e.g. "--no-sandbox" to disable the sandbox or "--proxy-server" to set a proxy; see the Chromium command-line switches documentation for the full list
Example usage:
const browser = await puppeteer.launch({headless: false, args: ["--no-sandbox"]}) // launch the browser
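For a fuller picture, several of the options above can be combined in one call; a minimal sketch (the executable path and proxy address below are placeholder values, not taken from this article):
const browser = await puppeteer.launch({
    headless: false,                          // show the browser window
    ignoreHTTPSErrors: true,                  // ignore certificate errors
    executablePath: '/path/to/chrome',        // placeholder: a locally installed Chrome
    slowMo: 250,                              // slow every operation down by 250 ms
    args: ['--no-sandbox', '--proxy-server=127.0.0.1:8080']  // placeholder proxy address
});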
Open a new page:
const page = await browser.newPage();
Set the viewport size:
await page.setViewport({
    width: 1920,
    height: 1080
});
Filter out unwanted requests:
await page.setRequestInterception(true);
page.on('request', interceptedRequest => {
    if (interceptedRequest.url().endsWith('.png') || interceptedRequest.url().endsWith('.jpg'))
        interceptedRequest.abort();
    else
        interceptedRequest.continue();
});
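Requests can also be filtered by resource type rather than by URL suffix; a small sketch, assuming a Puppeteer version where the intercepted request exposes resourceType():
page.on('request', interceptedRequest => {
    // block every image, regardless of its file extension
    if (interceptedRequest.resourceType() === 'image')
        interceptedRequest.abort();
    else
        interceptedRequest.continue();
});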
Set the browser's userAgent:
await page.setUserAgent("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299")
Set a cookie:
const data = {
name: "smidB2",
domain: ".csdn",
value: "201806051502283cf43902aa8991a248f9c605204f92530032f23ef22c16270"
}
await page.setCookie(data)
This is only a demonstration; real cookies come as a list, so you need to loop over them and add them one at a time:
for(let data of cookies){
await page.setCookie(data)
}
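Note that page.setCookie() is variadic, so the loop can also be replaced by spreading the whole array into a single call; a one-line sketch:
await page.setCookie(...cookies); // set every cookie in the array at once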
Navigate to a URL:
const url = "https://www.baidu.com"
await page.goto(url, { waitUntil: "networkidle2" });
Set a fixed wait on the page:
await page.waitFor(1000); // in milliseconds
Wait for an element matching a selector to finish loading:
await page.waitForSelector("input[class='usrname']")
Click an element:
await page.click("input[class='submit']")
Use page.evaluate() to scroll the page to the bottom; the idea is to inject JavaScript into the page:
let scrollEnable = true;
let scrollStep = 500; // scroll step per iteration
while (scrollEnable) {
    scrollEnable = await page.evaluate((scrollStep) => {
        let scrollTop = document.scrollingElement.scrollTop;
        document.scrollingElement.scrollTop = scrollTop + scrollStep;
        return document.body.clientHeight > scrollTop + 1080 ? true : false;
    }, scrollStep);
    await page.waitFor(600);
}
Get the page's HTML:
const frame = await page.mainFrame()
const bodyHandle = await frame.$('html');
const html = await frame.evaluate(body => body.innerHTML, bodyHandle);
await bodyHandle.dispose(); // dispose of the handle
console.log(html)
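If you only need the serialized HTML and not an element handle, page.content() is a simpler alternative (a one-line sketch, not from the original article):
const html = await page.content(); // full HTML of the page, including the doctype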
Those are roughly the operations a crawler needs. Below is the code for scraping the basic information and ratings of Douban's popular movies. I only half understood Node when I wrote this program, so if anything is wrong, feel free to leave a comment.
basePupp.js
const puppeteer = require("puppeteer")
class BasePuppeteer{
puppConfig(){
const config = {
headless: false
}
return config
}
async openBrower(setting){
const browser = await puppeteer.launch(setting)
return browser
}
async openPage(browser){
const page = await browser.newPage()
return page
}
async closeBrower(browser){
await browser.close()
}
async closePage(page){
await page.close()
}
}
const pupp = new BasePuppeteer()
module.exports = pupp
douban.js
const pupp = require("./basePupp.js")
const cheerio = require("cheerio")
const mongo = require("mongodb")
const assert = require("assert")
const MongoClient = mongo.MongoClient
const Urls = "mongodb://10.4.251.129:27017/douban"
MongoClient.connect(Urls, function(err, db){
    if (err) throw err;
    console.log('database created');
    var dbase = db.db("runoob");
    // the collection name was lost in the original; "site" is a placeholder
    dbase.createCollection("site", function(err, res){
        if (err) throw err;
        console.log("collection created!");
        db.close();
    });
});
async function getList(){
const brower = await pupp.openBrower()
const page = await pupp.openPage( brower)
const url = "movie.douban/explore#!type=movie&tag=%E7%83%AD%E9%97%A8&sort=recommend&page_limit=20&page_start=0" (url);
while(true){          // 循环点击, 直到获取不到该元素
try{
await page.waitFor(1000);
await page.waitForSelector('a[class=more]'); // wait for the element to load, 30000 ms timeout
await page.click("a[class=more]")
// break
}catch(err){
console.log(err)
console.log("stop click ")
break
}
}
await page.waitFor(1000);        // wait one second
const links = await page.evaluate(() => {    // collect the detail-page URL of each movie
let movies = [...document.querySelectorAll('.list a[class=item]')];
return movies.map((movie) =>{
return {
href: movie.href.trim(),
}
});
});
console.log(links.length)
for (var i = 0; i < links.length; i++) {
const a = links[i];
await page.waitFor(2000);
await getDetail(brower, a.href)
// break
}
await pupp.closePage(page)
await pupp.closeBrower(brower)
}
async function getDetail(brower, url){
const page = await pupp.openPage(brower)
await page.goto(url);
try{
await page.waitFor(1000);
await page.click(".more-actor", {delay: 20})
}catch(err){
console.log(err)
}
const frame = await page.mainFrame()
const bodyHandle = await frame.$('html');
const html = await frame.evaluate(body => body.innerHTML, bodyHandle);
await bodyHandle.dispose();      // dispose of the handle
const $ = cheerio.load(html)
const title = $("h1 span").text().trim()
const rating_num = $(".rating_num").text().trim()
const data = {}
data["title"] = title
data["rating_num"] = rating_num
let info = $("#info").text()
const keyword = ["director", "screenplay", "lead", "type", "website", "location", "language", "playdate", "playtime", "byname", "imdb"] if (info.indexOf("www.") > 0){
info = info.replace(/https:\/\/|http:\/\//g, "").replace(/\t/g," ").replace(/\r/g, " ").split(":")
for(var i = 1; i < info.length; i++){
data[keyword[i-1]] = info[i].split(/\n/g)[0].replace(/ \/ /g, ",").trim()
}
}else{
info = info.replace(/\t/g," ").replace(/\r/g, " ").split(":")
keyword.splice(4,1)
for(var i = 1; i < info.length-1; i++){
data[keyword[i-1]] = info[i].split(/\n/g)[0].replace(/ \/ /g, ",").trim()
}
data["website"] = ""
}
// console.log(data)
MongoClient.connect(Urls, function(err, db){
    assert.equal(null, err);          // use the assert module instead of the old if checks
    var dbo = db.db("douban");
    // the collection name was lost in the original; "movies" is a placeholder
    dbo.collection("movies").insertOne(data, function(err, result){
        assert.equal(null, err);
        console.log(result);
        db.close();
    });
});
await pupp.closePage(page)
}
getList()
The code above scrapes all of Douban's popular movies in the following steps:
1. Keep clicking "load more" until the element no longer exists and an exception is thrown.
2. Once the full list of popular movies has loaded, parse out each movie's detail-page URL and request them one by one.
3. Parse the required data out of each detail page.
4. Write the scraped data to storage, here MongoDB.
The data looks like this after insertion (the original screenshot is not reproduced here):
I later optimized the browser instantiation above into a singleton pattern:
config.js
module.exports = {
    browserOptions: {
        headless: false,
        // args: ['--no-sandbox', '--proxy-server=proxy:st:8995'],
        args: ['--no-sandbox'],
    }
};
brower.js
const puppeteer = require("puppeteer");
const config = require('./config');
const deasync = require('deasync');
const BROWSER_KEY = Symbol.for('browser');
const BROWSER_STATUS_KEY = Symbol.for('browser_status');
launch(config.browserOptions)
wait4Lunch();
/**
 * Launch the browser and store the instance globally
 * @param {*} options options passed to puppeteer.launch
 */
function launch(options = {}) {
if (!global[BROWSER_STATUS_KEY]) {
global[BROWSER_STATUS_KEY] = 'lunching';
puppeteer.launch(options)
.then((browser) => {
global[BROWSER_KEY] = browser;
global[BROWSER_STATUS_KEY] = 'lunched';
})
.catch((err) => {
global[BROWSER_STATUS_KEY] = 'error';
throw err;
});
}
}
function wait4Lunch(){
while (!global[BROWSER_KEY] && global[BROWSER_STATUS_KEY] == 'lunching') {
// wait for lunch
deasync.runLoopOnce();
}
}
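brower.js only stores the instance on global, so a consumer reads it back through the same Symbol key. This is a minimal sketch of how another module might use the shared browser; the consumer code is not part of the original article:
// consumer.js -- hypothetical usage of the browser singleton
require('./brower');                              // runs launch() and blocks until the browser is ready
const browser = global[Symbol.for('browser')];    // same key as BROWSER_KEY in brower.js

(async () => {
    const page = await browser.newPage();
    await page.goto('https://www.baidu.com');
    console.log(await page.title());
    await page.close();
})();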
That's all for this article. I hope it helps with your studies, and thanks for your continued support.