Added a generic photo-page extract method to the media module, to allow pre-filtering sources and relieve the Dogfart scraper. Added 'transsexual' site tag to Trans Angels.

This commit is contained in:
2020-02-02 22:36:33 +01:00
parent 204a4d4bdd
commit 0ed1b2eff9
4 changed files with 51 additions and 26 deletions

View File

@@ -9,7 +9,11 @@ async function fetchScene(url, site) {
const res = await bhttp.get(`https://www.blowpass.com/en/video/${site.id}/${new URL(url).pathname.split('/').slice(-2).join('/')}`);
const release = await scrapeScene(res.body.toString(), url, site);
release.channel = release.$('.siteNameSpan').text().trim().toLowerCase();
release.channel = release.$('.siteNameSpan')
.text()
.trim()
.toLowerCase()
.replace('.com', '');
if (['onlyteenblowjobs.com', 'mommyblowsbest.com'].includes(release.channel)) release.url = url.replace(/video\/\w+\//, 'scene/');
else release.url = url.replace(/video\/\w+\//, 'video/');

View File

@@ -1,21 +1,11 @@
'use strict';
/* eslint-disable newline-per-chained-call */
const Promise = require('bluebird');
// const Promise = require('bluebird');
const bhttp = require('bhttp');
const { JSDOM } = require('jsdom');
const moment = require('moment');
/**
 * Fetches a single photo page and extracts the image source URL from it.
 * @param {string} url - URL of the photo page to scrape.
 * @returns {Promise<string>} Resolved `src` of the `.scenes-module img` element.
 */
async function getPhoto(url) {
  const response = await bhttp.get(url);
  const { window } = new JSDOM(response.body.toString());
  // The photo page embeds exactly one gallery image inside .scenes-module.
  return window.document.querySelector('.scenes-module img').src;
}
async function getPhotos(albumUrl) {
const res = await bhttp.get(albumUrl);
const html = res.body.toString();
@@ -24,12 +14,13 @@ async function getPhotos(albumUrl) {
const lastPhotoPage = Array.from(document.querySelectorAll('.preview-image-container a')).slice(-1)[0].href;
const lastPhotoIndex = parseInt(lastPhotoPage.match(/\d+.jpg/)[0], 10);
const photoUrls = await Promise.map(Array.from({ length: lastPhotoIndex }), async (value, index) => {
const pageUrl = `https://blacksonblondes.com${lastPhotoPage.replace(/\d+.jpg/, `${index.toString().padStart(3, '0')}.jpg`)}`;
const photoUrls = Array.from({ length: lastPhotoIndex }, (value, index) => {
const pageUrl = `https://blacksonblondes.com${lastPhotoPage.replace(/\d+.jpg/, `${(index + 1).toString().padStart(3, '0')}.jpg`)}`;
return getPhoto(pageUrl);
}, {
concurrency: 5,
return {
src: pageUrl,
extract: q => q('.scenes-module img', 'src'),
};
});
return photoUrls;
@@ -90,6 +81,9 @@ async function scrapeScene(html, url, site) {
.trim();
const channel = document.querySelector('.site-name').textContent.split('.')[0].toLowerCase();
const { origin, pathname } = new URL(url);
const entryId = `${channel}_${pathname.split('/').slice(-2)[0]}`;
const date = new Date(document.querySelector('meta[itemprop="uploadDate"]').content);
const duration = moment
.duration(`00:${document
@@ -103,13 +97,13 @@ async function scrapeScene(html, url, site) {
const { trailer } = trailerElement.dataset;
const lastPhotosUrl = Array.from(document.querySelectorAll('.pagination a')).slice(-1)[0].href;
const { origin, pathname } = new URL(url);
const photos = await getPhotos(`${origin}${pathname}${lastPhotosUrl}`, site, url);
const stars = Math.floor(Number(document.querySelector('span[itemprop="average"]').textContent) / 2);
const tags = Array.from(document.querySelectorAll('.scene-details .categories a')).map(({ textContent }) => textContent);
return {
entryId,
url: `${origin}${pathname}`,
title,
description,
@@ -131,11 +125,7 @@ async function scrapeScene(html, url, site) {
}
/**
 * Fetches a page of the latest-scenes listing from the Dogfart network tour
 * and delegates parsing to scrapeLatest.
 * @param {Object} site - Site descriptor forwarded to scrapeLatest.
 * @param {number} [page=1] - 1-based page number of the scene listing.
 * @returns {Promise<*>} The releases extracted by scrapeLatest from the listing HTML.
 */
async function fetchLatest(site, page = 1) {
  // Removed leftover console.time/console.log debug instrumentation: the
  // fixed 'dogfart' timer label would also collide when sites are scraped
  // concurrently.
  const res = await bhttp.get(`https://dogfartnetwork.com/tour/scenes/?p=${page}`);
  return scrapeLatest(res.body.toString(), site);
}