Updated dependencies. Added periodic memory logger.

DebaucheryLibrarian
2021-11-20 23:59:15 +01:00
parent a867817dc1
commit 26539b74a5
109 changed files with 10238 additions and 10833 deletions
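The periodic memory logger mentioned in the commit message does not appear in the hunks excerpted below, which cover only the lint-driven changes. As a rough, hypothetical sketch of what a periodic memory logger typically looks like in Node.js; the interval, the log format, and the use of console.log instead of the project's own logger are all assumptions, not the code added by this commit:

// hypothetical sketch, not the implementation added by this commit
function logMemoryUsage(intervalMs = 60000) {
  const interval = setInterval(() => {
    const { rss, heapUsed, heapTotal } = process.memoryUsage(); // built-in Node.js API, values in bytes

    console.log(`memory: rss ${(rss / 1048576).toFixed(1)} MB, heap ${(heapUsed / 1048576).toFixed(1)}/${(heapTotal / 1048576).toFixed(1)} MB`);
  }, intervalMs);

  interval.unref(); // assumed: don't let the logger alone keep the process alive
  return interval;
}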

View File

@@ -25,7 +25,7 @@ function scrapeAllTour(scenes, channel) {
release.title = query.q('.scene-img-wrapper img', 'alt').replace(/\s*image$/i, '');
release.date = query.date('.scene-update-stats span, .feature-update-details span', 'MMM DD, YYYY');
release.actors = query.cnt('.scene-update-details h3, .feature-update-details h2')?.split(/\s*\|\s*/).map(actor => actor.trim());
release.actors = query.cnt('.scene-update-details h3, .feature-update-details h2')?.split(/\s*\|\s*/).map((actor) => actor.trim());
const poster = query.img('.scene-img-wrapper img');
release.poster = [
@@ -124,7 +124,7 @@ async function scrapeRelease({ query, html }, url, channel, baseRelease, options
};
}
release.photos = query.imgs('#dv_frames a > img').map(photo => [
release.photos = query.imgs('#dv_frames a > img').map((photo) => [
photo.replace(/(\/p\/\d+\/)\d+/, (match, path) => `${path}1920`),
photo.replace(/(\/p\/\d+\/)\d+/, (match, path) => `${path}1600`),
photo,
@@ -301,7 +301,7 @@ async function fetchProfile(baseActor, channel, include) {
const searchRes = await http.get(`${channel.url}/search/SearchAutoComplete_Agg_ByMedia?rows=9&name_startsWith=${slugify(baseActor.name, '+')}`);
if (searchRes.ok) {
const actorResult = searchRes.body.Results.find(result => /performer/i.test(result.BasicResponseGroup?.displaytype) && new RegExp(baseActor.name, 'i').test(result.BasicResponseGroup?.description));
const actorResult = searchRes.body.Results.find((result) => /performer/i.test(result.BasicResponseGroup?.displaytype) && new RegExp(baseActor.name, 'i').test(result.BasicResponseGroup?.description));
if (actorResult) {
return fetchProfilePage(`${channel.url}${actorResult.BasicResponseGroup.id}`, channel, include);
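Nearly every hunk in this commit, like the ones above, makes the same mechanical change: single-parameter arrow functions gain parentheses, e.g. actor => actor.trim() becomes (actor) => actor.trim(). This matches ESLint's arrow-parens rule set to "always", presumably enforced by the updated lint dependencies; the actual config is not part of this excerpt, but a minimal sketch of the assumed rule would be:

// .eslintrc.js (assumed shape, not shown in this commit excerpt)
module.exports = {
  rules: {
    'arrow-parens': ['error', 'always'],
  },
};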

View File

@@ -22,13 +22,13 @@ async function networkFetchScene(url, site, release) {
async function fetchLatest(site, page = 1) {
const releases = await fetchApiLatest(site, page, false);
return releases.map(release => curateRelease(release, site));
return releases.map((release) => curateRelease(release, site));
}
async function fetchUpcoming(site, page = 1) {
const releases = await fetchApiUpcoming(site, page, false);
return releases.map(release => curateRelease(release, site));
return releases.map((release) => curateRelease(release, site));
}
module.exports = {

View File

@@ -34,7 +34,7 @@ function extractActors(scene) {
async function fetchLatestWrap(site, page = 1, include, preData) {
const latest = await fetchLatest(site, page, include, preData);
return latest.map(scene => extractActors(scene));
return latest.map((scene) => extractActors(scene));
}
async function fetchSceneWrap(url, channel, baseRelease, include) {

View File

@@ -13,7 +13,7 @@ function scrapeScene({ query }, channel) {
release.description = query.cnt('.latest_update_description');
release.date = query.date('.update_date', 'MM/DD/YYYY');
release.actors = query.all('.tour_update_models a').map(actorEl => ({
release.actors = query.all('.tour_update_models a').map((actorEl) => ({
name: query.cnt(actorEl),
url: query.url(actorEl, null),
}));
@@ -30,7 +30,7 @@ function scrapeScene({ query }, channel) {
poster,
];
release.photos = query.imgs('.small_update_thumb', 'src', { origin: channel.url }).map(img => [
release.photos = query.imgs('.small_update_thumb', 'src', { origin: channel.url }).map((img) => [
img.replace(/.jpg$/, '-full.jpg'),
img,
]);

View File

@@ -6,8 +6,8 @@ function extractActors(actorString) {
return actorString
?.replace(/.*:|\(.*\)|\d+(-|\s)year(-|\s)old|nurses?|tangled/ig, '') // remove Patient:, (date) and other nonsense
.split(/\band\b|\bvs\b|\/|,|&/ig)
.map(actor => actor.trim())
.filter(actor => !!actor && !/\banal\b|\bschool\b|\bgamer\b|\breturn\b|\bfor\b|\bare\b|\bpart\b|realdoll|bimbo|p\d+/ig.test(actor))
.map((actor) => actor.trim())
.filter((actor) => !!actor && !/\banal\b|\bschool\b|\bgamer\b|\breturn\b|\bfor\b|\bare\b|\bpart\b|realdoll|bimbo|p\d+/ig.test(actor))
|| [];
}
@@ -16,7 +16,7 @@ function matchActors(actorString, models) {
return [];
}
return models.filter(model => new RegExp(model.name, 'i').test(actorString));
return models.filter((model) => new RegExp(model.name, 'i').test(actorString));
}
function scrapeLatest(scenes, site, models) {
@@ -61,7 +61,7 @@ function scrapeScene({ html, qu }, url, site, include, models) {
release.tags = qu.all('.tags a', true);
release.photos = qu.imgs('.stills img').map(photoPath => `${site.url}/${photoPath}`);
release.photos = qu.imgs('.stills img').map((photoPath) => `${site.url}/${photoPath}`);
const posterIndex = 'splash:';
const poster = html.slice(html.indexOf('faceimages/', posterIndex), html.indexOf('.jpg', posterIndex) + 4);
@@ -101,7 +101,7 @@ async function fetchModels(site, page = 1, accModels = []) {
if (res.ok) {
const models = extractModels(res.item, site);
const nextPage = res.item.qa('.pagenumbers', true)
.map(pageX => Number(pageX))
.map((pageX) => Number(pageX))
.filter(Boolean) // remove << and >>
.includes(page + 1);

View File

@@ -46,7 +46,7 @@ function scrapeScene({ html, qu }, url) {
release.date = extractDate(html, 'MM/DD/YYYY', /\b\d{2}\/\d{2}\/\d{4}\b/);
release.actors = qu.all('h5:not(.video_categories) a').map(actor => ({
release.actors = qu.all('h5:not(.video_categories) a').map((actor) => ({
name: qu.q(actor, null, true),
url: qu.url(actor, null),
}));
@@ -58,7 +58,7 @@ function scrapeScene({ html, qu }, url) {
const poster = qu.img('a img');
release.poster = getFallbacks(poster);
release.photos = qu.imgs('.featured-video img', 'src0_1x').map(source => getFallbacks(source));
release.photos = qu.imgs('.featured-video img', 'src0_1x').map((source) => getFallbacks(source));
return release;
}

View File

@@ -30,7 +30,7 @@ function scrapeAll(scenes, channel) {
release.date = query.date('.video-card-upload-date', 'YYYY-MM-DD HH:mm:ss', null, 'content') || query.date('.video-card-upload-date', 'MMMM DD, YYYY');
release.duration = query.duration('.video-card-duration', null, 'content') || query.number('.video-card-duration') * 60;
release.actors = query.all('.video-card-details--cast a').map(el => ({
release.actors = query.all('.video-card-details--cast a').map((el) => ({
name: qu.query.cnt(el),
url: qu.query.url(el, null, 'href', { origin: channel.url }),
}));
@@ -57,7 +57,7 @@ function scrapeScene({ query }, url, channel) {
release.date = query.date('.video-upload-date', 'YYYY-MM-DD HH:mm:ss', null, 'content') || query.date('.video-upload-date', 'MMMM DD, YYYY', /\w+ \d{1,2}, \d{4}/);
release.duration = query.duration('.video-duration', null, 'content') || query.number('.video-duration') * 60;
release.actors = query.all('.video-actors a').map(el => ({
release.actors = query.all('.video-actors a').map((el) => ({
name: qu.query.cnt(el),
url: qu.query.url(el, null, 'href', { origin: channel.url }),
}));

View File

@@ -22,13 +22,13 @@ function scrapeAll(scenes, site) {
if (/bts/i.test(release.title)) release.tags = ['behind the scenes'];
[release.poster, ...release.photos] = qu.all('.item-thumbs img')
.map(source => [
.map((source) => [
source.getAttribute('src0_3x'),
source.getAttribute('src0_2x'),
source.getAttribute('src0_1x'),
]
.filter(Boolean)
.map(fallback => (/^http/.test(fallback) ? fallback : `${site.url}${fallback}`)));
.map((fallback) => (/^http/.test(fallback) ? fallback : `${site.url}${fallback}`)));
release.entryId = `${formatDate(release.date, 'YYYY-MM-DD')}-${slugify(release.title)}`;
@@ -116,7 +116,7 @@ async function scrapeProfile({ qu }, site, withScenes) {
qu.q('.profile-pic img', 'src0_3x'),
qu.q('.profile-pic img', 'src0_2x'),
qu.q('.profile-pic img', 'src0_1x'),
].filter(Boolean).map(source => (/^http/.test(source) ? source : `${site.url}${source}`));
].filter(Boolean).map((source) => (/^http/.test(source) ? source : `${site.url}${source}`));
if (withScenes) {
const actorId = qu.q('.profile-pic img', 'id')?.match(/set-target-(\d+)/)?.[1];

View File

@@ -48,7 +48,7 @@ async function fetchPhotos(scene) {
});
if (res.ok && res.body.images) {
return res.body.images.map(image => qu.prefixUrl(image, 'https://photos.bang.com'));
return res.body.images.map((image) => qu.prefixUrl(image, 'https://photos.bang.com'));
}
return null;
@@ -59,7 +59,7 @@ async function scrapeScene(scene, entity, options) {
entryId: scene.id,
title: scene.name,
description: scene.description,
tags: scene.genres.concat(scene.actions).map(genre => genre.name),
tags: scene.genres.concat(scene.actions).map((genre) => genre.name),
duration: scene.duration,
};
@@ -69,19 +69,19 @@ async function scrapeScene(scene, entity, options) {
const date = new Date(scene.releaseDate);
release.date = new Date(Date.UTC(date.getUTCFullYear(), date.getUTCMonth(), date.getUTCDate()));
release.actors = scene.actors.map(actor => ({ name: actor.name, gender: genderMap[actor.gender] }));
release.actors = scene.actors.map((actor) => ({ name: actor.name, gender: genderMap[actor.gender] }));
if (scene.is4k) release.tags.push('4k');
if (scene.gay) release.tags.push('gay');
const defaultPoster = scene.screenshots.find(photo => photo.default === true);
const screens = scene.screenshots.filter(photo => photo.default === false);
const defaultPoster = scene.screenshots.find((photo) => photo.default === true);
const screens = scene.screenshots.filter((photo) => photo.default === false);
const remainingScreens = defaultPoster ? screens : screens.slice(1);
const poster = defaultPoster || screens[0];
release.poster = getScreenUrl(poster, scene);
release.photos = remainingScreens.map(photo => getScreenUrl(photo, scene));
release.photos = remainingScreens.map((photo) => getScreenUrl(photo, scene));
if (options?.includePhotos) {
const photos = await fetchPhotos(scene);
@@ -399,7 +399,7 @@ async function fetchProfile({ name: actorName }, context, include) {
});
if (res.ok) {
const actor = res.body.hits.hits.find(hit => hit._source.name.toLowerCase() === actorName.toLowerCase());
const actor = res.body.hits.hits.find((hit) => hit._source.name.toLowerCase() === actorName.toLowerCase());
if (actor) {
return scrapeProfile(actor._source, context.entity, include);

View File

@@ -8,10 +8,9 @@ function scrapeProfile(html) {
const profile = {};
const bio = qu.all('.infobox tr[valign="top"]')
.map(detail => qu.all(detail, 'td', true))
.map((detail) => qu.all(detail, 'td', true))
.reduce((acc, [key, value]) => ({ ...acc, [key.slice(0, -1).replace(/[\s+|/]/g, '_')]: value }), {});
/* unreliable, see: Syren De Mer
const catlinks = qa('#mw-normal-catlinks a', true);
const isTrans = catlinks.some(link => link.match(/shemale|transgender/i));

View File

@@ -26,7 +26,7 @@ function scrapeAll(scenes) {
release.entryId = new URL(release.url).pathname.match(/\/videos\/([\w-]+)/)[1];
release.title = query.cnt('.title') || query.q('img', 'title');
release.actors = subtitle.slice(subtitle.indexOf(':') + 1).split(',').map(actor => actor.trim()).filter(Boolean);
release.actors = subtitle.slice(subtitle.indexOf(':') + 1).split(',').map((actor) => actor.trim()).filter(Boolean);
release.poster = query.img('.thumb img');
@@ -48,13 +48,13 @@ function scrapeScene({ query, html }, url, channel) {
const dataString = query.html('.yoast-schema-graph');
const data = dataString && JSON.parse(dataString)['@graph'];
const pageData = data.find(item => item['@type'] === 'WebPage');
const imageData = data.find(item => item['@type'] === 'ImageObject');
const pageData = data.find((item) => item['@type'] === 'WebPage');
const imageData = data.find((item) => item['@type'] === 'ImageObject');
release.entryId = new URL(url).pathname.match(/\/videos\/([\w-]+)/)[1];
release.title = query.cnt('.video .title h1')
|| data.find(item => item['@type'] === 'BreadcrumbList')?.itemListElement.slice(-1)[0].item.name
|| data.find((item) => item['@type'] === 'BreadcrumbList')?.itemListElement.slice(-1)[0].item.name
|| pageData?.name.slice(0, pageData.name.lastIndexOf('-')).trim();
release.description = query.cnt('.video .descript');

View File

@@ -66,7 +66,7 @@ function scrapeProfile({ query }) {
const profile = {};
const keys = query.all('.model-descr_line:not(.model-descr_rait) p.text span', true);
const values = query.all('.model-descr_line:not(.model-descr_rait) p.text').map(el => query.text(el));
const values = query.all('.model-descr_line:not(.model-descr_rait) p.text').map((el) => query.text(el));
const bio = keys.reduce((acc, key, index) => ({ ...acc, [slugify(key, '_')]: values[index] }), {});
if (bio.height) profile.height = Number(bio.height.match(/\((\d+)\s*cm\)/)?.[1]);
@@ -100,7 +100,7 @@ function scrapeProfile({ query }) {
profile.piercings = bio.piercings;
}
if (bio.aliases) profile.aliases = bio.aliases.split(',').map(alias => alias.trim());
if (bio.aliases) profile.aliases = bio.aliases.split(',').map((alias) => alias.trim());
const avatar = query.q('.model-img img');
profile.avatar = avatar.getAttribute('src0_3x') || avatar.getAttribute('src0_2x') || avatar.dataset.src;

View File

@@ -43,7 +43,7 @@ function scrapeScene({ query }, channel, html) {
release.date = date;
release.datePrecision = precision;
release.actors = query.all('.sub-video .pornstar-link').map(el => ({
release.actors = query.all('.sub-video .pornstar-link').map((el) => ({
name: query.cnt(el, null),
url: query.url(el, null, 'href', { origin: 'https://www.cumlouder.com' }),
}));

View File

@@ -52,10 +52,10 @@ async function scrapeScene({ query }, url, channel) {
release.poster = query.poster() || query.poster('dl8-video') || query.img('#videoBlock img');
release.photos = query.urls('.photo-slider-guest .card a');
release.trailer = query.all('source[type="video/mp4"]').map(trailer => ({
release.trailer = query.all('source[type="video/mp4"]').map((trailer) => ({
src: trailer.src,
quality: Number(trailer.attributes.res?.value || trailer.attributes.quality?.value.slice(0, -1)) || null,
vr: channel.tags?.some(tag => tag.slug === 'vr'),
vr: channel.tags?.some((tag) => tag.slug === 'vr'),
}));
return release;
@@ -63,7 +63,7 @@ async function scrapeScene({ query }, url, channel) {
async function fetchActorReleases(urls) {
// DDF Network and DDF Network Stream list all scenes, exclude
const sources = urls.filter(url => !/ddfnetwork/.test(url));
const sources = urls.filter((url) => !/ddfnetwork/.test(url));
const releases = await Promise.all(sources.map(async (url) => {
const res = await qu.getAll(url, '.card.m-1:not(.pornstar-card)');
@@ -79,10 +79,10 @@ async function fetchActorReleases(urls) {
}
async function scrapeProfile({ query }, _url, actorName) {
const keys = query.all('.about-title', true).map(key => slugify(key, '_'));
const keys = query.all('.about-title', true).map((key) => slugify(key, '_'));
const values = query.all('.about-info').map((el) => {
if (el.children.length > 0) {
return Array.from(el.children, child => child.textContent.trim()).join(', ');
return Array.from(el.children, (child) => child.textContent.trim()).join(', ');
}
return el.textContent.trim();

View File

@@ -47,7 +47,7 @@ function scrapeLatest(html, site, filter = true) {
const entryId = `${site.slug}_${pathname.split('/')[4]}`;
const title = element.querySelector('.scene-title').textContent;
const actors = title.split(/[,&]|\band\b/).map(actor => actor.replace(/BTS/i, '').trim());
const actors = title.split(/[,&]|\band\b/).map((actor) => actor.replace(/BTS/i, '').trim());
const poster = `https:${element.querySelector('img').src}`;
const teaser = sceneLinkElement.dataset.preview_clip_url;

View File

@@ -12,7 +12,7 @@ function scrapeAll(scenes, channel) {
release.title = query.cnt('.title');
release.actors = query.all('.actors a').map(actorEl => ({
release.actors = query.all('.actors a').map((actorEl) => ({
name: query.cnt(actorEl),
url: query.url(actorEl, null, 'href', { origin: channel.url }),
}));
@@ -40,7 +40,7 @@ function scrapeScene({ query }, url, channel) {
release.date = query.date('.publish_date', 'MMMM DD, YYYY');
release.duration = query.dur('.duration');
release.actors = query.all('.actress a').map(actorEl => ({
release.actors = query.all('.actress a').map((actorEl) => ({
name: query.cnt(actorEl),
url: query.url(actorEl, null, 'href', { origin: channel.url }),
}));
@@ -91,7 +91,7 @@ function scrapeMovie({ query, el }, url, channel) {
release.duration = query.dur('.duration');
release.actors = query.all('.actors .actor').map(actorEl => ({
release.actors = query.all('.actors .actor').map((actorEl) => ({
name: query.cnt(actorEl, '.name'),
url: query.url(actorEl, 'a', 'href', { origin: channel.url }),
avatar: query.sourceSet(actorEl, '.thumbnail img', 'data-srcset'),
@@ -99,7 +99,7 @@ function scrapeMovie({ query, el }, url, channel) {
release.poster = query.sourceSet('.banner', 'data-src')?.[0];
release.covers = [query.all(query.el('.cover').parentElement, 'source')
?.map(coverEl => query.sourceSet(coverEl, null, 'data-srcset'))
?.map((coverEl) => query.sourceSet(coverEl, null, 'data-srcset'))
.flat()
.sort((coverA, coverB) => {
const resA = Number(coverA.match(/_(\d{3,})_/)?.[1]);

View File

@@ -53,7 +53,7 @@ function getImageWithFallbacks(q, selector, site, el) {
q(selector, 'src0_1x'),
];
return sources.filter(Boolean).map(src => `${site.parameters?.media || site.url}${src}`);
return sources.filter(Boolean).map((src) => `${site.parameters?.media || site.url}${src}`);
}
function scrapeAllClassic(scenes, channel) {
@@ -107,7 +107,7 @@ function scrapeAllTubular(scenes, channel, accNetworkReleases) {
// release.entryId = q('.img-div img', 'id')?.match(/set-target-(\d+)/)[1];
release.entryId = deriveEntryId(release);
if (channel.parameters?.accFilter && accNetworkReleases?.map(accRelease => accRelease.entryId).includes(release.entryId)) {
if (channel.parameters?.accFilter && accNetworkReleases?.map((accRelease) => accRelease.entryId).includes(release.entryId)) {
// filter out releases that were already scraped from a categorized site, requires sequential site scraping
return null;
}
@@ -143,7 +143,7 @@ function scrapeSceneTubular({ query, html }, entity, url, baseRelease) {
release.date = query.date('.update-info-row', 'MMM D, YYYY', /\w+ \d{1,2}, \d{4}/);
release.duration = query.dur('.update-info-row:nth-child(2)');
release.actors = query.all('.models-list-thumbs a').map(el => ({
release.actors = query.all('.models-list-thumbs a').map((el) => ({
name: query.cnt(el, 'span'),
avatar: getImageWithFallbacks(query.q, 'img', entity, el),
url: query.url(el, null),
@@ -164,8 +164,8 @@ function scrapeSceneTubular({ query, html }, entity, url, baseRelease) {
if (stars) release.stars = Number(stars);
if (entity.type === 'network') {
const channelRegExp = new RegExp(entity.children.map(channel => channel.parameters?.match || channel.name).join('|'), 'i');
const channel = release.tags.find(tag => channelRegExp.test(tag));
const channelRegExp = new RegExp(entity.children.map((channel) => channel.parameters?.match || channel.name).join('|'), 'i');
const channel = release.tags.find((tag) => channelRegExp.test(tag));
if (channel) {
release.channel = slugify(channel, '');
@@ -199,8 +199,8 @@ async function scrapeProfile({ query }, entity, parameters) {
avatarEl.getAttribute('src0'),
avatarEl.getAttribute('src'),
]
.filter(avatar => avatar && !/p\d+.jpe?g/.test(avatar)) // remove non-existing attributes and placeholder images
.map(avatar => qu.prefixUrl(avatar, entity.url));
.filter((avatar) => avatar && !/p\d+.jpe?g/.test(avatar)) // remove non-existing attributes and placeholder images
.map((avatar) => qu.prefixUrl(avatar, entity.url));
if (avatarSources.length) profile.avatar = avatarSources;
}

View File

@@ -18,7 +18,7 @@ function extractLowArtActors(release) {
const actors = release.title
.replace(/solo/i, '')
.split(/,|\band\b/ig)
.map(actor => actor.trim());
.map((actor) => actor.trim());
return {
...release,
@@ -32,7 +32,7 @@ async function networkFetchLatest(site, page = 1) {
const releases = await fetchLatest(site, page);
if (site.slug === 'lowartfilms') {
return releases.map(release => extractLowArtActors(release));
return releases.map((release) => extractLowArtActors(release));
}
return releases;
@@ -76,7 +76,7 @@ async function fetchClassicProfile(actorName, { site }) {
if (!pornstarsRes.ok) return null;
const actorPath = pornstarsRes.item.qa('option[value*="/pornstar"]')
.find(el => slugify(el.textContent) === actorSlug)
.find((el) => slugify(el.textContent) === actorSlug)
?.value;
if (actorPath) {

View File

@@ -14,7 +14,7 @@ function scrapeAllA(scenes, channel) {
release.date = query.date('.thumb-added, .date', ['MMM D, YYYY', 'MMMM DD, YYYY'], /\w+ \d{1,2}, \d{4}/);
release.duration = query.dur('.thumb-duration');
release.actors = query.all('.thumb-models a, .models a').map(actorEl => ({
release.actors = query.all('.thumb-models a, .models a').map((actorEl) => ({
name: query.cnt(actorEl),
url: query.url(actorEl, null, 'href', { origin: channel.url }),
}));
@@ -70,7 +70,7 @@ function scrapeSceneA({ query }, url, channel) {
release.duration = query.dur('.media-body li span, .duration');
release.actors = query.all('.media-body a[href*="models/"], .models a').map(actorEl => ({
release.actors = query.all('.media-body a[href*="models/"], .models a').map((actorEl) => ({
name: query.cnt(actorEl),
url: query.url(actorEl, null, 'href', { origin: channel.url }),
}));

View File

@@ -9,7 +9,7 @@ function scrapeProfile(html, actorName) {
const { document } = new JSDOM(html).window;
const profile = { name: actorName };
const bio = Array.from(document.querySelectorAll('a[href^="/babes"]'), el => decodeURI(el.href)).reduce((acc, item) => {
const bio = Array.from(document.querySelectorAll('a[href^="/babes"]'), (el) => decodeURI(el.href)).reduce((acc, item) => {
const keyMatch = item.match(/\[\w+\]/);
if (keyMatch) {
@@ -52,7 +52,7 @@ function scrapeProfile(html, actorName) {
if (bio.height) profile.height = Number(bio.height.split(',')[0]);
if (bio.weight) profile.weight = Number(bio.weight.split(',')[0]);
profile.social = Array.from(document.querySelectorAll('.profile-meta-item a.social-icons'), el => el.href);
profile.social = Array.from(document.querySelectorAll('.profile-meta-item a.social-icons'), (el) => el.href);
const avatar = document.querySelector('.profile-image-large img').src;
if (!avatar.match('placeholder')) profile.avatar = { src: avatar, credit: null };

View File

@@ -33,7 +33,7 @@ async function fetchApiCredentials(referer, site) {
const res = await http.get(referer);
const body = res.body.toString();
const apiLine = body.split('\n').find(bodyLine => bodyLine.match('apiKey'));
const apiLine = body.split('\n').find((bodyLine) => bodyLine.match('apiKey'));
if (!apiLine) {
throw new Error(`No Gamma API key found for ${referer}`);
@@ -169,7 +169,7 @@ async function getThumbs(entryId, site, parameters) {
});
if (res.ok && res.body.results?.[0]?.hits[0]?.set_pictures) {
return res.body.results[0].hits[0].set_pictures.map(img => ([
return res.body.results[0].hits[0].set_pictures.map((img) => ([
`https://transform.gammacdn.com/photo_set${img.thumb_path}`,
`https://images-evilangel.gammacdn.com/photo_set${img.thumb_path}`,
]));
@@ -214,7 +214,7 @@ async function scrapeApiReleases(json, site) {
release.date = moment.utc(scene.release_date, 'YYYY-MM-DD').toDate();
release.director = scene.directors[0]?.name || null;
release.actors = scene.actors.map(actor => ({
release.actors = scene.actors.map((actor) => ({
entryId: actor.actor_id,
name: actor.name,
gender: actor.gender,
@@ -226,7 +226,7 @@ async function scrapeApiReleases(json, site) {
}));
release.tags = scene.master_categories
.concat(scene.categories?.map(category => category.name))
.concat(scene.categories?.map((category) => category.name))
.filter(Boolean); // some categories don't have a name
const posterPath = scene.pictures.resized || (scene.pictures.nsfw?.top && Object.values(scene.pictures.nsfw.top)[0]);
@@ -272,7 +272,7 @@ function scrapeAll(html, site, networkUrl, hasTeaser = true) {
[release.likes, release.dislikes] = $(element).find('.value')
.toArray()
.map(value => Number($(value).text()));
.map((value) => Number($(value).text()));
const posterEl = $(element).find('.imgLink img, .tlcImageItem');
if (posterEl) release.poster = posterEl.attr('data-original') || posterEl.attr('src');
@@ -327,13 +327,13 @@ async function scrapeScene(html, url, site, baseRelease, mobileHtml, options) {
const actors = data?.actor || data2?.actor;
if (actors) {
release.actors = actors.map(actor => ({
release.actors = actors.map((actor) => ({
name: actor.name,
gender: actor.gender,
}));
}
const hasTrans = release.actors?.some(actor => actor.gender === 'shemale');
const hasTrans = release.actors?.some((actor) => actor.gender === 'shemale');
const rawTags = data?.keywords?.split(', ') || data2?.keywords?.split(', ') || [];
release.tags = hasTrans ? [...rawTags, 'transsexual'] : rawTags;
@@ -420,7 +420,7 @@ async function scrapeSceneApi(data, site, options) {
release.duration = data.length;
release.date = new Date(data.date * 1000) || qu.parseDate(data.release_date, 'YYYY-MM-DD');
release.actors = data.actors.map(actor => ({
release.actors = data.actors.map((actor) => ({
entryId: actor.actor_id,
name: actor.name,
gender: actor.gender,
@@ -429,7 +429,7 @@ async function scrapeSceneApi(data, site, options) {
: qu.prefixUrl(`/en/pornstar/${actor.url_name}/${data.actor_id}`, site.url),
}));
release.tags = data.categories.map(category => category.name);
release.tags = data.categories.map((category) => category.name);
if (data.pictures) {
release.poster = [
@@ -501,7 +501,7 @@ async function scrapeMovie({ query, html }, window, url, entity, options) {
release.date = qu.extractDate(data.dvdReleaseDate);
release.title = data.dvdName;
release.actors = data.dvdActors.map(actor => ({ name: actor.actorName, entryId: actor.actorId }));
release.actors = data.dvdActors.map((actor) => ({ name: actor.actorName, entryId: actor.actorId }));
release.tags = query.cnts('.dvdCol a');
release.scenes = scrapeAll(html, entity, entity.url);
@@ -602,9 +602,9 @@ function scrapeApiProfile(data, releases, siteSlug) {
if (data.attributes.hair_color) profile.hair = data.attributes.hair_color;
const avatarPaths = Object.values(data.pictures).reverse();
if (avatarPaths.length > 0) profile.avatar = avatarPaths.map(avatarPath => `https://images01-evilangel.gammacdn.com/actors${avatarPath}`);
if (avatarPaths.length > 0) profile.avatar = avatarPaths.map((avatarPath) => `https://images01-evilangel.gammacdn.com/actors${avatarPath}`);
if (releases) profile.releases = releases.map(release => `https://${siteSlug}.com/en/video/${release.url_title}/${release.clip_id}`);
if (releases) profile.releases = releases.map((release) => `https://${siteSlug}.com/en/video/${release.url_title}/${release.clip_id}`);
return profile;
}
@@ -723,7 +723,7 @@ function getDeepUrl(url, site, baseRelease, mobile) {
const filter = new Set(['en', 'video', 'scene', site.slug, site.parent.slug]);
const pathname = baseRelease?.path || new URL(url).pathname
.split('/')
.filter(component => !filter.has(component))
.filter((component) => !filter.has(component))
.join('/'); // reduce to scene ID and title slug
const sceneId = baseRelease?.entryId || pathname.match(/\/(\d+)\//)?.[1];
@@ -863,7 +863,7 @@ async function fetchApiProfile({ name: actorName }, context, include) {
});
if (res.status === 200 && res.body.results[0].hits.length > 0) {
const actorData = res.body.results[0].hits.find(actor => slugify(actor.name) === slugify(actorName));
const actorData = res.body.results[0].hits.find((actor) => slugify(actor.name) === slugify(actorName));
if (actorData) {
const actorScenes = include.releases && await fetchActorScenes(actorData.name, apiUrl, siteSlug);

View File

@@ -42,13 +42,13 @@ function scrapeScene({ query }, url) {
release.duration = query.dur('.content-metas span:nth-child(2)');
release.likes = query.number('.content-metas span:nth-child(6)');
release.actors = query.all('.model-thumb img').map(el => ({
release.actors = query.all('.model-thumb img').map((el) => ({
name: query.q(el, null, 'alt'),
avatar: query.img(el, null, 'src'),
}));
release.poster = query.poster('.content-video video');
release.photos = query.urls('#photo-carousel a').map(photo => [
release.photos = query.urls('#photo-carousel a').map((photo) => [
photo.replace('/full', ''),
photo,
photo.replace('/full', '/thumbs'),
@@ -135,7 +135,7 @@ async function fetchProfile(baseActor, entity, include) {
});
if (searchRes.ok) {
const actor = searchRes.body.find(result => result.type === 'model' && result.title === baseActor.name);
const actor = searchRes.body.find((result) => result.type === 'model' && result.title === baseActor.name);
if (actor) {
const actorRes = await qu.get(actor.url);

View File

@@ -21,7 +21,7 @@ function scrapeAll(scenes) {
avatar: [
avatarEl.src.replace(/-\d+x\d+/, ''),
avatarEl.src,
].map(src => ({ src, interval: 1000, concurrency: 1 })),
].map((src) => ({ src, interval: 1000, concurrency: 1 })),
}),
};
}).concat({

View File

@@ -53,7 +53,7 @@ function getImageWithFallbacks(q, selector, site, el) {
q(selector, 'src0_1x'),
];
return sources.filter(Boolean).map(src => `${site.parameters?.media || site.url}${src}`);
return sources.filter(Boolean).map((src) => `${site.parameters?.media || site.url}${src}`);
}
function scrapeAll(scenes, channel) {
@@ -108,7 +108,7 @@ function scrapeAllT1(scenes, site, accNetworkReleases) {
// release.entryId = q('.img-div img', 'id')?.match(/set-target-(\d+)/)[1];
release.entryId = deriveEntryId(release);
if (site.parameters?.accFilter && accNetworkReleases?.map(accRelease => accRelease.entryId).includes(release.entryId)) {
if (site.parameters?.accFilter && accNetworkReleases?.map((accRelease) => accRelease.entryId).includes(release.entryId)) {
// filter out releases that were already scraped from a categorized site, requires sequential site scraping
return null;
}
@@ -132,7 +132,7 @@ function scrapeScene({ html, query }, channel, url) {
const poster = qu.prefixUrl(posterPath, channel.url) || query.img('.update_thumb', 'src0_1x', { origin: channel.url }); // latter used when trailer requires signup
[release.poster, ...release.photos] = [poster, ...query.imgs('.item-thumb img', 'src0_1x', { origin: channel.url })]
.map(src => src && [
.map((src) => src && [
src.replace('-1x', '-3x'),
src.replace('-1x', '-2x'),
src,
@@ -161,7 +161,7 @@ function scrapeSceneT1({ html, query }, site, url, baseRelease) {
release.date = query.date('.update-info-row', 'MMM D, YYYY', /\w+ \d{1,2}, \d{4}/);
release.duration = query.dur('.update-info-row:nth-child(2)');
release.actors = query.all('.models-list-thumbs a').map(el => ({
release.actors = query.all('.models-list-thumbs a').map((el) => ({
name: query.q(el, 'span', true),
avatar: getImageWithFallbacks(query.q, 'img', site, el),
}));
@@ -180,8 +180,8 @@ function scrapeSceneT1({ html, query }, site, url, baseRelease) {
if (stars) release.stars = Number(stars);
if (site.type === 'network') {
const channelRegExp = new RegExp(site.children.map(channel => channel.parameters?.match || channel.name).join('|'), 'i');
const channel = release.tags.find(tag => channelRegExp.test(tag));
const channelRegExp = new RegExp(site.children.map((channel) => channel.parameters?.match || channel.name).join('|'), 'i');
const channel = release.tags.find((tag) => channelRegExp.test(tag));
if (channel) {
release.channel = slugify(channel, '');
@@ -290,7 +290,7 @@ async function scrapeProfile({ query, el }, channel, options) {
if (bio.piercings && /yes/i.test(bio.piercings)) profile.hasPiercings = true;
if (bio.piercings && /no/i.test(bio.piercings)) profile.hasPiercings = false;
if (bio.aliases) profile.aliases = bio.aliases.split(',').map(alias => alias.trim());
if (bio.aliases) profile.aliases = bio.aliases.split(',').map((alias) => alias.trim());
profile.social = [bio.onlyfans, bio.twitter, bio.instagram].filter(Boolean);

View File

@@ -26,7 +26,7 @@ function scrapeLatest(scenes, site) {
release.date = qu.ed(title.slice(0, title.indexOf(':')), 'MMM D, YYYY');
}
release.actors = actors.map(actor => actor.trim());
release.actors = actors.map((actor) => actor.trim());
const description = query.q('.articleCopyText', true);
if (description) release.description = description.slice(0, description.lastIndexOf('('));
@@ -81,7 +81,7 @@ function scrapeScene({ query }, site) {
release.title = title.trim();
release.description = query.q('.articleCopyText', true);
release.actors = actors.map(actor => actor.trim());
release.actors = actors.map((actor) => actor.trim());
release.date = query.date('.articlePostDateText', 'MMMM D, YYYY');
release.duration = query.dur('.articlePostDateText a:nth-child(2)');

View File

@@ -117,7 +117,7 @@ function scrapeProfile({ query }, actorName, actorAvatar, channel, releasesFromS
profile.releases = releasesFromScene?.[profile.name] || scrapeProfileScenes(qu.initAll(query.all('.Models li')), actorName, channel);
// avatar is the poster of a scene, find scene and use its high quality poster instead
const avatarRelease = profile.releases.find(release => new URL(release.poster[1]).pathname === new URL(actorAvatar).pathname);
const avatarRelease = profile.releases.find((release) => new URL(release.poster[1]).pathname === new URL(actorAvatar).pathname);
profile.avatar = avatarRelease?.poster[0];
return profile;

View File

@@ -13,7 +13,7 @@ async function fetchActors(entryId, channel, { token, time }) {
const res = await http.get(url);
if (res.statusCode === 200 && res.body.status === true) {
return Object.values(res.body.response.collection).map(actor => Object.values(actor.modelId.collection)[0].stageName);
return Object.values(res.body.response.collection).map((actor) => Object.values(actor.modelId.collection)[0].stageName);
}
return [];
@@ -46,7 +46,7 @@ function scrapeLatest(items, channel) {
release.title = query.cnt('h5 a');
[release.poster, ...release.photos] = query.imgs('.screenshot').map(src => [
[release.poster, ...release.photos] = query.imgs('.screenshot').map((src) => [
// unnecessarily large
// src.replace(/\/\d+/, 3840),
// src.replace(/\/\d+/, '/2000'),
@@ -99,7 +99,7 @@ function scrapeScene({ query, html }, url, channel) {
[release.poster, ...release.photos] = [poster]
.concat(photos)
.filter(Boolean)
.map(src => [
.map((src) => [
src.replace(/\/(\d+)\/\d+/, '/$1/1500'),
src.replace(/\/(\d+)\/\d+/, '/$1/1000'),
src,
@@ -128,8 +128,8 @@ async function scrapeSceneApi(scene, channel, tokens, deep) {
release.date = new Date(scene.sites.collection[scene.id].publishDate);
release.poster = scene._resources.primary[0].url;
if (scene.tags) release.tags = Object.values(scene.tags.collection).map(tag => tag.alias);
if (scene._resources.base) release.photos = scene._resources.base.map(resource => resource.url);
if (scene.tags) release.tags = Object.values(scene.tags.collection).map((tag) => tag.alias);
if (scene._resources.base) release.photos = scene._resources.base.map((resource) => resource.url);
if (deep) {
// don't make external requests during update scraping, as this would happen for every scene on the page
@@ -149,7 +149,7 @@ async function scrapeSceneApi(scene, channel, tokens, deep) {
}
function scrapeLatestApi(scenes, site, tokens) {
return Promise.map(scenes, async scene => scrapeSceneApi(scene, site, tokens, false), { concurrency: 10 });
return Promise.map(scenes, async (scene) => scrapeSceneApi(scene, site, tokens, false), { concurrency: 10 });
}
async function fetchToken(channel) {

View File

@@ -33,7 +33,7 @@ function scrapeLatest(scenes, dates, site) {
const poster = qu.img('img[src*="photos/"][width="400"]');
release.poster = `${site.url}/visitors/${poster}`;
release.photos = qu.imgs('img[src*="photos/"]:not([width="400"])').map(source => `${site.url}/visitors/${source}`);
release.photos = qu.imgs('img[src*="photos/"]:not([width="400"])').map((source) => `${site.url}/visitors/${source}`);
return release;
});

View File

@@ -55,7 +55,7 @@ async function getPhotosLegacy(entryId, site, type = 'highres', page = 1) {
// don't add first URL to pages to prevent unnecessary duplicate request
const photos = scrapePhotos(html, type);
const pages = Array.from(new Set($('.page_numbers a').toArray().map(el => $(el).attr('href'))));
const pages = Array.from(new Set($('.page_numbers a').toArray().map((el) => $(el).attr('href'))));
const otherPhotos = pages
? await Promise.map(pages, async (pageX) => {
@@ -84,7 +84,7 @@ async function getPhotos(entryId, site, type = 'highres', page = 1) {
const res = await http.get(albumUrl);
const html = res.body.toString();
const sourceLines = html.split(/\n/).filter(line => line.match(/ptx\["\w+"\]/));
const sourceLines = html.split(/\n/).filter((line) => line.match(/ptx\["\w+"\]/));
const sources = sourceLines.reduce((acc, sourceLine) => {
const quality = sourceLine.match(/\["\w+"\]/)[0].slice(2, -2);
const sourceStart = sourceLine.match(/\/trial|\/tour|\/content/);
@@ -261,7 +261,7 @@ async function scrapeScene({ html, query }, url, site, include) {
}
if (include.trailer && site.slug !== 'manuelferrara') {
const trailerLines = html.split('\n').filter(line => /movie\["trailer\w*"\]\[/i.test(line));
const trailerLines = html.split('\n').filter((line) => /movie\["trailer\w*"\]\[/i.test(line));
if (trailerLines.length) {
release.trailer = trailerLines.map((trailerLine) => {
@@ -307,7 +307,7 @@ function scrapeMovie({ el, query }, url, site) {
const scenes = scrapeAll(sceneQus, site);
const curatedScenes = scenes
?.map(scene => ({ ...scene, movie }))
?.map((scene) => ({ ...scene, movie }))
.sort((sceneA, sceneB) => sceneA.date - sceneB.date);
movie.date = curatedScenes?.[0].date;
@@ -354,13 +354,13 @@ function scrapeProfile(html, url, actorName, entity) {
avatarEl.getAttribute('src0'),
avatarEl.getAttribute('src'),
]
.filter(avatar => avatar && !/p\d+.jpe?g/.test(avatar)) // remove non-existing attributes and placeholder images
.map(avatar => qu.prefixUrl(avatar, entity.url));
.filter((avatar) => avatar && !/p\d+.jpe?g/.test(avatar)) // remove non-existing attributes and placeholder images
.map((avatar) => qu.prefixUrl(avatar, entity.url));
if (avatarSources.length) profile.avatar = avatarSources;
}
profile.releases = Array.from(document.querySelectorAll('.category_listing_block .update_details > a:first-child'), el => el.href);
profile.releases = Array.from(document.querySelectorAll('.category_listing_block .update_details > a:first-child'), (el) => el.href);
return profile;
}

View File

@@ -35,7 +35,7 @@ function scrapeScene({ query }, url) {
release.title = query.cnt('.title');
release.date = query.date('.date .content', 'MMM Do, YYYY');
release.actors = query.all('.models .content a').map(modelEl => ({
release.actors = query.all('.models .content a').map((modelEl) => ({
name: query.cnt(modelEl),
url: query.url(modelEl, null),
}));
@@ -76,7 +76,7 @@ async function fetchProfile(baseActor, entity) {
return searchRes.status;
}
const actorUrl = searchRes.items.find(item => slugify(item.query.cnt('.title')) === baseActor.slug)?.query.url('a');
const actorUrl = searchRes.items.find((item) => slugify(item.query.cnt('.title')) === baseActor.slug)?.query.url('a');
if (!actorUrl) {
return null;

View File

@@ -53,7 +53,7 @@ function scrapeLatest(scenes, site) {
}
return release;
}).filter(scene => scene);
}).filter((scene) => scene);
}
async function scrapeScene({ query, html }, url, baseRelease, channel, session) {
@@ -100,7 +100,7 @@ async function scrapeScene({ query, html }, url, baseRelease, channel, session)
const trailerInfoRes = await http.post(trailerInfoUrl, null, { session });
if (trailerInfoRes.ok && trailerInfoRes.body.sources?.length > 0) {
release.trailer = trailerInfoRes.body.sources.map(trailer => ({
release.trailer = trailerInfoRes.body.sources.map((trailer) => ({
src: trailer.src,
type: trailer.type,
/* unreliable, sometimes actual video is 720p

View File

@@ -4,11 +4,11 @@ const qu = require('../utils/qu');
const slugify = require('../utils/slugify');
function scrapeAll({ query }) {
const urls = query.urls('td > a:not([href*=joinnow])').map(pathname => `http://killergram.com/${encodeURI(pathname)}`);
const urls = query.urls('td > a:not([href*=joinnow])').map((pathname) => `http://killergram.com/${encodeURI(pathname)}`);
const posters = query.imgs('td > a img');
const titles = query.all('.episodeheadertext', true);
const actors = query.all('.episodetextinfo:nth-child(3)').map(el => query.all(el, 'a', true));
const channels = query.all('.episodetextinfo:nth-child(2) a', true).map(channel => slugify(channel, ''));
const actors = query.all('.episodetextinfo:nth-child(3)').map((el) => query.all(el, 'a', true));
const channels = query.all('.episodetextinfo:nth-child(2) a', true).map((channel) => slugify(channel, ''));
if ([urls.length, posters.length, titles.length, actors.length, channels.length].every((value, index, array) => value === array[0])) { // make sure every set has the same number of items
const releases = urls.map((url, index) => ({
@@ -51,7 +51,7 @@ function scrapeScene({ query, html }, url) {
}
async function fetchActorReleases({ query }, url, remainingPages, actorName, accReleases = []) {
const releases = scrapeAll({ query }).filter(release => release.actors.includes(actorName));
const releases = scrapeAll({ query }).filter((release) => release.actors.includes(actorName));
if (remainingPages.length > 0) {
const { origin, pathname, searchParams } = new URL(url);

View File

@@ -19,7 +19,7 @@ function scrapeAll(scenes) {
release.stars = query.q('.average-rating', 'data-rating') / 10;
release.poster = query.img('.adimage');
release.photos = query.imgs('.rollover .roll-image', 'data-imagesrc').map(photo => [
release.photos = query.imgs('.rollover .roll-image', 'data-imagesrc').map((photo) => [
photo.replace('410/', '830/'),
photo,
]);
@@ -40,13 +40,13 @@ async function scrapeScene({ query }, url) {
release.description = query.q('.description-text', true);
release.date = query.date('.shoot-date', 'MMMM DD, YYYY');
release.actors = query.all('.names a', true).map(actor => actor.replace(/,\s*/, ''));
release.actors = query.all('.names a', true).map((actor) => actor.replace(/,\s*/, ''));
release.director = query.q('.director-name', true);
release.photos = query.imgs('.gallery .thumb img, #gallerySlider .gallery-img', 'data-image-file');
release.poster = query.poster();
release.tags = query.all('.tag-list a[href*="/tag"]', true).map(tag => tag.replace(/,\s*/, ''));
release.tags = query.all('.tag-list a[href*="/tag"]', true).map((tag) => tag.replace(/,\s*/, ''));
const trailer = query.q('.player span[data-type="trailer-src"]', 'data-url');
@@ -154,7 +154,7 @@ async function fetchProfile({ name: actorName }, entity, include) {
const searchRes = await qu.getAll(`https://kink.com/search?type=performers&q=${actorName}`, '.model');
if (searchRes.ok) {
const actorItem = searchRes.items.find(item => item.query.exists(`.model-link img[alt="${actorName}"]`));
const actorItem = searchRes.items.find((item) => item.query.exists(`.model-link img[alt="${actorName}"]`));
if (actorItem) {
const actorPath = actorItem.query.url('.model-link');

View File

@@ -126,7 +126,7 @@ async function scrapeScene(html, url, site, useGallery) {
'1080p': 1080,
};
release.trailer = data.clip.qualities.map(trailer => ({
release.trailer = data.clip.qualities.map((trailer) => ({
src: trailer.src,
type: trailer.type,
quality: qualityMap[trailer.quality] || trailer.quality,
@@ -147,10 +147,10 @@ async function scrapeProfile(html, _url, actorName) {
};
const avatarEl = document.querySelector('.model--avatar img[src^="http"]');
const entries = Array.from(document.querySelectorAll('.model--description tr'), el => el.textContent.replace(/\n/g, '').split(':'));
const entries = Array.from(document.querySelectorAll('.model--description tr'), (el) => el.textContent.replace(/\n/g, '').split(':'));
const bio = entries
.filter(entry => entry.length === 2) // ignore entries without ':' (About section, see Blanche Bradburry)
.filter((entry) => entry.length === 2) // ignore entries without ':' (About section, see Blanche Bradburry)
.reduce((acc, [key, value]) => ({ ...acc, [key.trim()]: value.trim() }), {});
profile.birthPlace = bio.Nationality;
@@ -184,7 +184,7 @@ async function fetchProfile({ name: actorName }) {
const res = await http.get(`https://www.legalporno.com/api/autocomplete/search?q=${actorName.replace(' ', '+')}`);
const data = res.body;
const result = data.terms.find(item => item.type === 'model');
const result = data.terms.find((item) => item.type === 'model');
if (result) {
const bioRes = await http.get(result.url);

View File

@@ -65,7 +65,7 @@ async function fetchPhotos(url) {
const res = await qu.get(url, '.et_post_gallery');
if (res.ok) {
return res.item.query.urls('a').map(imgUrl => ({
return res.item.query.urls('a').map((imgUrl) => ({
src: imgUrl,
referer: url,
}));
@@ -89,14 +89,14 @@ async function scrapeScene({ query }, url, channel, include) {
release.date = query.date('.vid_date', 'MMMM D, YYYY');
release.duration = query.dur('.vid_length');
release.actors = query.all('.vid_infos a[href*="author/"]').map(actorEl => ({
release.actors = query.all('.vid_infos a[href*="author/"]').map((actorEl) => ({
name: query.cnt(actorEl),
url: query.url(actorEl, null),
}));
release.tags = query.cnts('.vid_infos a[rel="tag"]');
const posterData = data['@graph']?.find(item => item['@type'] === 'ImageObject');
const posterData = data['@graph']?.find((item) => item['@type'] === 'ImageObject');
const poster = posterData?.url
|| query.q('meta[property="og:image"]', 'content')

View File

@@ -20,7 +20,7 @@ function scrapeAll(scenes) {
release.duration = query.dur('.total-time');
const [poster, ...primaryPhotos] = query.imgs('a img');
const secondaryPhotos = query.styles('.thumb-top, .thumb-bottom, .thumb-mouseover', 'background-image').map(style => style.match(/url\((.*)\)/)[1]);
const secondaryPhotos = query.styles('.thumb-top, .thumb-bottom, .thumb-mouseover', 'background-image').map((style) => style.match(/url\((.*)\)/)[1]);
release.poster = poster;
release.photos = primaryPhotos.concat(secondaryPhotos);

View File

@@ -14,14 +14,14 @@ const { inchesToCm, lbsToKg } = require('../utils/convert');
function getThumbs(scene) {
if (scene.images.poster) {
return Object.values(scene.images.poster) // can be { 0: {}, 1: {}, ... } instead of array
.filter(img => typeof img === 'object') // remove alternateText property
.map(image => image.xl.url);
.filter((img) => typeof img === 'object') // remove alternateText property
.map((image) => image.xl.url);
}
if (scene.images.card_main_rect) {
return scene.images.card_main_rect
.concat(scene.images.card_secondary_rect || [])
.map(image => image.xl.url.replace('.thumb', ''));
.map((image) => image.xl.url.replace('.thumb', ''));
}
return [];
@@ -29,14 +29,14 @@ function getThumbs(scene) {
function getVideos(data) {
const teaserSources = data.videos.mediabook?.files;
const trailerSources = data.children.find(child => child.type === 'trailer')?.videos.full?.files;
const trailerSources = data.children.find((child) => child.type === 'trailer')?.videos.full?.files;
const teaser = teaserSources && Object.values(teaserSources).map(source => ({
const teaser = teaserSources && Object.values(teaserSources).map((source) => ({
src: source.urls.view,
quality: parseInt(source.format, 10),
}));
const trailer = trailerSources && Object.values(trailerSources).map(source => ({
const trailer = trailerSources && Object.values(trailerSources).map((source) => ({
src: source.urls.view,
quality: parseInt(source.format, 10),
}));
@@ -59,8 +59,8 @@ function scrapeLatestX(data, site, filterChannel) {
release.date = new Date(data.dateReleased);
release.duration = data.videos.mediabook?.length > 1 ? data.videos.mediabook.length : null;
release.actors = data.actors.map(actor => ({ name: actor.name, gender: actor.gender }));
release.tags = data.tags.map(tag => tag.name);
release.actors = data.actors.map((actor) => ({ name: actor.name, gender: actor.gender }));
release.tags = data.tags.map((tag) => tag.name);
[release.poster, ...release.photos] = getThumbs(data);
@@ -69,15 +69,15 @@ function scrapeLatestX(data, site, filterChannel) {
if (teaser) release.teaser = teaser;
if (trailer) release.trailer = trailer;
release.chapters = data.timeTags?.map(chapter => ({
release.chapters = data.timeTags?.map((chapter) => ({
time: chapter.startTime,
duration: chapter.endTime - chapter.startTime,
tags: [chapter.name],
}));
if ((site.parameters?.extract === true && data.collections.length > 0) // release should not belong to any channel
|| (typeof site.parameters?.extract === 'string' && !data.collections.some(collection => collection.shortName === site.parameters.extract)) // release should belong to specific channel
|| (filterChannel && !data.collections?.some(collection => collection.id === site.parameters?.siteId))) { // used to separate upcoming Brazzers scenes
|| (typeof site.parameters?.extract === 'string' && !data.collections.some((collection) => collection.shortName === site.parameters.extract)) // release should belong to specific channel
|| (filterChannel && !data.collections?.some((collection) => collection.id === site.parameters?.siteId))) { // used to separate upcoming Brazzers scenes
return {
...release,
exclude: true,
@@ -88,11 +88,11 @@ function scrapeLatestX(data, site, filterChannel) {
}
async function scrapeLatest(items, site, filterChannel) {
const latestReleases = items.map(data => scrapeLatestX(data, site, filterChannel));
const latestReleases = items.map((data) => scrapeLatestX(data, site, filterChannel));
return {
scenes: latestReleases.filter(scene => !scene.exclude),
unextracted: latestReleases.filter(scene => scene.exclude),
scenes: latestReleases.filter((scene) => !scene.exclude),
unextracted: latestReleases.filter((scene) => scene.exclude),
};
}
@@ -108,8 +108,8 @@ function scrapeScene(data, url, _site, networkName) {
release.date = new Date(data.dateReleased);
release.duration = data.videos.mediabook?.length > 1 ? data.videos.mediabook.length : null;
release.actors = data.actors.map(actor => ({ name: actor.name, gender: actor.gender }));
release.tags = data.tags.map(tag => tag.name);
release.actors = data.actors.map((actor) => ({ name: actor.name, gender: actor.gender }));
release.tags = data.tags.map((tag) => tag.name);
[release.poster, ...release.photos] = getThumbs(data);
@@ -118,7 +118,7 @@ function scrapeScene(data, url, _site, networkName) {
if (teaser) release.teaser = teaser;
if (trailer) release.trailer = trailer;
release.chapters = data.timeTags?.map(chapter => ({
release.chapters = data.timeTags?.map((chapter) => ({
time: chapter.startTime,
duration: chapter.endTime - chapter.startTime,
tags: [chapter.name],
@@ -213,18 +213,18 @@ function scrapeProfile(data, html, releases = [], networkName) {
|| data.images.card_main_rect[0].xs?.url;
}
const birthdate = query.all('li').find(el => /Date of Birth/.test(el.textContent));
const birthdate = query.all('li').find((el) => /Date of Birth/.test(el.textContent));
if (birthdate) profile.birthdate = query.date(birthdate, 'span', 'MMMM Do, YYYY');
if (data.tags.some(tag => /boob type/i.test(tag.category) && /natural tits/i.test(tag.name))) {
if (data.tags.some((tag) => /boob type/i.test(tag.category) && /natural tits/i.test(tag.name))) {
profile.naturalBoobs = true;
}
if (data.tags.some(tag => /boob type/i.test(tag.category) && /enhanced/i.test(tag.name))) {
if (data.tags.some((tag) => /boob type/i.test(tag.category) && /enhanced/i.test(tag.name))) {
profile.naturalBoobs = false;
}
profile.releases = releases.map(release => scrapeScene(release, null, null, networkName));
profile.releases = releases.map((release) => scrapeScene(release, null, null, networkName));
return profile;
}
@@ -325,7 +325,7 @@ async function fetchProfile({ name: actorName, slug: actorSlug }, { entity, para
});
if (res.statusCode === 200) {
const actorData = res.body.result.find(actor => actor.name.toLowerCase() === actorName.toLowerCase());
const actorData = res.body.result.find((actor) => actor.name.toLowerCase() === actorName.toLowerCase());
if (actorData) {
const actorUrl = `https://www.${entity.slug}.com/${entity.parameters?.actorPath || 'model'}/${actorData.id}/${actorSlug}`;

View File

@@ -69,7 +69,7 @@ function scrapeScene(html, url, site) {
const posterPath = $('video, dl8-video').attr('poster') || $('img.start-card').attr('src');
const poster = posterPath && `https:${posterPath}`;
const photos = $('.contain-scene-images.desktop-only a').map((index, el) => $(el).attr('href')).toArray().filter(Boolean).map(photo => `https:${photo}`);
const photos = $('.contain-scene-images.desktop-only a').map((index, el) => $(el).attr('href')).toArray().filter(Boolean).map((photo) => `https:${photo}`);
const trailerEl = $('source');
const trailerSrc = trailerEl.attr('src');
@@ -120,7 +120,7 @@ async function scrapeProfile(html) {
const releases = query.urls('.scene-item > a:first-child');
const otherPages = query.urls('.pagination a:not([rel=next]):not([rel=prev])');
const olderReleases = await Promise.all(otherPages.map(async page => fetchActorReleases(page)));
const olderReleases = await Promise.all(otherPages.map(async (page) => fetchActorReleases(page)));
profile.releases = releases.concat(olderReleases.flat());

View File

@@ -78,7 +78,7 @@ async function scrapeScene({ query }, url, site) {
release.tags = query.all('.categories a', true);
release.poster = query.poster() || query.img('.fake-video-player img');
release.trailer = query.all('source').map(source => ({
release.trailer = query.all('source').map((source) => ({
src: source.src,
quality: Number(source.getAttribute('res')),
}));
@@ -106,11 +106,11 @@ function scrapeProfile({ query }, _actorName, origin) {
profile.residencePlace = bio.location;
profile.height = heightToCm(bio.height);
[profile.bust, profile.waist, profile.hip] = bio.figure.split('-').map(v => Number(v) || v);
[profile.bust, profile.waist, profile.hip] = bio.figure.split('-').map((v) => Number(v) || v);
profile.avatar = query.img('.model-profile img');
const releases = query.all('.content-grid-item').filter(el => /video\//.test(query.url(el, '.img-wrapper a'))); // filter out photos
const releases = query.all('.content-grid-item').filter((el) => /video\//.test(query.url(el, '.img-wrapper a'))); // filter out photos
profile.releases = scrapeAll(query.initAll(releases), null, origin);
return profile;
@@ -143,7 +143,7 @@ async function fetchProfile({ name: actorName }, { site }) {
if (!resModels.ok) return resModels.status;
const modelPath = resModels.item.qu.all('.content-grid-item a.title').find(el => slugify(el.textContent) === slugify(actorName));
const modelPath = resModels.item.qu.all('.content-grid-item a.title').find((el) => slugify(el.textContent) === slugify(actorName));
if (modelPath) {
const modelUrl = `${origin}${modelPath}`;

View File

@@ -26,7 +26,7 @@ function scrapeAll(months, channel, year) {
gender: 'female',
url: query.url('a.video-pop-up', 'data-modellink', { origin: `${channel.url}/submissive` }),
}]
.filter(actor => !/lockdown/i.test(actor.name))
.filter((actor) => !/lockdown/i.test(actor.name))
.concat({
name: 'Pascal White',
gender: 'male',

View File

@@ -17,21 +17,21 @@ function extractMaleModelsFromTags(tagContainer) {
return [];
}
const tagEls = Array.from(tagContainer.childNodes, node => ({ type: node.nodeType, text: node.textContent.trim() })).filter(node => node.text.length > 0);
const modelLabelIndex = tagEls.findIndex(node => node.text === 'Male Models');
const tagEls = Array.from(tagContainer.childNodes, (node) => ({ type: node.nodeType, text: node.textContent.trim() })).filter((node) => node.text.length > 0);
const modelLabelIndex = tagEls.findIndex((node) => node.text === 'Male Models');
if (modelLabelIndex > -1) {
const nextLabelIndex = tagEls.findIndex((node, index) => index > modelLabelIndex && node.type === 3);
const maleModels = tagEls.slice(modelLabelIndex + 1, nextLabelIndex);
return maleModels.map(model => model.text);
return maleModels.map((model) => model.text);
}
return [];
}
async function extractChannelFromPhoto(photo, channel) {
const siteSlugs = (channel.type === 'network' ? channel.children : channel.parent?.children)?.map(child => child.slug);
const siteSlugs = (channel.type === 'network' ? channel.children : channel.parent?.children)?.map((child) => child.slug);
const channelMatch = photo.match(new RegExp(siteSlugs.join('|')));
if (channelMatch) {
@@ -52,7 +52,7 @@ async function scrapeLatest(scenes, site) {
const slug = new URL(release.url).pathname.split('/')[2];
release.entryId = getHash(`${site.slug}${slug}${release.date.toISOString()}`);
release.actors = release.title.split('&').map(actor => actor.trim());
release.actors = release.title.split('&').map((actor) => actor.trim());
[release.poster, ...release.photos] = query.imgs('.bloc-link img');
@@ -78,7 +78,7 @@ async function scrapeScene({ query }, site, url) {
const uhd = query.cnt('#video-ribbon .container > div > span:nth-child(2)');
if (/4K/.test(uhd)) release.tags = release.tags.concat('4k');
release.photos = query.all('.bxslider_pics img').map(el => el.dataset.original || el.src);
release.photos = query.all('.bxslider_pics img').map((el) => el.dataset.original || el.src);
release.poster = query.poster();
const trailer = query.trailer();


@@ -67,7 +67,7 @@ function scrapeScene({ query, html }, url, entity) {
release.description = query.cnt('.info_container .description');
release.date = query.date('.info_container .info_line:nth-child(1)', 'YYYY-MM-DD') || query.date('.description', 'DD MMMM YYYY', /\d{1,2} \w+ \d{4}/);
release.actors = query.all('.girl_item, .starring .item').map(actorEl => mapActor(actorEl, query, entity));
release.actors = query.all('.girl_item, .starring .item').map((actorEl) => mapActor(actorEl, query, entity));
release.duration = query.duration('.infos .description');
@@ -81,7 +81,7 @@ function scrapeScene({ query, html }, url, entity) {
release.tags = query.cnts('.tags a:not(.more_tag)');
release.poster = removeImageBorder(html.match(/image: "(.*?)"/)?.[1]);
release.trailer = html.match(/url: "(.*mp4.*)"/g)?.map(src => ({
release.trailer = html.match(/url: "(.*mp4.*)"/g)?.map((src) => ({
src: src.match(/"(.*)"/)?.[1],
quality: Number(src.match(/[-/](\d+)p/)?.[1]),
}));


@@ -40,7 +40,7 @@ function scrapeScene({ query }, url, channel) {
release.date = date;
release.datePrecision = precision;
release.actors = query.cnts(details.actors, 'a').map(actor => capitalize(actor, { uncapitalize: true }));
release.actors = query.cnts(details.actors, 'a').map((actor) => capitalize(actor, { uncapitalize: true }));
release.duration = query.duration(details.duration);
release.tags = query.cnts(details.genres, 'a');


@@ -13,7 +13,7 @@ function scrapeAll(scenes) {
release.title = query.cnt('[class*="item-title"] a') || query.q('.bottom .link', 'title');
release.date = query.date('[class*="item-date"]', 'MMM DD, YYYY');
release.actors = query.all('[class*="item-actors"] a').map(el => ({
release.actors = query.all('[class*="item-actors"] a').map((el) => ({
name: query.cnt(el),
url: query.url(el, null),
}));
@@ -44,7 +44,7 @@ function scrapeScene({ query }, url) {
release.description = query.meta('name=description') || query.q('read-even-more', true);
release.date = query.date('.h5-published', 'MMM DD, YYYY', /\w{3} \d{1,2}, \d{4}/);
release.actors = query.all('.video-top-details .actors a[href*="/models"]').map(el => ({
release.actors = query.all('.video-top-details .actors a[href*="/models"]').map((el) => ({
name: query.cnt(el),
url: query.url(el, null),
}));
@@ -53,7 +53,7 @@ function scrapeScene({ query }, url) {
release.tags = query.all('.video-top-details a[href*="/categories"], .video-top-details a[href*="/tags"]', true);
release.poster = query.img('.poster img') || query.meta('itemprop=thumbnailUrl');
release.photos = query.imgs('#gallery-thumbs [class*="thumb"]', 'data-bg').slice(1).map(photo => [ // first image is poster
release.photos = query.imgs('#gallery-thumbs [class*="thumb"]', 'data-bg').slice(1).map((photo) => [ // first image is poster
photo.replace('512x288', '1472x828'),
photo,
]);


@@ -16,7 +16,7 @@ const hairMap = {
async function scrapeProfile(html, _url, actorName) {
const { document } = new JSDOM(html).window;
const entries = Array.from(document.querySelectorAll('.infoPiece'), el => el.textContent.replace(/\n|\t/g, '').split(':'));
const entries = Array.from(document.querySelectorAll('.infoPiece'), (el) => el.textContent.replace(/\n|\t/g, '').split(':'));
const bio = entries.reduce((acc, [key, value]) => (key ? { ...acc, [key.trim()]: value.trim() } : acc), {});
const profile = {
@@ -47,7 +47,7 @@ async function scrapeProfile(html, _url, actorName) {
if (bio.Tattoos) profile.hasTattoos = bio.Tattoos === 'Yes';
if (avatarEl && !/default\//.test(avatarEl.src)) profile.avatar = avatarEl.src;
profile.social = Array.from(document.querySelectorAll('.socialList a'), el => el.href).filter(link => link !== 'https://www.twitter.com/'); // PH links to Twitter itself for some reason
profile.social = Array.from(document.querySelectorAll('.socialList a'), (el) => el.href).filter((link) => link !== 'https://www.twitter.com/'); // PH links to Twitter itself for some reason
return profile;
}


@@ -9,7 +9,7 @@ function scrapePhotos(html) {
const { qis } = ex(html, '#photos-page');
const photos = qis('img');
return photos.map(photo => [
return photos.map((photo) => [
photo
.replace('x_800', 'x_xl')
.replace('_tn', ''),
@@ -76,22 +76,22 @@ async function scrapeScene(html, url, site) {
release.actors = qu.all('.value a[href*=models], .value a[href*=performer], .value a[href*=teen-babes]', true);
if (release.actors.length === 0) {
const actorEl = qu.all('.stat').find(stat => /Featuring/.test(stat.textContent));
const actorEl = qu.all('.stat').find((stat) => /Featuring/.test(stat.textContent));
const actorString = qu.text(actorEl);
release.actors = actorString?.split(/,\band\b|,/g).map(actor => actor.trim()) || [];
release.actors = actorString?.split(/,\band\b|,/g).map((actor) => actor.trim()) || [];
}
if (release.actors.length === 0 && site.parameters?.actors) release.actors = site.parameters.actors;
release.tags = qu.all('a[href*=tag]', true);
const dateEl = qu.all('.value').find(el => /\w+ \d+\w+, \d{4}/.test(el.textContent));
const dateEl = qu.all('.value').find((el) => /\w+ \d+\w+, \d{4}/.test(el.textContent));
release.date = qu.date(dateEl, null, 'MMMM Do, YYYY')
|| qu.date('.date', 'MMMM Do, YYYY', /\w+ \d{1,2}\w+, \d{4}/)
|| qu.date('.info .holder', 'MM/DD/YYYY', /\d{2}\/\d{2}\/\d{4}/);
const durationEl = qu.all('.value').find(el => /\d{1,3}:\d{2}/.test(el.textContent));
const durationEl = qu.all('.value').find((el) => /\d{1,3}:\d{2}/.test(el.textContent));
release.duration = qu.dur(durationEl);
release.poster = qu.poster('video') || qu.img('.flowplayer img') || html.match(/posterImage: '(.*\.jpg)'/)?.[1] || null; // _800.jpg is larger than _xl.jpg in landscape
@@ -100,7 +100,7 @@ async function scrapeScene(html, url, site) {
if (photosUrl) {
release.photos = await fetchPhotos(photosUrl);
} else {
release.photos = qu.imgs('img[src*=ThumbNails], .p-photos .tn img').map(photo => [
release.photos = qu.imgs('img[src*=ThumbNails], .p-photos .tn img').map((photo) => [
photo.replace('_tn', ''),
photo,
]);
@@ -126,7 +126,7 @@ async function scrapeScene(html, url, site) {
function scrapeModels(html, actorName) {
const { qa } = ex(html);
const model = qa('.model a').find(link => link.title === actorName);
const model = qa('.model a').find((link) => link.title === actorName);
return model?.href || null;
}


@@ -15,7 +15,7 @@ function scrapeAll(scenes) {
release.entryId = getEntryId(release.url);
release.title = query.cnt('.title-label a');
release.actors = query.all('.update_models a').map(el => ({
release.actors = query.all('.update_models a').map((el) => ({
name: query.cnt(el),
url: query.url(el, null),
}));
@@ -37,7 +37,7 @@ function scrapeScene({ query }, url) {
release.description = query.cnt('#sceneInfo .description');
release.actors = query.all('#sceneInfo .data-others a[href*="/models"]').map(el => ({
release.actors = query.all('#sceneInfo .data-others a[href*="/models"]').map((el) => ({
name: query.el(el, null, 'title'),
url: query.url(el, null),
}));
@@ -50,8 +50,8 @@ function scrapeScene({ query }, url) {
release.poster = [poster, poster?.replace(/imgw=\w+/, 'imgw=680')];
release.photos = query.imgs('.photos-holder img')
.filter(src => new URL(src).pathname !== posterPathname)
.map(src => [
.filter((src) => new URL(src).pathname !== posterPathname)
.map((src) => [
src.replace(/imgw=\d+/, 'imgw=1284'),
src,
]);
@@ -74,7 +74,7 @@ function scrapeProfileScenes(scenes) {
release.description = query.cnt('.model-update-description');
release.actors = query.all('.model-labels a').map(el => ({
release.actors = query.all('.model-labels a').map((el) => ({
name: query.cnt(el),
url: query.url(el, null),
}));


@@ -13,7 +13,7 @@ function getChannelSlug(channelName, entity) {
}
const channelSlug = slugify(channelName, '', { removePunctuation: true });
const channel = entity.children.find(child => new RegExp(channelSlug).test(child.slug));
const channel = entity.children.find((child) => new RegExp(channelSlug).test(child.slug));
return channel?.slug || null;
}
@@ -27,8 +27,8 @@ function scrapeScene(scene, channel) {
release.title = scene.title;
release.date = qu.extractDate(scene.publishedDate);
release.actors = scene.models?.map(model => model.modelName) || [];
release.actors = scene.models?.map(model => ({
release.actors = scene.models?.map((model) => model.modelName) || [];
release.actors = scene.models?.map((model) => ({
name: model.modelName,
avatar: `https://images.mylfcdn.net/tsv4/model/profiles/${slugify(model.modelName, '_')}.jpg`,
url: `${channel.url}/models/www.mylf.com/models/${model.modelId}`,
@@ -113,7 +113,7 @@ function scrapeProfile(actor, entity) {
}
profile.avatar = actor.img;
profile.scenes = actor.movies?.map(scene => scrapeScene(scene, entity));
profile.scenes = actor.movies?.map((scene) => scrapeScene(scene, entity));
return profile;
}


@@ -25,15 +25,15 @@ function scrapeAll(scenes, entity) {
release.date = moment.utc(scene.year, 'YYYY').toDate();
release.datePrecision = 'year';
release.actors = scene.actors.map(actor => ({
release.actors = scene.actors.map((actor) => ({
name: actor.name.trim(),
avatar: actor.image || null,
})).filter(actor => actor.name && slugify(actor.name) !== 'amateur-girl');
})).filter((actor) => actor.name && slugify(actor.name) !== 'amateur-girl');
release.duration = scene.duration;
release.stars = scene.video_rating_score;
[release.poster, ...release.photos] = scene.screenshots.map(url => prefixUrl(url));
[release.poster, ...release.photos] = scene.screenshots.map((url) => prefixUrl(url));
if (scene.is_gay) {
release.tags = ['gay'];
@@ -64,7 +64,7 @@ async function scrapeScene({ query }, url) {
release.description = query.q('.detail-description', true);
release.duration = query.dur('.detail-meta li:first-child');
const actors = [query.q('.detail-hero-title h1', true)?.trim()].filter(name => name && slugify(name) !== 'amateur-girl');
const actors = [query.q('.detail-hero-title h1', true)?.trim()].filter((name) => name && slugify(name) !== 'amateur-girl');
if (actors.length > 0) {
release.actors = actors;
@@ -143,7 +143,7 @@ async function fetchProfile({ name: actorName }, { entity }, include) {
const res = await http.get(`https://teencoreclub.com/api/actors?query=${actorName}`);
if (res.ok) {
const actor = res.body.data.find(item => slugify(item.name) === slugify(actorName));
const actor = res.body.data.find((item) => slugify(item.name) === slugify(actorName));
if (actor) {
return scrapeProfile(actor, entity, include);


@@ -14,7 +14,7 @@ function scrapeAll(scenes, channel) {
release.title = query.cnt('.title');
release.date = query.date('time', 'MMMM D, YYYY');
release.actors = query.all('.actors a').map(el => ({
release.actors = query.all('.actors a').map((el) => ({
name: query.cnt(el),
url: query.url(el, null),
}));
@@ -29,7 +29,7 @@ function scrapeAll(scenes, channel) {
const siteId = query.url('.site a', 'href', { origin: network.url, object: true })?.searchParams.get('site[]');
if (siteId) {
release.channel = network.children.find(child => child.parameters.siteId.toString() === siteId)?.slug;
release.channel = network.children.find((child) => child.parameters.siteId.toString() === siteId)?.slug;
}
return release;
@@ -48,7 +48,7 @@ function scrapeScene({ query }, url, channel) {
release.date = query.date('.title-line .date', 'MMMM D, YYYY');
release.duration = query.number('.dur') * 60;
release.actors = query.all('.site a[href*="/models"]').map(el => ({
release.actors = query.all('.site a[href*="/models"]').map((el) => ({
name: query.cnt(el),
url: query.url(el, null),
}));
@@ -63,7 +63,7 @@ function scrapeScene({ query }, url, channel) {
const siteId = query.url('.site a[href*="site[]"]', 'href', { origin: network.url, object: true })?.searchParams.get('site[]');
if (siteId) {
release.channel = network.children.find(child => child.parameters.siteId.toString() === siteId)?.slug;
release.channel = network.children.find((child) => child.parameters.siteId.toString() === siteId)?.slug;
}
return release;


@@ -20,7 +20,7 @@ function scrapeSceneX(scene) {
release.date = new Date(scene.release_date);
release.actors = scene.models
.map(actor => (/&/.test(actor.name)
.map((actor) => (/&/.test(actor.name)
? actor.name.split(/\s*&\s*/)
: {
name: actor.name,
@@ -31,7 +31,7 @@ function scrapeSceneX(scene) {
.flat();
release.stars = scene.rating;
release.tags = scene.tags.map(tag => tag.name);
release.tags = scene.tags.map((tag) => tag.name);
if (mime.getType(scene.thumb) === 'image/gif') {
release.teaser = scene.thumb;
@@ -128,7 +128,7 @@ async function fetchProfile(baseActor, entity, options) {
return searchRes.status;
}
const actor = searchRes.body.models.items.find(model => slugify(model.name) === slugify(baseActor.name));
const actor = searchRes.body.models.items.find((model) => slugify(model.name) === slugify(baseActor.name));
if (actor) {
return scrapeProfile(actor, options);


@@ -217,7 +217,7 @@ function gender() {
}
function actors(release) {
const length = release.tags.some(tag => ['dp', 'dap', 'gangbang'].includes(tag))
const length = release.tags.some((tag) => ['dp', 'dap', 'gangbang'].includes(tag))
? Math.floor(Math.random() * 6) + 3
: Math.floor(Math.random() * 3) + 2;
@@ -254,7 +254,7 @@ async function fetchLatest(entity, page, options) {
// const poster = 'sfw/kittens/thumbs/iNEXVlX-RLs.jpeg';
release.poster = `http://${config.web.host}:${config.web.port}/img/${poster}?id=${nanoid()}`; // ensure source is unique
release.photos = photos.map(photo => `http://${config.web.host}:${config.web.port}/img/${photo}?id=${nanoid()}`);
release.photos = photos.map((photo) => `http://${config.web.host}:${config.web.port}/img/${photo}?id=${nanoid()}`);
}
release.tags = await knex('tags')


@@ -17,7 +17,7 @@ function scrapeLatestNative(scenes, site) {
release.date = ed(scene.release_date, 'YYYY-MM-DD');
release.duration = parseInt(scene.runtime, 10) * 60;
release.actors = scene.cast?.map(actor => ({
release.actors = scene.cast?.map((actor) => ({
name: actor.stagename,
gender: actor.gender.toLowerCase(),
avatar: actor.placard,
@@ -38,10 +38,10 @@ function scrapeSceneNative({ html, q, qa }, url, _site) {
release.title = q('.scene-h2-heading', true);
release.description = q('.indie-model-p', true);
const dateString = qa('h5').find(el => /Released/.test(el.textContent)).textContent;
const dateString = qa('h5').find((el) => /Released/.test(el.textContent)).textContent;
release.date = ed(dateString, 'MMM DD, YYYY', /\w+ \d{1,2}, \d{4}/);
const duration = qa('h5').find(el => /Runtime/.test(el.textContent)).textContent;
const duration = qa('h5').find((el) => /Runtime/.test(el.textContent)).textContent;
const [hours, minutes] = duration.match(/\d+/g);
if (minutes) release.duration = (hours * 3600) + (minutes * 60);
@@ -111,7 +111,7 @@ async function fetchSceneWrapper(url, site, release) {
});
if (searchRes.statusCode === 200 && searchRes.body.code === 200) {
const sceneMatch = searchRes.body.responseData.find(item => slugify(item.name) === slugify(scene.title));
const sceneMatch = searchRes.body.responseData.find((item) => slugify(item.name) === slugify(scene.title));
if (sceneMatch) {
return {


@@ -15,7 +15,7 @@ const genderMap = {
function getPosterFallbacks(poster) {
return poster
.filter(image => /landscape/i.test(image.name))
.filter((image) => /landscape/i.test(image.name))
.sort((imageA, imageB) => imageB.height - imageA.height)
.map((image) => {
const sources = [image.src, image.highdpi?.['2x'], image.highdpi?.['3x']];
@@ -23,7 +23,7 @@ function getPosterFallbacks(poster) {
return image.height === 1080 ? sources : sources.reverse();
})
.flat()
.map(src => ({
.map((src) => ({
src,
expectType: {
'binary/octet-stream': 'image/jpeg',
@@ -33,8 +33,8 @@ function getPosterFallbacks(poster) {
function getTeaserFallbacks(teaser) {
return teaser
.filter(video => /landscape/i.test(video.name))
.map(video => ({
.filter((video) => /landscape/i.test(video.name))
.map((video) => ({
src: video.src,
type: video.type,
quality: Number(String(video.height).replace('353', '360')),
@@ -44,7 +44,7 @@ function getTeaserFallbacks(teaser) {
function getAvatarFallbacks(avatar) {
return avatar
.sort((imageA, imageB) => imageB.height - imageA.height)
.map(image => [image.highdpi?.['3x'], image.highdpi?.['2x'], image.src])
.map((image) => [image.highdpi?.['3x'], image.highdpi?.['2x'], image.src])
.flat();
}
@@ -149,7 +149,7 @@ async function getPhotos(url) {
});
const state = htmlRes?.window.__APOLLO_STATE__;
const key = Object.values(state.ROOT_QUERY).find(query => query?.__ref)?.__ref;
const key = Object.values(state.ROOT_QUERY).find((query) => query?.__ref)?.__ref;
const data = state[key];
console.log(data);
@@ -158,7 +158,7 @@ async function getPhotos(url) {
return [];
}
return data.carousel.slice(1).map(photo => photo.main?.[0].src).filter(Boolean);
return data.carousel.slice(1).map((photo) => photo.main?.[0].src).filter(Boolean);
}
function scrapeAll(scenes, site, origin) {
@@ -191,7 +191,7 @@ function scrapeUpcoming(scene, site) {
release.title = scene.targetUrl
.slice(1)
.split('-')
.map(component => `${component.charAt(0).toUpperCase()}${component.slice(1)}`)
.map((component) => `${component.charAt(0).toUpperCase()}${component.slice(1)}`)
.join(' ');
release.url = `${site.url}/videos${scene.targetUrl}`;
@@ -243,7 +243,7 @@ async function scrapeScene(data, url, site, baseRelease, options) {
const trailer = await getTrailer(scene, site, url);
if (trailer) release.trailer = trailer;
release.chapters = data.video.chapters?.video.map(chapter => ({
release.chapters = data.video.chapters?.video.map((chapter) => ({
tags: [chapter.title],
time: chapter.seconds,
}));


@@ -131,7 +131,7 @@ function scrapeScene(html, url) {
release.actors = qu.all('.info-video-models a', true);
release.tags = qu.all('.info-video-category a', true);
release.photos = qu.urls('.swiper-wrapper .swiper-slide a').map(source => source.replace('.jpg/', '.jpg'));
release.photos = qu.urls('.swiper-wrapper .swiper-slide a').map((source) => source.replace('.jpg/', '.jpg'));
release.poster = qu.meta('meta[property="og:image"]');
if (!release.poster) {


@@ -22,7 +22,7 @@ async function getTrailerUrl(release, channel, request) {
});
if (res.ok) {
const trailers = res.body.streams.map(trailer => ({
const trailers = res.body.streams.map((trailer) => ({
src: trailer.url,
quality: Number(trailer.id?.match(/\d+/)?.[0] || trailer?.name.match(/\d+/)?.[0]),
vr: true,
@@ -47,7 +47,7 @@ function scrapeAll(scenes, channel) {
release.title = query.cnt('.card__h');
release.date = query.date('.card__date', 'D MMMM, YYYY');
release.actors = query.all('.card__links a').map(el => ({
release.actors = query.all('.card__links a').map((el) => ({
name: qu.query.cnt(el),
url: qu.query.url(el, null, 'href', { origin: channel.url }),
}));
@@ -82,14 +82,14 @@ async function scrapeScene({ query }, url, channel, baseRelease, options, reques
release.date = query.date('.detail__date', 'D MMMM, YYYY');
release.duration = query.number('.time') * 60;
release.actors = (query.all('.detail__header-lg .detail__models a') || query.all('.detail__header-sm .detail__models a')).map(el => ({
release.actors = (query.all('.detail__header-lg .detail__models a') || query.all('.detail__header-sm .detail__models a')).map((el) => ({
name: qu.query.cnt(el),
url: qu.query.url(el, null, 'href', { origin: channel.url }),
}));
release.tags = query.cnts('.tag-list .tag').concat(query.cnts('.detail__specs-list .detail__specs-item'));
release.photos = query.all('.photo-strip__slide').map(el => ([
release.photos = query.all('.photo-strip__slide').map((el) => ([
qu.query.img(el, null, 'data-src'),
qu.query.img(el, 'img', 'src'),
]));


@@ -18,7 +18,7 @@ function scrapeLatest(html, site) {
release.entryId = scene.dataset.videoId;
release.title = scene.querySelector('.card-title').textContent;
release.date = moment.utc(scene.dataset.date, 'MMMM DD, YYYY').toDate();
release.actors = Array.from(scene.querySelectorAll('.actors a'), el => el.textContent);
release.actors = Array.from(scene.querySelectorAll('.actors a'), (el) => el.textContent);
// slow CDN?
const poster = scene.querySelector('.single-image').dataset.src;
@@ -32,7 +32,7 @@ function scrapeLatest(html, site) {
concurrency: 1,
};
release.photos = Array.from(scene.querySelectorAll('.rollover-thumbs img'), el => ({
release.photos = Array.from(scene.querySelectorAll('.rollover-thumbs img'), (el) => ({
src: (/^http/.test(el.dataset.src) ? el.dataset.src : `https:${el.dataset.src}`),
referer: site.url,
attempts: 5,
@@ -63,7 +63,7 @@ function scrapeScene(html, site, url) {
release.url = url;
release.title = scene.querySelector('.t2019-stitle').textContent.trim();
release.description = scene.querySelector('#t2019-description').textContent.trim();
release.actors = Array.from(scene.querySelectorAll('#t2019-models a'), el => el.textContent);
release.actors = Array.from(scene.querySelectorAll('#t2019-models a'), (el) => el.textContent);
const durationEls = Array.from(scene.querySelectorAll('#t2019-stime span'));
@@ -75,7 +75,7 @@ function scrapeScene(html, site, url) {
}
// unreliable CDN
release.photos = Array.from(scene.querySelectorAll('#t2019-main .t2019-thumbs img'), el => ({
release.photos = Array.from(scene.querySelectorAll('#t2019-main .t2019-thumbs img'), (el) => ({
src: (/^http/.test(el.src) ? el.src : `https:${el.src}`),
referer: site.url,
attempts: 5,