Generating and using URL slugs for releases, improved slugify module. Added 'extract' parameter to MindGeek scraper to get scenes not associated with a channel (see Digital Playground). Added various high res logos.

This commit is contained in:
2020-02-04 03:12:09 +01:00
parent ca33704f51
commit f921bb4ae9
30 changed files with 132 additions and 22 deletions

View File

@@ -58,7 +58,7 @@ async function scrapeScene(scene, site, tokens) {
},
};
release.url = `${site.url}/scene/${release.entryId}/${slugify(release.title, true)}`;
release.url = `${site.url}/scene/${release.entryId}/${slugify(release.title, { encode: true })}`;
release.date = new Date(scene.sites.collection[scene.id].publishDate);
release.poster = scene._resources.primary[0].url;

View File

@@ -26,6 +26,16 @@ function getThumbs(scene) {
}
function scrapeLatestX(data, site) {
if (site.parameters?.extract === true && data.collections.length > 0) {
// release should not belong to any channel
return null;
}
if (typeof site.parameters?.extract === 'string' && !data.collections.some(collection => collection.shortName === site.parameters.extract)) {
// release should belong to specific channel
return null;
}
const { id: entryId, title, description } = data;
const hostname = site.parameters?.native ? site.url : site.network.url;
const url = `${hostname}/scene/${entryId}/`;
@@ -58,7 +68,9 @@ function scrapeLatestX(data, site) {
}
async function scrapeLatest(items, site) {
return Promise.all(items.map(async data => scrapeLatestX(data, site)));
const latestReleases = await Promise.all(items.map(async data => scrapeLatestX(data, site)));
return latestReleases.filter(Boolean);
}
function scrapeScene(data, url, _site) {
@@ -85,10 +97,10 @@ function scrapeScene(data, url, _site) {
};
}
const siteName = data.collections[0].name;
const siteName = data.collections[0]?.name || data.brand;
release.channel = siteName.replace(/\s+/g, '').toLowerCase();
release.url = url || `https://www.realitykings.com/scene/${entryId}/`;
release.url = url || `https://www.${data.brand}.com/scene/${entryId}/`;
return release;
}
@@ -104,6 +116,9 @@ function getUrl(site) {
return `${site.url}/scenes`;
}
if (site.parameters?.extract) {
return `${site.url}/scenes`;
}
if (site.parameters?.siteId) {
return `${site.network.url}/scenes?site=${site.parameters.siteId}`;
@@ -144,7 +159,7 @@ function scrapeProfile(data, html, releases = []) {
if (data.height) profile.height = inchesToCm(data.height);
if (data.weight) profile.weight = lbsToKg(data.weight);
if (data.images.card_main_rect && data.images.card_main_rect[0]) {
if (data.images.card_main_rect?.[0]) {
profile.avatar = data.images.card_main_rect[0].xl?.url
|| data.images.card_main_rect[0].lg?.url
|| data.images.card_main_rect[0].md?.url
@@ -169,7 +184,7 @@ async function fetchLatest(site, page = 1) {
const beforeDate = moment().add('1', 'day').format('YYYY-MM-DD');
const limit = 10;
const apiUrl = site.parameters?.native
const apiUrl = site.parameters?.native || site.parameters?.extract
? `https://site-api.project1service.com/v2/releases?dateReleased=<${beforeDate}&limit=${limit}&offset=${limit * (page - 1)}&orderBy=-dateReleased&type=scene`
: `https://site-api.project1service.com/v2/releases?collectionId=${siteId}&dateReleased=<${beforeDate}&limit=${limit}&offset=${limit * (page - 1)}&orderBy=-dateReleased&type=scene`;

View File

@@ -77,7 +77,7 @@ async function scrapeScene(html, url, site) {
release.actors = qa('.value a[href*=models], .value a[href*=performer], .value a[href*=teen-babes]', true);
if (release.actors.length === 0) {
const actorEl = qa('.stat').find(stat => /Featuring/.test(stat.textContent))
const actorEl = qa('.stat').find(stat => /Featuring/.test(stat.textContent));
const actorString = qtext(actorEl);
console.log(actorString);
@@ -147,7 +147,7 @@ function scrapeProfile(html) {
const bio = qa('.stat').reduce((acc, el) => {
const prop = q(el, '.label', true).slice(0, -1);
const key = slugify(prop, false, '_');
const key = slugify(prop, { delimiter: '_' });
const value = q(el, '.value', true);
return {