Providing duplicate releases in preData. Using duplicates to filter scenes without a channel in the Hush scraper.

DebaucheryLibrarian 2020-08-23 02:43:10 +02:00
parent 3a5ea3dd9a
commit 278b74e78c
3 changed files with 34 additions and 22 deletions
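
The change in brief: release accumulation switches from a flat array to a { uniqueReleases, duplicateReleases } pair, that pair is threaded through preData, and the Hush scraper uses the duplicates to skip scenes it has already stored but could not attribute to a channel. A minimal sketch of the accumulator shape, assuming only what the diffs below show (the emptyReleases name appears in the sequential scrape):

// Accumulator shape threaded through the scrape pipeline; the name
// emptyReleases matches the initial value used in scrapeNetworkSequential.
const emptyReleases = { uniqueReleases: [], duplicateReleases: [] };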

View File

@@ -122,6 +122,7 @@ async function searchReleases(query, limit = 100) {
 }

 module.exports = {
+  curateRelease,
   fetchRelease,
   fetchReleases,
   searchReleases,
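
curateRelease is newly exported so the updates module can map raw database rows back into the camelCase release shape the scrapers work with. A hedged usage sketch; the row fields mirror the columns selected in the updates hunk further down (releases.* plus a row_to_json(entities) entity), but the values are invented:

const { curateRelease } = require('./releases');

// Hypothetical database row, as returned by the duplicate-release query below.
const row = {
  entry_id: '12345',
  entity_id: 7,
  entity: { id: 7, name: 'Hush', slug: 'hush' },
};

// Expected result: camelCase fields with the entity nested, e.g.
// { entryId: '12345', entity: { id: 7, ... }, ... }
const release = curateRelease(row);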

View File

@@ -107,8 +107,6 @@ function scrapeAllT1(scenes, site, accNetworkReleases) {
     // release.entryId = q('.img-div img', 'id')?.match(/set-target-(\d+)/)[1];
     release.entryId = deriveEntryId(release);

-    console.log(site.name, accNetworkReleases.map(accRelease => accRelease.entryId), release.entryId, accNetworkReleases?.map(accRelease => accRelease.entryId).includes(release.entryId));
-
     if (site.parameters?.accFilter && accNetworkReleases?.map(accRelease => accRelease.entryId).includes(release.entryId)) {
       // filter out releases that were already scraped from a categorized site, requires sequential site scraping
       return null;
@@ -360,7 +358,7 @@ function scrapeProfileTour({ el, qu }, site) {
   return profile;
 }

-async function fetchLatest(site, page = 1, include, { accNetworkReleases }) {
+async function fetchLatest(site, page = 1, include, { uniqueReleases, duplicateReleases }) {
   const url = (site.parameters?.latest && util.format(site.parameters.latest, page))
     || (site.parameters?.t1 && `${site.url}/t1/categories/movies_${page}_d.html`)
     || `${site.url}/categories/movies_${page}_d.html`;
@@ -368,10 +366,10 @@ async function fetchLatest(site, page = 1, include, { accNetworkReleases }) {
   const res = await geta(url, '.modelfeature, .item-video, .updateItem');

   if (!res.ok) return res.status;

-  if (site.parameters?.t1) return scrapeAllT1(res.items, site, accNetworkReleases);
-  if (site.parameters?.tour) return scrapeAllTour(res.items, site, accNetworkReleases);
+  if (site.parameters?.t1) return scrapeAllT1(res.items, site, [...uniqueReleases, ...duplicateReleases]);
+  if (site.parameters?.tour) return scrapeAllTour(res.items, site);

-  return scrapeAll(res.items, site, accNetworkReleases);
+  return scrapeAll(res.items, site, uniqueReleases);
 }

 async function fetchScene(url, site, baseRelease, include, beforeFetchLatest) {
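
T1 sites now filter against both the unique and the duplicate known releases, so a scene that already exists in the database without a channel attribution is still recognized and skipped; tour sites lose the unused third argument. The accFilter check itself re-maps accNetworkReleases on every scene; a precomputed Set does the same lookup in O(1). A minimal sketch, not the scraper's literal code, assuming releases expose entryId as in the hunks above:

// Set-based variant of the accFilter duplicate check; accNetworkReleases
// here is the combined unique + duplicate list passed to scrapeAllT1.
function filterKnownScenes(releases, site, accNetworkReleases = []) {
  const knownEntryIds = new Set(accNetworkReleases.map(accRelease => accRelease.entryId));

  return releases.filter(release => !(site.parameters?.accFilter && knownEntryIds.has(release.entryId)));
}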

View File

@@ -6,6 +6,7 @@ const moment = require('moment');
 const argv = require('./argv');
 const logger = require('./logger')(__filename);
 const knex = require('./knex');
+const { curateRelease } = require('./releases');
 const include = require('./utils/argv-include')(argv);
 const scrapers = require('./scrapers/scrapers');
 const { fetchIncludedEntities } = require('./entities');
@@ -16,16 +17,20 @@ async function filterUniqueReleases(latestReleases, accReleases) {
   const latestReleaseIdentifiers = latestReleases
     .map(release => [release.entity.id, release.entryId]);

-  const duplicateReleases = await knex('releases')
+  const duplicateReleaseEntries = await knex('releases')
+    .select(knex.raw('releases.*, row_to_json(entities) as entity'))
+    .leftJoin('entities', 'entities.id', 'releases.entity_id')
     .whereIn(['entity_id', 'entry_id'], latestReleaseIdentifiers);

+  const duplicateReleases = duplicateReleaseEntries.map(release => curateRelease(release));
+
   // add entry IDs of accumulated releases to prevent an infinite scrape loop
   // when one page contains the same release as the previous
   const duplicateReleasesSiteIdAndEntryIds = duplicateReleases
     .concat(accReleases)
     .reduce((acc, release) => {
-      const entityId = release.entity_id || release.entity.id;
-      const entryId = release.entry_id || release.entryId;
+      const entityId = release.entityId || release.entity.id;
+      const entryId = release.entryId || release.entryId;
       if (!acc[entityId]) acc[entityId] = {};
       acc[entityId][entryId] = true;
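
The duplicate lookup pairs entity_id and entry_id in a composite whereIn and joins each entities row as JSON, so curateRelease can rebuild the nested entity; the reducer then reads the camelCase fields the curated rows expose (note the new entryId line is tautological, as both of its disjuncts are release.entryId). The query in isolation, as a sketch with the same table and column names (knex supports tuple whereIn on PostgreSQL):

// Returns raw rows for releases that already exist for the given
// [entityId, entryId] pairs, each with its entity serialized as JSON.
async function findDuplicateReleaseEntries(knex, latestReleaseIdentifiers) {
  return knex('releases')
    .select(knex.raw('releases.*, row_to_json(entities) as entity'))
    .leftJoin('entities', 'entities.id', 'releases.entity_id')
    .whereIn(['entity_id', 'entry_id'], latestReleaseIdentifiers);
}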
@@ -163,11 +168,12 @@ async function scrapeChannelReleases(scraper, channelEntity, preData) {
     scrapeMovies(scraper, channelEntity, preData),
   ]);

-  console.log(latestReleases);
-
-  logger.info(`Fetching ${latestReleases.length} latest and ${upcomingReleases.length} upcoming updates for '${channelEntity.name}' (${channelEntity.parent?.name})`);
+  logger.info(`Fetching ${latestReleases.uniqueReleases.length} latest and ${upcomingReleases.uniqueReleases.length} upcoming updates for '${channelEntity.name}' (${channelEntity.parent?.name})`);

-  return [...latestReleases.uniqueReleases, ...upcomingReleases.uniqueReleases];
+  return {
+    uniqueReleases: [...latestReleases.uniqueReleases, ...upcomingReleases.uniqueReleases],
+    duplicateReleases: [...latestReleases.duplicateReleases, ...upcomingReleases.duplicateReleases],
+  };
 }

 async function scrapeChannel(channelEntity, accNetworkReleases) {
@@ -183,12 +189,10 @@ async function scrapeChannel(channelEntity, accNetworkReleases) {
   try {
     const beforeFetchLatest = await scraper.beforeFetchLatest?.(channelEntity);

-    const channelEntityReleases = await scrapeChannelReleases(scraper, channelEntity, {
-      accNetworkReleases,
+    return await scrapeChannelReleases(scraper, channelEntity, {
+      ...accNetworkReleases,
       beforeFetchLatest,
     });
-
-    return channelEntityReleases.map(release => ({ ...release, channelEntity }));
   } catch (error) {
     logger.error(`Failed to scrape releases from ${channelEntity.name} using ${scraper.slug}: ${error.message}`);
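
Spreading accNetworkReleases into the preData object is what lets a scraper destructure { uniqueReleases, duplicateReleases } straight from its options argument, as the Hush fetchLatest above does; the per-release channelEntity mapping is dropped along with the flat array. A sketch of the assumed contract only, since the intermediate plumbing is not part of this diff:

// Assumed shape: preData now carries the accumulated releases as
// top-level keys next to beforeFetchLatest.
async function fetchLatest(site, page, include, { uniqueReleases = [], duplicateReleases = [] } = {}) {
  // scrape the page, then drop anything whose entryId is already known
  const knownReleases = [...uniqueReleases, ...duplicateReleases];

  return knownReleases; // placeholder body for illustration
}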
@@ -197,22 +201,31 @@ async function scrapeChannel(channelEntity, accNetworkReleases) {
 }

 async function scrapeNetworkSequential(networkEntity) {
-  return Promise.reduce(
+  const releases = await Promise.reduce(
     networkEntity.children,
     async (chain, channelEntity) => {
       const accNetworkReleases = await chain;
-      const channelReleases = await scrapeChannel(channelEntity, accNetworkReleases);
+      const { uniqueReleases, duplicateReleases } = await scrapeChannel(channelEntity, accNetworkReleases);

-      return accNetworkReleases.concat(channelReleases);
+      return {
+        uniqueReleases: accNetworkReleases.uniqueReleases.concat(uniqueReleases),
+        duplicateReleases: accNetworkReleases.duplicateReleases.concat(duplicateReleases),
+      };
     },
-    Promise.resolve([]),
+    Promise.resolve(emptyReleases),
   );
+
+  return releases.uniqueReleases;
 }

 async function scrapeNetworkParallel(networkEntity) {
   return Promise.map(
     networkEntity.children,
-    async channelEntity => scrapeChannel(channelEntity, networkEntity),
+    async (channelEntity) => {
+      const { uniqueReleases } = await scrapeChannel(channelEntity, networkEntity);
+
+      return uniqueReleases;
+    },
     { concurrency: 3 },
   );
 }
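
Only the sequential strategy can feed earlier channels' results into later ones, which is why the Hush accFilter "requires sequential site scraping" (per the comment in the scraper); the parallel strategy now simply unwraps and returns each channel's unique releases. A condensed sketch of the difference, with scrapeChannel standing in for the real implementation:

const Promise = require('bluebird');

const emptyReleases = { uniqueReleases: [], duplicateReleases: [] };

// Sequential: each channel sees everything accumulated so far, enabling
// cross-channel duplicate filtering.
async function sequential(channels, scrapeChannel) {
  const releases = await Promise.reduce(channels, async (acc, channel) => {
    const { uniqueReleases, duplicateReleases } = await scrapeChannel(channel, acc);

    return {
      uniqueReleases: acc.uniqueReleases.concat(uniqueReleases),
      duplicateReleases: acc.duplicateReleases.concat(duplicateReleases),
    };
  }, emptyReleases);

  return releases.uniqueReleases;
}

// Parallel: channels cannot see each other's results, so duplicate
// bookkeeping is per channel only.
async function parallel(channels, scrapeChannel) {
  const results = await Promise.map(
    channels,
    async channel => (await scrapeChannel(channel, emptyReleases)).uniqueReleases,
    { concurrency: 3 },
  );

  return results.flat();
}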