traxxx/src/scrape-sites.js

'use strict';

const Promise = require('bluebird');
const moment = require('moment');
const argv = require('./argv');
const knex = require('./knex');
const { fetchIncludedSites } = require('./sites');
const scrapers = require('./scrapers/scrapers');
const scrapeRelease = require('./scrape-release');
const { storeReleases } = require('./releases');
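
/**
 * Derives the cutoff date for "recent" releases from the --after CLI
 * argument, e.g. '1 week' becomes one week ago in UTC.
 */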
function getAfterDate() {
	return moment
		.utc()
		.subtract(...argv.after.split(' '))
		.toDate();
}
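
/**
 * Returns a Set of entry IDs (as strings) to skip: IDs already stored in
 * the releases table, plus IDs accumulated during this scrape run.
 */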
async function findDuplicateReleaseIds(latestReleases, accReleases) {
	const duplicateReleases = await knex('releases')
		.whereIn('entry_id', latestReleases.map(({ entryId }) => entryId));

	// include accumulated releases as duplicates to prevent an infinite
	// loop when the next page contains the same releases as the previous
	return new Set(duplicateReleases
		.map(release => String(release.entry_id))
		.concat(accReleases.map(release => String(release.entryId))));
}
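
/**
 * Recursively fetches latest-release pages for a site until a page comes
 * back empty, the --pages limit is reached, or the oldest release on the
 * page predates the --after cutoff. Returns only releases not seen before,
 * unless --redownload is set.
 */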
async function scrapeUniqueReleases(scraper, site, afterDate = getAfterDate(), accReleases = [], page = 1) {
	const latestReleases = await scraper.fetchLatest(site, page);

	if (latestReleases.length === 0) {
		// no more results; return whatever earlier pages accumulated
		return accReleases;
	}

	const oldestReleaseOnPage = latestReleases.slice(-1)[0].date;
	const duplicateReleaseIds = argv.redownload ? new Set() : await findDuplicateReleaseIds(latestReleases, accReleases);

	const uniqueReleases = latestReleases
		.filter(release => !duplicateReleaseIds.has(String(release.entryId)) // release is not already in the database
			&& moment(release.date).isAfter(afterDate)); // release is newer than the specified date limit

	console.log(`\x1b[90m${site.name}: Scraped page ${page}, ${uniqueReleases.length} unique recent releases\x1b[0m`);

	if (
		uniqueReleases.length > 0
		&& (oldestReleaseOnPage || page < argv.pages)
		&& moment(oldestReleaseOnPage).isAfter(afterDate)
	) {
		// oldest release on page is newer than the specified limit, fetch next page
		return scrapeUniqueReleases(scraper, site, afterDate, accReleases.concat(uniqueReleases), page + 1);
	}

	return accReleases.concat(uniqueReleases);
}
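
/**
 * Fetches announced-but-unreleased scenes when the scraper supports it,
 * flagging each result with upcoming: true.
 */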
async function scrapeUpcomingReleases(scraper, site) {
	if (scraper.fetchUpcoming) {
		const upcomingReleases = await scraper.fetchUpcoming(site);

		return upcomingReleases.map(release => ({ ...release, upcoming: true }));
	}

	return [];
}
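
/**
 * Follows each release URL to scrape full scene details, two at a time.
 * Releases without a URL pass through unchanged; failed deep scrapes fall
 * back to the base release info with deep: false.
 */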
async function deepFetchReleases(baseReleases) {
	return Promise.map(baseReleases, async (release) => {
		if (release.url) {
			try {
				const fullRelease = await scrapeRelease(release.url, release, true);

				return {
					...release,
					...fullRelease,
					deep: true,
				};
			} catch (error) {
				// deep scrape failed, fall back to the base release info
				return {
					...release,
					deep: false,
				};
			}
		}

		return release;
	}, {
		concurrency: 2,
	});
}
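
/**
 * Scrapes recent and upcoming releases for a single site in parallel and,
 * with --deep, enriches them by following each release URL.
 */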
async function scrapeSiteReleases(scraper, site) {
	const [newReleases, upcomingReleases] = await Promise.all([
		scrapeUniqueReleases(scraper, site), // fetch basic release info from scene overview
		scrapeUpcomingReleases(scraper, site), // fetch basic release info from upcoming overview
	]);

	console.log(`${site.name}: Found ${newReleases.length} recent releases, ${upcomingReleases.length} upcoming releases`);

	const baseReleases = [...newReleases, ...upcomingReleases];

	if (argv.deep) {
		// follow URL for every release
		return deepFetchReleases(baseReleases);
	}

	return baseReleases;
}
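
/**
 * Entry point: scrapes all included networks, five networks at a time with
 * two sites per network concurrently, resolving each site's scraper by site
 * slug with the network slug as fallback, and stores the combined results
 * when --save is set.
 */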
async function scrapeReleases() {
	const networks = await fetchIncludedSites();

	const scrapedReleases = await Promise.map(networks, async network => Promise.map(network.sites, async (site) => {
		const scraper = scrapers.releases[site.slug] || scrapers.releases[site.network.slug];

		if (!scraper) {
			console.warn(`No scraper found for '${site.name}' (${site.slug})`);
			return [];
		}

		try {
			return await scrapeSiteReleases(scraper, site);
		} catch (error) {
			if (argv.debug) {
				console.error(`${site.name}: Failed to scrape releases`, error);
			}

			console.warn(`${site.name}: Failed to scrape releases`);
			return [];
		}
	}, {
		// 2 network sites at a time
		concurrency: 2,
	}), {
		// 5 networks at a time
		concurrency: 5,
	});

	if (argv.save) {
		await storeReleases(scrapedReleases.flat(2));
	}
}

module.exports = scrapeReleases;