Compare commits

...

2 Commits

Author SHA1 Message Date
DebaucheryLibrarian 176573733b 1.246.20 2026-01-24 01:30:19 +01:00
DebaucheryLibrarian e7b9147995 Added channel map to Naughty America. 2026-01-24 01:30:17 +01:00
4 changed files with 46 additions and 5 deletions

package-lock.json (generated)

@@ -1,12 +1,12 @@
 {
   "name": "traxxx",
-  "version": "1.246.19",
+  "version": "1.246.20",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "traxxx",
-      "version": "1.246.19",
+      "version": "1.246.20",
       "license": "ISC",
       "dependencies": {
         "@aws-sdk/client-s3": "^3.458.0",

package.json

@@ -1,6 +1,6 @@
 {
   "name": "traxxx",
-  "version": "1.246.19",
+  "version": "1.246.20",
   "description": "All the latest porn releases in one place",
   "main": "src/app.js",
   "scripts": {

src/scrapers/naughtyamerica.js

@@ -5,6 +5,16 @@ const unprint = require('unprint');
 const slugify = require('../utils/slugify');
 const { stripQuery } = require('../utils/url');
 
+const channelMap = {
+  spa: 'thespa',
+  gym: 'thegym',
+  dormroom: 'thedormroom',
+  dressingroom: 'thedressingroom',
+  psepornstarexperience: 'pornstarexperience',
+  office: 'theoffice',
+  ta: 'tanda',
+};
+
 function scrapeLatest(scenes, channel) {
   return scenes.map(({ query }) => {
     const release = {};
@@ -40,7 +50,9 @@ function scrapeLatest(scenes, channel) {
       query.exists('//a[contains(@class, "label-hd") and contains(text(), "HD")]') && 720,
     ].filter(Boolean);
 
-    release.channel = slugify(query.content('.site-title'), '');
+    const channelSlug = slugify(query.content('.site-title'), '');
+
+    release.channel = channelMap[channelSlug] || channelSlug;
 
     // NA affiliate prefers to push more traffic to Naughty America VR, all scenes labeled VR seem to be available on NAVR
     release.url = release.tags?.some((tag) => tag.toLowerCase() === 'vr')
@@ -95,6 +107,7 @@ function scrapeScene({ query }, { url }) {
     query.img('.play-trailer img[data-srcset*="scenes/"]', { attribute: 'data-srcset' }),
     query.img('.scenepage-video .playcard'),
     query.img('.scene-page .start-card'),
+    query.poster('dl8-video[poster]'),
   ].filter(Boolean);
 
   release.photos = query.els('.contain-scene-images.desktop-only .scene-image').map((imgEl) => [
@@ -114,7 +127,9 @@ function scrapeScene({ query }, { url }) {
     ];
   }
 
-  release.channel = slugify(query.content('.site-title'), '');
+  const channelSlug = slugify(query.content('.site-title'), '');
+
+  release.channel = channelMap[channelSlug] || channelSlug;
 
   release.tags = query.contents('.categories a, .category a');


@@ -0,0 +1,26 @@
+'use strict';
+
+const knex = require('../knex');
+const slugify = require('../utils/slugify');
+
+async function init() {
+  const channels = await knex('entities')
+    .select('entities.*')
+    .leftJoin('entities as parents', 'parents.id', 'entities.parent_id')
+    .where('parents.slug', 'in', ['naughtyamerica', 'naughtyamericavr']);
+
+  const mapped = Object.fromEntries(channels.map((channel) => {
+    const path = new URL(channel.url).pathname.match(/\/site\/(.*)/)?.[1];
+    const urlSlug = slugify(path, '');
+
+    if (!urlSlug || urlSlug === channel.slug) {
+      return null;
+    }
+
+    return [urlSlug, channel.slug];
+  }).filter(Boolean));
+
+  console.log(mapped);
+}
+
+init();
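
The new script is a one-off helper rather than part of the application: it selects every entity whose parent is naughtyamerica or naughtyamericavr, derives the slug used in each channel's /site/ URL path, and prints only the entries where that URL slug differs from the canonical channel slug, so the output can be pasted into the scraper's channelMap. A minimal, self-contained sketch of the lookup the scraper then performs with that map (resolveChannelSlug is an illustrative name, not a function in the codebase; the map entries are taken from this diff):

'use strict';

// Illustrative sketch only: slugs without an entry fall through unchanged,
// so channels whose URL slug already matches their canonical slug need no mapping.
const channelMap = {
  spa: 'thespa',
  gym: 'thegym',
  office: 'theoffice',
};

function resolveChannelSlug(slug) {
  return channelMap[slug] || slug;
}

console.log(resolveChannelSlug('spa')); // -> 'thespa'
console.log(resolveChannelSlug('someotherchannel')); // -> 'someotherchannel' (unmapped, passes through)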