All files / src/utils news-metadata.ts

98.63% Statements 72/73
88.63% Branches 39/44
100% Functions 9/9
100% Lines 68/68

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346                                                                    3x                 6x 6x   6x 6x 6x 6x 6x 6x                     6x   6x                                           8x 8x 1x   8x 8x 3x 3x     3x 3x 3x     1x           8x 8x 3x 3x 3x 1x           7x                       4x 1x     3x 3x                           1x 1x 1x       3x                                         7x   7x 7x 9x 9x   9x   9x 9x   9x   9x   9x                         7x   7x   7x 7x   7x 7x               3x                                                                           3x                                                                         17x 55x 55x         27x           3x   3x                                    
               9x 9x 9x 8x 8x   9x    
// SPDX-FileCopyrightText: 2024-2026 Hack23 AB
// SPDX-License-Identifier: Apache-2.0
 
/**
 * @module Utils/NewsMetadata
 * @description News metadata database management.
 * Maintains a JSON database of article metadata that can be loaded
 * by client-side JavaScript, removing the need to edit all index HTML
 * files when adding new articles.
 */
 
import fs from 'fs';
import path from 'path';
import { NEWS_DIR } from '../constants/config.js';
import {
  getNewsArticles,
  parseArticleFilename,
  formatSlug,
  extractArticleMeta,
} from './file-utils.js';
import type {
  ArticleMetadataEntry,
  NewsMetadataDatabase,
  IntelligenceIndex,
  ArticleIndexEntry,
} from '../types/index.js';
import {
  buildIndexFromEntries,
  detectTrends,
  saveIntelligenceIndex,
} from './intelligence-index.js';
import { detectCategory } from './article-category.js';
 
/** Default path for the metadata database file: `<NEWS_DIR>/articles-metadata.json` */
const METADATA_DB_PATH = path.join(NEWS_DIR, 'articles-metadata.json');
 
/**
 * Build metadata database from news article files
 *
 * @param newsDir - News directory path
 * @returns News metadata database object
 */
export function buildMetadataDatabase(newsDir: string = NEWS_DIR): NewsMetadataDatabase {
  const articleFiles = getNewsArticles(newsDir);
  const articles: ArticleMetadataEntry[] = [];
 
  for (const filename of articleFiles) {
    const parsed = parseArticleFilename(filename);
    Eif (parsed) {
      const filepath = path.join(newsDir, filename);
      const meta = extractArticleMeta(filepath);
      articles.push({
        filename: parsed.filename,
        date: parsed.date,
        slug: parsed.slug,
        lang: parsed.lang,
        title: meta.title || formatSlug(parsed.slug),
        description: meta.description,
      });
    }
  }
 
  articles.sort((a, b) => b.date.localeCompare(a.date));
 
  return {
    lastUpdated: new Date().toISOString(),
    articles,
  };
}
 
/**
 * Write metadata database to JSON file.
 *
 * Idempotent at the byte level: if the existing file has an identical
 * `articles` payload, the original `lastUpdated` timestamp is preserved
 * and the file is left untouched. This keeps `aws s3 sync` (size+mtime)
 * from re-uploading `news/articles-metadata.json` on every prebuild rerun
 * when no articles actually changed.
 *
 * @param database - Metadata database to write
 * @param outputPath - Output file path (defaults to news/articles-metadata.json)
 */
export function writeMetadataDatabase(
  database: NewsMetadataDatabase,
  outputPath: string = METADATA_DB_PATH
): void {
  const dir = path.dirname(outputPath);
  if (!fs.existsSync(dir)) {
    fs.mkdirSync(dir, { recursive: true });
  }

  // Read the existing file at most once and reuse the raw text for both
  // the timestamp-preservation check and the byte-identity short-circuit.
  // (The previous implementation stat'ed the path three times and read it
  // twice per call.)
  let onDisk: string | null = null;
  if (fs.existsSync(outputPath)) {
    try {
      onDisk = fs.readFileSync(outputPath, 'utf-8');
    } catch {
      // Existing file unreadable — treat as absent and overwrite below.
      onDisk = null;
    }
  }

  let payload: NewsMetadataDatabase = database;
  if (onDisk !== null) {
    try {
      const existing = JSON.parse(onDisk) as NewsMetadataDatabase | null;
      const existingArticlesJson = JSON.stringify(existing?.articles ?? null);
      const newArticlesJson = JSON.stringify(database.articles ?? null);
      if (existingArticlesJson === newArticlesJson && typeof existing?.lastUpdated === 'string') {
        // Same articles content — preserve the prior timestamp so the
        // serialised JSON stays byte-identical across reruns.
        payload = { ...database, lastUpdated: existing.lastUpdated };
      }
    } catch {
      // Existing file malformed — fall through and overwrite.
    }
  }

  const desired = JSON.stringify(payload, null, 2);
  if (onDisk === desired) {
    return; // byte-identical; preserve mtime
  }
  fs.writeFileSync(outputPath, desired, 'utf-8');
}
 
/**
 * Read metadata database from JSON file.
 *
 * @param inputPath - Input file path (defaults to news/articles-metadata.json)
 * @returns Metadata database, or null if the file doesn't exist
 */
export function readMetadataDatabase(
  inputPath: string = METADATA_DB_PATH
): NewsMetadataDatabase | null {
  if (fs.existsSync(inputPath)) {
    const raw = fs.readFileSync(inputPath, 'utf-8');
    return JSON.parse(raw) as NewsMetadataDatabase;
  }
  return null;
}
 
/**
 * Update metadata database by rescanning the news directory.
 *
 * Convenience wrapper: rebuilds the database from disk, persists it,
 * and hands the fresh database back to the caller.
 *
 * @param newsDir - News directory to scan
 * @param outputPath - Output path for metadata JSON
 * @returns Updated metadata database
 */
export function updateMetadataDatabase(
  newsDir: string = NEWS_DIR,
  outputPath: string = METADATA_DB_PATH
): NewsMetadataDatabase {
  const rebuilt = buildMetadataDatabase(newsDir);
  writeMetadataDatabase(rebuilt, outputPath);
  return rebuilt;
}
 
/** Default path for the intelligence index JSON file: `<NEWS_DIR>/intelligence-index.json` */
const INTELLIGENCE_INDEX_PATH = path.join(NEWS_DIR, 'intelligence-index.json');
 
/**
 * Scan the news directory, rebuild the intelligence index from article metadata,
 * and persist the updated index to disk.
 *
 * Starts from a fresh empty index on every call so that articles that have been
 * deleted or renamed are automatically pruned — no stale entries can survive.
 *
 * Each article file is parsed to extract its date, type, language, and metadata.
 * The resulting {@link ArticleIndexEntry} objects are accumulated into an
 * {@link IntelligenceIndex} whose trend detections are refreshed on every call.
 *
 * @param newsDir - News directory to scan for article HTML files
 * @param indexPath - Path to the intelligence index JSON file
 * @returns The rebuilt {@link IntelligenceIndex}
 */
export function updateIntelligenceIndex(
  newsDir: string = NEWS_DIR,
  indexPath: string = INTELLIGENCE_INDEX_PATH
): IntelligenceIndex {
  const articleFiles = getNewsArticles(newsDir);
 
  const entries: ArticleIndexEntry[] = [];
  for (const filename of articleFiles) {
    const parsed = parseArticleFilename(filename);
    Iif (!parsed) continue;
 
    const articleId = `${parsed.date}-${parsed.slug}-${parsed.lang}`;
 
    const filepath = path.join(newsDir, filename);
    const meta = extractArticleMeta(filepath);
 
    const category = detectCategory(parsed.slug);
 
    const keyTopics = deriveKeyTopics(parsed.slug, parsed.lang, meta.title, meta.description);
 
    entries.push({
      id: articleId,
      date: parsed.date,
      type: category,
      lang: parsed.lang,
      keyTopics,
      keyActors: [],
      procedures: [],
      crossReferences: [],
      trendContributions: [],
    });
  }
 
  entries.sort((a, b) => b.date.localeCompare(a.date) || a.id.localeCompare(b.id));
 
  let index = buildIndexFromEntries(entries);
 
  const trends = detectTrends(index);
  index = { ...index, trends, lastUpdated: new Date().toISOString() };
 
  saveIntelligenceIndex(index, indexPath);
  return index;
}
 
/**
 * Stop-words excluded from key topic extraction.
 * Common English function words that carry no policy-domain meaning.
 * Expand this set as needed for EU Parliament domain-specific noise.
 */
const STOP_WORDS = new Set([
  // articles / conjunctions
  'the', 'a', 'an', 'and', 'or', 'but',
  // prepositions
  'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by', 'from',
  // copulas / auxiliaries
  'is', 'are', 'was', 'were', 'be', 'been', 'has', 'have', 'had',
  // demonstratives / misc
  'this', 'that', 'it', 'its', 'as', 'not', 'no',
]);

/**
 * Article-type taxonomy tokens that should be excluded from keyTopics
 * to prevent unrelated articles of the same type from appearing "related".
 */
const ARTICLE_TYPE_NOISE = new Set([
  // cadence markers
  'week', 'month', 'quarter', 'year', 'term', 'cycle',
  // series / format markers
  'outlook', 'election', 'ahead', 'review', 'breaking',
  'committee', 'motions', 'motion',
  'propositions', 'proposition', 'proposal',
  'deep', 'analysis', 'reports', 'report', 'news',
]);

/**
 * Extract meaningful words from a text string, excluding stop-words and
 * tokens shorter than `minLength`.
 *
 * Uses Unicode-aware character classes to preserve accented characters
 * (e.g. "é", "ü") and non-Latin scripts (AR, HE, JA, KO, ZH).
 *
 * @param text - Input text to tokenise
 * @param tokens - Set to accumulate tokens into
 * @param minLength - Minimum cleaned token length (inclusive)
 */
function extractTokens(text: string, tokens: Set<string>, minLength: number): void {
  const words = text.toLowerCase().split(/[\s\-_]+/);
  for (const raw of words) {
    // Strip everything that is not a Unicode letter or digit.
    const token = raw.replace(/[^\p{L}\p{N}]/gu, '');
    if (token.length < minLength) continue;
    if (STOP_WORDS.has(token)) continue;
    if (ARTICLE_TYPE_NOISE.has(token)) continue;
    tokens.add(token);
  }
}

/** Minimum token length for slug-derived topics (shorter segments are too generic) */
const MIN_SLUG_TOKEN_LENGTH = 3;
/** Minimum token length for title/description-derived topics (stricter to reduce noise) */
const MIN_METADATA_TOKEN_LENGTH = 4;
 
/**
 * Derive key topics from the article slug and extracted metadata.
 *
 * The slug is always tokenised (it is structured and language-neutral).
 * Title and description tokens are added only for English articles:
 * {@link STOP_WORDS} is English-only, so tokenising other languages would
 * let common foreign function words through and create noisy
 * cross-language trends.
 *
 * @param slug - Article slug (e.g. "week-ahead" or "breaking")
 * @param lang - ISO 639-1 language code (e.g. "en", "fr", "de")
 * @param title - Article title extracted from HTML (may be empty)
 * @param description - Article description extracted from HTML (may be empty)
 * @returns Deduplicated array of key topic strings
 */
function deriveKeyTopics(
  slug: string,
  lang: string,
  title?: string,
  description?: string
): string[] {
  const topics = new Set<string>();
  extractTokens(slug, topics, MIN_SLUG_TOKEN_LENGTH);
  if (lang === 'en') {
    for (const text of [title, description]) {
      if (text) extractTokens(text, topics, MIN_METADATA_TOKEN_LENGTH);
    }
  }
  return Array.from(topics);
}