Press n or j to go to the next uncovered block, b, p or k for the previous block.
| 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 | 2x 6x 6x 6x 6x 6x 6x 6x 6x 6x 6x 3x 3x 1x 3x 3x 1x 2x 2x 1x 1x 1x 2x 7x 7x 7x 9x 9x 9x 9x 9x 9x 9x 9x 7x 7x 7x 7x 7x 7x 2x 2x 17x 55x 55x 27x 2x 2x 9x 9x 9x 8x 8x 9x | // SPDX-FileCopyrightText: 2024-2026 Hack23 AB
// SPDX-License-Identifier: Apache-2.0
/**
* @module Utils/NewsMetadata
* @description News metadata database management.
* Maintains a JSON database of article metadata that can be loaded
* by client-side JavaScript, removing the need to edit all index HTML
* files when adding new articles.
*/
import fs from 'fs';
import path from 'path';
import { NEWS_DIR } from '../constants/config.js';
import {
getNewsArticles,
parseArticleFilename,
formatSlug,
extractArticleMeta,
} from './file-utils.js';
import type {
ArticleMetadataEntry,
NewsMetadataDatabase,
IntelligenceIndex,
ArticleIndexEntry,
} from '../types/index.js';
import {
buildIndexFromEntries,
detectTrends,
saveIntelligenceIndex,
} from './intelligence-index.js';
import { detectCategory } from './article-category.js';
/** Default path for the metadata database file (news/articles-metadata.json) */
const METADATA_DB_PATH = path.join(NEWS_DIR, 'articles-metadata.json');
/**
* Build metadata database from news article files
*
* @param newsDir - News directory path
* @returns News metadata database object
*/
export function buildMetadataDatabase(newsDir: string = NEWS_DIR): NewsMetadataDatabase {
const articleFiles = getNewsArticles(newsDir);
const articles: ArticleMetadataEntry[] = [];
for (const filename of articleFiles) {
const parsed = parseArticleFilename(filename);
Eif (parsed) {
const filepath = path.join(newsDir, filename);
const meta = extractArticleMeta(filepath);
articles.push({
filename: parsed.filename,
date: parsed.date,
slug: parsed.slug,
lang: parsed.lang,
title: meta.title || formatSlug(parsed.slug),
description: meta.description,
});
}
}
// Sort by date (newest first)
articles.sort((a, b) => b.date.localeCompare(a.date));
return {
lastUpdated: new Date().toISOString(),
articles,
};
}
/**
 * Persist a metadata database as pretty-printed JSON.
 *
 * Creates the parent directory (recursively) when it does not exist yet,
 * then writes the serialized database to the target path.
 *
 * @param database - Metadata database to write
 * @param outputPath - Output file path (defaults to news/articles-metadata.json)
 */
export function writeMetadataDatabase(
  database: NewsMetadataDatabase,
  outputPath: string = METADATA_DB_PATH
): void {
  const parentDir = path.dirname(outputPath);
  if (!fs.existsSync(parentDir)) {
    fs.mkdirSync(parentDir, { recursive: true });
  }
  const serialized = JSON.stringify(database, null, 2);
  fs.writeFileSync(outputPath, serialized, 'utf-8');
}
/**
 * Read metadata database from JSON file.
 *
 * Reads optimistically and treats a missing file as "no database" rather
 * than probing with existsSync first, which closes the TOCTOU window
 * between the existence check and the read.
 *
 * @param inputPath - Input file path (defaults to news/articles-metadata.json)
 * @returns Metadata database, or null if the file doesn't exist
 * @throws SyntaxError if the file exists but contains invalid JSON
 * @throws Any non-ENOENT filesystem error (permissions, I/O, ...)
 */
export function readMetadataDatabase(
  inputPath: string = METADATA_DB_PATH
): NewsMetadataDatabase | null {
  let content: string;
  try {
    content = fs.readFileSync(inputPath, 'utf-8');
  } catch (error: unknown) {
    // A missing file is the expected "no database yet" case; everything
    // else (EACCES, EISDIR, ...) is a real error and must propagate.
    if ((error as { code?: string }).code === 'ENOENT') {
      return null;
    }
    throw error;
  }
  // NOTE(review): the parsed JSON is cast without runtime validation —
  // assumes the file was produced by writeMetadataDatabase; confirm callers
  // can tolerate a malformed hand-edited file or add schema validation.
  return JSON.parse(content) as NewsMetadataDatabase;
}
/**
 * Rebuild the metadata database from the news directory and persist it.
 *
 * Convenience wrapper: scan, serialize to disk, and hand the fresh
 * database back to the caller in one call.
 *
 * @param newsDir - News directory to scan
 * @param outputPath - Output path for metadata JSON
 * @returns Updated metadata database
 */
export function updateMetadataDatabase(
  newsDir: string = NEWS_DIR,
  outputPath: string = METADATA_DB_PATH
): NewsMetadataDatabase {
  const rebuilt = buildMetadataDatabase(newsDir);
  writeMetadataDatabase(rebuilt, outputPath);
  return rebuilt;
}
/** Default path for the intelligence index JSON file (news/intelligence-index.json) */
const INTELLIGENCE_INDEX_PATH = path.join(NEWS_DIR, 'intelligence-index.json');
/**
* Scan the news directory, rebuild the intelligence index from article metadata,
* and persist the updated index to disk.
*
* Starts from a fresh empty index on every call so that articles that have been
* deleted or renamed are automatically pruned — no stale entries can survive.
*
* Each article file is parsed to extract its date, type, language, and metadata.
* The resulting {@link ArticleIndexEntry} objects are accumulated into an
* {@link IntelligenceIndex} whose trend detections are refreshed on every call.
*
* @param newsDir - News directory to scan for article HTML files
* @param indexPath - Path to the intelligence index JSON file
* @returns The rebuilt {@link IntelligenceIndex}
*/
export function updateIntelligenceIndex(
newsDir: string = NEWS_DIR,
indexPath: string = INTELLIGENCE_INDEX_PATH
): IntelligenceIndex {
const articleFiles = getNewsArticles(newsDir);
// Collect all entries in a single pass, then build the index in O(n) time
const entries: ArticleIndexEntry[] = [];
for (const filename of articleFiles) {
const parsed = parseArticleFilename(filename);
Iif (!parsed) continue;
const articleId = `${parsed.date}-${parsed.slug}-${parsed.lang}`;
const filepath = path.join(newsDir, filename);
const meta = extractArticleMeta(filepath);
// Derive the ArticleCategory from the slug using the shared detection logic
const category = detectCategory(parsed.slug);
// Extract meaningful key topics from the slug and article metadata
const keyTopics = deriveKeyTopics(parsed.slug, parsed.lang, meta.title, meta.description);
entries.push({
id: articleId,
date: parsed.date,
type: category,
lang: parsed.lang,
keyTopics,
keyActors: [],
procedures: [],
crossReferences: [],
trendContributions: [],
});
}
// Sort deterministically (date desc, then id asc) so the persisted index
// does not churn between runs due to platform-dependent readdir ordering.
entries.sort((a, b) => b.date.localeCompare(a.date) || a.id.localeCompare(b.id));
let index = buildIndexFromEntries(entries);
// Refresh trend detections
const trends = detectTrends(index);
index = { ...index, trends, lastUpdated: new Date().toISOString() };
saveIntelligenceIndex(index, indexPath);
return index;
}
/**
 * Stop-words excluded from key topic extraction.
 * Common English function words that carry no policy-domain meaning.
 * Expand this set as needed for EU Parliament domain-specific noise.
 */
const STOP_WORDS = new Set(
  (
    'the a an and or but in on at to for of with by from ' +
    'is are was were be been has have had this that it its as not no'
  ).split(' ')
);
/**
 * Article-type taxonomy tokens that should be excluded from keyTopics
 * to prevent unrelated articles of the same type from appearing "related".
 */
const ARTICLE_TYPE_NOISE = new Set(
  (
    'week month year ahead review breaking committee ' +
    'motions motion propositions proposition proposal ' +
    'deep analysis reports report news'
  ).split(' ')
);
/**
 * Extract meaningful words from a text string into the given set,
 * excluding stop-words, article-type noise tokens, and tokens shorter
 * than `minLength`.
 *
 * Uses Unicode-aware character classes so accented characters
 * (e.g. "é", "ü") and non-Latin scripts (AR, HE, JA, KO, ZH) survive
 * cleaning.
 *
 * @param text - Input text to tokenise
 * @param tokens - Set to accumulate tokens into
 * @param minLength - Minimum cleaned token length (inclusive)
 */
function extractTokens(text: string, tokens: Set<string>, minLength: number): void {
  const words = text.toLowerCase().split(/[\s\-_]+/);
  for (const raw of words) {
    // Strip everything that is neither a letter nor a digit.
    const token = raw.replace(/[^\p{L}\p{N}]/gu, '');
    if (token.length < minLength) continue;
    if (STOP_WORDS.has(token) || ARTICLE_TYPE_NOISE.has(token)) continue;
    tokens.add(token);
  }
}
/** Minimum token length for slug-derived topics (shorter segments are too generic) */
const MIN_SLUG_TOKEN_LENGTH = 3;
/** Minimum token length for title/description-derived topics (stricter than slug tokens to reduce free-text noise) */
const MIN_METADATA_TOKEN_LENGTH = 4;
/**
 * Derive key topics from the article slug and extracted metadata.
 *
 * The slug is always tokenised — it is structured and language-neutral.
 * Title and description are only tokenised for English articles, because
 * {@link STOP_WORDS} is English-only and would let common function words
 * from other languages through, creating noisy cross-language trends.
 *
 * @param slug - Article slug (e.g. "week-ahead" or "breaking")
 * @param lang - ISO 639-1 language code (e.g. "en", "fr", "de")
 * @param title - Article title extracted from HTML (may be empty)
 * @param description - Article description extracted from HTML (may be empty)
 * @returns Deduplicated array of key topic strings
 */
function deriveKeyTopics(
  slug: string,
  lang: string,
  title?: string,
  description?: string
): string[] {
  const topics = new Set<string>();
  extractTokens(slug, topics, MIN_SLUG_TOKEN_LENGTH);
  // Free-text fields only contribute for English, where STOP_WORDS gives
  // meaningful filtering; other languages rely on slug tokens alone.
  if (lang === 'en') {
    for (const text of [title, description]) {
      if (text) extractTokens(text, topics, MIN_METADATA_TOKEN_LENGTH);
    }
  }
  return Array.from(topics);
}
|