diff --git a/.github/workflows/featured-software.yml b/.github/workflows/featured-software.yml
new file mode 100644
index 000000000..266ab6b26
--- /dev/null
+++ b/.github/workflows/featured-software.yml
@@ -0,0 +1,127 @@
+name: Generate Featured Content
+
+on:
+ push:
+ workflow_dispatch:
+ inputs:
+ gemini_api_key:
+ description: 'Google Gemini API Key for AI-powered summary generation'
+ required: false
+ type: string
+ schedule:
+ # Run every Monday at 00:00 UTC
+ - cron: '0 0 * * 1'
+
+env:
+ GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY || inputs.gemini_api_key }}
+
+permissions:
+ contents: write
+
+jobs:
+ generate-featured-content:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout current repository
+ uses: actions/checkout@v4
+
+ - name: Checkout armbian/configng repository
+ uses: actions/checkout@v4
+ with:
+ repository: armbian/configng
+ path: configng
+
+ - name: Copy config.software.json
+ run: |
+ if [ -f "configng/tools/json/config.software.json" ]; then
+ echo "Copying config.software.json..."
+ cp configng/tools/json/config.software.json ./config.software.json
+ echo "File copied successfully"
+ else
+ echo "Warning: config.software.json not found in configng repository"
+ fi
+
+ - name: Install Python dependencies
+ run: |
+ pip3 install --upgrade google-genai pyyaml
+
+ - name: Generate featured content
+ env:
+ GHOST_API_KEY: ${{ secrets.GHOST_API_KEY }}
+ GHOST_API_URL: ${{ secrets.GHOST_API_URL }}
+ GITHUB_TOKEN: ${{ secrets.SPONSORS }}
+ run: |
+ set -x # Debug mode
+
+ cd featured-content/scripts
+
+ # Create temporary directory for intermediate results
+ mkdir -p /tmp/featured-steps
+
+ # 1. Fetch GitHub releases
+ echo "=== Step 1: Fetching GitHub releases ==="
+ python3 github_releases.py --limit 5 > /tmp/featured-steps/01-releases.json || echo "[]" > /tmp/featured-steps/01-releases.json
+
+ # 2. Fetch Ghost news
+ echo "=== Step 2: Fetching Ghost news ==="
+ if [ -n "$GHOST_API_URL" ]; then
+ python3 ghost_news.py --url "$GHOST_API_URL" --limit 5 > /tmp/featured-steps/02-news.json || echo "[]" > /tmp/featured-steps/02-news.json
+ else
+ echo "[]" > /tmp/featured-steps/02-news.json
+ fi
+
+ # 3. Fetch GitHub sponsors
+ echo "=== Step 3: Fetching GitHub sponsors ==="
+ python3 sponsors.py --limit 10 > /tmp/featured-steps/03-sponsors.json || echo "[]" > /tmp/featured-steps/03-sponsors.json
+
+ # 4. Process software list (optional)
+ echo "=== Step 4: Processing software list ==="
+ if [ -f "../../config.software.json" ]; then
+ python3 software_list.py --config ../../config.software.json --limit 10 > /tmp/featured-steps/04-software.json || echo "[]" > /tmp/featured-steps/04-software.json
+ else
+ echo "[]" > /tmp/featured-steps/04-software.json
+ fi
+
+ # 5. Load manual entries
+ echo "=== Step 5: Loading manual entries ==="
+ python3 manual_list.py --file ../manual_featured.yml > /tmp/featured-steps/05-manual.json || echo "[]" > /tmp/featured-steps/05-manual.json
+
+ # 6. Select N entries from each category (N=2 means 2 per type)
+ echo "=== Step 6: Selecting featured entries ==="
+ python3 select_featured.py 2 /tmp/featured-steps/*.json > ../../featured-content.json
+
+ # Display counts
+ echo "=== Summary ==="
+ for step in /tmp/featured-steps/*.json; do
+ name=$(basename "$step" .json)
+            count=$(jq '. | length' "$step" 2>/dev/null || echo 0)
+ echo " $name: $count"
+ done
+
+ - name: Update generation timestamp
+ run: |
+ # Replace null timestamp with actual ISO timestamp
+ TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
+ sed -i "s/\"generated_at\": null/\"generated_at\": \"$TIMESTAMP\"/" featured-content.json || true
+
+ - name: Display generated JSON
+ run: |
+ echo "=== Generated Featured Content JSON ==="
+ cat featured-content.json
+
+ - name: Commit and push changes
+ run: |
+ git config --local user.email "github-actions[bot]@users.noreply.github.com"
+ git config --local user.name "github-actions[bot]"
+
+ # Add the generated file
+ git add featured-content.json
+
+ # Check if there are changes to commit
+ if git diff --staged --quiet; then
+ echo "No changes to commit"
+ else
+ git commit -m "Generate featured content selection [skip ci]"
+ git push
+ fi
diff --git a/featured-content.json b/featured-content.json
new file mode 100644
index 000000000..91e185428
--- /dev/null
+++ b/featured-content.json
@@ -0,0 +1,201 @@
+{
+ "entries": [
+ {
+ "type": "service",
+ "id": "armbian-sponsors",
+ "name": "Support Armbian Development",
+ "url": "https://github.com/sponsors/armbian",
+ "title": "Become a Sponsor",
+ "summary": "Support continued Armbian development through sponsorship. Contributors get access to beta builds, priority support, and direct influence on project direction.",
+ "author": {
+ "name": "Armbian Project",
+ "handle": "@armbian"
+ },
+ "tags": [
+ "support",
+ "sponsorship",
+ "donate",
+ "community"
+ ],
+ "motd": {
+ "line": "Support Armbian: Become a sponsor",
+ "hint": "https://github.com/sponsors/armbian"
+ },
+ "manual": true
+ },
+ {
+ "type": "sponsor",
+ "id": "sponsor-4",
+ "name": "Sponsor: ahoneybun",
+ "url": "https://github.com/ahoneybun",
+ "title": "Aaron Honeycutt - Silver Sponsor",
+ "summary": "Sponsor of Armbian build",
+ "image": "https://avatars.githubusercontent.com/u/4884946?u=2fdb7e87d7a96c50f031762f20a946b48f73bcb2&v=4",
+ "author": {
+ "name": "Aaron Honeycutt",
+ "handle": "@ahoneybun"
+ },
+ "tags": [
+ "sponsor",
+ "silver",
+ "community"
+ ],
+ "motd": {
+ "line": "Silver level supporter"
+ }
+ },
+ {
+ "type": "news",
+ "id": "ghost-696407e693dc320001185cd0",
+ "name": "Armbian",
+ "url": "http://blog.armbian.com/forget-third-party-utilities-meet-armbian-imager/",
+ "title": "Forget third-party utilities: meet Armbian Imager",
+ "summary": "Armbian Imager eliminates the guesswork from flashing SBC images. Real-time board detection, persistent caching, and built-in safety make installation fast, simple, and risk-free",
+ "published_at": "2026-01-17T14:43:51.000+01:00",
+ "image": "http://blog.armbian.com/content/images/2026/01/introducing-armbian-imager.png",
+ "author": {
+ "name": "Daniele Briguglio"
+ },
+ "tags": [
+ "Armbian",
+ "SBC",
+ "imager",
+ "linux"
+ ]
+ },
+ {
+ "type": "software",
+ "id": "ART001",
+ "name": "CDN router",
+ "url": "",
+ "title": "CDN router",
+ "summary": "Router for repository mirror automation",
+ "author": {
+ "name": "efectn",
+ "handle": "efectn"
+ },
+ "tags": [
+ "armbian"
+ ],
+ "motd": {
+ "line": "Featured: CDN router",
+ "hint": "armbian-config \u2192 Software"
+ }
+ },
+ {
+ "type": "contribution",
+ "id": "release-v26.2.0-trunk.146",
+ "name": "Weekly digest",
+ "url": "https://github.com/armbian/build/releases/tag/v26.2.0-trunk.146",
+ "title": "Armbian Weekly digest Released",
+ "summary": "This week in Armbian development saw a series of refinements and maintenance updates across the build and configuration repositories. Contributors focused on workflow standardization, cosmetic improve...",
+ "published_at": "2025-12-29T06:49:33Z",
+ "highlights": [],
+ "tags": [
+ "release",
+ "update",
+ "armbian"
+ ]
+ },
+ {
+ "type": "contribution",
+ "id": "release-v26.2.0-trunk.278",
+ "name": "Weekly digest",
+ "url": "https://github.com/armbian/build/releases/tag/v26.2.0-trunk.278",
+ "title": "Armbian Weekly digest Released",
+ "summary": "This week\u2019s Armbian development saw a major cleanup of legacy toolchain code, alongside numerous board-specific improvements and kernel updates. Support was added for the Nuvoton MA35D1 NuMaker IoT bo...",
+ "published_at": "2026-01-19T06:52:55Z",
+ "highlights": [],
+ "tags": [
+ "release",
+ "update",
+ "armbian"
+ ]
+ },
+ {
+ "type": "news",
+ "id": "ghost-69782e9693dc320001185e07",
+ "name": "Armbian News",
+ "url": "http://blog.armbian.com/github-highlights-12/",
+ "title": "Github Highlights",
+ "summary": "This week\u2019s Armbian development saw a wide range of updates focused on automation, hardware support, and workflow improvements. Key highlights include the introduction of automatic YAML target generation, expanded support for Hetzner ARM64 runners, and enhancements to the redirector update workflow with cache mirror support. Several board-specific fixes and feature additions were made, including improved power cycle handling for meson-sm1 devices and new binary files for RK35 series components. ",
+ "published_at": "2026-01-27T04:20:04.000+01:00",
+ "image": "http://blog.armbian.com/content/images/2026/01/githubhighlights-2-2.webp",
+ "author": {
+ "name": "Michael Robinson"
+ },
+ "tags": []
+ },
+ {
+ "type": "sponsor",
+ "id": "sponsor-9",
+ "name": "Sponsor: Nardol",
+ "url": "https://github.com/Nardol",
+ "title": "Patrick ZAJDA - Silver Sponsor",
+ "summary": "Sponsor of Armbian build",
+ "image": "https://avatars.githubusercontent.com/u/2864821?v=4",
+ "author": {
+ "name": "Patrick ZAJDA",
+ "handle": "@Nardol"
+ },
+ "tags": [
+ "sponsor",
+ "silver",
+ "community"
+ ],
+ "motd": {
+ "line": "Silver level supporter"
+ }
+ },
+ {
+ "type": "service",
+ "id": "armbian-actions",
+ "name": "Armbian Actions",
+ "url": "https://actions.armbian.com/",
+ "title": "Armbian CI/CD Infrastructure",
+ "summary": "Automated build and testing infrastructure for Armbian firmware. Check build status, download latest images, and track releases across all supported boards.",
+ "author": {
+ "name": "Armbian",
+ "handle": "@armbian"
+ },
+ "tags": [
+ "infrastructure",
+ "ci-cd",
+ "build",
+ "automation"
+ ],
+ "motd": {
+ "line": "Armbian Actions: Track builds and releases",
+ "hint": "https://actions.armbian.com"
+ },
+ "manual": true
+ },
+ {
+ "type": "software",
+ "id": "EMB001",
+ "name": "Emby",
+ "url": "",
+ "title": "Emby",
+ "summary": "Emby organizes video, music, live TV, and photos",
+ "author": {
+ "name": "schwar3kat",
+ "handle": "schwar3kat"
+ },
+ "tags": [
+ "media"
+ ],
+ "motd": {
+ "line": "Featured: Emby",
+ "hint": "armbian-config \u2192 Software"
+ }
+ }
+ ],
+ "count": 10,
+ "sources": {
+ "contribution": 5,
+ "news": 5,
+ "sponsor": 10,
+ "software": 10,
+ "service": 6
+ }
+}
diff --git a/featured-content/manual_featured.yml b/featured-content/manual_featured.yml
new file mode 100644
index 000000000..9a325c381
--- /dev/null
+++ b/featured-content/manual_featured.yml
@@ -0,0 +1,94 @@
+armbian_featured_software:
+ cadence: weekly
+ source_url: https://docs.armbian.com/
+
+ entries:
+ - id: armbian-actions
+ week: 1
+ type: service
+ name: "Armbian Actions"
+ url: https://actions.armbian.com/
+ title: "Armbian CI/CD Infrastructure"
+ summary: "Automated build and testing infrastructure for Armbian firmware. Check build status, download latest images, and track releases across all supported boards."
+ author:
+ name: "Armbian"
+ handle: "@armbian"
+ tags: [infrastructure, ci-cd, build, automation]
+ motd:
+ line: "Armbian Actions: Track builds and releases"
+ hint: "https://actions.armbian.com"
+
+ - id: armbian-mirrors
+ week: 2
+ type: service
+ name: "Armbian Mirror Network"
+ url: https://docs.armbian.com/Mirrors/
+ title: "Global Mirror Infrastructure"
+ summary: "Access Armbian repositories from mirrors worldwide for faster downloads and reduced latency. Learn how to host your own mirror or find the nearest one to you."
+ author:
+ name: "Armbian"
+ handle: "@armbian"
+ tags: [infrastructure, network, mirrors, download]
+ motd:
+ line: "Armbian Mirrors: Fast downloads worldwide"
+ hint: "https://docs.armbian.com/Mirrors/"
+
+ - id: armbian-documentation
+ week: 3
+ type: service
+ name: "Armbian Documentation"
+ url: https://docs.armbian.com/
+ title: "Comprehensive User Guides"
+ summary: "Complete documentation covering installation, configuration, hardware support, troubleshooting, and advanced topics for all Armbian users."
+ author:
+ name: "Armbian"
+ handle: "@armbian"
+ tags: [documentation, guides, support, learning]
+ motd:
+ line: "Armbian Docs: Your complete guide"
+ hint: "https://docs.armbian.com"
+
+ - id: armbian-forum
+ week: 4
+ type: service
+ name: "Armbian Community Forum"
+ url: https://forum.armbian.com/
+ title: "Community Support & Discussion"
+ summary: "Join thousands of users discussing Armbian on SBCs. Get help, share projects, and stay updated with the latest developments."
+ author:
+ name: "Armbian Community"
+ handle: "@armbian"
+ tags: [community, support, forum, discussion]
+ motd:
+ line: "Armbian Forum: Community support"
+ hint: "https://forum.armbian.com"
+
+ - id: armbian-github
+ week: 5
+ type: service
+ name: "Armbian on GitHub"
+ url: https://github.com/armbian/
+ title: "Open Source Development"
+ summary: "Explore Armbian's repositories including build framework, documentation, tools, and board configurations. Contribute and star our projects."
+ author:
+ name: "Armbian"
+ handle: "@armbian"
+ tags: [development, github, open-source, contribute]
+ motd:
+ line: "Armbian on GitHub: Open development"
+ hint: "https://github.com/armbian/"
+
+ - id: armbian-sponsors
+ week: 6
+ type: service
+ name: "Support Armbian Development"
+ url: https://github.com/sponsors/armbian
+ title: "Become a Sponsor"
+ summary: "Support continued Armbian development through sponsorship. Contributors get access to beta builds, priority support, and direct influence on project direction."
+ author:
+ name: "Armbian Project"
+ handle: "@armbian"
+ tags: [support, sponsorship, donate, community]
+ motd:
+ line: "Support Armbian: Become a sponsor"
+ hint: "https://github.com/sponsors/armbian"
diff --git a/featured-content/scripts/.gitignore b/featured-content/scripts/.gitignore
new file mode 100644
index 000000000..c771f8264
--- /dev/null
+++ b/featured-content/scripts/.gitignore
@@ -0,0 +1,2 @@
+__pycache__/
+*.pyc
diff --git a/featured-content/scripts/ai_helper.py b/featured-content/scripts/ai_helper.py
new file mode 100755
index 000000000..ca8c025f9
--- /dev/null
+++ b/featured-content/scripts/ai_helper.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+"""
+AI Helper Module - Provides AI-powered content rewriting functionality.
+
+This module can be used standalone to rewrite content, or imported by other modules.
+"""
+import json
+import os
+import sys
+
+
+def rewrite_summary_with_ai(title, summary, entry_name, entry_type="software"):
+ """
+ Use AI to rewrite summary as engaging description.
+
+ Args:
+ title: The original title
+ summary: The original summary to rewrite
+ entry_name: Name of the software/item
+ entry_type: Type of entry ("software" or "contribution")
+
+ Returns:
+ Rewritten summary string, or original if AI fails
+ """
+ api_key = os.environ.get('GEMINI_API_KEY')
+
+ if not api_key:
+        print("  No API key found, keeping original summary", file=sys.stderr)
+ return summary
+
+ try:
+ from google import genai
+ from google.genai import types
+
+ client = genai.Client(api_key=api_key)
+ model_name = 'gemini-2.5-flash'
+
+ # Create context-specific prompt
+ if entry_type == "contribution":
+ context = f"""Convert the title and summary into an engaging description.
+
+Title: {title}
+Summary: {summary[:200]}
+
+Requirements:
+- 15 to 80 characters
+- Return ONLY the description text"""
+ else:
+ context = f"""Convert the title and summary into an engaging description.
+
+Software: {entry_name}
+Title: {title}
+Summary: {summary}
+
+Requirements:
+- 15 to 80 characters
+- Make it engaging and informative"""
+
+ response = client.models.generate_content(
+ model=model_name,
+ contents=context,
+ config=types.GenerateContentConfig(
+ temperature=0.9,
+ max_output_tokens=100,
+ )
+ )
+
+ # Handle empty or blocked responses
+ if response.text is None:
+            print("  AI returned empty response, using original summary", file=sys.stderr)
+ return summary
+
+ new_summary = response.text.strip().strip('"\'')
+ # Remove any markdown formatting
+ import re
+ new_summary = re.sub(r'^[\*\_\-]+|[\*\_\-]+$', '', new_summary).strip()
+
+ # Ensure it's under 80 chars and not empty
+ if len(new_summary) > 80:
+ new_summary = new_summary[:77] + "..."
+
+ if not new_summary or len(new_summary) < 15:
+            print("  AI returned too short result, using original summary", file=sys.stderr)
+ return summary
+
+ print(f" AI summary rewrite: '{summary[:30]}...' -> '{new_summary}' ({len(new_summary)} chars)")
+ return new_summary
+
+ except ImportError as e:
+ print(f" AI rewrite failed: google-genai not installed ({e}), using original summary", file=sys.stderr)
+ return summary
+ except Exception as e:
+ print(f" AI rewrite failed: {e}, using original summary", file=sys.stderr)
+ return summary
+
+
+def process_entries_with_ai(entries):
+ """
+ Process a list of entries and rewrite their summaries with AI.
+
+ Args:
+ entries: List of entry dictionaries with 'title', 'summary', 'name', 'type' keys
+
+ Returns:
+ List of entries with rewritten summaries (adds 'summary_original' key)
+ """
+ for entry in entries:
+ if 'summary' in entry:
+ original_summary = entry['summary']
+ entry['summary'] = rewrite_summary_with_ai(
+ entry.get('title', ''),
+ original_summary,
+ entry.get('name', ''),
+ entry.get('type', 'software')
+ )
+ entry['summary_original'] = original_summary # Keep original for reference
+
+ return entries
+
+
+def main():
+ """CLI interface for testing AI functionality."""
+ if len(sys.argv) < 3:
+    print("Usage: ai_helper.py <title> <summary> [entry_name] [entry_type]")
+ print("Example: ai_helper.py 'My Software' 'This is a description.' 'MySoftware' software")
+ sys.exit(1)
+
+ title = sys.argv[1]
+ summary = sys.argv[2]
+ entry_name = sys.argv[3] if len(sys.argv) > 3 else title
+ entry_type = sys.argv[4] if len(sys.argv) > 4 else "software"
+
+ result = rewrite_summary_with_ai(title, summary, entry_name, entry_type)
+ print(f"\nOriginal: {summary}")
+ print(f"Rewritten: {result}")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/featured-content/scripts/ghost_news.py b/featured-content/scripts/ghost_news.py
new file mode 100755
index 000000000..7b2e1bed0
--- /dev/null
+++ b/featured-content/scripts/ghost_news.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python3
+"""
+Ghost News Module - Fetches news from Ghost CMS.
+
+This module can be used standalone or imported by the main orchestrator.
+Outputs JSON array of news/contribution entries.
+"""
+import json
+import os
+import sys
+import urllib.request
+
+
+def fetch_ghost_news(api_url, limit=5):
+ """
+ Fetch news posts from Ghost CMS API.
+
+ Args:
+ api_url: Ghost CMS API URL (e.g., https://blog.example.com/ghost/api/v3/content/posts/)
+ limit: Number of posts to fetch (default: 5)
+
+ Returns:
+ List of news post dictionaries
+ """
+ api_key = os.environ.get('GHOST_API_KEY', os.environ.get('GHOST_ADMIN_API_KEY'))
+
+ if not api_key:
+ print("Warning: No Ghost API key found, returning empty list", file=sys.stderr)
+ return []
+
+ url = f"{api_url}?key={api_key}&limit={limit}&include=tags,authors"
+ headers = {
+ 'Accept': 'application/json',
+ 'User-Agent': 'Armbian-Featured-Content/1.0'
+ }
+
+ try:
+ req = urllib.request.Request(url, headers=headers)
+ with urllib.request.urlopen(req) as response:
+ data = json.loads(response.read().decode())
+ posts = data.get('posts', [])
+ print(f"Fetched {len(posts)} posts from Ghost", file=sys.stderr)
+ return posts
+ except Exception as e:
+ print(f"Error fetching Ghost posts: {e}", file=sys.stderr)
+ return []
+
+
+def process_ghost_news(posts, use_ai=True):
+ """
+ Process Ghost posts into news entries.
+
+ Args:
+ posts: Raw posts data from Ghost API
+ use_ai: Whether to use AI to rewrite summaries
+
+ Returns:
+ List of news entry dictionaries
+ """
+ from ai_helper import rewrite_summary_with_ai
+
+ news_entries = []
+
+ for post in posts:
+ title = post.get('title', '')
+ url = post.get('url', '')
+ published_at = post.get('published_at', '')
+ excerpt = post.get('excerpt', '')
+ feature_image = post.get('feature_image', '')
+
+ # Get author info
+ authors = post.get('authors', [])
+ author_name = authors[0].get('name', 'Armbian Team') if authors else 'Armbian Team'
+
+ # Get tags
+ tags = post.get('tags', [])
+ tag_names = [tag.get('name', '') for tag in tags]
+
+ # Get primary tag (first tag)
+ primary_tag = tags[0].get('name', '') if tags else ''
+ entry_name = primary_tag.replace('-', ' ').title() if primary_tag else 'Armbian News'
+
+ # Use excerpt or create summary from HTML
+ summary = excerpt
+ if not summary:
+ # Could parse HTML content here, but for now use a placeholder
+ summary = f"Latest update from {title}"
+
+ # Rewrite summary with AI
+ if use_ai:
+ summary = rewrite_summary_with_ai(title, summary, entry_name, "news")
+
+ entry = {
+ "type": "news",
+ "id": f"ghost-{post.get('id', 'unknown')}",
+ "name": entry_name,
+ "url": url,
+ "title": title,
+ "summary": summary,
+ "published_at": published_at,
+ "image": feature_image,
+ "author": {"name": author_name},
+ "tags": tag_names
+ }
+ news_entries.append(entry)
+
+ return news_entries
+
+
+def main():
+ """CLI interface for testing Ghost news functionality."""
+ import argparse
+
+ parser = argparse.ArgumentParser(description='Fetch Ghost news')
+ parser.add_argument('--url', required=True, help='Ghost API URL (e.g., https://blog.example.com/ghost/api/v3/content/posts/)')
+ parser.add_argument('--limit', type=int, default=5, help='Number of posts to fetch')
+ parser.add_argument('--no-ai', action='store_true', help='Skip AI rewriting')
+ args = parser.parse_args()
+
+ print(f"Fetching {args.limit} posts from Ghost...", file=sys.stderr)
+
+ # Fetch posts
+ posts = fetch_ghost_news(args.url, args.limit)
+
+ # Process into entries
+ entries = process_ghost_news(posts, use_ai=not args.no_ai)
+
+ # Output JSON
+ print(json.dumps(entries, indent=2))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/featured-content/scripts/github_releases.py b/featured-content/scripts/github_releases.py
new file mode 100755
index 000000000..c5ed8c558
--- /dev/null
+++ b/featured-content/scripts/github_releases.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+"""
+GitHub Releases Module - Fetches Armbian build releases from GitHub API.
+
+This module can be used standalone or imported by the main orchestrator.
+Outputs JSON array of contribution entries.
+"""
+import json
+import os
+import sys
+import re
+
+
+def fetch_releases(limit=5):
+ """
+ Fetch Armbian build releases from GitHub API.
+
+ Args:
+ limit: Number of releases to fetch (default: 5)
+
+ Returns:
+ List of contribution entry dictionaries
+ """
+ import urllib.request
+
+ url = f"https://api.github.com/repos/armbian/build/releases?per_page={limit}"
+
+ try:
+ with urllib.request.urlopen(url) as response:
+ data = json.loads(response.read().decode())
+ print(f"Fetched {len(data)} releases from GitHub", file=sys.stderr)
+ return data
+ except Exception as e:
+ print(f"Error fetching releases: {e}", file=sys.stderr)
+ return []
+
+
+def process_releases(releases, limit=5, use_ai=True):
+ """
+ Process GitHub releases into contribution entries.
+
+ Args:
+ releases: Raw releases data from GitHub API
+ limit: Maximum number of releases to process
+ use_ai: Whether to use AI to rewrite summaries
+
+ Returns:
+ List of contribution entry dictionaries
+ """
+ contribution_entries = []
+
+ for release in releases[:limit]:
+ if release.get('prerelease', False):
+ continue
+
+ tag_name = release.get('tag_name', 'unknown')
+ name = release.get('name', '') or tag_name
+ body = release.get('body', '')
+ html_url = release.get('html_url', '')
+ published_at = release.get('published_at', '')
+
+ # Extract key highlights from release body
+ highlights = []
+ if body:
+ lines = body.split('\n')
+ for line in lines:
+ line = line.strip()
+ if re.match(r'^[\*\-\+•]\s+\*\*', line):
+ match = re.search(r'\*\*(.+?)\*\*', line)
+ if match:
+ highlights.append(match.group(1))
+ elif re.match(r'^\d+\.\s+', line):
+ highlights.append(line.split(' ', 1)[1][:100] if len(line.split(' ', 1)) > 1 else line[:100])
+
+ if len(highlights) >= 3:
+ break
+
+ # Create a summary from body
+ summary = body[:200].strip() + '...' if len(body) > 200 else body.strip()
+ if not summary:
+ summary = f"Armbian release {tag_name} with various improvements and bug fixes."
+
+ contribution = {
+ "type": "contribution",
+ "id": f"release-{tag_name}",
+ "name": name,
+ "url": html_url,
+ "title": f"Armbian {name} Released",
+ "summary": summary,
+ "published_at": published_at,
+ "highlights": highlights[:3],
+ "tags": ["release", "update", "armbian"]
+ }
+ contribution_entries.append(contribution)
+
+ return contribution_entries
+
+
+def main():
+ """CLI interface for testing GitHub releases functionality."""
+ import argparse
+
+ parser = argparse.ArgumentParser(description='Fetch GitHub releases')
+ parser.add_argument('--limit', type=int, default=5, help='Number of releases to fetch')
+ parser.add_argument('--no-ai', action='store_true', help='Skip AI rewriting')
+ args = parser.parse_args()
+
+ print(f"Fetching {args.limit} releases from GitHub...", file=sys.stderr)
+
+ # Fetch releases
+ releases = fetch_releases(args.limit)
+
+ # Process into entries
+ entries = process_releases(releases, args.limit, use_ai=not args.no_ai)
+
+ # Output JSON
+ print(json.dumps(entries, indent=2))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/featured-content/scripts/manual_list.py b/featured-content/scripts/manual_list.py
new file mode 100755
index 000000000..fb1bbd7bb
--- /dev/null
+++ b/featured-content/scripts/manual_list.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python3
+"""
+Manual List Module - Manually curated featured software entries.
+
+This module can be used standalone or imported by the main orchestrator.
+Reads from YAML file and outputs JSON array of manual software entries.
+
+YAML format example:
+ entries:
+ - id: my-software
+ name: "My Software"
+ type: software
+ url: https://example.com
+ title: "Amazing Software"
+ summary: "Brief description"
+ author:
+ name: "Author Name"
+ handle: "@author"
+ tags: [tag1, tag2]
+ motd:
+ line: "Message line"
+ hint: "Hint text"
+"""
+import json
+import os
+import sys
+
+try:
+ import yaml
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+
+def load_manual_list(manual_path='manual_featured.yml'):
+ """
+ Load manually curated featured entries from YAML file.
+
+ Args:
+ manual_path: Path to manual featured YAML file
+
+ Returns:
+ Dictionary containing manual entries data
+ """
+ if not os.path.exists(manual_path):
+ print(f"Warning: {manual_path} not found, returning empty list", file=sys.stderr)
+ return {"entries": []}
+
+ if not HAS_YAML:
+        print("Warning: PyYAML not installed, run 'pip install pyyaml'", file=sys.stderr)
+ return {"entries": []}
+
+ try:
+ with open(manual_path, 'r') as f:
+ data = yaml.safe_load(f)
+ # Handle nested structure (armbian_featured_software.entries)
+ if isinstance(data, dict):
+ # Check for nested structure
+ if 'armbian_featured_software' in data:
+ entries = data['armbian_featured_software'].get('entries', [])
+ else:
+ entries = data.get('entries', [])
+ elif isinstance(data, list):
+ entries = data
+ else:
+ entries = []
+ print(f"Loaded {len(entries)} manual entries from YAML", file=sys.stderr)
+ return {"entries": entries}
+ except Exception as e:
+ print(f"Error loading manual list: {e}", file=sys.stderr)
+ return {"entries": []}
+
+
+def process_manual_entries(data, use_ai=True):
+ """
+ Process manual entries into featured entries.
+
+ Args:
+ data: Data dictionary containing entries array
+ use_ai: Whether to use AI to rewrite summaries
+
+ Returns:
+ List of software entry dictionaries
+ """
+ from ai_helper import rewrite_summary_with_ai
+
+ entries = data.get('entries', [])
+ manual_entries = []
+
+ for entry in entries:
+ entry_type = entry.get('type', 'software')
+ name = entry.get('name', entry.get('id', ''))
+ title = entry.get('title', name)
+ summary = entry.get('summary', '')
+ url = entry.get('url', '')
+ entry_id = entry.get('id', name)
+
+ # Get author
+ author_data = entry.get('author', {})
+ if isinstance(author_data, dict):
+ author = {"name": author_data.get('name', 'Armbian'), "handle": author_data.get('handle', '')}
+ else:
+ author = {"name": 'Armbian'}
+
+ # Get tags
+ tags = entry.get('tags', [])
+ if not isinstance(tags, list):
+ tags = []
+
+ # Get motd
+ motd = entry.get('motd', {})
+ if not motd or not isinstance(motd, dict):
+ motd = {"line": f"Featured: {name}", "hint": ""}
+
+ # Rewrite summary with AI
+ summary = rewrite_summary_with_ai(title, summary, name, entry_type)
+
+ manual_entry = {
+ "type": entry_type,
+ "id": entry_id,
+ "name": name,
+ "url": url,
+ "title": title,
+ "summary": summary,
+ "author": author,
+ "tags": tags,
+ "motd": motd,
+ "manual": True # Mark as manual entry
+ }
+ manual_entries.append(manual_entry)
+
+ return manual_entries
+
+
+def main():
+ """CLI interface for testing manual list functionality."""
+ import argparse
+
+ parser = argparse.ArgumentParser(description='Load manual featured entries from YAML')
+ parser.add_argument('--file', default='manual_featured.yml', help='Path to manual featured YAML file')
+ parser.add_argument('--no-ai', action='store_true', help='Skip AI rewriting')
+ args = parser.parse_args()
+
+ print(f"Loading manual entries from {args.file}...", file=sys.stderr)
+
+ # Load data
+ data = load_manual_list(args.file)
+
+ # Process entries
+ entries = process_manual_entries(data, use_ai=not args.no_ai)
+
+ # Output JSON
+ print(json.dumps(entries, indent=2))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/featured-content/scripts/orchestrator.py b/featured-content/scripts/orchestrator.py
new file mode 100755
index 000000000..18c7b6dd5
--- /dev/null
+++ b/featured-content/scripts/orchestrator.py
@@ -0,0 +1,287 @@
+#!/usr/bin/env python3
+"""
+Featured Content Orchestrator - Main script that merges all content sources.
+
+This script:
+1. Fetches content from multiple sources (releases, news, forum, sponsors, software, manual)
+2. Merges all entries into a unified list
+3. Selects a subset of entries (3-5) with type diversity
+4. Outputs final featured-content.json
+"""
+import argparse
+import json
+import os
+import random
+import sys
+from typing import List, Dict, Any
+
+
def fetch_all_content(config_file: str, forum_url: str, ghost_url: str,
                      software_limit: int = 10, releases_limit: int = 5,
                      sponsors_limit: int = 10, forum_limit: int = 5,
                      ghost_limit: int = 5, use_ai: bool = True) -> tuple:
    """
    Fetch content from all available sources.

    Sources: GitHub releases, Ghost news (only if ghost_url), forum posts
    (only if forum_url), GitHub sponsors, the Armbian software config (only
    if config_file exists), and manually curated YAML entries.

    Args:
        config_file: Path to config.software.json
        forum_url: Discourse forum URL; forum source is skipped when falsy
        ghost_url: Ghost CMS API URL; news source is skipped when falsy
        software_limit: Max software entries to fetch
        releases_limit: Max releases to fetch
        sponsors_limit: Max sponsors to fetch
        forum_limit: Max forum posts to fetch
        ghost_limit: Max news posts to fetch
        use_ai: Whether to use AI rewriting

    Returns:
        Tuple of (all_entries, counts_dict) where counts_dict has actual counts per source
    """
    all_entries = []
    counts = {
        "github_releases": 0,
        "ghost_news": 0,
        "forum_posts": 0,
        "sponsors": 0,
        "software": 0,
        "manual": 0
    }

    # Import source modules (kept function-local, matching the original design)
    import github_releases
    import ghost_news
    import sponsors
    import software_list
    import manual_list

    # forum_posts is only imported when a forum URL is provided
    if forum_url:
        import forum_posts

    # 1. GitHub Releases
    print("Fetching GitHub releases...", file=sys.stderr)
    releases = github_releases.fetch_releases(limit=releases_limit)
    if releases:
        release_entries = github_releases.process_releases(releases, limit=releases_limit, use_ai=use_ai)
        all_entries.extend(release_entries)
        counts["github_releases"] = len(release_entries)
        print(f"  Added {len(release_entries)} release entries", file=sys.stderr)

    # 2. Ghost News
    if ghost_url:
        print("Fetching Ghost news...", file=sys.stderr)
        posts = ghost_news.fetch_ghost_news(ghost_url, limit=ghost_limit)
        if posts:
            news_entries = ghost_news.process_ghost_news(posts, use_ai=use_ai)
            all_entries.extend(news_entries)
            counts["ghost_news"] = len(news_entries)
            print(f"  Added {len(news_entries)} news entries", file=sys.stderr)

    # 3. Forum Posts
    if forum_url:
        print("Fetching forum posts...", file=sys.stderr)
        topics, users = forum_posts.fetch_promoted_posts(forum_url, limit=forum_limit)
        if topics:
            forum_entries = forum_posts.process_forum_posts(topics, users, use_ai=use_ai)
            all_entries.extend(forum_entries)
            counts["forum_posts"] = len(forum_entries)
            print(f"  Added {len(forum_entries)} forum entries", file=sys.stderr)

    # 4. GitHub Sponsors
    print("Fetching GitHub sponsors...", file=sys.stderr)
    sponsor_data = sponsors.fetch_sponsors(limit=sponsors_limit)
    if sponsor_data:
        sponsor_entries = sponsors.process_sponsors(sponsor_data, use_ai=use_ai)
        all_entries.extend(sponsor_entries)
        counts["sponsors"] = len(sponsor_entries)
        print(f"  Added {len(sponsor_entries)} sponsor entries", file=sys.stderr)

    # 5. Software List from Armbian config
    if config_file and os.path.exists(config_file):
        print(f"Loading software config from {config_file}...", file=sys.stderr)
        config = software_list.load_software_config(config_file)
        if config:
            software_entries = software_list.process_software_entries(
                config, limit=software_limit, featured_only=True, use_ai=use_ai
            )
            all_entries.extend(software_entries)
            counts["software"] = len(software_entries)
            print(f"  Added {len(software_entries)} software entries", file=sys.stderr)

    # 6. Manual curated entries
    print("Loading manual featured entries...", file=sys.stderr)
    # Look in parent directory (featured-content/) for manual_featured.yml.
    # NOTE: `os` is already imported at module level; the original re-imported
    # it here redundantly.
    manual_path = os.path.join(os.path.dirname(__file__), '..', 'manual_featured.yml')
    manual_data = manual_list.load_manual_list(manual_path)
    if manual_data:
        manual_entries = manual_list.process_manual_entries(manual_data, use_ai=use_ai)
        all_entries.extend(manual_entries)
        counts["manual"] = len(manual_entries)
        print(f"  Added {len(manual_entries)} manual entries", file=sys.stderr)

    return all_entries, counts
+
+
def select_diverse_entries(entries: List[Dict[str, Any]], target_count: int = 5) -> List[Dict[str, Any]]:
    """
    Pick up to ``target_count`` entries, favouring one of each type first.

    Args:
        entries: All available entries
        target_count: Target number of entries to select

    Returns:
        A randomized selection that covers as many distinct types as
        possible before filling remaining slots with arbitrary entries.
    """
    if not entries:
        return []

    # Bucket entries by their 'type' field.
    grouped: Dict[str, List[Dict[str, Any]]] = {}
    for item in entries:
        grouped.setdefault(item.get('type', 'unknown'), []).append(item)

    # Shuffle within each bucket, then flatten into a (type, entry) pool
    # and shuffle that too, so the scan order is fully randomized.
    for kind in grouped:
        random.shuffle(grouped[kind])

    candidate_pool = []
    for kind, bucket in grouped.items():
        for item in bucket:
            candidate_pool.append((kind, item))
    random.shuffle(candidate_pool)

    picked: List[Dict[str, Any]] = []
    seen_types = set()
    prefer_new_types = True

    for kind, item in candidate_pool:
        if len(picked) >= target_count:
            break

        if prefer_new_types:
            # First phase: take only one representative per type.
            if kind in seen_types:
                continue
            picked.append(item)
            seen_types.add(kind)
            # Every type represented -> stop filtering by type.
            if len(seen_types) == len(grouped):
                prefer_new_types = False
        else:
            # Diversity achieved: accept anything.
            picked.append(item)

    # Top up from the raw list if the diversity pass fell short.
    if len(picked) < target_count:
        for item in entries:
            if item not in picked:
                picked.append(item)
                if len(picked) >= target_count:
                    break

    return picked
+
+
def sort_entries(entries: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Return entries ordered by a fixed type priority for consistent output.

    Args:
        entries: Entries to sort

    Returns:
        A new list sorted so software comes first, then contribution, news,
        discussion, sponsor, manual; unknown types sink to the end.
    """
    priority = ('software', 'contribution', 'news', 'discussion', 'sponsor', 'manual')

    def rank(item: Dict[str, Any]) -> int:
        # Types not in the priority tuple all share rank 999 (end of list).
        kind = item.get('type', 'unknown')
        return priority.index(kind) if kind in priority else 999

    return sorted(entries, key=rank)
+
+
def main():
    """CLI interface for orchestrator: fetch, select, sort, and print JSON."""
    parser = argparse.ArgumentParser(description='Featured Content Orchestrator')
    parser.add_argument('--config', default='config.software.json', help='Path to config.software.json')
    parser.add_argument('--forum-url', help='Discourse forum URL (e.g., https://forum.armbian.com)')
    parser.add_argument('--ghost-url', help='Ghost CMS API URL')
    parser.add_argument('--software-limit', type=int, default=10, help='Max software entries')
    parser.add_argument('--releases-limit', type=int, default=5, help='Max releases to fetch')
    parser.add_argument('--sponsors-limit', type=int, default=10, help='Max sponsors to fetch')
    parser.add_argument('--forum-limit', type=int, default=5, help='Max forum posts')
    parser.add_argument('--ghost-limit', type=int, default=5, help='Max news posts')
    parser.add_argument('--count', type=int, default=5, help='Target number of final entries')
    parser.add_argument('--no-ai', action='store_true', help='Skip AI rewriting')
    parser.add_argument('--no-shuffle', action='store_true', help='Disable randomization (useful for testing)')
    parser.add_argument('--all', action='store_true', help='Output all entries (no selection)')
    args = parser.parse_args()

    # Seed the RNG. With --no-shuffle, use a fixed seed so the selection is
    # deterministic across runs; previously the flag only skipped the call
    # to random.seed() (OS-entropy seeding), so selection stayed random and
    # the flag had no observable effect.
    if args.no_shuffle:
        random.seed(0)
    else:
        random.seed()

    print("=== Featured Content Orchestrator ===", file=sys.stderr)
    print(f"Target entries: {args.count}", file=sys.stderr)
    print(f"AI rewriting: {not args.no_ai}", file=sys.stderr)

    # Fetch all content from every configured source
    all_entries, counts = fetch_all_content(
        config_file=args.config,
        forum_url=args.forum_url,
        ghost_url=args.ghost_url,
        software_limit=args.software_limit,
        releases_limit=args.releases_limit,
        sponsors_limit=args.sponsors_limit,
        forum_limit=args.forum_limit,
        ghost_limit=args.ghost_limit,
        use_ai=not args.no_ai
    )

    print(f"\nTotal entries fetched: {len(all_entries)}", file=sys.stderr)

    if not all_entries:
        # Emit a valid (empty) JSON document so downstream parsing succeeds
        print("No entries fetched, outputting empty array", file=sys.stderr)
        print("[]")
        return

    # Select subset (unless --all specified)
    if args.all:
        final_entries = all_entries
    else:
        final_entries = select_diverse_entries(all_entries, target_count=args.count)

    print(f"Selected {len(final_entries)} entries for output", file=sys.stderr)

    # Sort by type for stable, diff-friendly output
    final_entries = sort_entries(final_entries)

    # Output JSON on stdout; all diagnostics above went to stderr
    output = {
        "entries": final_entries,
        "count": len(final_entries),
        "sources": counts
    }

    print(json.dumps(output, indent=2))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/featured-content/scripts/select_featured.py b/featured-content/scripts/select_featured.py
new file mode 100755
index 000000000..3421b0926
--- /dev/null
+++ b/featured-content/scripts/select_featured.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+"""
+Select diverse featured entries from all sources.
+
+Reads multiple JSON files with entries and selects N items from each type.
+"""
+import json
+import random
+import sys
+from collections import defaultdict
+from pathlib import Path
+
+
def main():
    """Select N random entries per type from one or more JSON entry files."""
    if len(sys.argv) < 3:
        # Bug fix: the old usage string omitted the required count argument
        # and went to stdout; usage belongs on stderr so stdout stays
        # reserved for JSON output.
        print("Usage: select_featured.py <per_category_count> <input1.json> [input2.json] ...",
              file=sys.stderr)
        sys.exit(1)

    per_category_count = int(sys.argv[1])
    input_files = sys.argv[2:]

    # Load all entries; unreadable or non-list files are skipped with a warning
    all_entries = []
    for input_file in input_files:
        try:
            with open(input_file) as f:
                entries = json.load(f)
                if isinstance(entries, list):
                    all_entries.extend(entries)
                    print(f"Loaded {len(entries)} from {input_file}", file=sys.stderr)
        except Exception as e:
            print(f"Warning: Could not load {input_file}: {e}", file=sys.stderr)

    if not all_entries:
        # Still emit a valid (empty) document so downstream parsing succeeds
        print("No entries loaded", file=sys.stderr)
        print(json.dumps({"entries": [], "count": 0, "sources": {}}, indent=2))
        sys.exit(0)

    # Group by type
    by_type = defaultdict(list)
    for entry in all_entries:
        entry_type = entry.get('type', 'unknown')
        by_type[entry_type].append(entry)

    # Per-type totals, reported in the output for transparency
    sources = {entry_type: len(entries_list) for entry_type, entries_list in by_type.items()}

    # Select N random entries from each type
    selected = []
    random.seed()

    for entry_type, entries in by_type.items():
        # Shuffle and take first N
        random.shuffle(entries)
        count = min(per_category_count, len(entries))
        selected.extend(entries[:count])
        print(f"Selected {count}/{len(entries)} from type '{entry_type}'", file=sys.stderr)

    # Shuffle final selection for variety
    random.shuffle(selected)

    # Output
    output = {
        "entries": selected,
        "count": len(selected),
        "sources": sources
    }

    print(json.dumps(output, indent=2))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/featured-content/scripts/software_list.py b/featured-content/scripts/software_list.py
new file mode 100755
index 000000000..29cab1280
--- /dev/null
+++ b/featured-content/scripts/software_list.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python3
+"""
+Software List Module - Extracts software entries from Armbian config.json.
+
+This module can be used standalone or imported by the main orchestrator.
+Outputs JSON array of software entries.
+
+Handles the actual config.software.json structure with nested menu format.
+"""
+import json
+import os
+import sys
+
+
def load_software_config(config_path='config.software.json'):
    """
    Load software configuration from an Armbian config JSON file.

    Args:
        config_path: Path to config.software.json file

    Returns:
        Parsed configuration dict, or an empty dict when the file is
        missing or cannot be read/parsed.
    """
    if not os.path.exists(config_path):
        print(f"Error: {config_path} not found", file=sys.stderr)
        return {}

    try:
        with open(config_path, 'r') as handle:
            parsed = json.load(handle)
    except Exception as exc:
        # Covers both I/O failures and malformed JSON
        print(f"Error loading config: {exc}", file=sys.stderr)
        return {}

    print(f"Loaded software config from {config_path}", file=sys.stderr)
    return parsed
+
+
def extract_software_from_menu(menu_items, parent_id=""):
    """
    Recursively collect installable software items from a nested menu tree.

    Only "install" actions (ids ending in '001') are kept; remove/purge/
    uninstall/disable actions and pure submenu containers are skipped.

    Args:
        menu_items: List of menu item dicts (possibly with nested 'sub' lists)
        parent_id: Id of the enclosing menu, recorded on each entry

    Returns:
        Flat list of software entry dictionaries
    """
    collected = []

    for node in menu_items:
        node_id = node.get('id', '')
        description = node.get('description', '')
        short = node.get('short', description)

        # Descend into submenus first, so nested items precede later siblings
        if 'sub' in node:
            collected += extract_software_from_menu(node['sub'], node_id)

        # xxx001 is the install action; xxx002+ are remove/purge/config actions
        if not node_id.endswith('001'):
            continue

        # Drop entries whose display name marks them as a removal-style action
        display_name = (short or description).lower()
        if any(word in display_name for word in ('remove', 'purge', 'uninstall', 'disable')):
            continue

        # A node with a submenu but no module is just a container, not software
        container_only = 'sub' in node and not node.get('module')
        if container_only or not node_id:
            continue

        record = {
            'id': node_id,
            'name': short or description,
            'description': description,
            'author': node.get('author', ''),
            'status': node.get('status', ''),
            'parent_menu': parent_id,
        }

        # Treat an 'about' field holding a link as the project URL
        link = node.get('about', '')
        if link and link.startswith('http'):
            record['url'] = link

        collected.append(record)

    return collected
+
+
def process_software_entries(config, limit=None, featured_only=True, use_ai=True):
    """
    Process software entries from Armbian config into featured entries.

    Args:
        config: Loaded configuration dictionary (nested 'menu' format)
        limit: Maximum number of entries to process (default: all)
        featured_only: Only include entries marked as featured
            (not used for the menu format; kept for interface compatibility)
        use_ai: Whether to use AI to rewrite summaries. When False, the raw
            description is kept and the ai_helper module is never imported.
            (Previously this flag was accepted but ignored, unlike the
            sibling sponsors module which honors it.)

    Returns:
        List of software entry dictionaries
    """
    import random

    # Extract all software from the nested menu structure
    menu = config.get('menu', [])
    all_software = extract_software_from_menu(menu)

    print(f"Found {len(all_software)} software items in menu", file=sys.stderr)

    # Priority categories that contain popular software
    featured_categories = {
        'DNS', 'Backup', 'Media', 'Smart Home', 'Cloud',
        'Containers', 'Desktop', 'Network', 'Security'
    }

    # Keep items in featured categories, stable items, or items whose
    # name/description mentions a well-known software area
    featured_items = []
    for item in all_software:
        parent = item.get('parent_menu', '')
        name = item.get('name', '').lower()
        description = item.get('description', '').lower()

        if (parent in featured_categories or
                item.get('status') == 'Stable' or
                any(cat in name or cat in description for cat in
                    ['server', 'cloud', 'dns', 'media', 'home', 'backup', 'docker', 'desktop'])):
            featured_items.append(item)

    print(f"Filtered to {len(featured_items)} featured items", file=sys.stderr)

    # Shuffle for variety so repeated runs feature different software
    random.shuffle(featured_items)

    # Apply limit
    if limit:
        featured_items = featured_items[:limit]

    # Convert to featured entry format
    software_entries = []
    for item in featured_items:
        item_id = item.get('id', '')
        name = item.get('name', item_id)
        description = item.get('description', f'{name} software for ARM devices')
        url = item.get('url', '')

        # Fall back to 'Armbian' when the author is missing OR empty.
        # extract_software_from_menu stores '' for unknown authors, so a
        # plain dict.get() default would never apply.
        author_name = (item.get('author') or 'Armbian').lstrip('@')

        # Clean up author handle (empty when no author is known)
        author_handle = (item.get('author') or '').lstrip('@')

        # Determine tags based on parent category and keywords
        parent = item.get('parent_menu', '')
        desc_lower = description.lower()

        tags = [parent.lower()] if parent else []

        # Add keyword-based tags
        keyword_tags = {
            'dns': 'dns',
            'backup': 'backup',
            'media': 'media',
            'server': 'server',
            'cloud': 'cloud',
            'home': 'smart-home',
            'container': 'containers',
            'docker': 'containers',
            'desktop': 'desktop',
            'network': 'networking',
            'security': 'security',
            'privacy': 'privacy'
        }

        for keyword, tag in keyword_tags.items():
            if keyword in desc_lower and tag not in tags:
                tags.append(tag)

        # Create motd
        motd_line = f"Featured: {name}"
        motd_hint = "armbian-config → Software"

        # Rewrite summary with AI only when requested; the lazy import keeps
        # this module usable without the ai_helper dependency
        title = name
        summary = description
        if use_ai:
            from ai_helper import rewrite_summary_with_ai
            summary = rewrite_summary_with_ai(title, description, name, "software")

        software_entry = {
            "type": "software",
            "id": item_id,
            "name": name,
            "url": url,
            "title": title,
            "summary": summary,
            "author": {
                "name": author_name,
                "handle": author_handle
            },
            "tags": tags[:5],  # Limit to 5 tags
            "motd": {
                "line": motd_line,
                "hint": motd_hint
            }
        }
        software_entries.append(software_entry)

    print(f"Processed {len(software_entries)} software entries", file=sys.stderr)
    return software_entries
+
+
def main():
    """CLI entry point: extract software entries from an Armbian config file."""
    import argparse

    cli = argparse.ArgumentParser(description='Extract software from Armbian config')
    cli.add_argument('--config', default='config.software.json', help='Path to config.software.json')
    cli.add_argument('--limit', type=int, help='Limit number of entries')
    cli.add_argument('--all', action='store_true', help='Include all entries (not just featured)')
    cli.add_argument('--no-ai', action='store_true', help='Skip AI rewriting')
    opts = cli.parse_args()

    print("Loading software configuration...", file=sys.stderr)

    # Load the config file, then convert its menu items into featured entries
    loaded = load_software_config(opts.config)
    results = process_software_entries(
        loaded,
        limit=opts.limit,
        featured_only=not opts.all,
        use_ai=not opts.no_ai,
    )

    # JSON output on stdout; all diagnostics went to stderr
    print(json.dumps(results, indent=2))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/featured-content/scripts/sponsors.py b/featured-content/scripts/sponsors.py
new file mode 100755
index 000000000..9b59f2878
--- /dev/null
+++ b/featured-content/scripts/sponsors.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python3
+"""
+GitHub Sponsors Module - Fetches sponsors for armbian/build.
+
+This module can be used standalone or imported by the main orchestrator.
+Outputs JSON array of sponsor entries.
+"""
import json
import os
import sys
import urllib.error
import urllib.request
+
+
def fetch_sponsors(repo="armbian/build", limit=10):
    """
    Fetch sponsors of the Armbian organization via the GitHub GraphQL API.

    Requires a GITHUB_TOKEN environment variable (a token with permission
    to read sponsorship data).

    Args:
        repo: GitHub repository (default: armbian/build). NOTE: currently
            unused — the GraphQL query is hard-coded to the "armbian" org.
        limit: Number of sponsors to fetch (default: 10)

    Returns:
        List of sponsorship node dictionaries, or [] on any failure
    """
    # GitHub GraphQL API for sponsors
    api_key = os.environ.get('GITHUB_TOKEN')
    if not api_key:
        print("Error: GITHUB_TOKEN environment variable not set", file=sys.stderr)
        print("Hint: Ensure the workflow has: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}", file=sys.stderr)
        return []

    # Debug: Show token presence (truncated for security)
    token_preview = api_key[:8] + "..." if len(api_key) > 8 else "..."
    print(f"Debug: GITHUB_TOKEN found: {token_preview} (len={len(api_key)})", file=sys.stderr)

    # GraphQL query for organization sponsors (as maintainer receiving sponsors)
    query = {
        "query": f"""
        {{
          organization(login: "armbian") {{
            sponsorshipsAsMaintainer(first: {limit}, includePrivate: true) {{
              nodes {{
                createdAt
                sponsorEntity {{
                  __typename
                  ... on Organization {{
                    name
                    login
                    url
                    description
                    avatarUrl
                    location
                    websiteUrl
                  }}
                  ... on User {{
                    name
                    login
                    url
                    avatarUrl
                    location
                    websiteUrl
                  }}
                }}
                tier {{
                  monthlyPriceInDollars
                  name
                  description
                }}
              }}
            }}
          }}
        }}
        """
    }

    url = "https://api.github.com/graphql"
    headers = {
        'Authorization': f'Bearer {api_key}',
        'Accept': 'application/json',
        'Content-Type': 'application/json',
        'User-Agent': 'Armbian-Featured-Content/1.0'
    }

    try:
        print(f"Debug: Sending request to {url}", file=sys.stderr)
        req = urllib.request.Request(url, headers=headers, data=json.dumps(query).encode())
        with urllib.request.urlopen(req) as response:
            status = response.status
            body = response.read().decode()
            print(f"Debug: Response status: {status}", file=sys.stderr)
            print(f"Debug: Response length: {len(body)} bytes", file=sys.stderr)

            data = json.loads(body)

            # Debug: Show full response for small responses
            if len(body) < 200:
                print(f"Debug: Full response: {body}", file=sys.stderr)

            # Surface GraphQL-level errors (HTTP 200 can still carry errors)
            if 'errors' in data:
                print(f"Debug: GraphQL errors:", file=sys.stderr)
                for err in data.get('errors', []):
                    print(f"  - {err.get('message', 'Unknown error')}", file=sys.stderr)
                    if 'type' in err:
                        print(f"    Type: {err['type']}", file=sys.stderr)
                    if 'path' in err:
                        print(f"    Path: {err['path']}", file=sys.stderr)

            org_data = data.get('data', {}).get('organization')
            if not org_data:
                print("Debug: No 'organization' in response data", file=sys.stderr)
                print(f"Debug: Available keys in data: {list(data.get('data', {}).keys())}", file=sys.stderr)
                return []

            sponsors_data = org_data.get('sponsorshipsAsMaintainer')
            # Check the explicit-null case FIRST: previously this branch sat
            # after a generic `if not sponsors_data: return []`, which made
            # the more specific diagnostic unreachable.
            if sponsors_data is None:
                print("Debug: 'sponsorshipsAsMaintainer' is null - GitHub Sponsors may not be enabled for this org", file=sys.stderr)
                return []
            if not sponsors_data:
                print("Debug: No 'sponsorshipsAsMaintainer' field in organization data", file=sys.stderr)
                print(f"Debug: Available keys in organization: {list(org_data.keys())}", file=sys.stderr)
                return []

            sponsors = sponsors_data.get('nodes', [])
            print(f"Fetched {len(sponsors)} sponsors from GitHub", file=sys.stderr)

            # Debug: Show first sponsor structure if available
            if sponsors:
                print(f"Debug: First sponsor keys: {list(sponsors[0].keys())}", file=sys.stderr)

            return sponsors
    except urllib.error.HTTPError as e:
        print(f"Error: HTTP {e.code} - {e.reason}", file=sys.stderr)
        body = e.read().decode()
        print(f"Debug: Error response: {body[:500]}", file=sys.stderr)
        return []
    except Exception as e:
        print(f"Error fetching sponsors: {type(e).__name__}: {e}", file=sys.stderr)
        import traceback
        traceback.print_exc(file=sys.stderr)
        return []
+
+
def process_sponsors(sponsors, use_ai=True):
    """
    Process GitHub sponsors into contribution entries.

    Args:
        sponsors: Raw sponsor nodes from the GitHub GraphQL API
        use_ai: Whether to use AI to rewrite summaries; when False the
            ai_helper module is never imported

    Returns:
        List of sponsor entry dictionaries
    """
    sponsor_entries = []
    print(f"Debug: Processing {len(sponsors)} raw sponsor nodes", file=sys.stderr)

    for idx, sponsor_node in enumerate(sponsors):
        # GraphQL fields may be explicit nulls, so use `or` fallbacks rather
        # than dict.get defaults (a present-but-null key defeats the default)
        sponsor = sponsor_node.get('sponsorEntity') or {}
        tier = sponsor_node.get('tier') or {}
        monthly_price = tier.get('monthlyPriceInDollars') or 0

        # Debug: Show sponsor processing info
        sponsor_name = sponsor.get('name') or sponsor.get('login', 'Unknown')
        tier_info = f"${monthly_price}/mo" if monthly_price > 0 else "no tier info"
        print(f"Debug: Sponsor {idx+1}: {sponsor_name} | {tier_info}", file=sys.stderr)

        # Get sponsor info (null-safe: description/name/url can all be null,
        # and None[:200] / string-formatting None would previously crash or
        # produce "Sponsor: None")
        name = sponsor.get('name') or 'Unknown Sponsor'
        login = sponsor.get('login') if 'login' in sponsor else None
        description = (sponsor.get('description') or "Sponsor of Armbian build")[:200]

        url = sponsor.get('url') or sponsor.get('websiteUrl') or ''
        avatar_url = sponsor.get('avatarUrl') or ''
        location = sponsor.get('location') or ''

        # Create entry name (prefer the login handle when present)
        entry_name = f"Sponsor: {name}"
        if login:
            entry_name = f"Sponsor: {login}"

        # Rewrite summary with AI only when requested; imported lazily so
        # --no-ai runs don't require the ai_helper module
        summary = description
        if use_ai:
            from ai_helper import rewrite_summary_with_ai
            summary = rewrite_summary_with_ai(name, description, entry_name, "sponsor")

        # Get tier based on monthly price (or default to "Supporter" if no pricing info)
        if monthly_price >= 1000:
            tier_name = "Platinum"
        elif monthly_price >= 100:
            tier_name = "Gold"
        elif monthly_price > 0:
            tier_name = "Silver"
        else:
            tier_name = "Supporter"

        entry = {
            "type": "sponsor",
            "id": f"sponsor-{idx}",
            "name": entry_name,
            "url": url,
            "title": f"{name} - {tier_name} Sponsor",
            "summary": summary,
            "image": avatar_url,
            "author": {"name": name, "handle": f"@{login}"} if login else {"name": name},
            "tags": ["sponsor", tier_name.lower(), "community"],
            "motd": {"line": f"{tier_name} level supporter"}
        }
        sponsor_entries.append(entry)
        print(f"  ✓ Added as {tier_name} sponsor", file=sys.stderr)

    print(f"Debug: Processed {len(sponsor_entries)} active sponsors", file=sys.stderr)
    return sponsor_entries
+
+
def main():
    """CLI entry point: fetch sponsors and print them as JSON entries."""
    import argparse

    cli = argparse.ArgumentParser(description='Fetch GitHub sponsors')
    cli.add_argument('--repo', default='armbian/build', help='GitHub repository')
    cli.add_argument('--limit', type=int, default=10, help='Number of sponsors to fetch')
    cli.add_argument('--no-ai', action='store_true', help='Skip AI rewriting')
    opts = cli.parse_args()

    print(f"Fetching up to {opts.limit} sponsors from {opts.repo}...", file=sys.stderr)

    # Fetch raw sponsor nodes, then convert them into featured entries
    raw_sponsors = fetch_sponsors(opts.repo, opts.limit)
    processed = process_sponsors(raw_sponsors, use_ai=not opts.no_ai)

    # JSON result on stdout; all diagnostics above went to stderr
    print(json.dumps(processed, indent=2))
+
+
+if __name__ == "__main__":
+ main()