#!/usr/bin/env python3
"""
Main orchestration script for the AI-powered web crawler.
Coordinates crawling, processing, and Obsidian vault generation.
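
Example invocations (illustrative; the flags are defined in parse_args() below,
but the seed URL, domain, and vault path here are placeholders):

    python main.py --seeds https://example.com/docs --max-pages 50 --max-depth 2
    python main.py --resume --skip-llm
    python main.py --use-langgraph --allowed-domains example.com --vault-dir ./vault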
"""
import argparse
import re
import sys
from pathlib import Path
from typing import Optional

from tqdm import tqdm

from config import CrawlerConfig
from content_processor import ContentProcessor
from crawler import WebCrawler
from database_enhanced import EnhancedCrawlerDatabase
from embeddings_manager import EmbeddingsManager
from link_validator import validate_external_links, validate_internal_wikilinks
from llm_normalizer import LLMNormalizer
from markdown_linter import lint_and_format
from obsidian_writer import ObsidianWriter
from semantic_linker import build_semantic_links

def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description='AI-Powered Web Crawler with Obsidian Output',
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        '--seeds',
        nargs='+',
        help='Seed URLs to start crawling (overrides .env)'
    )
    parser.add_argument(
        '--max-pages',
        type=int,
        help='Maximum number of pages to crawl (overrides .env)'
    )
    parser.add_argument(
        '--max-depth',
        type=int,
        help='Maximum crawl depth (overrides .env)'
    )
    parser.add_argument(
        '--skip-llm',
        action='store_true',
        help='Skip LLM processing for faster crawling'
    )
    parser.add_argument(
        '--resume',
        action='store_true',
        help='Resume previous crawl from database'
    )
    parser.add_argument(
        '--use-langgraph',
        action='store_true',
        help='Run the pipeline via LangGraph orchestration'
    )
    parser.add_argument(
        '--vault-dir',
        type=str,
        help='Override Obsidian vault output directory'
    )
    parser.add_argument(
        '--allowed-domains',
        nargs='+',
        help='Restrict crawl to these domains'
    )
    parser.add_argument(
        '--docs-prefix',
        type=str,
        help='Restrict docs discovery to URLs with this path prefix (e.g., /api-reference)'
    )
    parser.add_argument(
        '--full-docs',
        action='store_true',
        help='Discover all documentation URLs under the given seed (docs mode)'
    )
    return parser.parse_args()
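
# Note: any flag parsed above overrides the matching .env value; main() applies
# these overrides to CrawlerConfig before configuration validation runs.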
def process_pages(db: EnhancedCrawlerDatabase, llm_normalizer: Optional[LLMNormalizer] = None, use_llm: bool = True):
    """
    Process unprocessed pages: extract content, improve titles, generate markdown.

    Safe to re-run: pages are stored via upsert_page_enhanced, so reprocessing
    updates existing records rather than duplicating them.

    Args:
        db: Database instance
        llm_normalizer: LLM normalizer instance (optional)
        use_llm: Whether to use LLM for enhancement
    """
    print("\n📝 Processing crawled pages...")
    unprocessed = db.get_unprocessed_pages(limit=1000)
    if not unprocessed:
        print("No unprocessed pages found.")
        return

    # Build the embeddings manager once and reuse it for every page below.
    embeddings_manager = None
    try:
        embeddings_manager = EmbeddingsManager(CrawlerConfig)
    except Exception as e:
        print(f"⚠️ Embeddings manager unavailable, skipping semantic links: {e}")

    for page in tqdm(unprocessed, desc="Processing pages"):
        url = page['url']
        html_content = page['content']
        if not html_content:
            continue

        # Extract clean content
        processed = ContentProcessor.extract_content(html_content, url)
        metadata = {'tags': [CrawlerConfig.TAG_PREFIX]}
        summary = ''
        page_type = ''
        lang = 'en'

        # LLM enrichment if enabled
        if use_llm and llm_normalizer:
            try:
                improved_title = llm_normalizer.improve_title(
                    processed['title'], processed['content'][:500]
                )
                processed['title'] = improved_title
                metadata['tags'] = llm_normalizer.extract_tags(improved_title, processed['content'])
                summary = llm_normalizer.summarize(improved_title, processed['content'])
                page_type, lang = llm_normalizer.classify(improved_title, processed['content'])
            except Exception as e:
                print(f"⚠️ LLM processing failed for {url}: {e}")
        processed['metadata'] = metadata

        # Markdown lint/format (non-blocking)
        md_formatted, lint_notes = lint_and_format(processed['markdown_content'])
        processed['markdown_content'] = md_formatted

        # Build embeddings and semantic links (best-effort)
        semantic_similar = []
        if embeddings_manager:
            try:
                doc_id = processed['slug']
                embeddings_manager.upsert_page(doc_id, processed['content'], metadata={"url": url})
                semantic_similar = build_semantic_links(CrawlerConfig, doc_id, processed['content'])
            except Exception as e:
                print(f"⚠️ Embeddings/semantic failed for {url}: {e}")

        # Link validation (best-effort)
        missing_internal = []
        external_errors = []
        try:
            existing_slugs = set(db.get_all_slugs()) if hasattr(db, 'get_all_slugs') else set()
            missing_internal = validate_internal_wikilinks(processed['markdown_content'], existing_slugs)
            # Crude external URL extraction; only the first 50 URLs are checked
            ext_urls = re.findall(r"https?://[^\s)>'\]\"]+", processed['markdown_content'])
            ext_result = validate_external_links(ext_urls[:50], timeout=CrawlerConfig.EXTERNAL_LINK_TIMEOUT, retries=CrawlerConfig.EXTERNAL_LINK_RETRIES)
            external_errors = [f"{u} -> {code}" for u, code in ext_result.items() if code >= 400 or code == 0]
        except Exception as e:
            print(f"⚠️ Link validation failed for {url}: {e}")

        # Update database
        page_data = {
            'title': processed['title'],
            'slug': processed['slug'],
            'content': processed['content'],
            'markdown_content': processed['markdown_content'],
            'word_count': processed['word_count'],
            'content_hash': processed['checksum'],
            'depth': page['crawl_depth'],
            'metadata': processed['metadata'],
            'status': 'processed',
            'summary': summary,
            'type': page_type,
            'lang': lang,
            'semantic_similar': semantic_similar,
            'missing_internal_links': missing_internal,
            'external_link_errors': external_errors,
        }
        if lint_notes:
            page_data['metadata']['lint_notes'] = lint_notes

        # Use enhanced upsert method
        db.upsert_page_enhanced(url, page_data)

        # Store links
        if processed['links']:
            db.add_links(url, processed['links'])
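
# The processing and writing phases can also be driven individually, e.g. to
# re-process pages from an existing database without re-crawling (a sketch
# using only the functions and config defined in this file):
#
#   db = EnhancedCrawlerDatabase(CrawlerConfig.DATABASE_PATH)
#   process_pages(db, llm_normalizer=None, use_llm=False)
#   write_vault(db, ObsidianWriter(CrawlerConfig.VAULT_DIR))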
def write_vault(db: EnhancedCrawlerDatabase, writer: ObsidianWriter):
    """
    Write processed pages to Obsidian vault.

    Args:
        db: Database instance
        writer: Obsidian writer instance
    """
    print("\n📚 Writing Obsidian vault...")

    # Get all processed pages not yet written to the vault
    processed_pages = []
    all_urls = db.get_all_urls()
    for url in tqdm(all_urls, desc="Loading pages"):
        page = db.get_page(url)
        if page and page.get('processed') and not page.get('written_to_vault'):
            processed_pages.append(page)

    if not processed_pages:
        print("No pages ready for vault writing.")
        return

    # Write each page, counting only the ones that succeed
    written = 0
    for page in tqdm(processed_pages, desc="Writing files"):
        try:
            # Get backlinks
            backlinks = db.get_backlinks(page['url'])
            # Write to vault
            writer.write_page(page, backlinks)
            # Mark as written
            db.mark_page_processed(page['url'], written=True)
            written += 1
        except Exception as e:
            print(f"❌ Failed to write {page['url']}: {e}")

    print(f"\n✅ Wrote {written} of {len(processed_pages)} pages to vault: {writer.vault_dir}")
def main():
    """Main execution function."""
    args = parse_args()

    # Override config with CLI args
    if args.seeds:
        CrawlerConfig.SEED_URLS = args.seeds
    if args.max_pages is not None:
        CrawlerConfig.MAX_PAGES = args.max_pages
    if args.max_depth is not None:
        CrawlerConfig.MAX_DEPTH = args.max_depth

    # Apply optional overrides before validation
    if args.vault_dir:
        CrawlerConfig.VAULT_DIR = Path(args.vault_dir)
    if args.allowed_domains:
        CrawlerConfig.ALLOWED_DOMAINS = args.allowed_domains

    # Validate configuration
    if not CrawlerConfig.validate():
        print("❌ Invalid configuration. Please check your .env file or CLI args.")
        return 1

    # Display configuration
    CrawlerConfig.display()

    # Optional LangGraph path
    if args.use_langgraph:
        try:
            from orchestrator import run_with_langgraph
            result = run_with_langgraph(
                seeds=CrawlerConfig.SEED_URLS,
                max_pages=CrawlerConfig.MAX_PAGES,
                max_depth=CrawlerConfig.MAX_DEPTH,
                skip_llm=args.skip_llm,
                resume=args.resume,
                vault_dir=str(CrawlerConfig.VAULT_DIR),
                allowed_domains=CrawlerConfig.ALLOWED_DOMAINS,
                docs_prefix=args.docs_prefix,
                full_docs=args.full_docs,
            )
            print(f"\n✅ Pipeline completed ({result.get('mode')}). Processed={result.get('processed', 0)}, Written={result.get('written', 0)}")
            return 0
        except Exception as e:
            print(f"⚠️ LangGraph execution failed: {e}. Falling back to sequential mode…")

    # Initialize enhanced database with frontier, entities, and LLM tracking
    print(f"\n🗄️ Initializing enhanced database: {CrawlerConfig.DATABASE_PATH}")
    db = EnhancedCrawlerDatabase(CrawlerConfig.DATABASE_PATH)
    crawler = WebCrawler(db, CrawlerConfig)
    writer = ObsidianWriter(CrawlerConfig.VAULT_DIR)

    # Initialize LLM if not skipped
    llm_normalizer = None
    if not args.skip_llm:
        try:
            print("🤖 Initializing LLM normalizer (LM Studio)...")
            llm_normalizer = LLMNormalizer(CrawlerConfig, db=db)
            print("✅ LLM ready")
        except Exception as e:
            print(f"⚠️ LLM initialization failed: {e}")
            print("Continuing without LLM enhancement...")

    # Phase 1: Crawl
    if not args.resume:
        print("\n🕷️ Phase 1: Crawling")
        crawler.initialize(CrawlerConfig.SEED_URLS)
        pages_crawled = crawler.run()
        print(f"✅ Crawled {pages_crawled} pages")
    else:
        print("▶️ Resuming from previous crawl")

    # Show enhanced statistics
    stats = db.get_enhanced_statistics()
    print("\n📊 Enhanced Database Statistics:")
    print(f"  Total pages: {stats.get('total_pages', 0)}")
    print(f"  Processed: {stats.get('processed_pages', 0)}")
    print(f"  Unique sites: {stats.get('unique_sites', 0)}")
    print(f"  Total links: {stats.get('total_links', 0)}")
    print(f"  Total entities: {stats.get('total_entities', 0)}")
    print(f"  Frontier size: {stats.get('frontier_size', 0)}")

    # Show LLM stats if available
    llm_stats = stats.get('llm_operations', {})
    if llm_stats and llm_stats.get('total_ops', 0) > 0:
        print(f"  LLM operations: {llm_stats.get('total_ops', 0)} ({llm_stats.get('successful_ops', 0)} successful)")

    # Phase 2: Process
    print("\n🔄 Phase 2: Processing")
    process_pages(db, llm_normalizer, use_llm=not args.skip_llm)

    # Phase 3: Write vault
    print("\n📝 Phase 3: Writing Obsidian Vault")
    write_vault(db, writer)

    # Final enhanced statistics
    final_stats = db.get_enhanced_statistics()
    print("\n🎉 Crawl Complete!")
    print(f"  Total pages: {final_stats.get('total_pages', 0)}")
    print(f"  Processed pages: {final_stats.get('processed_pages', 0)}")
    print(f"  Total entities: {final_stats.get('total_entities', 0)}")
    print(f"  Vault location: {CrawlerConfig.VAULT_DIR}")
    print(f"  Database: {CrawlerConfig.DATABASE_PATH}")
    return 0
if __name__ == "__main__":
    sys.exit(main())