This repository was archived by the owner on Jan 15, 2026. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathexport.py
More file actions
87 lines (73 loc) · 3.09 KB
/
export.py
File metadata and controls
87 lines (73 loc) · 3.09 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import json
import os
import shutil
import subprocess

import download
from toc import *
def load_json(file_path):
    """Parse and return the JSON document stored at *file_path* (UTF-8)."""
    with open(file_path, 'r', encoding='utf-8') as handle:
        return json.load(handle)
def write_json(file_path, data):
    """Serialize *data* to *file_path* as pretty-printed, non-ASCII-escaped JSON."""
    serialized = json.dumps(data, ensure_ascii=False, indent=4)
    with open(file_path, 'w', encoding='utf-8') as out:
        out.write(serialized)
def process_top_level(info, sub_nav, site_url):
    """Export one top-level nav entry to a single PDF.

    Pipeline: render each markdown page's published HTML to PDF (via the
    ``download`` helper), convert every PDF page to SVG with ``pdf2svg``,
    record the SVG pages in ``cache/toc.json``, then compile the final
    document with ``typst`` and clean up the intermediates.

    Args:
        info: dict with at least ``title``, ``subtitle``, ``authors``,
            ``info`` and ``filename`` keys (the export metadata).
        sub_nav: list of {section_title: [markdown paths]} dicts.
        site_url: base URL of the published site the PDFs are rendered from.

    Raises:
        subprocess.CalledProcessError: if pdf2svg or typst fails, so we
            never delete intermediates after a broken conversion.
    """
    first_title = info['title']
    first_out = os.path.join('cache', first_title)
    print(first_title)
    os.makedirs(first_out, exist_ok=True)
    toc = {
        "title": info["title"],
        "subtitle": info["subtitle"],
        "authors": info["authors"],
        "info": info["info"],
        "content": []
    }
    pdf_tasks = []
    sections_map = {}
    # First pass: collect every (url, pdf) task so the downloader can run
    # them as one batch, and remember which pdf belongs to which section.
    for item in sub_nav:
        for second_title, third_list in item.items():
            second_out = os.path.join(first_out, second_title)
            os.makedirs(second_out, exist_ok=True)
            sections_map.setdefault(second_title, [])
            for third_file in third_list:
                md_path = os.path.join('docs', third_file.replace('/', os.sep))
                third_title = extract_title(md_path)
                pdf_path = os.path.join(first_out, second_title, third_title + ".pdf")
                # Map "dir/page.md" -> "<site>/dir/page/index.html", except the
                # site root where "index/index.html" collapses to "index.html".
                html_url = site_url.rstrip('/') + "/" + third_file.replace('.md', '/index.html').replace('index/index.html', 'index.html') + "?export=true"
                pdf_tasks.append([html_url, pdf_path])
                sections_map[second_title].append((third_title, pdf_path))
    download.convertHtmlToPdf(pdf_tasks)
    original_dir = os.getcwd()
    # Second pass: split each PDF into per-page SVGs and build the toc tree.
    for second_title, items in sections_map.items():
        section = {"title": second_title, "sections": []}
        for third_title, pdf_path in items:
            pdf_dir = os.path.dirname(pdf_path)
            # pdf2svg is run from inside pdf_dir on a fixed-name copy so the
            # generated SVGs land next to the source PDF.
            temp_pdf = os.path.join(pdf_dir, "test.pdf")
            shutil.copy(pdf_path, temp_pdf)
            os.chdir(pdf_dir)
            try:
                # Argument list, no shell: titles containing spaces or shell
                # metacharacters stay intact; check=True surfaces failures
                # instead of silently producing a truncated export.
                subprocess.run(
                    ["pdf2svg", "test.pdf", f"{third_title}.%04d.svg", "all"],
                    check=True,
                )
            finally:
                # Always restore the working directory, even if pdf2svg fails.
                os.chdir(original_dir)
            os.remove(temp_pdf)
            # Match "<title>.<page>.svg" exactly; bare startswith(third_title)
            # would also pick up pages of any title this one is a prefix of.
            svg_files = sorted(
                f for f in os.listdir(pdf_dir)
                if f.startswith(third_title + ".") and f.endswith('.svg')
            )
            section["sections"].append({
                "title": third_title,
                "pages": [os.path.join(first_title, second_title, svg) for svg in svg_files]
            })
        toc["content"].append(section)
    shutil.copy('script/main.typ', 'cache/main.typ')
    write_json('cache/toc.json', toc)
    typst_in = os.path.join('cache', "main.typ")
    typst_out = os.path.join('cache', info['filename'])
    print(f"typst compile {typst_in} {typst_out}")
    subprocess.run(["typst", "compile", typst_in, typst_out], check=True)
    # Clean up intermediates only after a successful compile.
    shutil.rmtree(first_out)
    os.remove('cache/main.typ')
    os.remove('cache/toc.json')
if __name__ == "__main__":
    # The original os.system('export ...') ran `export` in a throwaway child
    # shell and never affected this process; set the variable in our own
    # environment so the typst subprocess actually inherits the font path.
    os.environ["TYPST_FONT_PATHS"] = "./script/fonts"
    info = load_json('info.json')
    site_url = info['project']['site_url']
    for item in info['nav']:
        # Only nav entries carrying an 'export' block are rendered to PDF.
        if 'export' not in item:
            continue
        process_top_level(item['export'], item['children'], site_url)