memory-leaks

Still reachable: lots of words in many pages.
git clone https://git.kevinlegouguec.net/memory-leaks
Log | Files | Refs | README | LICENSE

generate-deps.py (2625B)


      1 #!/usr/bin/env python3
      2 
      3 """Write dependencies for all website pages in makefile syntax.
      4 
      5 We want to compute:
      6 
      7 - a list of leaf pages,
      8 
      9 - a list of indices,
     10 
     11 - dependencies for leaf pages (READMEs excluded):
     12   OUTPUT/foo/bar.html: foo/bar.txt | OUTPUT/foo
     13 
     14 - dependencies for READMEs:
     15   OUTPUT/foo/index.html: foo/README.txt foo | OUTPUT/foo
     16 
     17 - dependencies for autogenerated indices:
     18   OUTPUT/foo/index.html: foo | OUTPUT/foo
     19 """
     20 
     21 import json
     22 from os import path
     23 from sys import argv, exit
     24 
     25 from git import Repo
     26 
     27 from helpers import deserialize_directories
     28 
     29 
     30 def parse_arguments(args):
     31     if len(args) != 4:
     32         exit(f'Usage: {argv[0]} SITE-TREE OUTPUT-FILE OUTPUT-DIR')
     33 
     34     return argv[1], argv[2], argv[3]
     35 
     36 
     37 def pjoin(directory, item):
     38     return (
     39         path.join(directory, item)
     40         if item            # Avoid trailing slash for top-level files.
     41         else directory
     42     )
     43 
     44 
     45 def write_dependencies(deps_file, directories, top_dir, out_dir):
     46     pages = []
     47     readmes = {}
     48 
     49     for dpath, d in directories.items():
     50         src_dir = pjoin(top_dir, dpath)
     51         html_dir = pjoin(out_dir, dpath)
     52 
     53         for f in d.files:
     54             src_path = path.join(src_dir, f)
     55             name, _ = path.splitext(f)
     56 
     57             if name == 'README':
     58                 readmes[dpath] = f
     59                 continue
     60 
     61             html_path = path.join(html_dir, name+'.html')
     62             print(f'{html_path}: {src_path} | {html_dir}', file=deps_file)
     63             pages.append(html_path)
     64 
     65     print(file=deps_file)
     66 
     67     for dpath in directories:
     68         src_dir = pjoin(top_dir, dpath)
     69         html_dir = pjoin(out_dir, dpath)
     70         html_path = path.join(html_dir, 'index.html')
     71 
     72         if dpath in readmes:
     73             src_path = path.join(src_dir, readmes[dpath])
     74             print(f'{html_path}: {src_path} {src_dir} | {html_dir}', file=deps_file)
     75             continue
     76 
     77         print(f'{html_path}: {src_dir} | {html_dir}', file=deps_file)
     78 
     79     print(file=deps_file)
     80 
     81     print(f'pages = {" ".join(pages)}', file=deps_file)
     82 
     83     indices = (path.join(out_dir, dpath, 'index.html') for dpath in directories)
     84     print(f'indices = {" ".join(indices)}', file=deps_file)
     85 
     86 
     87 def main(arguments):
     88     tree_file, deps_file, out_dir = parse_arguments(arguments)
     89 
     90     repository = Repo(search_parent_directories=True)
     91     top_dir = path.relpath(repository.working_dir, path.curdir)
     92 
     93     with open(tree_file) as tree:
     94         directories = deserialize_directories(json.load(tree))
     95 
     96     with open(deps_file, 'w') as deps:
     97         write_dependencies(deps, directories, top_dir, out_dir)
     98 
     99 
# Script entry point: forward the raw argv so parse_arguments can
# validate the argument count itself.
if __name__ == '__main__':
    main(argv)