From 1256e547731ce41c6b65cbbb3cd1ba3497c61257 Mon Sep 17 00:00:00 2001
From: Dries Schaumont <5946712+DriesSchaumont@users.noreply.github.com>
Date: Mon, 21 Aug 2023 14:29:55 +0200
Subject: [PATCH] Add new architecture diagram (#47)

* Add new architecture diagram
* Various content
* Install d2 on actions
* Apply suggestions from code review

Co-authored-by: Robrecht Cannoodt

* Add integration section
* Add more contents for pipeline entrypoints
* Fix list
* Update roadmap
* Apply suggestions from code review

---
.github/workflows/quarto_gh_pages.yml | 4 + _extensions/d2/_extension.yml | 7 + _extensions/d2/node_modules/.bin/d2-filter | 12 + _extensions/d2/node_modules/.bin/node-which | 52 + _extensions/d2/node_modules/.bin/rimraf | 68 + _extensions/d2/node_modules/.bin/uvu | 35 + .../d2/node_modules/.package-lock.json | 1003 ++++++++ .../d2/node_modules/@types/debug/LICENSE | 21 + .../d2/node_modules/@types/debug/README.md | 74 + .../d2/node_modules/@types/debug/index.d.ts | 54 + .../d2/node_modules/@types/debug/package.json | 57 + .../d2/node_modules/@types/mdast/LICENSE | 21 + .../d2/node_modules/@types/mdast/README.md | 16 + .../d2/node_modules/@types/mdast/index.d.ts | 346 +++ .../d2/node_modules/@types/mdast/package.json | 42 + _extensions/d2/node_modules/@types/ms/LICENSE | 21 + .../d2/node_modules/@types/ms/README.md | 16 + .../d2/node_modules/@types/ms/index.d.ts | 25 + .../d2/node_modules/@types/ms/package.json | 24 + .../d2/node_modules/@types/unist/LICENSE | 21 + .../d2/node_modules/@types/unist/README.md | 16 + .../d2/node_modules/@types/unist/index.d.ts | 114 + .../d2/node_modules/@types/unist/package.json | 55 + _extensions/d2/node_modules/bail/index.d.ts | 10 + _extensions/d2/node_modules/bail/index.js | 12 + _extensions/d2/node_modules/bail/license | 22 + _extensions/d2/node_modules/bail/package.json | 73 + _extensions/d2/node_modules/bail/readme.md | 147 ++ .../balanced-match/.github/FUNDING.yml | 2 + .../d2/node_modules/balanced-match/LICENSE.md | 21 + .../d2/node_modules/balanced-match/README.md | 97 + .../d2/node_modules/balanced-match/index.js | 62 + .../node_modules/balanced-match/package.json | 48 + .../d2/node_modules/brace-expansion/LICENSE | 21 + .../d2/node_modules/brace-expansion/README.md | 129 + .../d2/node_modules/brace-expansion/index.js | 201 ++ .../node_modules/brace-expansion/package.json | 47 + .../character-entities/index.d.ts | 6 + .../node_modules/character-entities/index.js | 2132 +++++++++++++++++ .../node_modules/character-entities/license | 22 + .../character-entities/package.json | 78 + .../node_modules/character-entities/readme.md | 152 ++ .../d2/node_modules/concat-map/.travis.yml | 4 + .../d2/node_modules/concat-map/LICENSE | 18 + .../node_modules/concat-map/README.markdown | 62 + .../d2/node_modules/concat-map/example/map.js | 6 + .../d2/node_modules/concat-map/index.js | 13 + .../d2/node_modules/concat-map/package.json | 43 + .../d2/node_modules/concat-map/test/map.js | 39 + _extensions/d2/node_modules/d2-filter/LICENSE | 21 + .../d2/node_modules/d2-filter/README.md | 73 + .../d2/node_modules/d2-filter/filter-shim.js | 12 + .../d2/node_modules/d2-filter/lib/filter.js | 188 ++ .../node_modules/d2-filter/lib/filter.js.map | 1 + .../d2/node_modules/d2-filter/package.json | 46 + _extensions/d2/node_modules/debug/LICENSE | 20 + _extensions/d2/node_modules/debug/README.md | 481 ++++ .../d2/node_modules/debug/package.json | 59 + .../d2/node_modules/debug/src/browser.js | 269 +++ .../d2/node_modules/debug/src/common.js | 274 +++ 
.../d2/node_modules/debug/src/index.js | 10 + _extensions/d2/node_modules/debug/src/node.js | 263 ++ .../index.d.ts | 12 + .../index.dom.d.ts | 6 + .../index.dom.js | 33 + .../decode-named-character-reference/index.js | 18 + .../decode-named-character-reference/license | 22 + .../package.json | 89 + .../readme.md | 135 ++ .../d2/node_modules/dequal/dist/index.js | 86 + .../d2/node_modules/dequal/dist/index.min.js | 1 + .../d2/node_modules/dequal/dist/index.mjs | 84 + _extensions/d2/node_modules/dequal/index.d.ts | 1 + _extensions/d2/node_modules/dequal/license | 21 + .../d2/node_modules/dequal/lite/index.d.ts | 1 + .../d2/node_modules/dequal/lite/index.js | 31 + .../d2/node_modules/dequal/lite/index.min.js | 1 + .../d2/node_modules/dequal/lite/index.mjs | 29 + .../d2/node_modules/dequal/package.json | 57 + _extensions/d2/node_modules/dequal/readme.md | 112 + .../d2/node_modules/diff/CONTRIBUTING.md | 39 + _extensions/d2/node_modules/diff/LICENSE | 31 + _extensions/d2/node_modules/diff/README.md | 211 ++ _extensions/d2/node_modules/diff/dist/diff.js | 1627 +++++++++++++ .../d2/node_modules/diff/dist/diff.min.js | 38 + .../d2/node_modules/diff/lib/convert/dmp.js | 32 + .../d2/node_modules/diff/lib/convert/xml.js | 42 + .../d2/node_modules/diff/lib/diff/array.js | 45 + .../d2/node_modules/diff/lib/diff/base.js | 307 +++ .../node_modules/diff/lib/diff/character.js | 37 + .../d2/node_modules/diff/lib/diff/css.js | 41 + .../d2/node_modules/diff/lib/diff/json.js | 163 ++ .../d2/node_modules/diff/lib/diff/line.js | 89 + .../d2/node_modules/diff/lib/diff/sentence.js | 41 + .../d2/node_modules/diff/lib/diff/word.js | 108 + .../d2/node_modules/diff/lib/index.es6.js | 1561 ++++++++++++ _extensions/d2/node_modules/diff/lib/index.js | 216 ++ .../d2/node_modules/diff/lib/index.mjs | 1561 ++++++++++++ .../d2/node_modules/diff/lib/patch/apply.js | 238 ++ .../d2/node_modules/diff/lib/patch/create.js | 272 +++ .../d2/node_modules/diff/lib/patch/merge.js | 613 +++++ .../d2/node_modules/diff/lib/patch/parse.js | 167 ++ .../d2/node_modules/diff/lib/util/array.js | 32 + .../diff/lib/util/distance-iterator.js | 57 + .../d2/node_modules/diff/lib/util/params.js | 24 + _extensions/d2/node_modules/diff/package.json | 87 + _extensions/d2/node_modules/diff/runtime.js | 3 + .../d2/node_modules/extend/.editorconfig | 20 + _extensions/d2/node_modules/extend/.eslintrc | 17 + _extensions/d2/node_modules/extend/.jscs.json | 175 ++ .../d2/node_modules/extend/.travis.yml | 230 ++ .../d2/node_modules/extend/CHANGELOG.md | 83 + _extensions/d2/node_modules/extend/LICENSE | 23 + _extensions/d2/node_modules/extend/README.md | 81 + .../d2/node_modules/extend/component.json | 32 + _extensions/d2/node_modules/extend/index.js | 117 + .../d2/node_modules/extend/package.json | 42 + .../d2/node_modules/fs.realpath/LICENSE | 43 + .../d2/node_modules/fs.realpath/README.md | 33 + .../d2/node_modules/fs.realpath/index.js | 66 + .../d2/node_modules/fs.realpath/old.js | 303 +++ .../d2/node_modules/fs.realpath/package.json | 26 + .../d2/node_modules/get-stdin/index.d.ts | 33 + .../d2/node_modules/get-stdin/index.js | 52 + _extensions/d2/node_modules/get-stdin/license | 9 + .../d2/node_modules/get-stdin/package.json | 39 + .../d2/node_modules/get-stdin/readme.md | 55 + _extensions/d2/node_modules/glob/LICENSE | 21 + _extensions/d2/node_modules/glob/README.md | 378 +++ _extensions/d2/node_modules/glob/common.js | 238 ++ _extensions/d2/node_modules/glob/glob.js | 790 ++++++ _extensions/d2/node_modules/glob/package.json | 55 + 
_extensions/d2/node_modules/glob/sync.js | 486 ++++ _extensions/d2/node_modules/inflight/LICENSE | 15 + .../d2/node_modules/inflight/README.md | 37 + .../d2/node_modules/inflight/inflight.js | 54 + .../d2/node_modules/inflight/package.json | 29 + _extensions/d2/node_modules/inherits/LICENSE | 16 + .../d2/node_modules/inherits/README.md | 42 + .../d2/node_modules/inherits/inherits.js | 9 + .../node_modules/inherits/inherits_browser.js | 27 + .../d2/node_modules/inherits/package.json | 29 + _extensions/d2/node_modules/is-buffer/LICENSE | 21 + .../d2/node_modules/is-buffer/README.md | 54 + .../d2/node_modules/is-buffer/index.d.ts | 2 + .../d2/node_modules/is-buffer/index.js | 11 + .../d2/node_modules/is-buffer/package.json | 65 + .../d2/node_modules/is-plain-obj/index.d.ts | 35 + .../d2/node_modules/is-plain-obj/index.js | 8 + .../d2/node_modules/is-plain-obj/license | 9 + .../d2/node_modules/is-plain-obj/package.json | 41 + .../d2/node_modules/is-plain-obj/readme.md | 58 + _extensions/d2/node_modules/isexe/.npmignore | 2 + _extensions/d2/node_modules/isexe/LICENSE | 15 + _extensions/d2/node_modules/isexe/README.md | 51 + _extensions/d2/node_modules/isexe/index.js | 57 + _extensions/d2/node_modules/isexe/mode.js | 41 + .../d2/node_modules/isexe/package.json | 31 + .../d2/node_modules/isexe/test/basic.js | 221 ++ _extensions/d2/node_modules/isexe/windows.js | 42 + _extensions/d2/node_modules/kleur/colors.d.ts | 38 + _extensions/d2/node_modules/kleur/colors.js | 53 + _extensions/d2/node_modules/kleur/colors.mjs | 53 + _extensions/d2/node_modules/kleur/index.d.ts | 45 + _extensions/d2/node_modules/kleur/index.js | 110 + _extensions/d2/node_modules/kleur/index.mjs | 110 + _extensions/d2/node_modules/kleur/license | 21 + .../d2/node_modules/kleur/package.json | 51 + _extensions/d2/node_modules/kleur/readme.md | 232 ++ .../d2/node_modules/longest-streak/index.d.ts | 11 + .../d2/node_modules/longest-streak/index.js | 36 + .../d2/node_modules/longest-streak/license | 22 + .../node_modules/longest-streak/package.json | 73 + .../d2/node_modules/longest-streak/readme.md | 150 ++ .../mdast-util-from-markdown/dev/index.d.ts | 78 + .../mdast-util-from-markdown/dev/index.js | 2 + .../dev/lib/index.d.ts | 184 ++ .../mdast-util-from-markdown/dev/lib/index.js | 1468 ++++++++++++ .../mdast-util-from-markdown/index.d.ts | 78 + .../mdast-util-from-markdown/index.js | 2 + .../mdast-util-from-markdown/lib/index.d.ts | 184 ++ .../mdast-util-from-markdown/lib/index.js | 1390 +++++++++++ .../mdast-util-from-markdown/license | 22 + .../mdast-util-from-markdown/package.json | 120 + .../mdast-util-from-markdown/readme.md | 544 +++++ .../mdast-util-phrasing/index.d.ts | 1 + .../node_modules/mdast-util-phrasing/index.js | 1 + .../mdast-util-phrasing/lib/index.d.ts | 12 + .../mdast-util-phrasing/lib/index.js | 31 + .../node_modules/mdast-util-phrasing/license | 23 + .../mdast-util-phrasing/package.json | 78 + .../mdast-util-phrasing/readme.md | 195 ++ .../mdast-util-to-markdown/index.d.ts | 327 +++ .../mdast-util-to-markdown/index.js | 2 + .../mdast-util-to-markdown/lib/configure.d.ts | 12 + .../mdast-util-to-markdown/lib/configure.js | 39 + .../lib/handle/blockquote.d.ts | 25 + .../lib/handle/blockquote.js | 32 + .../lib/handle/break.d.ts | 17 + .../lib/handle/break.js | 32 + .../lib/handle/code.d.ts | 18 + .../mdast-util-to-markdown/lib/handle/code.js | 78 + .../lib/handle/definition.d.ts | 17 + .../lib/handle/definition.js | 78 + .../lib/handle/emphasis.d.ts | 32 + .../lib/handle/emphasis.js | 48 + 
.../lib/handle/heading.d.ts | 17 + .../lib/handle/heading.js | 80 + .../lib/handle/html.d.ts | 14 + .../mdast-util-to-markdown/lib/handle/html.js | 20 + .../lib/handle/image-reference.d.ts | 25 + .../lib/handle/image-reference.js | 65 + .../lib/handle/image.d.ts | 25 + .../lib/handle/image.js | 84 + .../lib/handle/index.d.ts | 41 + .../lib/handle/index.js | 45 + .../lib/handle/inline-code.d.ts | 22 + .../lib/handle/inline-code.js | 79 + .../lib/handle/link-reference.d.ts | 25 + .../lib/handle/link-reference.js | 65 + .../lib/handle/link.d.ts | 33 + .../mdast-util-to-markdown/lib/handle/link.js | 116 + .../lib/handle/list-item.d.ts | 18 + .../lib/handle/list-item.js | 65 + .../lib/handle/list.d.ts | 17 + .../mdast-util-to-markdown/lib/handle/list.js | 113 + .../lib/handle/paragraph.d.ts | 23 + .../lib/handle/paragraph.js | 22 + .../lib/handle/root.d.ts | 17 + .../mdast-util-to-markdown/lib/handle/root.js | 23 + .../lib/handle/strong.d.ts | 32 + .../lib/handle/strong.js | 48 + .../lib/handle/text.d.ts | 23 + .../mdast-util-to-markdown/lib/handle/text.js | 17 + .../lib/handle/thematic-break.d.ts | 14 + .../lib/handle/thematic-break.js | 22 + .../mdast-util-to-markdown/lib/index.d.ts | 25 + .../mdast-util-to-markdown/lib/index.js | 188 ++ .../mdast-util-to-markdown/lib/join.d.ts | 3 + .../mdast-util-to-markdown/lib/join.js | 51 + .../mdast-util-to-markdown/lib/types.d.ts | 466 ++++ .../mdast-util-to-markdown/lib/types.js | 408 ++++ .../mdast-util-to-markdown/lib/unsafe.d.ts | 4 + .../mdast-util-to-markdown/lib/unsafe.js | 147 ++ .../lib/util/association.d.ts | 2 + .../lib/util/association.js | 33 + .../lib/util/check-bullet-ordered-other.d.ts | 9 + .../lib/util/check-bullet-ordered-other.js | 39 + .../lib/util/check-bullet-ordered.d.ts | 13 + .../lib/util/check-bullet-ordered.js | 22 + .../lib/util/check-bullet-other.d.ts | 9 + .../lib/util/check-bullet-other.js | 39 + .../lib/util/check-bullet.d.ts | 13 + .../lib/util/check-bullet.js | 22 + .../lib/util/check-emphasis.d.ts | 13 + .../lib/util/check-emphasis.js | 22 + .../lib/util/check-fence.d.ts | 13 + .../lib/util/check-fence.js | 22 + .../lib/util/check-list-item-indent.d.ts | 13 + .../lib/util/check-list-item-indent.js | 28 + .../lib/util/check-quote.d.ts | 13 + .../lib/util/check-quote.js | 22 + .../lib/util/check-rule-repetition.d.ts | 13 + .../lib/util/check-rule-repetition.js | 22 + .../lib/util/check-rule.d.ts | 13 + .../lib/util/check-rule.js | 22 + .../lib/util/check-strong.d.ts | 13 + .../lib/util/check-strong.js | 22 + .../lib/util/container-flow.d.ts | 29 + .../lib/util/container-flow.js | 87 + .../lib/util/container-phrasing.d.ts | 33 + .../lib/util/container-phrasing.js | 97 + .../lib/util/format-code-as-indented.d.ts | 12 + .../lib/util/format-code-as-indented.js | 22 + .../lib/util/format-heading-as-setext.d.ts | 8 + .../lib/util/format-heading-as-setext.js | 34 + .../lib/util/format-link-as-autolink.d.ts | 8 + .../lib/util/format-link-as-autolink.js | 34 + .../lib/util/indent-lines.d.ts | 5 + .../lib/util/indent-lines.js | 35 + .../lib/util/pattern-compile.d.ts | 9 + .../lib/util/pattern-compile.js | 25 + .../lib/util/pattern-in-scope.d.ts | 15 + .../lib/util/pattern-in-scope.js | 42 + .../mdast-util-to-markdown/lib/util/safe.d.ts | 32 + .../mdast-util-to-markdown/lib/util/safe.js | 177 ++ .../lib/util/track.d.ts | 7 + .../mdast-util-to-markdown/lib/util/track.js | 58 + .../mdast-util-to-markdown/license | 22 + .../mdast-util-to-markdown/package.json | 100 + .../mdast-util-to-markdown/readme.md | 732 ++++++ 
.../mdast-util-to-string/index.d.ts | 2 + .../mdast-util-to-string/index.js | 5 + .../mdast-util-to-string/lib/index.d.ts | 31 + .../mdast-util-to-string/lib/index.js | 108 + .../node_modules/mdast-util-to-string/license | 22 + .../mdast-util-to-string/package.json | 79 + .../mdast-util-to-string/readme.md | 216 ++ .../micromark-core-commonmark/dev/index.d.ts | 22 + .../micromark-core-commonmark/dev/index.js | 22 + .../dev/lib/attention.d.ts | 10 + .../dev/lib/attention.js | 262 ++ .../dev/lib/autolink.d.ts | 5 + .../dev/lib/autolink.js | 159 ++ .../dev/lib/blank-line.d.ts | 5 + .../dev/lib/blank-line.js | 23 + .../dev/lib/block-quote.d.ts | 6 + .../dev/lib/block-quote.js | 81 + .../dev/lib/character-escape.d.ts | 5 + .../dev/lib/character-escape.js | 44 + .../dev/lib/character-reference.d.ts | 7 + .../dev/lib/character-reference.js | 109 + .../dev/lib/code-fenced.d.ts | 6 + .../dev/lib/code-fenced.js | 234 ++ .../dev/lib/code-indented.d.ts | 7 + .../dev/lib/code-indented.js | 121 + .../dev/lib/code-text.d.ts | 8 + .../dev/lib/code-text.js | 200 ++ .../dev/lib/content.d.ts | 10 + .../dev/lib/content.js | 133 + .../dev/lib/definition.d.ts | 5 + .../dev/lib/definition.js | 135 ++ .../dev/lib/hard-break-escape.d.ts | 5 + .../dev/lib/hard-break-escape.js | 41 + .../dev/lib/heading-atx.d.ts | 7 + .../dev/lib/heading-atx.js | 162 ++ .../dev/lib/html-flow.d.ts | 7 + .../dev/lib/html-flow.js | 573 +++++ .../dev/lib/html-text.d.ts | 6 + .../dev/lib/html-text.js | 489 ++++ .../dev/lib/label-end.d.ts | 9 + .../dev/lib/label-end.js | 384 +++ .../dev/lib/label-start-image.d.ts | 5 + .../dev/lib/label-start-image.js | 60 + .../dev/lib/label-start-link.d.ts | 5 + .../dev/lib/label-start-link.js | 48 + .../dev/lib/line-ending.d.ts | 5 + .../dev/lib/line-ending.js | 27 + .../dev/lib/list.d.ts | 16 + .../micromark-core-commonmark/dev/lib/list.js | 276 +++ .../dev/lib/setext-underline.d.ts | 7 + .../dev/lib/setext-underline.js | 148 ++ .../dev/lib/thematic-break.d.ts | 6 + .../dev/lib/thematic-break.js | 76 + .../micromark-core-commonmark/index.d.ts | 22 + .../micromark-core-commonmark/index.js | 22 + .../lib/attention.d.ts | 10 + .../lib/attention.js | 235 ++ .../lib/autolink.d.ts | 5 + .../micromark-core-commonmark/lib/autolink.js | 140 ++ .../lib/blank-line.d.ts | 5 + .../lib/blank-line.js | 23 + .../lib/block-quote.d.ts | 6 + .../lib/block-quote.js | 75 + .../lib/character-escape.d.ts | 5 + .../lib/character-escape.js | 39 + .../lib/character-reference.d.ts | 7 + .../lib/character-reference.js | 104 + .../lib/code-fenced.d.ts | 6 + .../lib/code-fenced.js | 234 ++ .../lib/code-indented.d.ts | 7 + .../lib/code-indented.js | 109 + .../lib/code-text.d.ts | 8 + .../lib/code-text.js | 186 ++ .../lib/content.d.ts | 10 + .../micromark-core-commonmark/lib/content.js | 124 + .../lib/definition.d.ts | 5 + .../lib/definition.js | 131 + .../lib/hard-break-escape.d.ts | 5 + .../lib/hard-break-escape.js | 36 + .../lib/heading-atx.d.ts | 7 + .../lib/heading-atx.js | 147 ++ .../lib/html-flow.d.ts | 7 + .../lib/html-flow.js | 561 +++++ .../lib/html-text.d.ts | 6 + .../lib/html-text.js | 479 ++++ .../lib/label-end.d.ts | 9 + .../lib/label-end.js | 367 +++ .../lib/label-start-image.d.ts | 5 + .../lib/label-start-image.js | 55 + .../lib/label-start-link.d.ts | 5 + .../lib/label-start-link.js | 43 + .../lib/line-ending.d.ts | 5 + .../lib/line-ending.js | 26 + .../micromark-core-commonmark/lib/list.d.ts | 16 + .../micromark-core-commonmark/lib/list.js | 269 +++ .../lib/setext-underline.d.ts | 7 + .../lib/setext-underline.js | 
134 ++ .../lib/thematic-break.d.ts | 6 + .../lib/thematic-break.js | 61 + .../micromark-core-commonmark/package.json | 69 + .../micromark-core-commonmark/readme.md | 117 + .../dev/index.d.ts | 25 + .../dev/index.js | 170 ++ .../micromark-factory-destination/index.d.ts | 25 + .../micromark-factory-destination/index.js | 151 ++ .../package.json | 55 + .../micromark-factory-destination/readme.md | 165 ++ .../micromark-factory-label/dev/index.d.ts | 21 + .../micromark-factory-label/dev/index.js | 114 + .../micromark-factory-label/index.d.ts | 21 + .../micromark-factory-label/index.js | 108 + .../micromark-factory-label/package.json | 56 + .../micromark-factory-label/readme.md | 158 ++ .../micromark-factory-space/dev/index.d.ts | 15 + .../micromark-factory-space/dev/index.js | 41 + .../micromark-factory-space/index.d.ts | 15 + .../micromark-factory-space/index.js | 39 + .../micromark-factory-space/package.json | 54 + .../micromark-factory-space/readme.md | 162 ++ .../micromark-factory-title/dev/index.d.ts | 20 + .../micromark-factory-title/dev/index.js | 103 + .../micromark-factory-title/index.d.ts | 20 + .../micromark-factory-title/index.js | 92 + .../micromark-factory-title/package.json | 57 + .../micromark-factory-title/readme.md | 164 ++ .../dev/index.d.ts | 12 + .../micromark-factory-whitespace/dev/index.js | 40 + .../micromark-factory-whitespace/index.d.ts | 12 + .../micromark-factory-whitespace/index.js | 37 + .../micromark-factory-whitespace/package.json | 56 + .../micromark-factory-whitespace/readme.md | 142 ++ .../micromark-util-character/dev/index.d.ts | 55 + .../micromark-util-character/dev/index.js | 201 ++ .../dev/lib/unicode-punctuation-regex.d.ts | 1 + .../dev/lib/unicode-punctuation-regex.js | 8 + .../micromark-util-character/index.d.ts | 55 + .../micromark-util-character/index.js | 195 ++ .../lib/unicode-punctuation-regex.d.ts | 1 + .../lib/unicode-punctuation-regex.js | 8 + .../micromark-util-character/package.json | 56 + .../micromark-util-character/readme.md | 275 +++ .../micromark-util-chunked/dev/index.d.ts | 35 + .../micromark-util-chunked/dev/index.js | 79 + .../micromark-util-chunked/index.d.ts | 35 + .../micromark-util-chunked/index.js | 69 + .../micromark-util-chunked/package.json | 56 + .../micromark-util-chunked/readme.md | 155 ++ .../dev/index.d.ts | 14 + .../dev/index.js | 37 + .../index.d.ts | 14 + .../index.js | 34 + .../package.json | 58 + .../readme.md | 141 ++ .../index.d.ts | 21 + .../index.js | 123 + .../package.json | 53 + .../readme.md | 139 ++ .../dev/index.d.ts | 14 + .../dev/index.js | 37 + .../index.d.ts | 14 + .../index.js | 30 + .../package.json | 58 + .../readme.md | 124 + .../dev/index.d.ts | 10 + .../micromark-util-decode-string/dev/index.js | 47 + .../micromark-util-decode-string/index.d.ts | 10 + .../micromark-util-decode-string/index.js | 40 + .../micromark-util-decode-string/package.json | 61 + .../micromark-util-decode-string/readme.md | 122 + .../micromark-util-encode/index.d.ts | 11 + .../micromark-util-encode/index.js | 24 + .../micromark-util-encode/package.json | 48 + .../micromark-util-encode/readme.md | 121 + .../micromark-util-html-tag-name/index.d.ts | 22 + .../micromark-util-html-tag-name/index.js | 85 + .../micromark-util-html-tag-name/package.json | 49 + .../micromark-util-html-tag-name/readme.md | 148 ++ .../dev/index.d.ts | 7 + .../dev/index.js | 25 + .../index.d.ts | 7 + .../index.js | 20 + .../package.json | 56 + .../readme.md | 129 + .../micromark-util-resolve-all/index.d.ts | 23 + .../micromark-util-resolve-all/index.js | 
30 + .../micromark-util-resolve-all/package.json | 50 + .../micromark-util-resolve-all/readme.md | 182 ++ .../dev/index.d.ts | 33 + .../micromark-util-sanitize-uri/dev/index.js | 120 + .../micromark-util-sanitize-uri/index.d.ts | 33 + .../micromark-util-sanitize-uri/index.js | 111 + .../micromark-util-sanitize-uri/package.json | 58 + .../micromark-util-sanitize-uri/readme.md | 171 ++ .../micromark-util-subtokenize/dev/index.d.ts | 12 + .../micromark-util-subtokenize/dev/index.js | 259 ++ .../micromark-util-subtokenize/index.d.ts | 12 + .../micromark-util-subtokenize/index.js | 247 ++ .../micromark-util-subtokenize/package.json | 57 + .../micromark-util-subtokenize/readme.md | 126 + .../micromark-util-symbol/codes.d.ts | 138 ++ .../micromark-util-symbol/codes.js | 158 ++ .../micromark-util-symbol/constants.d.ts | 36 + .../micromark-util-symbol/constants.js | 44 + .../micromark-util-symbol/package.json | 60 + .../micromark-util-symbol/readme.md | 122 + .../micromark-util-symbol/types.d.ts | 105 + .../micromark-util-symbol/types.js | 451 ++++ .../micromark-util-symbol/values.d.ts | 101 + .../micromark-util-symbol/values.js | 107 + .../micromark-util-types/index.d.ts | 738 ++++++ .../micromark-util-types/index.js | 543 +++++ .../micromark-util-types/package.json | 42 + .../micromark-util-types/readme.md | 108 + .../d2/node_modules/micromark/dev/index.d.ts | 14 + .../d2/node_modules/micromark/dev/index.js | 42 + .../micromark/dev/lib/compile.d.ts | 25 + .../node_modules/micromark/dev/lib/compile.js | 990 ++++++++ .../micromark/dev/lib/constructs.d.ts | 19 + .../micromark/dev/lib/constructs.js | 101 + .../micromark/dev/lib/create-tokenizer.d.ts | 40 + .../micromark/dev/lib/create-tokenizer.js | 654 +++++ .../micromark/dev/lib/initialize/content.d.ts | 6 + .../micromark/dev/lib/initialize/content.js | 93 + .../dev/lib/initialize/document.d.ts | 12 + .../micromark/dev/lib/initialize/document.js | 422 ++++ .../micromark/dev/lib/initialize/flow.d.ts | 5 + .../micromark/dev/lib/initialize/flow.js | 79 + .../micromark/dev/lib/initialize/text.d.ts | 11 + .../micromark/dev/lib/initialize/text.js | 225 ++ .../node_modules/micromark/dev/lib/parse.d.ts | 13 + .../node_modules/micromark/dev/lib/parse.js | 52 + .../micromark/dev/lib/postprocess.d.ts | 8 + .../micromark/dev/lib/postprocess.js | 17 + .../micromark/dev/lib/preprocess.d.ts | 13 + .../micromark/dev/lib/preprocess.js | 133 + .../d2/node_modules/micromark/dev/stream.d.ts | 22 + .../d2/node_modules/micromark/dev/stream.js | 206 ++ .../d2/node_modules/micromark/index.d.ts | 14 + .../d2/node_modules/micromark/index.js | 40 + .../node_modules/micromark/lib/compile.d.ts | 25 + .../d2/node_modules/micromark/lib/compile.js | 969 ++++++++ .../micromark/lib/constructs.d.ts | 19 + .../node_modules/micromark/lib/constructs.js | 105 + .../micromark/lib/create-tokenizer.d.ts | 40 + .../micromark/lib/create-tokenizer.js | 595 +++++ .../micromark/lib/initialize/content.d.ts | 6 + .../micromark/lib/initialize/content.js | 79 + .../micromark/lib/initialize/document.d.ts | 12 + .../micromark/lib/initialize/document.js | 373 +++ .../micromark/lib/initialize/flow.d.ts | 5 + .../micromark/lib/initialize/flow.js | 65 + .../micromark/lib/initialize/text.d.ts | 11 + .../micromark/lib/initialize/text.js | 218 ++ .../d2/node_modules/micromark/lib/parse.d.ts | 13 + .../d2/node_modules/micromark/lib/parse.js | 52 + .../micromark/lib/postprocess.d.ts | 8 + .../node_modules/micromark/lib/postprocess.js | 16 + .../micromark/lib/preprocess.d.ts | 13 + 
.../node_modules/micromark/lib/preprocess.js | 129 + .../d2/node_modules/micromark/package.json | 129 + .../d2/node_modules/micromark/readme.md | 1751 ++++++++++++++ .../d2/node_modules/micromark/stream.d.ts | 22 + .../d2/node_modules/micromark/stream.js | 196 ++ _extensions/d2/node_modules/minimatch/LICENSE | 15 + .../d2/node_modules/minimatch/README.md | 230 ++ .../d2/node_modules/minimatch/minimatch.js | 947 ++++++++ .../d2/node_modules/minimatch/package.json | 33 + _extensions/d2/node_modules/mri/index.d.ts | 21 + _extensions/d2/node_modules/mri/lib/index.js | 119 + _extensions/d2/node_modules/mri/lib/index.mjs | 119 + _extensions/d2/node_modules/mri/license.md | 21 + _extensions/d2/node_modules/mri/package.json | 43 + _extensions/d2/node_modules/mri/readme.md | 166 ++ _extensions/d2/node_modules/ms/index.js | 162 ++ _extensions/d2/node_modules/ms/license.md | 21 + _extensions/d2/node_modules/ms/package.json | 37 + _extensions/d2/node_modules/ms/readme.md | 60 + _extensions/d2/node_modules/once/LICENSE | 15 + _extensions/d2/node_modules/once/README.md | 79 + _extensions/d2/node_modules/once/once.js | 42 + _extensions/d2/node_modules/once/package.json | 33 + .../node_modules/pandoc-filter/.editorconfig | 12 + .../node_modules/pandoc-filter/.gitattributes | 1 + .../d2/node_modules/pandoc-filter/.jshintrc | 19 + .../d2/node_modules/pandoc-filter/.travis.yml | 4 + .../d2/node_modules/pandoc-filter/LICENSE | 20 + .../d2/node_modules/pandoc-filter/README.md | 86 + .../d2/node_modules/pandoc-filter/index.d.ts | 240 ++ .../d2/node_modules/pandoc-filter/index.js | 265 ++ .../node_modules/pandoc-filter/package.json | 42 + .../node_modules/pandoc-filter/tsconfig.json | 10 + .../d2/node_modules/path-is-absolute/index.js | 20 + .../d2/node_modules/path-is-absolute/license | 21 + .../path-is-absolute/package.json | 43 + .../node_modules/path-is-absolute/readme.md | 59 + .../d2/node_modules/remark-parse/index.d.ts | 8 + .../d2/node_modules/remark-parse/index.js | 3 + .../node_modules/remark-parse/lib/index.d.ts | 6 + .../d2/node_modules/remark-parse/lib/index.js | 28 + .../d2/node_modules/remark-parse/license | 22 + .../d2/node_modules/remark-parse/package.json | 59 + .../d2/node_modules/remark-parse/readme.md | 410 ++++ .../node_modules/remark-stringify/index.d.ts | 8 + .../d2/node_modules/remark-stringify/index.js | 3 + .../remark-stringify/lib/index.d.ts | 5 + .../remark-stringify/lib/index.js | 31 + .../d2/node_modules/remark-stringify/license | 22 + .../remark-stringify/package.json | 59 + .../node_modules/remark-stringify/readme.md | 434 ++++ _extensions/d2/node_modules/remark/index.d.ts | 6 + _extensions/d2/node_modules/remark/index.js | 5 + _extensions/d2/node_modules/remark/license | 22 + .../d2/node_modules/remark/package.json | 57 + _extensions/d2/node_modules/remark/readme.md | 377 +++ .../d2/node_modules/rimraf/CHANGELOG.md | 65 + _extensions/d2/node_modules/rimraf/LICENSE | 15 + _extensions/d2/node_modules/rimraf/README.md | 101 + _extensions/d2/node_modules/rimraf/bin.js | 68 + .../d2/node_modules/rimraf/package.json | 32 + _extensions/d2/node_modules/rimraf/rimraf.js | 360 +++ _extensions/d2/node_modules/sade/index.d.ts | 37 + _extensions/d2/node_modules/sade/lib/index.js | 1 + .../d2/node_modules/sade/lib/index.mjs | 1 + _extensions/d2/node_modules/sade/license | 21 + _extensions/d2/node_modules/sade/package.json | 45 + _extensions/d2/node_modules/sade/readme.md | 672 ++++++ _extensions/d2/node_modules/tmp/CHANGELOG.md | 288 +++ _extensions/d2/node_modules/tmp/LICENSE | 21 + 
_extensions/d2/node_modules/tmp/README.md | 365 +++ _extensions/d2/node_modules/tmp/lib/tmp.js | 780 ++++++ _extensions/d2/node_modules/tmp/package.json | 58 + _extensions/d2/node_modules/trough/index.d.ts | 49 + _extensions/d2/node_modules/trough/index.js | 160 ++ _extensions/d2/node_modules/trough/license | 21 + .../d2/node_modules/trough/package.json | 78 + _extensions/d2/node_modules/trough/readme.md | 400 ++++ .../d2/node_modules/unified/index.d.ts | 863 +++++++ _extensions/d2/node_modules/unified/index.js | 1 + .../d2/node_modules/unified/lib/index.d.ts | 19 + .../d2/node_modules/unified/lib/index.js | 599 +++++ _extensions/d2/node_modules/unified/license | 21 + .../d2/node_modules/unified/package.json | 107 + _extensions/d2/node_modules/unified/readme.md | 1450 +++++++++++ .../d2/node_modules/unist-util-is/index.d.ts | 13 + .../d2/node_modules/unist-util-is/index.js | 22 + .../node_modules/unist-util-is/lib/index.d.ts | 207 ++ .../node_modules/unist-util-is/lib/index.js | 301 +++ .../d2/node_modules/unist-util-is/license | 22 + .../node_modules/unist-util-is/package.json | 88 + .../d2/node_modules/unist-util-is/readme.md | 419 ++++ .../unist-util-stringify-position/index.d.ts | 1 + .../unist-util-stringify-position/index.js | 1 + .../lib/index.d.ts | 61 + .../lib/index.js | 84 + .../unist-util-stringify-position/license | 22 + .../package.json | 80 + .../unist-util-stringify-position/readme.md | 203 ++ .../complex-types.d.ts | 14 + .../unist-util-visit-parents/index.d.ts | 10 + .../unist-util-visit-parents/index.js | 2 + .../lib/color.browser.d.ts | 5 + .../lib/color.browser.js | 7 + .../unist-util-visit-parents/lib/color.d.ts | 5 + .../unist-util-visit-parents/lib/color.js | 7 + .../lib/complex-types.d.ts | 67 + .../unist-util-visit-parents/lib/index.d.ts | 92 + .../unist-util-visit-parents/lib/index.js | 241 ++ .../unist-util-visit-parents/license | 22 + .../unist-util-visit-parents/package.json | 104 + .../unist-util-visit-parents/readme.md | 385 +++ .../unist-util-visit/complex-types.d.ts | 2 + .../node_modules/unist-util-visit/index.d.ts | 9 + .../d2/node_modules/unist-util-visit/index.js | 2 + .../unist-util-visit/lib/index.d.ts | 137 ++ .../unist-util-visit/lib/index.js | 182 ++ .../d2/node_modules/unist-util-visit/license | 22 + .../unist-util-visit/package.json | 103 + .../node_modules/unist-util-visit/readme.md | 318 +++ .../d2/node_modules/uvu/assert/index.d.ts | 47 + .../d2/node_modules/uvu/assert/index.js | 173 ++ .../d2/node_modules/uvu/assert/index.mjs | 160 ++ _extensions/d2/node_modules/uvu/bin.js | 35 + .../d2/node_modules/uvu/diff/index.d.ts | 5 + _extensions/d2/node_modules/uvu/diff/index.js | 228 ++ .../d2/node_modules/uvu/diff/index.mjs | 219 ++ _extensions/d2/node_modules/uvu/dist/index.js | 167 ++ .../d2/node_modules/uvu/dist/index.mjs | 163 ++ _extensions/d2/node_modules/uvu/index.d.ts | 27 + _extensions/d2/node_modules/uvu/license | 21 + _extensions/d2/node_modules/uvu/package.json | 85 + .../d2/node_modules/uvu/parse/index.d.ts | 22 + .../d2/node_modules/uvu/parse/index.js | 50 + .../d2/node_modules/uvu/parse/index.mjs | 51 + _extensions/d2/node_modules/uvu/readme.md | 137 ++ .../d2/node_modules/uvu/run/index.d.ts | 2 + _extensions/d2/node_modules/uvu/run/index.js | 12 + _extensions/d2/node_modules/uvu/run/index.mjs | 13 + .../d2/node_modules/vfile-message/index.d.ts | 1 + .../d2/node_modules/vfile-message/index.js | 1 + .../node_modules/vfile-message/lib/index.d.ts | 125 + .../node_modules/vfile-message/lib/index.js | 225 ++ 
.../d2/node_modules/vfile-message/license | 22 + .../node_modules/vfile-message/package.json | 78 + .../d2/node_modules/vfile-message/readme.md | 244 ++ _extensions/d2/node_modules/vfile/index.d.ts | 64 + _extensions/d2/node_modules/vfile/index.js | 1 + .../d2/node_modules/vfile/lib/index.d.ts | 355 +++ .../d2/node_modules/vfile/lib/index.js | 520 ++++ .../vfile/lib/minpath.browser.d.ts | 46 + .../node_modules/vfile/lib/minpath.browser.js | 422 ++++ .../d2/node_modules/vfile/lib/minpath.d.ts | 1 + .../d2/node_modules/vfile/lib/minpath.js | 1 + .../vfile/lib/minproc.browser.d.ts | 5 + .../node_modules/vfile/lib/minproc.browser.js | 8 + .../d2/node_modules/vfile/lib/minproc.d.ts | 1 + .../d2/node_modules/vfile/lib/minproc.js | 1 + .../vfile/lib/minurl.browser.d.ts | 9 + .../node_modules/vfile/lib/minurl.browser.js | 78 + .../d2/node_modules/vfile/lib/minurl.d.ts | 2 + .../d2/node_modules/vfile/lib/minurl.js | 2 + .../node_modules/vfile/lib/minurl.shared.d.ts | 42 + .../node_modules/vfile/lib/minurl.shared.js | 37 + _extensions/d2/node_modules/vfile/license | 21 + .../d2/node_modules/vfile/package.json | 111 + _extensions/d2/node_modules/vfile/readme.md | 786 ++++++ _extensions/d2/node_modules/which/LICENSE | 15 + _extensions/d2/node_modules/which/README.md | 51 + .../d2/node_modules/which/bin/which.js | 52 + .../d2/node_modules/which/lib/index.js | 115 + .../d2/node_modules/which/package.json | 51 + _extensions/d2/node_modules/wrappy/LICENSE | 15 + _extensions/d2/node_modules/wrappy/README.md | 36 + .../d2/node_modules/wrappy/package.json | 29 + _extensions/d2/node_modules/wrappy/wrappy.js | 33 + _extensions/d2/node_modules/zwitch/index.d.ts | 67 + _extensions/d2/node_modules/zwitch/index.js | 118 + _extensions/d2/node_modules/zwitch/license | 22 + .../d2/node_modules/zwitch/package.json | 72 + _extensions/d2/node_modules/zwitch/readme.md | 226 ++ _quarto.yml | 2 + contributing/creating_components.qmd | 121 +- contributing/project_structure.qmd | 14 +- fundamentals/architecture.qmd | 934 +++++++- fundamentals/concepts.qmd | 74 +- fundamentals/philosophy.qmd | 15 +- fundamentals/roadmap.qmd | 12 +- images/anndata_schema.svg | 175 ++ 738 files changed, 78557 insertions(+), 151 deletions(-) create mode 100644 _extensions/d2/_extension.yml create mode 100644 _extensions/d2/node_modules/.bin/d2-filter create mode 100644 _extensions/d2/node_modules/.bin/node-which create mode 100644 _extensions/d2/node_modules/.bin/rimraf create mode 100644 _extensions/d2/node_modules/.bin/uvu create mode 100644 _extensions/d2/node_modules/.package-lock.json create mode 100644 _extensions/d2/node_modules/@types/debug/LICENSE create mode 100644 _extensions/d2/node_modules/@types/debug/README.md create mode 100644 _extensions/d2/node_modules/@types/debug/index.d.ts create mode 100644 _extensions/d2/node_modules/@types/debug/package.json create mode 100644 _extensions/d2/node_modules/@types/mdast/LICENSE create mode 100644 _extensions/d2/node_modules/@types/mdast/README.md create mode 100644 _extensions/d2/node_modules/@types/mdast/index.d.ts create mode 100644 _extensions/d2/node_modules/@types/mdast/package.json create mode 100644 _extensions/d2/node_modules/@types/ms/LICENSE create mode 100644 _extensions/d2/node_modules/@types/ms/README.md create mode 100644 _extensions/d2/node_modules/@types/ms/index.d.ts create mode 100644 _extensions/d2/node_modules/@types/ms/package.json create mode 100644 _extensions/d2/node_modules/@types/unist/LICENSE create mode 100644 _extensions/d2/node_modules/@types/unist/README.md 
create mode 100644 _extensions/d2/node_modules/@types/unist/index.d.ts create mode 100644 _extensions/d2/node_modules/@types/unist/package.json create mode 100644 _extensions/d2/node_modules/bail/index.d.ts create mode 100644 _extensions/d2/node_modules/bail/index.js create mode 100644 _extensions/d2/node_modules/bail/license create mode 100644 _extensions/d2/node_modules/bail/package.json create mode 100644 _extensions/d2/node_modules/bail/readme.md create mode 100644 _extensions/d2/node_modules/balanced-match/.github/FUNDING.yml create mode 100644 _extensions/d2/node_modules/balanced-match/LICENSE.md create mode 100644 _extensions/d2/node_modules/balanced-match/README.md create mode 100644 _extensions/d2/node_modules/balanced-match/index.js create mode 100644 _extensions/d2/node_modules/balanced-match/package.json create mode 100644 _extensions/d2/node_modules/brace-expansion/LICENSE create mode 100644 _extensions/d2/node_modules/brace-expansion/README.md create mode 100644 _extensions/d2/node_modules/brace-expansion/index.js create mode 100644 _extensions/d2/node_modules/brace-expansion/package.json create mode 100644 _extensions/d2/node_modules/character-entities/index.d.ts create mode 100644 _extensions/d2/node_modules/character-entities/index.js create mode 100644 _extensions/d2/node_modules/character-entities/license create mode 100644 _extensions/d2/node_modules/character-entities/package.json create mode 100644 _extensions/d2/node_modules/character-entities/readme.md create mode 100644 _extensions/d2/node_modules/concat-map/.travis.yml create mode 100644 _extensions/d2/node_modules/concat-map/LICENSE create mode 100644 _extensions/d2/node_modules/concat-map/README.markdown create mode 100644 _extensions/d2/node_modules/concat-map/example/map.js create mode 100644 _extensions/d2/node_modules/concat-map/index.js create mode 100644 _extensions/d2/node_modules/concat-map/package.json create mode 100644 _extensions/d2/node_modules/concat-map/test/map.js create mode 100644 _extensions/d2/node_modules/d2-filter/LICENSE create mode 100644 _extensions/d2/node_modules/d2-filter/README.md create mode 100644 _extensions/d2/node_modules/d2-filter/filter-shim.js create mode 100644 _extensions/d2/node_modules/d2-filter/lib/filter.js create mode 100644 _extensions/d2/node_modules/d2-filter/lib/filter.js.map create mode 100644 _extensions/d2/node_modules/d2-filter/package.json create mode 100644 _extensions/d2/node_modules/debug/LICENSE create mode 100644 _extensions/d2/node_modules/debug/README.md create mode 100644 _extensions/d2/node_modules/debug/package.json create mode 100644 _extensions/d2/node_modules/debug/src/browser.js create mode 100644 _extensions/d2/node_modules/debug/src/common.js create mode 100644 _extensions/d2/node_modules/debug/src/index.js create mode 100644 _extensions/d2/node_modules/debug/src/node.js create mode 100644 _extensions/d2/node_modules/decode-named-character-reference/index.d.ts create mode 100644 _extensions/d2/node_modules/decode-named-character-reference/index.dom.d.ts create mode 100644 _extensions/d2/node_modules/decode-named-character-reference/index.dom.js create mode 100644 _extensions/d2/node_modules/decode-named-character-reference/index.js create mode 100644 _extensions/d2/node_modules/decode-named-character-reference/license create mode 100644 _extensions/d2/node_modules/decode-named-character-reference/package.json create mode 100644 _extensions/d2/node_modules/decode-named-character-reference/readme.md create mode 100644 
_extensions/d2/node_modules/dequal/dist/index.js create mode 100644 _extensions/d2/node_modules/dequal/dist/index.min.js create mode 100644 _extensions/d2/node_modules/dequal/dist/index.mjs create mode 100644 _extensions/d2/node_modules/dequal/index.d.ts create mode 100644 _extensions/d2/node_modules/dequal/license create mode 100644 _extensions/d2/node_modules/dequal/lite/index.d.ts create mode 100644 _extensions/d2/node_modules/dequal/lite/index.js create mode 100644 _extensions/d2/node_modules/dequal/lite/index.min.js create mode 100644 _extensions/d2/node_modules/dequal/lite/index.mjs create mode 100644 _extensions/d2/node_modules/dequal/package.json create mode 100644 _extensions/d2/node_modules/dequal/readme.md create mode 100644 _extensions/d2/node_modules/diff/CONTRIBUTING.md create mode 100644 _extensions/d2/node_modules/diff/LICENSE create mode 100644 _extensions/d2/node_modules/diff/README.md create mode 100644 _extensions/d2/node_modules/diff/dist/diff.js create mode 100644 _extensions/d2/node_modules/diff/dist/diff.min.js create mode 100644 _extensions/d2/node_modules/diff/lib/convert/dmp.js create mode 100644 _extensions/d2/node_modules/diff/lib/convert/xml.js create mode 100644 _extensions/d2/node_modules/diff/lib/diff/array.js create mode 100644 _extensions/d2/node_modules/diff/lib/diff/base.js create mode 100644 _extensions/d2/node_modules/diff/lib/diff/character.js create mode 100644 _extensions/d2/node_modules/diff/lib/diff/css.js create mode 100644 _extensions/d2/node_modules/diff/lib/diff/json.js create mode 100644 _extensions/d2/node_modules/diff/lib/diff/line.js create mode 100644 _extensions/d2/node_modules/diff/lib/diff/sentence.js create mode 100644 _extensions/d2/node_modules/diff/lib/diff/word.js create mode 100644 _extensions/d2/node_modules/diff/lib/index.es6.js create mode 100644 _extensions/d2/node_modules/diff/lib/index.js create mode 100644 _extensions/d2/node_modules/diff/lib/index.mjs create mode 100644 _extensions/d2/node_modules/diff/lib/patch/apply.js create mode 100644 _extensions/d2/node_modules/diff/lib/patch/create.js create mode 100644 _extensions/d2/node_modules/diff/lib/patch/merge.js create mode 100644 _extensions/d2/node_modules/diff/lib/patch/parse.js create mode 100644 _extensions/d2/node_modules/diff/lib/util/array.js create mode 100644 _extensions/d2/node_modules/diff/lib/util/distance-iterator.js create mode 100644 _extensions/d2/node_modules/diff/lib/util/params.js create mode 100644 _extensions/d2/node_modules/diff/package.json create mode 100644 _extensions/d2/node_modules/diff/runtime.js create mode 100644 _extensions/d2/node_modules/extend/.editorconfig create mode 100644 _extensions/d2/node_modules/extend/.eslintrc create mode 100644 _extensions/d2/node_modules/extend/.jscs.json create mode 100644 _extensions/d2/node_modules/extend/.travis.yml create mode 100644 _extensions/d2/node_modules/extend/CHANGELOG.md create mode 100644 _extensions/d2/node_modules/extend/LICENSE create mode 100644 _extensions/d2/node_modules/extend/README.md create mode 100644 _extensions/d2/node_modules/extend/component.json create mode 100644 _extensions/d2/node_modules/extend/index.js create mode 100644 _extensions/d2/node_modules/extend/package.json create mode 100644 _extensions/d2/node_modules/fs.realpath/LICENSE create mode 100644 _extensions/d2/node_modules/fs.realpath/README.md create mode 100644 _extensions/d2/node_modules/fs.realpath/index.js create mode 100644 _extensions/d2/node_modules/fs.realpath/old.js create mode 100644 
_extensions/d2/node_modules/fs.realpath/package.json create mode 100644 _extensions/d2/node_modules/get-stdin/index.d.ts create mode 100644 _extensions/d2/node_modules/get-stdin/index.js create mode 100644 _extensions/d2/node_modules/get-stdin/license create mode 100644 _extensions/d2/node_modules/get-stdin/package.json create mode 100644 _extensions/d2/node_modules/get-stdin/readme.md create mode 100644 _extensions/d2/node_modules/glob/LICENSE create mode 100644 _extensions/d2/node_modules/glob/README.md create mode 100644 _extensions/d2/node_modules/glob/common.js create mode 100644 _extensions/d2/node_modules/glob/glob.js create mode 100644 _extensions/d2/node_modules/glob/package.json create mode 100644 _extensions/d2/node_modules/glob/sync.js create mode 100644 _extensions/d2/node_modules/inflight/LICENSE create mode 100644 _extensions/d2/node_modules/inflight/README.md create mode 100644 _extensions/d2/node_modules/inflight/inflight.js create mode 100644 _extensions/d2/node_modules/inflight/package.json create mode 100644 _extensions/d2/node_modules/inherits/LICENSE create mode 100644 _extensions/d2/node_modules/inherits/README.md create mode 100644 _extensions/d2/node_modules/inherits/inherits.js create mode 100644 _extensions/d2/node_modules/inherits/inherits_browser.js create mode 100644 _extensions/d2/node_modules/inherits/package.json create mode 100644 _extensions/d2/node_modules/is-buffer/LICENSE create mode 100644 _extensions/d2/node_modules/is-buffer/README.md create mode 100644 _extensions/d2/node_modules/is-buffer/index.d.ts create mode 100644 _extensions/d2/node_modules/is-buffer/index.js create mode 100644 _extensions/d2/node_modules/is-buffer/package.json create mode 100644 _extensions/d2/node_modules/is-plain-obj/index.d.ts create mode 100644 _extensions/d2/node_modules/is-plain-obj/index.js create mode 100644 _extensions/d2/node_modules/is-plain-obj/license create mode 100644 _extensions/d2/node_modules/is-plain-obj/package.json create mode 100644 _extensions/d2/node_modules/is-plain-obj/readme.md create mode 100644 _extensions/d2/node_modules/isexe/.npmignore create mode 100644 _extensions/d2/node_modules/isexe/LICENSE create mode 100644 _extensions/d2/node_modules/isexe/README.md create mode 100644 _extensions/d2/node_modules/isexe/index.js create mode 100644 _extensions/d2/node_modules/isexe/mode.js create mode 100644 _extensions/d2/node_modules/isexe/package.json create mode 100644 _extensions/d2/node_modules/isexe/test/basic.js create mode 100644 _extensions/d2/node_modules/isexe/windows.js create mode 100644 _extensions/d2/node_modules/kleur/colors.d.ts create mode 100644 _extensions/d2/node_modules/kleur/colors.js create mode 100644 _extensions/d2/node_modules/kleur/colors.mjs create mode 100644 _extensions/d2/node_modules/kleur/index.d.ts create mode 100644 _extensions/d2/node_modules/kleur/index.js create mode 100644 _extensions/d2/node_modules/kleur/index.mjs create mode 100644 _extensions/d2/node_modules/kleur/license create mode 100644 _extensions/d2/node_modules/kleur/package.json create mode 100644 _extensions/d2/node_modules/kleur/readme.md create mode 100644 _extensions/d2/node_modules/longest-streak/index.d.ts create mode 100644 _extensions/d2/node_modules/longest-streak/index.js create mode 100644 _extensions/d2/node_modules/longest-streak/license create mode 100644 _extensions/d2/node_modules/longest-streak/package.json create mode 100644 _extensions/d2/node_modules/longest-streak/readme.md create mode 100644 
_extensions/d2/node_modules/mdast-util-from-markdown/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-from-markdown/dev/index.js create mode 100644 _extensions/d2/node_modules/mdast-util-from-markdown/dev/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-from-markdown/dev/lib/index.js create mode 100644 _extensions/d2/node_modules/mdast-util-from-markdown/index.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-from-markdown/index.js create mode 100644 _extensions/d2/node_modules/mdast-util-from-markdown/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-from-markdown/lib/index.js create mode 100644 _extensions/d2/node_modules/mdast-util-from-markdown/license create mode 100644 _extensions/d2/node_modules/mdast-util-from-markdown/package.json create mode 100644 _extensions/d2/node_modules/mdast-util-from-markdown/readme.md create mode 100644 _extensions/d2/node_modules/mdast-util-phrasing/index.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-phrasing/index.js create mode 100644 _extensions/d2/node_modules/mdast-util-phrasing/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-phrasing/lib/index.js create mode 100644 _extensions/d2/node_modules/mdast-util-phrasing/license create mode 100644 _extensions/d2/node_modules/mdast-util-phrasing/package.json create mode 100644 _extensions/d2/node_modules/mdast-util-phrasing/readme.md create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/index.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/index.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/configure.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/configure.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/blockquote.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/blockquote.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/break.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/break.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/code.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/code.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/definition.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/definition.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/emphasis.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/emphasis.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/heading.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/heading.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/html.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/html.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image-reference.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image-reference.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image.js create mode 100644 
_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/index.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/index.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/inline-code.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/inline-code.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link-reference.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link-reference.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list-item.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list-item.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/paragraph.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/paragraph.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/root.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/root.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/strong.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/strong.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/text.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/text.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/thematic-break.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/thematic-break.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/index.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/join.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/join.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/types.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/types.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/unsafe.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/unsafe.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/association.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/association.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered-other.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered-other.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-other.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-other.js create mode 100644 
_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-emphasis.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-emphasis.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-fence.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-fence.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-list-item-indent.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-list-item-indent.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-quote.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-quote.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule-repetition.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule-repetition.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-strong.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-strong.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-flow.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-flow.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-phrasing.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-phrasing.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-code-as-indented.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-code-as-indented.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-heading-as-setext.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-heading-as-setext.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-link-as-autolink.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-link-as-autolink.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/indent-lines.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/indent-lines.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-compile.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-compile.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-in-scope.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-in-scope.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/safe.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/safe.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/track.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/lib/util/track.js create mode 
100644 _extensions/d2/node_modules/mdast-util-to-markdown/license create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/package.json create mode 100644 _extensions/d2/node_modules/mdast-util-to-markdown/readme.md create mode 100644 _extensions/d2/node_modules/mdast-util-to-string/index.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-string/index.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-string/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/mdast-util-to-string/lib/index.js create mode 100644 _extensions/d2/node_modules/mdast-util-to-string/license create mode 100644 _extensions/d2/node_modules/mdast-util-to-string/package.json create mode 100644 _extensions/d2/node_modules/mdast-util-to-string/readme.md create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/attention.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/attention.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/autolink.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/autolink.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/blank-line.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/blank-line.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/block-quote.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/block-quote.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-escape.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-escape.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-reference.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-reference.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-fenced.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-fenced.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-indented.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-indented.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-text.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-text.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/content.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/content.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/definition.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/definition.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/hard-break-escape.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/hard-break-escape.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/heading-atx.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/heading-atx.js create mode 100644 
_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-flow.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-flow.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-text.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-text.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-end.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-end.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-image.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-image.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-link.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-link.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/line-ending.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/line-ending.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/list.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/list.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/setext-underline.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/setext-underline.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/thematic-break.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/dev/lib/thematic-break.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/index.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/attention.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/attention.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/autolink.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/autolink.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/blank-line.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/blank-line.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/block-quote.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/block-quote.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/character-escape.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/character-escape.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/character-reference.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/character-reference.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/code-fenced.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/code-fenced.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/code-indented.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/code-indented.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/code-text.d.ts create mode 100644 
_extensions/d2/node_modules/micromark-core-commonmark/lib/code-text.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/content.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/content.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/definition.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/definition.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/hard-break-escape.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/hard-break-escape.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/heading-atx.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/heading-atx.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/html-flow.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/html-flow.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/html-text.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/html-text.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/label-end.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/label-end.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-image.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-image.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-link.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-link.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/line-ending.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/line-ending.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/list.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/list.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/setext-underline.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/setext-underline.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/thematic-break.d.ts create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/lib/thematic-break.js create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/package.json create mode 100644 _extensions/d2/node_modules/micromark-core-commonmark/readme.md create mode 100644 _extensions/d2/node_modules/micromark-factory-destination/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-factory-destination/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-factory-destination/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-factory-destination/index.js create mode 100644 _extensions/d2/node_modules/micromark-factory-destination/package.json create mode 100644 _extensions/d2/node_modules/micromark-factory-destination/readme.md create mode 100644 _extensions/d2/node_modules/micromark-factory-label/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-factory-label/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-factory-label/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-factory-label/index.js create mode 
100644 _extensions/d2/node_modules/micromark-factory-label/package.json create mode 100644 _extensions/d2/node_modules/micromark-factory-label/readme.md create mode 100644 _extensions/d2/node_modules/micromark-factory-space/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-factory-space/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-factory-space/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-factory-space/index.js create mode 100644 _extensions/d2/node_modules/micromark-factory-space/package.json create mode 100644 _extensions/d2/node_modules/micromark-factory-space/readme.md create mode 100644 _extensions/d2/node_modules/micromark-factory-title/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-factory-title/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-factory-title/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-factory-title/index.js create mode 100644 _extensions/d2/node_modules/micromark-factory-title/package.json create mode 100644 _extensions/d2/node_modules/micromark-factory-title/readme.md create mode 100644 _extensions/d2/node_modules/micromark-factory-whitespace/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-factory-whitespace/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-factory-whitespace/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-factory-whitespace/index.js create mode 100644 _extensions/d2/node_modules/micromark-factory-whitespace/package.json create mode 100644 _extensions/d2/node_modules/micromark-factory-whitespace/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-character/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-character/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-character/dev/lib/unicode-punctuation-regex.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-character/dev/lib/unicode-punctuation-regex.js create mode 100644 _extensions/d2/node_modules/micromark-util-character/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-character/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-character/lib/unicode-punctuation-regex.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-character/lib/unicode-punctuation-regex.js create mode 100644 _extensions/d2/node_modules/micromark-util-character/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-character/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-chunked/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-chunked/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-chunked/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-chunked/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-chunked/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-chunked/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-classify-character/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-classify-character/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-classify-character/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-classify-character/index.js create mode 100644 
_extensions/d2/node_modules/micromark-util-classify-character/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-classify-character/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-combine-extensions/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-combine-extensions/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-combine-extensions/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-combine-extensions/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-decode-string/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-decode-string/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-decode-string/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-decode-string/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-decode-string/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-decode-string/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-encode/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-encode/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-encode/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-encode/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-html-tag-name/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-html-tag-name/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-html-tag-name/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-html-tag-name/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-normalize-identifier/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-normalize-identifier/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-normalize-identifier/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-normalize-identifier/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-normalize-identifier/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-normalize-identifier/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-resolve-all/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-resolve-all/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-resolve-all/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-resolve-all/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-sanitize-uri/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-sanitize-uri/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-sanitize-uri/index.d.ts 
create mode 100644 _extensions/d2/node_modules/micromark-util-sanitize-uri/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-sanitize-uri/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-sanitize-uri/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-subtokenize/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-subtokenize/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-subtokenize/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-subtokenize/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-subtokenize/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-subtokenize/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-symbol/codes.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-symbol/codes.js create mode 100644 _extensions/d2/node_modules/micromark-util-symbol/constants.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-symbol/constants.js create mode 100644 _extensions/d2/node_modules/micromark-util-symbol/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-symbol/readme.md create mode 100644 _extensions/d2/node_modules/micromark-util-symbol/types.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-symbol/types.js create mode 100644 _extensions/d2/node_modules/micromark-util-symbol/values.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-symbol/values.js create mode 100644 _extensions/d2/node_modules/micromark-util-types/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark-util-types/index.js create mode 100644 _extensions/d2/node_modules/micromark-util-types/package.json create mode 100644 _extensions/d2/node_modules/micromark-util-types/readme.md create mode 100644 _extensions/d2/node_modules/micromark/dev/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/index.js create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/compile.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/compile.js create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/constructs.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/constructs.js create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/create-tokenizer.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/create-tokenizer.js create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/initialize/content.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/initialize/content.js create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/initialize/document.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/initialize/document.js create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/initialize/flow.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/initialize/flow.js create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/initialize/text.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/initialize/text.js create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/parse.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/parse.js create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/postprocess.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/postprocess.js create mode 100644 
_extensions/d2/node_modules/micromark/dev/lib/preprocess.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/lib/preprocess.js create mode 100644 _extensions/d2/node_modules/micromark/dev/stream.d.ts create mode 100644 _extensions/d2/node_modules/micromark/dev/stream.js create mode 100644 _extensions/d2/node_modules/micromark/index.d.ts create mode 100644 _extensions/d2/node_modules/micromark/index.js create mode 100644 _extensions/d2/node_modules/micromark/lib/compile.d.ts create mode 100644 _extensions/d2/node_modules/micromark/lib/compile.js create mode 100644 _extensions/d2/node_modules/micromark/lib/constructs.d.ts create mode 100644 _extensions/d2/node_modules/micromark/lib/constructs.js create mode 100644 _extensions/d2/node_modules/micromark/lib/create-tokenizer.d.ts create mode 100644 _extensions/d2/node_modules/micromark/lib/create-tokenizer.js create mode 100644 _extensions/d2/node_modules/micromark/lib/initialize/content.d.ts create mode 100644 _extensions/d2/node_modules/micromark/lib/initialize/content.js create mode 100644 _extensions/d2/node_modules/micromark/lib/initialize/document.d.ts create mode 100644 _extensions/d2/node_modules/micromark/lib/initialize/document.js create mode 100644 _extensions/d2/node_modules/micromark/lib/initialize/flow.d.ts create mode 100644 _extensions/d2/node_modules/micromark/lib/initialize/flow.js create mode 100644 _extensions/d2/node_modules/micromark/lib/initialize/text.d.ts create mode 100644 _extensions/d2/node_modules/micromark/lib/initialize/text.js create mode 100644 _extensions/d2/node_modules/micromark/lib/parse.d.ts create mode 100644 _extensions/d2/node_modules/micromark/lib/parse.js create mode 100644 _extensions/d2/node_modules/micromark/lib/postprocess.d.ts create mode 100644 _extensions/d2/node_modules/micromark/lib/postprocess.js create mode 100644 _extensions/d2/node_modules/micromark/lib/preprocess.d.ts create mode 100644 _extensions/d2/node_modules/micromark/lib/preprocess.js create mode 100644 _extensions/d2/node_modules/micromark/package.json create mode 100644 _extensions/d2/node_modules/micromark/readme.md create mode 100644 _extensions/d2/node_modules/micromark/stream.d.ts create mode 100644 _extensions/d2/node_modules/micromark/stream.js create mode 100644 _extensions/d2/node_modules/minimatch/LICENSE create mode 100644 _extensions/d2/node_modules/minimatch/README.md create mode 100644 _extensions/d2/node_modules/minimatch/minimatch.js create mode 100644 _extensions/d2/node_modules/minimatch/package.json create mode 100644 _extensions/d2/node_modules/mri/index.d.ts create mode 100644 _extensions/d2/node_modules/mri/lib/index.js create mode 100644 _extensions/d2/node_modules/mri/lib/index.mjs create mode 100644 _extensions/d2/node_modules/mri/license.md create mode 100644 _extensions/d2/node_modules/mri/package.json create mode 100644 _extensions/d2/node_modules/mri/readme.md create mode 100644 _extensions/d2/node_modules/ms/index.js create mode 100644 _extensions/d2/node_modules/ms/license.md create mode 100644 _extensions/d2/node_modules/ms/package.json create mode 100644 _extensions/d2/node_modules/ms/readme.md create mode 100644 _extensions/d2/node_modules/once/LICENSE create mode 100644 _extensions/d2/node_modules/once/README.md create mode 100644 _extensions/d2/node_modules/once/once.js create mode 100644 _extensions/d2/node_modules/once/package.json create mode 100644 _extensions/d2/node_modules/pandoc-filter/.editorconfig create mode 100644 _extensions/d2/node_modules/pandoc-filter/.gitattributes 
create mode 100644 _extensions/d2/node_modules/pandoc-filter/.jshintrc create mode 100644 _extensions/d2/node_modules/pandoc-filter/.travis.yml create mode 100644 _extensions/d2/node_modules/pandoc-filter/LICENSE create mode 100644 _extensions/d2/node_modules/pandoc-filter/README.md create mode 100644 _extensions/d2/node_modules/pandoc-filter/index.d.ts create mode 100644 _extensions/d2/node_modules/pandoc-filter/index.js create mode 100644 _extensions/d2/node_modules/pandoc-filter/package.json create mode 100644 _extensions/d2/node_modules/pandoc-filter/tsconfig.json create mode 100644 _extensions/d2/node_modules/path-is-absolute/index.js create mode 100644 _extensions/d2/node_modules/path-is-absolute/license create mode 100644 _extensions/d2/node_modules/path-is-absolute/package.json create mode 100644 _extensions/d2/node_modules/path-is-absolute/readme.md create mode 100644 _extensions/d2/node_modules/remark-parse/index.d.ts create mode 100644 _extensions/d2/node_modules/remark-parse/index.js create mode 100644 _extensions/d2/node_modules/remark-parse/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/remark-parse/lib/index.js create mode 100644 _extensions/d2/node_modules/remark-parse/license create mode 100644 _extensions/d2/node_modules/remark-parse/package.json create mode 100644 _extensions/d2/node_modules/remark-parse/readme.md create mode 100644 _extensions/d2/node_modules/remark-stringify/index.d.ts create mode 100644 _extensions/d2/node_modules/remark-stringify/index.js create mode 100644 _extensions/d2/node_modules/remark-stringify/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/remark-stringify/lib/index.js create mode 100644 _extensions/d2/node_modules/remark-stringify/license create mode 100644 _extensions/d2/node_modules/remark-stringify/package.json create mode 100644 _extensions/d2/node_modules/remark-stringify/readme.md create mode 100644 _extensions/d2/node_modules/remark/index.d.ts create mode 100644 _extensions/d2/node_modules/remark/index.js create mode 100644 _extensions/d2/node_modules/remark/license create mode 100644 _extensions/d2/node_modules/remark/package.json create mode 100644 _extensions/d2/node_modules/remark/readme.md create mode 100644 _extensions/d2/node_modules/rimraf/CHANGELOG.md create mode 100644 _extensions/d2/node_modules/rimraf/LICENSE create mode 100644 _extensions/d2/node_modules/rimraf/README.md create mode 100644 _extensions/d2/node_modules/rimraf/bin.js create mode 100644 _extensions/d2/node_modules/rimraf/package.json create mode 100644 _extensions/d2/node_modules/rimraf/rimraf.js create mode 100644 _extensions/d2/node_modules/sade/index.d.ts create mode 100644 _extensions/d2/node_modules/sade/lib/index.js create mode 100644 _extensions/d2/node_modules/sade/lib/index.mjs create mode 100644 _extensions/d2/node_modules/sade/license create mode 100644 _extensions/d2/node_modules/sade/package.json create mode 100644 _extensions/d2/node_modules/sade/readme.md create mode 100644 _extensions/d2/node_modules/tmp/CHANGELOG.md create mode 100644 _extensions/d2/node_modules/tmp/LICENSE create mode 100644 _extensions/d2/node_modules/tmp/README.md create mode 100644 _extensions/d2/node_modules/tmp/lib/tmp.js create mode 100644 _extensions/d2/node_modules/tmp/package.json create mode 100644 _extensions/d2/node_modules/trough/index.d.ts create mode 100644 _extensions/d2/node_modules/trough/index.js create mode 100644 _extensions/d2/node_modules/trough/license create mode 100644 _extensions/d2/node_modules/trough/package.json 
create mode 100644 _extensions/d2/node_modules/trough/readme.md create mode 100644 _extensions/d2/node_modules/unified/index.d.ts create mode 100644 _extensions/d2/node_modules/unified/index.js create mode 100644 _extensions/d2/node_modules/unified/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/unified/lib/index.js create mode 100644 _extensions/d2/node_modules/unified/license create mode 100644 _extensions/d2/node_modules/unified/package.json create mode 100644 _extensions/d2/node_modules/unified/readme.md create mode 100644 _extensions/d2/node_modules/unist-util-is/index.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-is/index.js create mode 100644 _extensions/d2/node_modules/unist-util-is/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-is/lib/index.js create mode 100644 _extensions/d2/node_modules/unist-util-is/license create mode 100644 _extensions/d2/node_modules/unist-util-is/package.json create mode 100644 _extensions/d2/node_modules/unist-util-is/readme.md create mode 100644 _extensions/d2/node_modules/unist-util-stringify-position/index.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-stringify-position/index.js create mode 100644 _extensions/d2/node_modules/unist-util-stringify-position/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-stringify-position/lib/index.js create mode 100644 _extensions/d2/node_modules/unist-util-stringify-position/license create mode 100644 _extensions/d2/node_modules/unist-util-stringify-position/package.json create mode 100644 _extensions/d2/node_modules/unist-util-stringify-position/readme.md create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/complex-types.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/index.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/index.js create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/lib/color.browser.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/lib/color.browser.js create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/lib/color.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/lib/color.js create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/lib/complex-types.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/lib/index.js create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/license create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/package.json create mode 100644 _extensions/d2/node_modules/unist-util-visit-parents/readme.md create mode 100644 _extensions/d2/node_modules/unist-util-visit/complex-types.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-visit/index.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-visit/index.js create mode 100644 _extensions/d2/node_modules/unist-util-visit/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/unist-util-visit/lib/index.js create mode 100644 _extensions/d2/node_modules/unist-util-visit/license create mode 100644 _extensions/d2/node_modules/unist-util-visit/package.json create mode 100644 _extensions/d2/node_modules/unist-util-visit/readme.md create mode 100644 _extensions/d2/node_modules/uvu/assert/index.d.ts create mode 100644 _extensions/d2/node_modules/uvu/assert/index.js create mode 100644 
_extensions/d2/node_modules/uvu/assert/index.mjs create mode 100644 _extensions/d2/node_modules/uvu/bin.js create mode 100644 _extensions/d2/node_modules/uvu/diff/index.d.ts create mode 100644 _extensions/d2/node_modules/uvu/diff/index.js create mode 100644 _extensions/d2/node_modules/uvu/diff/index.mjs create mode 100644 _extensions/d2/node_modules/uvu/dist/index.js create mode 100644 _extensions/d2/node_modules/uvu/dist/index.mjs create mode 100644 _extensions/d2/node_modules/uvu/index.d.ts create mode 100644 _extensions/d2/node_modules/uvu/license create mode 100644 _extensions/d2/node_modules/uvu/package.json create mode 100644 _extensions/d2/node_modules/uvu/parse/index.d.ts create mode 100644 _extensions/d2/node_modules/uvu/parse/index.js create mode 100644 _extensions/d2/node_modules/uvu/parse/index.mjs create mode 100644 _extensions/d2/node_modules/uvu/readme.md create mode 100644 _extensions/d2/node_modules/uvu/run/index.d.ts create mode 100644 _extensions/d2/node_modules/uvu/run/index.js create mode 100644 _extensions/d2/node_modules/uvu/run/index.mjs create mode 100644 _extensions/d2/node_modules/vfile-message/index.d.ts create mode 100644 _extensions/d2/node_modules/vfile-message/index.js create mode 100644 _extensions/d2/node_modules/vfile-message/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/vfile-message/lib/index.js create mode 100644 _extensions/d2/node_modules/vfile-message/license create mode 100644 _extensions/d2/node_modules/vfile-message/package.json create mode 100644 _extensions/d2/node_modules/vfile-message/readme.md create mode 100644 _extensions/d2/node_modules/vfile/index.d.ts create mode 100644 _extensions/d2/node_modules/vfile/index.js create mode 100644 _extensions/d2/node_modules/vfile/lib/index.d.ts create mode 100644 _extensions/d2/node_modules/vfile/lib/index.js create mode 100644 _extensions/d2/node_modules/vfile/lib/minpath.browser.d.ts create mode 100644 _extensions/d2/node_modules/vfile/lib/minpath.browser.js create mode 100644 _extensions/d2/node_modules/vfile/lib/minpath.d.ts create mode 100644 _extensions/d2/node_modules/vfile/lib/minpath.js create mode 100644 _extensions/d2/node_modules/vfile/lib/minproc.browser.d.ts create mode 100644 _extensions/d2/node_modules/vfile/lib/minproc.browser.js create mode 100644 _extensions/d2/node_modules/vfile/lib/minproc.d.ts create mode 100644 _extensions/d2/node_modules/vfile/lib/minproc.js create mode 100644 _extensions/d2/node_modules/vfile/lib/minurl.browser.d.ts create mode 100644 _extensions/d2/node_modules/vfile/lib/minurl.browser.js create mode 100644 _extensions/d2/node_modules/vfile/lib/minurl.d.ts create mode 100644 _extensions/d2/node_modules/vfile/lib/minurl.js create mode 100644 _extensions/d2/node_modules/vfile/lib/minurl.shared.d.ts create mode 100644 _extensions/d2/node_modules/vfile/lib/minurl.shared.js create mode 100644 _extensions/d2/node_modules/vfile/license create mode 100644 _extensions/d2/node_modules/vfile/package.json create mode 100644 _extensions/d2/node_modules/vfile/readme.md create mode 100644 _extensions/d2/node_modules/which/LICENSE create mode 100644 _extensions/d2/node_modules/which/README.md create mode 100644 _extensions/d2/node_modules/which/bin/which.js create mode 100644 _extensions/d2/node_modules/which/lib/index.js create mode 100644 _extensions/d2/node_modules/which/package.json create mode 100644 _extensions/d2/node_modules/wrappy/LICENSE create mode 100644 _extensions/d2/node_modules/wrappy/README.md create mode 100644 
_extensions/d2/node_modules/wrappy/package.json
 create mode 100644 _extensions/d2/node_modules/wrappy/wrappy.js
 create mode 100644 _extensions/d2/node_modules/zwitch/index.d.ts
 create mode 100644 _extensions/d2/node_modules/zwitch/index.js
 create mode 100644 _extensions/d2/node_modules/zwitch/license
 create mode 100644 _extensions/d2/node_modules/zwitch/package.json
 create mode 100644 _extensions/d2/node_modules/zwitch/readme.md
 create mode 100644 images/anndata_schema.svg

diff --git a/.github/workflows/quarto_gh_pages.yml b/.github/workflows/quarto_gh_pages.yml
index 02740b86..68bb4ef6 100644
--- a/.github/workflows/quarto_gh_pages.yml
+++ b/.github/workflows/quarto_gh_pages.yml
@@ -30,6 +30,10 @@ jobs:
 
       - uses: viash-io/viash-actions/setup@v4
 
+      - name: "Install d2"
+        run: |
+          curl -fsSL https://d2lang.com/install.sh | sh -s --
+
       - name: Install Viash
         uses: viash-io/viash-actions/setup@main
 
diff --git a/_extensions/d2/_extension.yml b/_extensions/d2/_extension.yml
new file mode 100644
index 00000000..68a2fb32
--- /dev/null
+++ b/_extensions/d2/_extension.yml
@@ -0,0 +1,7 @@
+title: D2
+author: Toni Verbeiren
+version: 1.0.0
+quarto-required: ">=1.3.433"
+contributes:
+  filters:
+    - node_modules/d2-filter/filter-shim.js
diff --git a/_extensions/d2/node_modules/.bin/d2-filter b/_extensions/d2/node_modules/.bin/d2-filter
new file mode 100644
index 00000000..fd3f832e
--- /dev/null
+++ b/_extensions/d2/node_modules/.bin/d2-filter
@@ -0,0 +1,12 @@
+#!/usr/bin/env node
+
+var pandoc = require("pandoc-filter");
+var which = require("which")
+var filter = require("./lib/filter")
+
+var resolvedOrNull = which.sync("d2", { nothrow: true });
+if (resolvedOrNull === null) {
+  console.error("d2 is not installed");
+  return;
+}
+pandoc.stdio(filter.action);
diff --git a/_extensions/d2/node_modules/.bin/node-which b/_extensions/d2/node_modules/.bin/node-which
new file mode 100644
index 00000000..6df16f21
--- /dev/null
+++ b/_extensions/d2/node_modules/.bin/node-which
@@ -0,0 +1,52 @@
+#!/usr/bin/env node
+
+const which = require('../lib')
+const argv = process.argv.slice(2)
+
+const usage = (err) => {
+  if (err) {
+    console.error(`which: ${err}`)
+  }
+  console.error('usage: which [-as] program ...')
+  process.exit(1)
+}
+
+if (!argv.length) {
+  return usage()
+}
+
+let dashdash = false
+const [commands, flags] = argv.reduce((acc, arg) => {
+  if (dashdash || arg === '--') {
+    dashdash = true
+    return acc
+  }
+
+  if (!/^-/.test(arg)) {
+    acc[0].push(arg)
+    return acc
+  }
+
+  for (const flag of arg.slice(1).split('')) {
+    if (flag === 's') {
+      acc[1].silent = true
+    } else if (flag === 'a') {
+      acc[1].all = true
+    } else {
+      usage(`illegal option -- ${flag}`)
+    }
+  }
+
+  return acc
+}, [[], {}])
+
+for (const command of commands) {
+  try {
+    const res = which.sync(command, { all: flags.all })
+    if (!flags.silent) {
+      console.log([].concat(res).join('\n'))
+    }
+  } catch (err) {
+    process.exitCode = 1
+  }
+}
diff --git a/_extensions/d2/node_modules/.bin/rimraf b/_extensions/d2/node_modules/.bin/rimraf
new file mode 100644
index 00000000..023814cc
--- /dev/null
+++ b/_extensions/d2/node_modules/.bin/rimraf
@@ -0,0 +1,68 @@
+#!/usr/bin/env node
+
+const rimraf = require('./')
+
+const path = require('path')
+
+const isRoot = arg => /^(\/|[a-zA-Z]:\\)$/.test(path.resolve(arg))
+const filterOutRoot = arg => {
+  const ok = preserveRoot === false || !isRoot(arg)
+  if (!ok) {
+    console.error(`refusing to remove ${arg}`)
+    console.error('Set --no-preserve-root to allow this')
+  }
+  return ok
+}
+
+let help = false
+let dashdash = false
+let noglob = false
+let preserveRoot = true
+const args = process.argv.slice(2).filter(arg => {
+  if (dashdash)
+    return !!arg
+  else if (arg === '--')
+    dashdash = true
+  else if (arg === '--no-glob' || arg === '-G')
+    noglob = true
+  else if (arg === '--glob' || arg === '-g')
+    noglob = false
+  else if (arg.match(/^(-+|\/)(h(elp)?|\?)$/))
+    help = true
+  else if (arg === '--preserve-root')
+    preserveRoot = true
+  else if (arg === '--no-preserve-root')
+    preserveRoot = false
+  else
+    return !!arg
+}).filter(arg => !preserveRoot || filterOutRoot(arg))
+
+const go = n => {
+  if (n >= args.length)
+    return
+  const options = noglob ? { glob: false } : {}
+  rimraf(args[n], options, er => {
+    if (er)
+      throw er
+    go(n+1)
+  })
+}
+
+if (help || args.length === 0) {
+  // If they didn't ask for help, then this is not a "success"
+  const log = help ? console.log : console.error
+  log('Usage: rimraf <path> [<path> ...]')
+  log('')
+  log('  Deletes all files and folders at "path" recursively.')
+  log('')
+  log('Options:')
+  log('')
+  log('  -h, --help          Display this usage info')
+  log('  -G, --no-glob       Do not expand glob patterns in arguments')
+  log('  -g, --glob          Expand glob patterns in arguments (default)')
+  log('  --preserve-root     Do not remove \'/\' (default)')
+  log('  --no-preserve-root  Do not treat \'/\' specially')
+  log('  --                  Stop parsing flags')
+  process.exit(help ? 0 : 1)
+} else
+  go(0)
diff --git a/_extensions/d2/node_modules/.bin/uvu b/_extensions/d2/node_modules/.bin/uvu
new file mode 100644
index 00000000..3ba0e3b9
--- /dev/null
+++ b/_extensions/d2/node_modules/.bin/uvu
@@ -0,0 +1,35 @@
+#!/usr/bin/env node
+const sade = require('sade');
+const pkg = require('./package');
+const { parse } = require('./parse');
+
+const dimport = x => new Function(`return import(${ JSON.stringify(x) })`).call(0);
+
+const hasImport = (() => {
+  try { new Function('import').call(0) }
+  catch (err) { return !/unexpected/i.test(err.message) }
+})();
+
+sade('uvu [dir] [pattern]')
+  .version(pkg.version)
+  .option('-b, --bail', 'Exit on first failure')
+  .option('-i, --ignore', 'Any file patterns to ignore')
+  .option('-r, --require', 'Additional module(s) to preload')
+  .option('-C, --cwd', 'The current directory to resolve from', '.')
+  .option('-c, --color', 'Print colorized output', true)
+  .action(async (dir, pattern, opts) => {
+    try {
+      if (opts.color) process.env.FORCE_COLOR = '1';
+      let ctx = await parse(dir, pattern, opts);
+
+      if (!ctx.requires && hasImport) {
+        await dimport('uvu/run').then(m => m.run(ctx.suites, opts));
+      } else {
+        await require('uvu/run').run(ctx.suites, opts);
+      }
+    } catch (err) {
+      console.error(err.stack || err.message);
+      process.exit(1);
+    }
+  })
+  .parse(process.argv);
diff --git a/_extensions/d2/node_modules/.package-lock.json b/_extensions/d2/node_modules/.package-lock.json
new file mode 100644
index 00000000..7a9d3d75
--- /dev/null
+++ b/_extensions/d2/node_modules/.package-lock.json
@@ -0,0 +1,1003 @@
+{
+  "name": "viash_nxf_course",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {
+    "node_modules/@types/debug": {
+      "version": "4.1.7",
+      "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.7.tgz",
+      "integrity": "sha512-9AonUzyTjXXhEOa0DnqpzZi6VHlqKMswga9EXjpXnnqxwLtdvPPtlO8evrI5D9S6asFRCQ6v+wpiUKbw+vKqyg==",
+      "dependencies": {
+        "@types/ms": "*"
+      }
+    },
+    "node_modules/@types/mdast": {
+      "version": "3.0.11",
+      "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.11.tgz",
+      "integrity":
"sha512-Y/uImid8aAwrEA24/1tcRZwpxX3pIFTSilcNDKSPn+Y2iDywSEachzRuvgAYYLR3wpGXAsMbv5lvKLDZLeYPAw==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/ms": { + "version": "0.7.31", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.31.tgz", + "integrity": "sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA==" + }, + "node_modules/@types/unist": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.6.tgz", + "integrity": "sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==" + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/d2-filter": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/d2-filter/-/d2-filter-1.4.0.tgz", + "integrity": "sha512-WW17ynL62B9fXq/pKMhyEu+N4VyP3BCgdSHgN72ERGDqaYENzkPPC0AIwKFG5ITvEHAfKjocNKG1twAXuXgo8A==", + "dependencies": { + "pandoc-filter": "^2.1.0", + "tmp": "^0.2.1", + "which": "^3.0.0" + }, + "bin": { + "d2-filter": "filter-shim.js" + } + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", + "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": 
"sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/diff": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.1.0.tgz", + "integrity": "sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw==", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" + }, + "node_modules/get-stdin": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-7.0.0.tgz", + "integrity": "sha512-zRKcywvrXlXsA0v0i9Io4KDRaAw7+a1ZpjRwl9Wox8PFlVCCHra7E9c4kqXCoCM9nR5tBkaTTZRBoCm60bFqTQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/is-buffer": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz", + "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "engines": { + "node": ">=4" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/kleur": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", + "integrity": 
"sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.0.tgz", + "integrity": "sha512-HN3W1gRIuN/ZW295c7zi7g9lVBllMgZE40RxCX37wrTPWXCWtpvOZdfnuK+1WNpvZje6XuJeI3Wnb4TJEUem+g==", + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "mdast-util-to-string": "^3.1.0", + "micromark": "^3.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-decode-string": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "unist-util-stringify-position": "^3.0.0", + "uvu": "^0.5.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-3.0.1.tgz", + "integrity": "sha512-WmI1gTXUBJo4/ZmSk79Wcb2HcjPJBzM1nlI/OUWA8yk2X9ik3ffNbBGsU+09BFmXaL1IBb9fiuvq6/KMiNycSg==", + "dependencies": { + "@types/mdast": "^3.0.0", + "unist-util-is": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-1.5.0.tgz", + "integrity": "sha512-bbv7TPv/WC49thZPg3jXuqzuvI45IL2EVAr/KxF0BSdHsU0ceFHOmwQn6evxAh1GaoK/6GQ1wp4R4oW2+LFL/A==", + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^3.0.0", + "mdast-util-to-string": "^3.0.0", + "micromark-util-decode-string": "^1.0.0", + "unist-util-visit": "^4.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz", + "integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==", + "dependencies": { + "@types/mdast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/micromark/-/micromark-3.1.0.tgz", + "integrity": "sha512-6Mj0yHLdUZjHnOPgr5xfWIMqMWS12zDN6iws9SLuSz76W8jTtAv24MN4/CL7gJrl5vtxGInkkqDv/JIoRsQOvA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "micromark-core-commonmark": "^1.0.1", + "micromark-factory-space": "^1.0.0", + "micromark-util-character": 
"^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-combine-extensions": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-1.0.6.tgz", + "integrity": "sha512-K+PkJTxqjFfSNkfAhp4GB+cZPfQd6dxtTXnf+RjZOV7T4EEXnvgzOcnp+eSTmpGk9d1S9sL6/lqrgSNn/s0HZA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-factory-destination": "^1.0.0", + "micromark-factory-label": "^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-factory-title": "^1.0.0", + "micromark-factory-whitespace": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-classify-character": "^1.0.0", + "micromark-util-html-tag-name": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-factory-destination": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-1.0.0.tgz", + "integrity": "sha512-eUBA7Rs1/xtTVun9TmV3gjfPz2wEwgK5R5xcbIM5ZYAtvGF6JkyaDsj0agx8urXnO31tEO6Ug83iVH3tdedLnw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-1.0.2.tgz", + "integrity": "sha512-CTIwxlOnU7dEshXDQ+dsr2n+yxpP0+fn271pu0bwDIS8uqfFcumXpj5mLn3hSC8iw2MUr6Gx8EcKng1dD7i6hg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.0.0.tgz", + "integrity": "sha512-qUmqs4kj9a5yBnk3JMLyjtWYN6Mzfcx8uJfi5XAveBniDevmZasdGBba5b4QsvRcAkmvGo5ACmSUmyGiKTLZew==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "1.0.2", + 
"resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-1.0.2.tgz", + "integrity": "sha512-zily+Nr4yFqgMGRKLpTVsNl5L4PMu485fGFDOQJQBl2NFpjGte1e86zC0da93wf97jrc4+2G2GQudFMHn3IX+A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-1.0.0.tgz", + "integrity": "sha512-Qx7uEyahU1lt1RnsECBiuEbfr9INjQTGa6Err+gF3g0Tx4YEviPbqqGKNv/NrBaE7dVHdn1bVZKM/n5I/Bak7A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.1.0.tgz", + "integrity": "sha512-agJ5B3unGNJ9rJvADMJ5ZiYjBRyDpzKAOk01Kpi1TKhlT1APx3XZk6eN7RtSz1erbWHC2L8T3xLZ81wdtGRZzg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-1.0.0.tgz", + "integrity": "sha512-5e8xTis5tEZKgesfbQMKRCyzvffRRUX+lK/y+DvsMFdabAicPkkZV6gO+FEWi9RfuKKoxxPwNL+dFF0SMImc1g==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-1.0.0.tgz", + "integrity": "sha512-F8oW2KKrQRb3vS5ud5HIqBVkCqQi224Nm55o5wYLzY/9PwHGXC01tr3d7+TqHHz6zrKQ72Okwtvm/xQm6OVNZA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.0.0.tgz", + "integrity": "sha512-J8H058vFBdo/6+AsjHp2NF7AJ02SZtWaVUjsayNFeAiydTxUwViQPxN0Hf8dp4FmCQi0UUFovFsEyRSUmFH3MA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + 
"micromark-util-chunked": "^1.0.0", + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.0.0.tgz", + "integrity": "sha512-OzO9AI5VUtrTD7KSdagf4MWgHMtET17Ua1fIpXTpuhclCqD8egFWo85GxSGvxgkGS74bEahvtM0WP0HjvV0e4w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-1.0.2.tgz", + "integrity": "sha512-DLT5Ho02qr6QWVNYbRZ3RYOSSWWFuH3tJexd3dgN1odEuPNxCngTCXJum7+ViRAd9BbdxCvMToPOD/IvVhzG6Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-1.0.1.tgz", + "integrity": "sha512-U2s5YdnAYexjKDel31SVMPbfi+eF8y1U4pfiRW/Y8EFVCy/vgxk/2wWTxzcqE71LHtCuCzlBDRU2a5CQ5j+mQA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-html-tag-name": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.1.0.tgz", + "integrity": "sha512-BKlClMmYROy9UiV03SwNmckkjn8QHVaWkqoAqzivabvdGcwNGMMMH/5szAnywmsTBUzDsU57/mFi0sp4BQO6dA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.0.0.tgz", + "integrity": "sha512-yg+zrL14bBTFrQ7n35CmByWUTFsgst5JhA4gJYoty4Dqzj4Z4Fr/DHekSS5aLfH9bdlfnSvKAWsAgJhIbogyBg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-1.0.0.tgz", + "integrity": "sha512-CB/AGk98u50k42kvgaMM94wzBqozSzDDaonKU7P7jwQIuH2RU0TeBqGYJz2WY1UdihhjweivStrJ2JdkdEmcfw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-types": "^1.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "1.1.0", 
+ "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.1.0.tgz", + "integrity": "sha512-RoxtuSCX6sUNtxhbmsEFQfWzs8VN7cTctmBPvYivo98xb/kDEoTCtJQX5wyzIYEmk/lvNFTat4hL8oW0KndFpg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-symbol": "^1.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-1.0.2.tgz", + "integrity": "sha512-d90uqCnXp/cy4G881Ub4psE57Sf8YD0pim9QdjCRNjfas2M1u6Lbt+XZK9gnHL2XFhnozZiEdCa9CNfXSfQ6xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.0.1.tgz", + "integrity": "sha512-oKDEMK2u5qqAptasDAwWDXq0tG9AssVwAx3E9bBF3t/shRIGsWIRG+cGafs2p/SnDSOecnt6hZPCE2o6lHfFmQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-types": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.0.2.tgz", + "integrity": "sha512-DCfg/T8fcrhrRKTPjRrw/5LLvdGV7BHySf/1LOZx7TzWZdYRjogNtyNq885z3nNallwr3QUKARjqvHqX1/7t+w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/mri": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz", + "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/pandoc-filter": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/pandoc-filter/-/pandoc-filter-2.1.0.tgz", + "integrity": "sha512-uE6RSbzR+WsiBZgHa6u4WfvNy3DeUKtLFFJBbq03DFMmlXrtvqyJuIS0jrxM6v1VEJUlaY5lu/Tb6v3hnINWkw==", + "dependencies": { + "get-stdin": "~7.0.0" + }, + "engines": { + 
"node": ">=7.6.0" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/remark": { + "version": "14.0.2", + "resolved": "https://registry.npmjs.org/remark/-/remark-14.0.2.tgz", + "integrity": "sha512-A3ARm2V4BgiRXaUo5K0dRvJ1lbogrbXnhkJRmD0yw092/Yl0kOCZt1k9ZeElEwkZsWGsMumz6qL5MfNJH9nOBA==", + "dependencies": { + "@types/mdast": "^3.0.0", + "remark-parse": "^10.0.0", + "remark-stringify": "^10.0.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-10.0.1.tgz", + "integrity": "sha512-1fUyHr2jLsVOkhbvPRBJ5zTKZZyD6yZzYaWCS6BPBdQ8vEMBCH+9zNCDA6tET/zHCi/jLqjCWtlJZUPk+DbnFw==", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-from-markdown": "^1.0.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-stringify": { + "version": "10.0.2", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-10.0.2.tgz", + "integrity": "sha512-6wV3pvbPvHkbNnWB0wdDvVFHOe1hBRAx1Q/5g/EpH4RppAII6J8Gnwe7VbHuXaoKIF6LAg6ExTel/+kNqSQ7lw==", + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.0.0", + "unified": "^10.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sade": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/sade/-/sade-1.8.1.tgz", + "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==", + "dependencies": { + "mri": "^1.1.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tmp": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", + "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", + "dependencies": { + "rimraf": "^3.0.0" + }, + "engines": { + "node": ">=8.17.0" + } + }, + "node_modules/trough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz", + "integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/unified": { + "version": "10.1.2", + "resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz", + "integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==", + "dependencies": { + "@types/unist": "^2.0.0", + "bail": "^2.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^5.0.0" + }, + "funding": { + "type": "opencollective", + 
"url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.2.1.tgz", + "integrity": "sha512-u9njyyfEh43npf1M+yGKDGVPbY/JWEemg5nH05ncKPfi+kBbKBJoTdsogMu33uhytuLlv9y0O7GH7fEdwLdLQw==", + "dependencies": { + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", + "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", + "dependencies": { + "@types/unist": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.2.tgz", + "integrity": "sha512-MSd8OUGISqHdVvfY9TPhyK2VdUrPgxkUtWSuMHF6XAAFuL4LokseigBnZtPnJMu+FbynTkFNnFlyjxpVKujMRg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.1.1" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "5.1.3", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.3.tgz", + "integrity": "sha512-x6+y8g7wWMyQhL1iZfhIPhDAs7Xwbn9nRosDXl7qoPTSCy0yNxnKc+hWokFifWQIDGi154rdUqKvbCa4+1kLhg==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/uvu": { + "version": "0.5.6", + "resolved": "https://registry.npmjs.org/uvu/-/uvu-0.5.6.tgz", + "integrity": "sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA==", + "dependencies": { + "dequal": "^2.0.0", + "diff": "^5.0.0", + "kleur": "^4.0.3", + "sade": "^1.7.3" + }, + "bin": { + "uvu": "bin.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/vfile": { + "version": "5.3.7", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.7.tgz", + "integrity": "sha512-r7qlzkgErKjobAmyNIkkSpizsFPYiUPuJb5pNW1RB4JcYVZhs4lIbVqk8XPk033CV/1z8ss5pkax8SuhGpcG8g==", + "dependencies": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^3.0.0", + "vfile-message": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.4.tgz", + "integrity": "sha512-fa0Z6P8HUrQN4BZaX05SIVXic+7kE3b05PWAtPuYP9QLHsLKYR7/AlLW3NtOrpXRLeawpDLMsVkmk5DG0NXgWw==", + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/which": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/which/-/which-3.0.1.tgz", + "integrity": "sha512-XA1b62dzQzLfaEOSQFTCOd5KFf/1VSzZo7/7TUjnya6u0vGGKzU96UQBZTAThCb2j4/xjBAyii1OhRLJEivHvg==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/which.js" + }, + "engines": { + "node": "^14.17.0 || 
^16.13.0 || >=18.0.0" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/_extensions/d2/node_modules/@types/debug/LICENSE b/_extensions/d2/node_modules/@types/debug/LICENSE new file mode 100644 index 00000000..9e841e7a --- /dev/null +++ b/_extensions/d2/node_modules/@types/debug/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/_extensions/d2/node_modules/@types/debug/README.md b/_extensions/d2/node_modules/@types/debug/README.md new file mode 100644 index 00000000..fe91f788 --- /dev/null +++ b/_extensions/d2/node_modules/@types/debug/README.md @@ -0,0 +1,74 @@ +# Installation +> `npm install --save @types/debug` + +# Summary +This package contains type definitions for debug (https://github.com/visionmedia/debug). + +# Details +Files were exported from https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/debug. 
+## [index.d.ts](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/debug/index.d.ts) +````ts +// Type definitions for debug 4.1 +// Project: https://github.com/visionmedia/debug +// Definitions by: Seon-Wook Park +// Gal Talmor +// John McLaughlin +// Brasten Sager +// Nicolas Penin +// Kristian Brünn +// Caleb Gregory +// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped + +declare var debug: debug.Debug & { debug: debug.Debug; default: debug.Debug }; + +export = debug; +export as namespace debug; + +declare namespace debug { + interface Debug { + (namespace: string): Debugger; + coerce: (val: any) => any; + disable: () => string; + enable: (namespaces: string) => void; + enabled: (namespaces: string) => boolean; + formatArgs: (this: Debugger, args: any[]) => void; + log: (...args: any[]) => any; + selectColor: (namespace: string) => string | number; + humanize: typeof import('ms'); + + names: RegExp[]; + skips: RegExp[]; + + formatters: Formatters; + } + + type IDebug = Debug; + + interface Formatters { + [formatter: string]: (v: any) => string; + } + + type IDebugger = Debugger; + + interface Debugger { + (formatter: any, ...args: any[]): void; + + color: string; + diff: number; + enabled: boolean; + log: (...args: any[]) => any; + namespace: string; + destroy: () => boolean; + extend: (namespace: string, delimiter?: string) => Debugger; + } +} + +```` + +### Additional Details + * Last updated: Sat, 24 Jul 2021 08:01:14 GMT + * Dependencies: [@types/ms](https://npmjs.com/package/@types/ms) + * Global values: `debug` + +# Credits +These definitions were written by [Seon-Wook Park](https://github.com/swook), [Gal Talmor](https://github.com/galtalmor), [John McLaughlin](https://github.com/zamb3zi), [Brasten Sager](https://github.com/brasten), [Nicolas Penin](https://github.com/npenin), [Kristian Brünn](https://github.com/kristianmitk), and [Caleb Gregory](https://github.com/calebgregory). 
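As a hedged aside (not part of the vendored files): the `@types/debug` definitions reproduced above describe a callable `Debug` object that returns `Debugger` instances with `extend`, `enabled`, and `enable`/`disable` controls. A minimal TypeScript sketch of how a consumer might exercise those typings follows; the namespace names are illustrative and it assumes the `debug` runtime package is installed alongside `@types/debug` (with `esModuleInterop` for the default import).

```ts
import debug from 'debug';

// Calling the Debug function yields a Debugger bound to a namespace.
const log: debug.Debugger = debug('d2-filter');

// extend() derives a child namespace ("d2-filter:render" with the default ":" delimiter).
const render = log.extend('render');

// Namespaces are toggled with debug.enable() or the DEBUG environment variable.
debug.enable('d2-filter*');

render('rendering %d diagram(s)', 3);
log('parent namespace enabled: %s', log.enabled);
```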
diff --git a/_extensions/d2/node_modules/@types/debug/index.d.ts b/_extensions/d2/node_modules/@types/debug/index.d.ts new file mode 100644 index 00000000..6eb52787 --- /dev/null +++ b/_extensions/d2/node_modules/@types/debug/index.d.ts @@ -0,0 +1,54 @@ +// Type definitions for debug 4.1 +// Project: https://github.com/visionmedia/debug +// Definitions by: Seon-Wook Park +// Gal Talmor +// John McLaughlin +// Brasten Sager +// Nicolas Penin +// Kristian Brünn +// Caleb Gregory +// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped + +declare var debug: debug.Debug & { debug: debug.Debug; default: debug.Debug }; + +export = debug; +export as namespace debug; + +declare namespace debug { + interface Debug { + (namespace: string): Debugger; + coerce: (val: any) => any; + disable: () => string; + enable: (namespaces: string) => void; + enabled: (namespaces: string) => boolean; + formatArgs: (this: Debugger, args: any[]) => void; + log: (...args: any[]) => any; + selectColor: (namespace: string) => string | number; + humanize: typeof import('ms'); + + names: RegExp[]; + skips: RegExp[]; + + formatters: Formatters; + } + + type IDebug = Debug; + + interface Formatters { + [formatter: string]: (v: any) => string; + } + + type IDebugger = Debugger; + + interface Debugger { + (formatter: any, ...args: any[]): void; + + color: string; + diff: number; + enabled: boolean; + log: (...args: any[]) => any; + namespace: string; + destroy: () => boolean; + extend: (namespace: string, delimiter?: string) => Debugger; + } +} diff --git a/_extensions/d2/node_modules/@types/debug/package.json b/_extensions/d2/node_modules/@types/debug/package.json new file mode 100644 index 00000000..fba20779 --- /dev/null +++ b/_extensions/d2/node_modules/@types/debug/package.json @@ -0,0 +1,57 @@ +{ + "name": "@types/debug", + "version": "4.1.7", + "description": "TypeScript definitions for debug", + "homepage": "https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/debug", + "license": "MIT", + "contributors": [ + { + "name": "Seon-Wook Park", + "url": "https://github.com/swook", + "githubUsername": "swook" + }, + { + "name": "Gal Talmor", + "url": "https://github.com/galtalmor", + "githubUsername": "galtalmor" + }, + { + "name": "John McLaughlin", + "url": "https://github.com/zamb3zi", + "githubUsername": "zamb3zi" + }, + { + "name": "Brasten Sager", + "url": "https://github.com/brasten", + "githubUsername": "brasten" + }, + { + "name": "Nicolas Penin", + "url": "https://github.com/npenin", + "githubUsername": "npenin" + }, + { + "name": "Kristian Brünn", + "url": "https://github.com/kristianmitk", + "githubUsername": "kristianmitk" + }, + { + "name": "Caleb Gregory", + "url": "https://github.com/calebgregory", + "githubUsername": "calebgregory" + } + ], + "main": "", + "types": "index.d.ts", + "repository": { + "type": "git", + "url": "https://github.com/DefinitelyTyped/DefinitelyTyped.git", + "directory": "types/debug" + }, + "scripts": {}, + "dependencies": { + "@types/ms": "*" + }, + "typesPublisherContentHash": "b83b27a0dee1329b5308b30bc0a4193efda8f025b3f5d9301130acb5be89a5b7", + "typeScriptVersion": "3.6" +} \ No newline at end of file diff --git a/_extensions/d2/node_modules/@types/mdast/LICENSE b/_extensions/d2/node_modules/@types/mdast/LICENSE new file mode 100644 index 00000000..9e841e7a --- /dev/null +++ b/_extensions/d2/node_modules/@types/mdast/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/_extensions/d2/node_modules/@types/mdast/README.md b/_extensions/d2/node_modules/@types/mdast/README.md new file mode 100644 index 00000000..1a742e37 --- /dev/null +++ b/_extensions/d2/node_modules/@types/mdast/README.md @@ -0,0 +1,16 @@ +# Installation +> `npm install --save @types/mdast` + +# Summary +This package contains type definitions for Mdast (https://github.com/syntax-tree/mdast). + +# Details +Files were exported from https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/mdast. + +### Additional Details + * Last updated: Tue, 21 Mar 2023 00:02:51 GMT + * Dependencies: [@types/unist](https://npmjs.com/package/@types/unist) + * Global values: none + +# Credits +These definitions were written by [Christian Murphy](https://github.com/ChristianMurphy), [Jun Lu](https://github.com/lujun2), [Remco Haszing](https://github.com/remcohaszing), and [Titus Wormer](https://github.com/wooorm). diff --git a/_extensions/d2/node_modules/@types/mdast/index.d.ts b/_extensions/d2/node_modules/@types/mdast/index.d.ts new file mode 100644 index 00000000..da85eccf --- /dev/null +++ b/_extensions/d2/node_modules/@types/mdast/index.d.ts @@ -0,0 +1,346 @@ +// Type definitions for Mdast 3.0 +// Project: https://github.com/syntax-tree/mdast, https://github.com/wooorm/mdast +// Definitions by: Christian Murphy +// Jun Lu +// Remco Haszing +// Titus Wormer +// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped +// TypeScript Version: 3.0 + +import { Parent as UnistParent, Literal as UnistLiteral, Node } from 'unist'; + +export type AlignType = 'left' | 'right' | 'center' | null; + +export type ReferenceType = 'shortcut' | 'collapsed' | 'full'; + +/** + * This map registers all node types that may be used where markdown block content is accepted. + * + * These types are accepted inside block quotes, list items, footnotes, and roots. + * + * This interface can be augmented to register custom node types. + * + * @example + * declare module 'mdast' { + * interface BlockContentMap { + * // Allow using math nodes defined by `remark-math`. + * math: Math; + * } + * } + */ +export interface BlockContentMap { + paragraph: Paragraph; + heading: Heading; + thematicBreak: ThematicBreak; + blockquote: Blockquote; + list: List; + table: Table; + html: HTML; + code: Code; +} + +/** + * This map registers all frontmatter node types. + * + * This interface can be augmented to register custom node types. 
+ * + * @example + * declare module 'mdast' { + * interface FrontmatterContentMap { + * // Allow using toml nodes defined by `remark-frontmatter`. + * toml: TOML; + * } + * } + */ +export interface FrontmatterContentMap { + yaml: YAML; +} + +/** + * This map registers all node definition types. + * + * This interface can be augmented to register custom node types. + * + * @example + * declare module 'mdast' { + * interface DefinitionContentMap { + * custom: Custom; + * } + * } + */ +export interface DefinitionContentMap { + definition: Definition; + footnoteDefinition: FootnoteDefinition; +} + +/** + * This map registers all node types that are acceptable in a static phrasing context. + * + * This interface can be augmented to register custom node types in a phrasing context, including links and link + * references. + * + * @example + * declare module 'mdast' { + * interface StaticPhrasingContentMap { + * mdxJsxTextElement: MDXJSXTextElement; + * } + * } + */ +export interface StaticPhrasingContentMap { + text: Text; + emphasis: Emphasis; + strong: Strong; + delete: Delete; + html: HTML; + inlineCode: InlineCode; + break: Break; + image: Image; + imageReference: ImageReference; + footnote: Footnote; + footnoteReference: FootnoteReference; +} + +/** + * This map registers all node types that are acceptable in a (interactive) phrasing context (so not in links). + * + * This interface can be augmented to register custom node types in a phrasing context, excluding links and link + * references. + * + * @example + * declare module 'mdast' { + * interface PhrasingContentMap { + * custom: Custom; + * } + * } + */ +export interface PhrasingContentMap extends StaticPhrasingContentMap { + link: Link; + linkReference: LinkReference; +} + +/** + * This map registers all node types that are acceptable inside lists. + * + * This interface can be augmented to register custom node types that are acceptable inside lists. + * + * @example + * declare module 'mdast' { + * interface ListContentMap { + * custom: Custom; + * } + * } + */ +export interface ListContentMap { + listItem: ListItem; +} + +/** + * This map registers all node types that are acceptable inside tables (not table cells). + * + * This interface can be augmented to register custom node types that are acceptable inside tables. + * + * @example + * declare module 'mdast' { + * interface TableContentMap { + * custom: Custom; + * } + * } + */ +export interface TableContentMap { + tableRow: TableRow; +} + +/** + * This map registers all node types that are acceptable inside tables rows (not table cells). + * + * This interface can be augmented to register custom node types that are acceptable inside table rows. 
+ * + * @example + * declare module 'mdast' { + * interface RowContentMap { + * custom: Custom; + * } + * } + */ +export interface RowContentMap { + tableCell: TableCell; +} + +export type Content = TopLevelContent | ListContent | TableContent | RowContent | PhrasingContent; + +export type TopLevelContent = BlockContent | FrontmatterContent | DefinitionContent; + +export type BlockContent = BlockContentMap[keyof BlockContentMap]; + +export type FrontmatterContent = FrontmatterContentMap[keyof FrontmatterContentMap]; + +export type DefinitionContent = DefinitionContentMap[keyof DefinitionContentMap]; + +export type ListContent = ListContentMap[keyof ListContentMap]; + +export type TableContent = TableContentMap[keyof TableContentMap]; + +export type RowContent = RowContentMap[keyof RowContentMap]; + +export type PhrasingContent = PhrasingContentMap[keyof PhrasingContentMap]; + +export type StaticPhrasingContent = StaticPhrasingContentMap[keyof StaticPhrasingContentMap]; + +export interface Parent extends UnistParent { + children: Content[]; +} + +export interface Literal extends UnistLiteral { + value: string; +} + +export interface Root extends Parent { + type: 'root'; +} + +export interface Paragraph extends Parent { + type: 'paragraph'; + children: PhrasingContent[]; +} + +export interface Heading extends Parent { + type: 'heading'; + depth: 1 | 2 | 3 | 4 | 5 | 6; + children: PhrasingContent[]; +} + +export interface ThematicBreak extends Node { + type: 'thematicBreak'; +} + +export interface Blockquote extends Parent { + type: 'blockquote'; + children: Array; +} + +export interface List extends Parent { + type: 'list'; + ordered?: boolean | null | undefined; + start?: number | null | undefined; + spread?: boolean | null | undefined; + children: ListContent[]; +} + +export interface ListItem extends Parent { + type: 'listItem'; + checked?: boolean | null | undefined; + spread?: boolean | null | undefined; + children: Array; +} + +export interface Table extends Parent { + type: 'table'; + align?: AlignType[] | null | undefined; + children: TableContent[]; +} + +export interface TableRow extends Parent { + type: 'tableRow'; + children: RowContent[]; +} + +export interface TableCell extends Parent { + type: 'tableCell'; + children: PhrasingContent[]; +} + +export interface HTML extends Literal { + type: 'html'; +} + +export interface Code extends Literal { + type: 'code'; + lang?: string | null | undefined; + meta?: string | null | undefined; +} + +export interface YAML extends Literal { + type: 'yaml'; +} + +export interface Definition extends Node, Association, Resource { + type: 'definition'; +} + +export interface FootnoteDefinition extends Parent, Association { + type: 'footnoteDefinition'; + children: Array; +} + +export interface Text extends Literal { + type: 'text'; +} + +export interface Emphasis extends Parent { + type: 'emphasis'; + children: PhrasingContent[]; +} + +export interface Strong extends Parent { + type: 'strong'; + children: PhrasingContent[]; +} + +export interface Delete extends Parent { + type: 'delete'; + children: PhrasingContent[]; +} + +export interface InlineCode extends Literal { + type: 'inlineCode'; +} + +export interface Break extends Node { + type: 'break'; +} + +export interface Link extends Parent, Resource { + type: 'link'; + children: StaticPhrasingContent[]; +} + +export interface Image extends Node, Resource, Alternative { + type: 'image'; +} + +export interface LinkReference extends Parent, Reference { + type: 'linkReference'; + children: 
StaticPhrasingContent[]; +} + +export interface ImageReference extends Node, Reference, Alternative { + type: 'imageReference'; +} + +export interface Footnote extends Parent { + type: 'footnote'; + children: PhrasingContent[]; +} + +export interface FootnoteReference extends Node, Association { + type: 'footnoteReference'; +} + +// Mixin +export interface Resource { + url: string; + title?: string | null | undefined; +} + +export interface Association { + identifier: string; + label?: string | null | undefined; +} + +export interface Reference extends Association { + referenceType: ReferenceType; +} + +export interface Alternative { + alt?: string | null | undefined; +} diff --git a/_extensions/d2/node_modules/@types/mdast/package.json b/_extensions/d2/node_modules/@types/mdast/package.json new file mode 100644 index 00000000..294aa76d --- /dev/null +++ b/_extensions/d2/node_modules/@types/mdast/package.json @@ -0,0 +1,42 @@ +{ + "name": "@types/mdast", + "version": "3.0.11", + "description": "TypeScript definitions for Mdast", + "homepage": "https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/mdast", + "license": "MIT", + "contributors": [ + { + "name": "Christian Murphy", + "url": "https://github.com/ChristianMurphy", + "githubUsername": "ChristianMurphy" + }, + { + "name": "Jun Lu", + "url": "https://github.com/lujun2", + "githubUsername": "lujun2" + }, + { + "name": "Remco Haszing", + "url": "https://github.com/remcohaszing", + "githubUsername": "remcohaszing" + }, + { + "name": "Titus Wormer", + "url": "https://github.com/wooorm", + "githubUsername": "wooorm" + } + ], + "main": "", + "types": "index.d.ts", + "repository": { + "type": "git", + "url": "https://github.com/DefinitelyTyped/DefinitelyTyped.git", + "directory": "types/mdast" + }, + "scripts": {}, + "dependencies": { + "@types/unist": "*" + }, + "typesPublisherContentHash": "3cbb57b89f230aa1b6dd7967bcdac99de4dcfb625ffbb6cbb03dd8185965d216", + "typeScriptVersion": "4.3" +} \ No newline at end of file diff --git a/_extensions/d2/node_modules/@types/ms/LICENSE b/_extensions/d2/node_modules/@types/ms/LICENSE new file mode 100644 index 00000000..4b1ad51b --- /dev/null +++ b/_extensions/d2/node_modules/@types/ms/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/_extensions/d2/node_modules/@types/ms/README.md b/_extensions/d2/node_modules/@types/ms/README.md new file mode 100644 index 00000000..f67543f6 --- /dev/null +++ b/_extensions/d2/node_modules/@types/ms/README.md @@ -0,0 +1,16 @@ +# Installation +> `npm install --save @types/ms` + +# Summary +This package contains type definitions for ms (https://github.com/zeit/ms). + +# Details +Files were exported from https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/ms + +Additional Details + * Last updated: Wed, 04 Sep 2019 20:48:21 GMT + * Dependencies: none + * Global values: none + +# Credits +These definitions were written by Zhiyuan Wang . diff --git a/_extensions/d2/node_modules/@types/ms/index.d.ts b/_extensions/d2/node_modules/@types/ms/index.d.ts new file mode 100644 index 00000000..be9f0a97 --- /dev/null +++ b/_extensions/d2/node_modules/@types/ms/index.d.ts @@ -0,0 +1,25 @@ +// Type definitions for ms v0.7.1 +// Project: https://github.com/zeit/ms +// Definitions by: Zhiyuan Wang +// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped + + + +/** +* Short/Long format for `value`. +* +* @param {Number} value +* @param {{long: boolean}} options +* @return {String} +*/ +declare function ms(value: number, options?: { long: boolean }): string; + +/** +* Parse the given `value` and return milliseconds. +* +* @param {String} value +* @return {Number} +*/ +declare function ms(value: string): number; + +export = ms; diff --git a/_extensions/d2/node_modules/@types/ms/package.json b/_extensions/d2/node_modules/@types/ms/package.json new file mode 100644 index 00000000..354e8aa3 --- /dev/null +++ b/_extensions/d2/node_modules/@types/ms/package.json @@ -0,0 +1,24 @@ +{ + "name": "@types/ms", + "version": "0.7.31", + "description": "TypeScript definitions for ms", + "license": "MIT", + "contributors": [ + { + "name": "Zhiyuan Wang", + "url": "https://github.com/danny8002", + "githubUsername": "danny8002" + } + ], + "main": "", + "types": "index", + "repository": { + "type": "git", + "url": "https://github.com/DefinitelyTyped/DefinitelyTyped.git", + "directory": "types/ms" + }, + "scripts": {}, + "dependencies": {}, + "typesPublisherContentHash": "ff2ed90b1d3539f07c5e91fe5cac8d4aa504a3290632a4e76a02d1684dcfabfc", + "typeScriptVersion": "2.0" +} \ No newline at end of file diff --git a/_extensions/d2/node_modules/@types/unist/LICENSE b/_extensions/d2/node_modules/@types/unist/LICENSE new file mode 100644 index 00000000..9e841e7a --- /dev/null +++ b/_extensions/d2/node_modules/@types/unist/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE diff --git a/_extensions/d2/node_modules/@types/unist/README.md b/_extensions/d2/node_modules/@types/unist/README.md new file mode 100644 index 00000000..283ae4d3 --- /dev/null +++ b/_extensions/d2/node_modules/@types/unist/README.md @@ -0,0 +1,16 @@ +# Installation +> `npm install --save @types/unist` + +# Summary +This package contains type definitions for Unist (https://github.com/syntax-tree/unist). + +# Details +Files were exported from https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/unist. + +### Additional Details + * Last updated: Thu, 15 Jul 2021 00:31:23 GMT + * Dependencies: none + * Global values: none + +# Credits +These definitions were written by [bizen241](https://github.com/bizen241), [Jun Lu](https://github.com/lujun2), [Hernan Rajchert](https://github.com/hrajchert), [Titus Wormer](https://github.com/wooorm), [Junyoung Choi](https://github.com/rokt33r), [Ben Moon](https://github.com/GuiltyDolphin), and [JounQin](https://github.com/JounQin). diff --git a/_extensions/d2/node_modules/@types/unist/index.d.ts b/_extensions/d2/node_modules/@types/unist/index.d.ts new file mode 100644 index 00000000..e65bc878 --- /dev/null +++ b/_extensions/d2/node_modules/@types/unist/index.d.ts @@ -0,0 +1,114 @@ +// Type definitions for non-npm package Unist 2.0 +// Project: https://github.com/syntax-tree/unist +// Definitions by: bizen241 +// Jun Lu +// Hernan Rajchert +// Titus Wormer +// Junyoung Choi +// Ben Moon +// JounQin +// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped +// TypeScript Version: 3.0 + +/** + * Syntactic units in unist syntax trees are called nodes. + * + * @typeParam TData Information from the ecosystem. Useful for more specific {@link Node.data}. + */ +export interface Node { + /** + * The variant of a node. + */ + type: string; + + /** + * Information from the ecosystem. + */ + data?: TData | undefined; + + /** + * Location of a node in a source document. + * Must not be present if a node is generated. + */ + position?: Position | undefined; +} + +/** + * Information associated by the ecosystem with the node. + * Space is guaranteed to never be specified by unist or specifications + * implementing unist. + */ +export interface Data { + [key: string]: unknown; +} + +/** + * Location of a node in a source file. + */ +export interface Position { + /** + * Place of the first character of the parsed source region. + */ + start: Point; + + /** + * Place of the first character after the parsed source region. + */ + end: Point; + + /** + * Start column at each index (plus start line) in the source region, + * for elements that span multiple lines. + */ + indent?: number[] | undefined; +} + +/** + * One place in a source file. + */ +export interface Point { + /** + * Line in a source file (1-indexed integer). + */ + line: number; + + /** + * Column in a source file (1-indexed integer). + */ + column: number; + /** + * Character in a source file (0-indexed integer). 
+ */ + offset?: number | undefined; +} + +/** + * Util for extracting type of {@link Node.data} + * + * @typeParam TNode Specific node type such as {@link Node} with {@link Data}, {@link Literal}, etc. + * + * @example `NodeData>` -> `{ key: string }` + */ +export type NodeData> = TNode extends Node ? TData : never; + +/** + * Nodes containing other nodes. + * + * @typeParam ChildNode Node item of {@link Parent.children} + */ +export interface Parent = Node, TData extends object = NodeData> + extends Node { + /** + * List representing the children of a node. + */ + children: ChildNode[]; +} + +/** + * Nodes containing a value. + * + * @typeParam Value Specific value type of {@link Literal.value} such as `string` for `Text` node + */ +export interface Literal extends Node { + value: Value; +} diff --git a/_extensions/d2/node_modules/@types/unist/package.json b/_extensions/d2/node_modules/@types/unist/package.json new file mode 100644 index 00000000..ab4403f5 --- /dev/null +++ b/_extensions/d2/node_modules/@types/unist/package.json @@ -0,0 +1,55 @@ +{ + "name": "@types/unist", + "version": "2.0.6", + "description": "TypeScript definitions for Unist", + "homepage": "https://github.com/DefinitelyTyped/DefinitelyTyped/tree/master/types/unist", + "license": "MIT", + "contributors": [ + { + "name": "bizen241", + "url": "https://github.com/bizen241", + "githubUsername": "bizen241" + }, + { + "name": "Jun Lu", + "url": "https://github.com/lujun2", + "githubUsername": "lujun2" + }, + { + "name": "Hernan Rajchert", + "url": "https://github.com/hrajchert", + "githubUsername": "hrajchert" + }, + { + "name": "Titus Wormer", + "url": "https://github.com/wooorm", + "githubUsername": "wooorm" + }, + { + "name": "Junyoung Choi", + "url": "https://github.com/rokt33r", + "githubUsername": "rokt33r" + }, + { + "name": "Ben Moon", + "url": "https://github.com/GuiltyDolphin", + "githubUsername": "GuiltyDolphin" + }, + { + "name": "JounQin", + "url": "https://github.com/JounQin", + "githubUsername": "JounQin" + } + ], + "main": "", + "types": "index.d.ts", + "repository": { + "type": "git", + "url": "https://github.com/DefinitelyTyped/DefinitelyTyped.git", + "directory": "types/unist" + }, + "scripts": {}, + "dependencies": {}, + "typesPublisherContentHash": "da5f2c3f967fa07869161991244bb7f9a237ac20494b902ffcb9108deb9b4e12", + "typeScriptVersion": "3.6" +} \ No newline at end of file diff --git a/_extensions/d2/node_modules/bail/index.d.ts b/_extensions/d2/node_modules/bail/index.d.ts new file mode 100644 index 00000000..88fe9aa6 --- /dev/null +++ b/_extensions/d2/node_modules/bail/index.d.ts @@ -0,0 +1,10 @@ +/** + * Throw a given error. + * + * @param {Error|null|undefined} [error] + * Maybe error. + * @returns {asserts error is null|undefined} + */ +export function bail( + error?: Error | null | undefined +): asserts error is null | undefined diff --git a/_extensions/d2/node_modules/bail/index.js b/_extensions/d2/node_modules/bail/index.js new file mode 100644 index 00000000..d0bc4424 --- /dev/null +++ b/_extensions/d2/node_modules/bail/index.js @@ -0,0 +1,12 @@ +/** + * Throw a given error. + * + * @param {Error|null|undefined} [error] + * Maybe error. 
+ * @returns {asserts error is null|undefined} + */ +export function bail(error) { + if (error) { + throw error + } +} diff --git a/_extensions/d2/node_modules/bail/license b/_extensions/d2/node_modules/bail/license new file mode 100644 index 00000000..32e7a3d9 --- /dev/null +++ b/_extensions/d2/node_modules/bail/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2015 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/_extensions/d2/node_modules/bail/package.json b/_extensions/d2/node_modules/bail/package.json new file mode 100644 index 00000000..d56f6faf --- /dev/null +++ b/_extensions/d2/node_modules/bail/package.json @@ -0,0 +1,73 @@ +{ + "name": "bail", + "version": "2.0.2", + "description": "Throw a given error", + "license": "MIT", + "keywords": [ + "fail", + "bail", + "throw", + "callback", + "error" + ], + "repository": "wooorm/bail", + "bugs": "https://github.com/wooorm/bail/issues", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "index.d.ts", + "index.js" + ], + "devDependencies": { + "@types/tape": "^4.0.0", + "c8": "^7.0.0", + "prettier": "^2.0.0", + "remark-cli": "^10.0.0", + "remark-preset-wooorm": "^9.0.0", + "rimraf": "^3.0.0", + "tape": "^5.0.0", + "tsd": "^0.18.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.46.0" + }, + "scripts": { + "prepublishOnly": "npm run build && npm run format", + "build": "rimraf \"*.d.ts\" && tsc && tsd && type-coverage", + "format": "remark . -qfo && prettier . 
-w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --branches 100 --functions 100 --lines 100 --statements 100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true + } +} diff --git a/_extensions/d2/node_modules/bail/readme.md b/_extensions/d2/node_modules/bail/readme.md new file mode 100644 index 00000000..8cd18235 --- /dev/null +++ b/_extensions/d2/node_modules/bail/readme.md @@ -0,0 +1,147 @@ +# bail + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] + +Throw if given an error. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`bail(err?)`](#bailerr) +* [Types](#types) +* [Compatibility](#compatibility) +* [Security](#security) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This package throws a given error. + +## When should I use this? + +Use this package if you’re building some scripts that might theoretically get +errors but frequently don’t and you keep writing `if (error) throw error` over +and over again and you’re just really done with that. + +## Install + +This package is [ESM only][esm]. +In Node.js (version 12.20+, 14.14+, or 16.0+), install with [npm][]: + +```sh +npm install bail +``` + +In Deno with [Skypack][]: + +```js +import {bail} from 'https://cdn.skypack.dev/bail@2?dts' +``` + +In browsers with [Skypack][]: + +```html + +``` + +## Use + +```js +import {bail} from 'bail' + +bail() + +bail(new Error('failure')) +// Error: failure +// at repl:1:6 +// at REPLServer.defaultEval (repl.js:154:27) +// … +``` + +## API + +This package exports the following identifier: `bail`. +There is no default export. + +### `bail(err?)` + +Throw a given error (`Error?`). + +## Types + +This package is fully typed with [TypeScript][]. +There are no extra exported types. + +## Compatibility + +This package is at least compatible with all maintained versions of Node.js. +As of now, that is Node.js 12.20+, 14.14+, and 16.0+. +It also works in Deno and modern browsers. + +## Security + +This package is safe. + +## Related + +* [`noop`][noop] +* [`noop2`][noop2] +* [`noop3`][noop3] + +## Contribute + +Yes please! +See [How to Contribute to Open Source][contribute]. 
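For illustration beyond the readme (editorial sketch, not part of the vendored package): the `index.d.ts` shown earlier types `bail` as an assertion function, `asserts error is null | undefined`, so a call that returns narrows the error away in TypeScript. A minimal sketch with hypothetical names:

```ts
import {bail} from 'bail';

// Hypothetical callback-style handler, used only to show the narrowing.
function onDone(error: Error | null, result?: string): void {
  bail(error); // throws if `error` is truthy
  // After the call, `error` is narrowed to null | undefined,
  // so only the success path needs handling here.
  console.log(result ?? '(no result)');
}

onDone(null, 'ok');              // logs "ok"
// onDone(new Error('failure')); // would throw: Error: failure
```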
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/wooorm/bail/workflows/main/badge.svg + +[build]: https://github.com/wooorm/bail/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/wooorm/bail.svg + +[coverage]: https://codecov.io/github/wooorm/bail + +[downloads-badge]: https://img.shields.io/npm/dm/bail.svg + +[downloads]: https://www.npmjs.com/package/bail + +[size-badge]: https://img.shields.io/bundlephobia/minzip/bail.svg + +[size]: https://bundlephobia.com/result?p=bail + +[npm]: https://docs.npmjs.com/cli/install + +[skypack]: https://www.skypack.dev + +[license]: license + +[author]: https://wooorm.com + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[typescript]: https://www.typescriptlang.org + +[contribute]: https://opensource.guide/how-to-contribute/ + +[noop]: https://www.npmjs.com/package/noop + +[noop2]: https://www.npmjs.com/package/noop2 + +[noop3]: https://www.npmjs.com/package/noop3 diff --git a/_extensions/d2/node_modules/balanced-match/.github/FUNDING.yml b/_extensions/d2/node_modules/balanced-match/.github/FUNDING.yml new file mode 100644 index 00000000..cea8b16e --- /dev/null +++ b/_extensions/d2/node_modules/balanced-match/.github/FUNDING.yml @@ -0,0 +1,2 @@ +tidelift: "npm/balanced-match" +patreon: juliangruber diff --git a/_extensions/d2/node_modules/balanced-match/LICENSE.md b/_extensions/d2/node_modules/balanced-match/LICENSE.md new file mode 100644 index 00000000..2cdc8e41 --- /dev/null +++ b/_extensions/d2/node_modules/balanced-match/LICENSE.md @@ -0,0 +1,21 @@ +(MIT) + +Copyright (c) 2013 Julian Gruber <julian@juliangruber.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/_extensions/d2/node_modules/balanced-match/README.md b/_extensions/d2/node_modules/balanced-match/README.md new file mode 100644 index 00000000..d2a48b6b --- /dev/null +++ b/_extensions/d2/node_modules/balanced-match/README.md @@ -0,0 +1,97 @@ +# balanced-match + +Match balanced string pairs, like `{` and `}` or `` and ``. Supports regular expressions as well! 
+ +[![build status](https://secure.travis-ci.org/juliangruber/balanced-match.svg)](http://travis-ci.org/juliangruber/balanced-match) +[![downloads](https://img.shields.io/npm/dm/balanced-match.svg)](https://www.npmjs.org/package/balanced-match) + +[![testling badge](https://ci.testling.com/juliangruber/balanced-match.png)](https://ci.testling.com/juliangruber/balanced-match) + +## Example + +Get the first matching pair of braces: + +```js +var balanced = require('balanced-match'); + +console.log(balanced('{', '}', 'pre{in{nested}}post')); +console.log(balanced('{', '}', 'pre{first}between{second}post')); +console.log(balanced(/\s+\{\s+/, /\s+\}\s+/, 'pre { in{nest} } post')); +``` + +The matches are: + +```bash +$ node example.js +{ start: 3, end: 14, pre: 'pre', body: 'in{nested}', post: 'post' } +{ start: 3, + end: 9, + pre: 'pre', + body: 'first', + post: 'between{second}post' } +{ start: 3, end: 17, pre: 'pre', body: 'in{nest}', post: 'post' } +``` + +## API + +### var m = balanced(a, b, str) + +For the first non-nested matching pair of `a` and `b` in `str`, return an +object with those keys: + +* **start** the index of the first match of `a` +* **end** the index of the matching `b` +* **pre** the preamble, `a` and `b` not included +* **body** the match, `a` and `b` not included +* **post** the postscript, `a` and `b` not included + +If there's no match, `undefined` will be returned. + +If the `str` contains more `a` than `b` / there are unmatched pairs, the first match that was closed will be used. For example, `{{a}` will match `['{', 'a', '']` and `{a}}` will match `['', 'a', '}']`. + +### var r = balanced.range(a, b, str) + +For the first non-nested matching pair of `a` and `b` in `str`, return an +array with indexes: `[ , ]`. + +If there's no match, `undefined` will be returned. + +If the `str` contains more `a` than `b` / there are unmatched pairs, the first match that was closed will be used. For example, `{{a}` will match `[ 1, 3 ]` and `{a}}` will match `[0, 2]`. + +## Installation + +With [npm](https://npmjs.org) do: + +```bash +npm install balanced-match +``` + +## Security contact information + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure. + +## License + +(MIT) + +Copyright (c) 2013 Julian Gruber <julian@juliangruber.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
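To complement the readme above (an editorial sketch, not part of the vendored package), here are both documented entry points, `balanced()` and `balanced.range()`, run on the readme's own nested example. It assumes type declarations for this CommonJS module are available; otherwise the import line is the only TypeScript-specific part.

```ts
import balanced = require('balanced-match');

const input = 'pre{in{nested}}post';

// First non-nested pair, with the surrounding text split out.
console.log(balanced('{', '}', input));
// -> { start: 3, end: 14, pre: 'pre', body: 'in{nested}', post: 'post' }

// Index-only variant: [ index of the opening '{', index of its matching '}' ].
console.log(balanced.range('{', '}', input));
// -> [ 3, 14 ]
```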
diff --git a/_extensions/d2/node_modules/balanced-match/index.js b/_extensions/d2/node_modules/balanced-match/index.js new file mode 100644 index 00000000..c67a6460 --- /dev/null +++ b/_extensions/d2/node_modules/balanced-match/index.js @@ -0,0 +1,62 @@ +'use strict'; +module.exports = balanced; +function balanced(a, b, str) { + if (a instanceof RegExp) a = maybeMatch(a, str); + if (b instanceof RegExp) b = maybeMatch(b, str); + + var r = range(a, b, str); + + return r && { + start: r[0], + end: r[1], + pre: str.slice(0, r[0]), + body: str.slice(r[0] + a.length, r[1]), + post: str.slice(r[1] + b.length) + }; +} + +function maybeMatch(reg, str) { + var m = str.match(reg); + return m ? m[0] : null; +} + +balanced.range = range; +function range(a, b, str) { + var begs, beg, left, right, result; + var ai = str.indexOf(a); + var bi = str.indexOf(b, ai + 1); + var i = ai; + + if (ai >= 0 && bi > 0) { + if(a===b) { + return [ai, bi]; + } + begs = []; + left = str.length; + + while (i >= 0 && !result) { + if (i == ai) { + begs.push(i); + ai = str.indexOf(a, i + 1); + } else if (begs.length == 1) { + result = [ begs.pop(), bi ]; + } else { + beg = begs.pop(); + if (beg < left) { + left = beg; + right = bi; + } + + bi = str.indexOf(b, i + 1); + } + + i = ai < bi && ai >= 0 ? ai : bi; + } + + if (begs.length) { + result = [ left, right ]; + } + } + + return result; +} diff --git a/_extensions/d2/node_modules/balanced-match/package.json b/_extensions/d2/node_modules/balanced-match/package.json new file mode 100644 index 00000000..ce6073e0 --- /dev/null +++ b/_extensions/d2/node_modules/balanced-match/package.json @@ -0,0 +1,48 @@ +{ + "name": "balanced-match", + "description": "Match balanced character pairs, like \"{\" and \"}\"", + "version": "1.0.2", + "repository": { + "type": "git", + "url": "git://github.com/juliangruber/balanced-match.git" + }, + "homepage": "https://github.com/juliangruber/balanced-match", + "main": "index.js", + "scripts": { + "test": "tape test/test.js", + "bench": "matcha test/bench.js" + }, + "devDependencies": { + "matcha": "^0.7.0", + "tape": "^4.6.0" + }, + "keywords": [ + "match", + "regexp", + "test", + "balanced", + "parse" + ], + "author": { + "name": "Julian Gruber", + "email": "mail@juliangruber.com", + "url": "http://juliangruber.com" + }, + "license": "MIT", + "testling": { + "files": "test/*.js", + "browsers": [ + "ie/8..latest", + "firefox/20..latest", + "firefox/nightly", + "chrome/25..latest", + "chrome/canary", + "opera/12..latest", + "opera/next", + "safari/5.1..latest", + "ipad/6.0..latest", + "iphone/6.0..latest", + "android-browser/4.2..latest" + ] + } +} diff --git a/_extensions/d2/node_modules/brace-expansion/LICENSE b/_extensions/d2/node_modules/brace-expansion/LICENSE new file mode 100644 index 00000000..de322667 --- /dev/null +++ b/_extensions/d2/node_modules/brace-expansion/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2013 Julian Gruber + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/_extensions/d2/node_modules/brace-expansion/README.md b/_extensions/d2/node_modules/brace-expansion/README.md new file mode 100644 index 00000000..6b4e0e16 --- /dev/null +++ b/_extensions/d2/node_modules/brace-expansion/README.md @@ -0,0 +1,129 @@ +# brace-expansion + +[Brace expansion](https://www.gnu.org/software/bash/manual/html_node/Brace-Expansion.html), +as known from sh/bash, in JavaScript. + +[![build status](https://secure.travis-ci.org/juliangruber/brace-expansion.svg)](http://travis-ci.org/juliangruber/brace-expansion) +[![downloads](https://img.shields.io/npm/dm/brace-expansion.svg)](https://www.npmjs.org/package/brace-expansion) +[![Greenkeeper badge](https://badges.greenkeeper.io/juliangruber/brace-expansion.svg)](https://greenkeeper.io/) + +[![testling badge](https://ci.testling.com/juliangruber/brace-expansion.png)](https://ci.testling.com/juliangruber/brace-expansion) + +## Example + +```js +var expand = require('brace-expansion'); + +expand('file-{a,b,c}.jpg') +// => ['file-a.jpg', 'file-b.jpg', 'file-c.jpg'] + +expand('-v{,,}') +// => ['-v', '-v', '-v'] + +expand('file{0..2}.jpg') +// => ['file0.jpg', 'file1.jpg', 'file2.jpg'] + +expand('file-{a..c}.jpg') +// => ['file-a.jpg', 'file-b.jpg', 'file-c.jpg'] + +expand('file{2..0}.jpg') +// => ['file2.jpg', 'file1.jpg', 'file0.jpg'] + +expand('file{0..4..2}.jpg') +// => ['file0.jpg', 'file2.jpg', 'file4.jpg'] + +expand('file-{a..e..2}.jpg') +// => ['file-a.jpg', 'file-c.jpg', 'file-e.jpg'] + +expand('file{00..10..5}.jpg') +// => ['file00.jpg', 'file05.jpg', 'file10.jpg'] + +expand('{{A..C},{a..c}}') +// => ['A', 'B', 'C', 'a', 'b', 'c'] + +expand('ppp{,config,oe{,conf}}') +// => ['ppp', 'pppconfig', 'pppoe', 'pppoeconf'] +``` + +## API + +```js +var expand = require('brace-expansion'); +``` + +### var expanded = expand(str) + +Return an array of all possible and valid expansions of `str`. If none are +found, `[str]` is returned. + +Valid expansions are: + +```js +/^(.*,)+(.+)?$/ +// {a,b,...} +``` + +A comma separated list of options, like `{a,b}` or `{a,{b,c}}` or `{,a,}`. + +```js +/^-?\d+\.\.-?\d+(\.\.-?\d+)?$/ +// {x..y[..incr]} +``` + +A numeric sequence from `x` to `y` inclusive, with optional increment. +If `x` or `y` start with a leading `0`, all the numbers will be padded +to have equal length. Negative numbers and backwards iteration work too. + +```js +/^[a-zA-Z]\.\.[a-zA-Z](\.\.-?\d+)?$/ +// {x..y[..incr]} +``` + +An alphabetic sequence from `x` to `y` inclusive, with optional increment. +`x` and `y` must be exactly one character, and if given, `incr` must be a +number. + +For compatibility reasons, the string `${` is not eligible for brace expansion. + +## Installation + +With [npm](https://npmjs.org) do: + +```bash +npm install brace-expansion +``` + +## Contributors + +- [Julian Gruber](https://github.com/juliangruber) +- [Isaac Z. Schlueter](https://github.com/isaacs) + +## Sponsors + +This module is proudly supported by my [Sponsors](https://github.com/juliangruber/sponsors)!
+ +Do you want to support modules like this to improve their quality, stability and weigh in on new features? Then please consider donating to my [Patreon](https://www.patreon.com/juliangruber). Not sure how much of my modules you're using? Try [feross/thanks](https://github.com/feross/thanks)! + +## License + +(MIT) + +Copyright (c) 2013 Julian Gruber <julian@juliangruber.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/_extensions/d2/node_modules/brace-expansion/index.js b/_extensions/d2/node_modules/brace-expansion/index.js new file mode 100644 index 00000000..0478be81 --- /dev/null +++ b/_extensions/d2/node_modules/brace-expansion/index.js @@ -0,0 +1,201 @@ +var concatMap = require('concat-map'); +var balanced = require('balanced-match'); + +module.exports = expandTop; + +var escSlash = '\0SLASH'+Math.random()+'\0'; +var escOpen = '\0OPEN'+Math.random()+'\0'; +var escClose = '\0CLOSE'+Math.random()+'\0'; +var escComma = '\0COMMA'+Math.random()+'\0'; +var escPeriod = '\0PERIOD'+Math.random()+'\0'; + +function numeric(str) { + return parseInt(str, 10) == str + ? parseInt(str, 10) + : str.charCodeAt(0); +} + +function escapeBraces(str) { + return str.split('\\\\').join(escSlash) + .split('\\{').join(escOpen) + .split('\\}').join(escClose) + .split('\\,').join(escComma) + .split('\\.').join(escPeriod); +} + +function unescapeBraces(str) { + return str.split(escSlash).join('\\') + .split(escOpen).join('{') + .split(escClose).join('}') + .split(escComma).join(',') + .split(escPeriod).join('.'); +} + + +// Basically just str.split(","), but handling cases +// where we have nested braced sections, which should be +// treated as individual members, like {a,{b,c},d} +function parseCommaParts(str) { + if (!str) + return ['']; + + var parts = []; + var m = balanced('{', '}', str); + + if (!m) + return str.split(','); + + var pre = m.pre; + var body = m.body; + var post = m.post; + var p = pre.split(','); + + p[p.length-1] += '{' + body + '}'; + var postParts = parseCommaParts(post); + if (post.length) { + p[p.length-1] += postParts.shift(); + p.push.apply(p, postParts); + } + + parts.push.apply(parts, p); + + return parts; +} + +function expandTop(str) { + if (!str) + return []; + + // I don't know why Bash 4.3 does this, but it does. + // Anything starting with {} will have the first two bytes preserved + // but *only* at the top level, so {},a}b will not expand to anything, + // but a{},b}c will be expanded to [a}c,abc]. 
+ // One could argue that this is a bug in Bash, but since the goal of + // this module is to match Bash's rules, we escape a leading {} + if (str.substr(0, 2) === '{}') { + str = '\\{\\}' + str.substr(2); + } + + return expand(escapeBraces(str), true).map(unescapeBraces); +} + +function identity(e) { + return e; +} + +function embrace(str) { + return '{' + str + '}'; +} +function isPadded(el) { + return /^-?0\d/.test(el); +} + +function lte(i, y) { + return i <= y; +} +function gte(i, y) { + return i >= y; +} + +function expand(str, isTop) { + var expansions = []; + + var m = balanced('{', '}', str); + if (!m || /\$$/.test(m.pre)) return [str]; + + var isNumericSequence = /^-?\d+\.\.-?\d+(?:\.\.-?\d+)?$/.test(m.body); + var isAlphaSequence = /^[a-zA-Z]\.\.[a-zA-Z](?:\.\.-?\d+)?$/.test(m.body); + var isSequence = isNumericSequence || isAlphaSequence; + var isOptions = m.body.indexOf(',') >= 0; + if (!isSequence && !isOptions) { + // {a},b} + if (m.post.match(/,.*\}/)) { + str = m.pre + '{' + m.body + escClose + m.post; + return expand(str); + } + return [str]; + } + + var n; + if (isSequence) { + n = m.body.split(/\.\./); + } else { + n = parseCommaParts(m.body); + if (n.length === 1) { + // x{{a,b}}y ==> x{a}y x{b}y + n = expand(n[0], false).map(embrace); + if (n.length === 1) { + var post = m.post.length + ? expand(m.post, false) + : ['']; + return post.map(function(p) { + return m.pre + n[0] + p; + }); + } + } + } + + // at this point, n is the parts, and we know it's not a comma set + // with a single entry. + + // no need to expand pre, since it is guaranteed to be free of brace-sets + var pre = m.pre; + var post = m.post.length + ? expand(m.post, false) + : ['']; + + var N; + + if (isSequence) { + var x = numeric(n[0]); + var y = numeric(n[1]); + var width = Math.max(n[0].length, n[1].length) + var incr = n.length == 3 + ? 
Math.abs(numeric(n[2])) + : 1; + var test = lte; + var reverse = y < x; + if (reverse) { + incr *= -1; + test = gte; + } + var pad = n.some(isPadded); + + N = []; + + for (var i = x; test(i, y); i += incr) { + var c; + if (isAlphaSequence) { + c = String.fromCharCode(i); + if (c === '\\') + c = ''; + } else { + c = String(i); + if (pad) { + var need = width - c.length; + if (need > 0) { + var z = new Array(need + 1).join('0'); + if (i < 0) + c = '-' + z + c.slice(1); + else + c = z + c; + } + } + } + N.push(c); + } + } else { + N = concatMap(n, function(el) { return expand(el, false) }); + } + + for (var j = 0; j < N.length; j++) { + for (var k = 0; k < post.length; k++) { + var expansion = pre + N[j] + post[k]; + if (!isTop || isSequence || expansion) + expansions.push(expansion); + } + } + + return expansions; +} + diff --git a/_extensions/d2/node_modules/brace-expansion/package.json b/_extensions/d2/node_modules/brace-expansion/package.json new file mode 100644 index 00000000..a18faa8f --- /dev/null +++ b/_extensions/d2/node_modules/brace-expansion/package.json @@ -0,0 +1,47 @@ +{ + "name": "brace-expansion", + "description": "Brace expansion as known from sh/bash", + "version": "1.1.11", + "repository": { + "type": "git", + "url": "git://github.com/juliangruber/brace-expansion.git" + }, + "homepage": "https://github.com/juliangruber/brace-expansion", + "main": "index.js", + "scripts": { + "test": "tape test/*.js", + "gentest": "bash test/generate.sh", + "bench": "matcha test/perf/bench.js" + }, + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + }, + "devDependencies": { + "matcha": "^0.7.0", + "tape": "^4.6.0" + }, + "keywords": [], + "author": { + "name": "Julian Gruber", + "email": "mail@juliangruber.com", + "url": "http://juliangruber.com" + }, + "license": "MIT", + "testling": { + "files": "test/*.js", + "browsers": [ + "ie/8..latest", + "firefox/20..latest", + "firefox/nightly", + "chrome/25..latest", + "chrome/canary", + "opera/12..latest", + "opera/next", + "safari/5.1..latest", + "ipad/6.0..latest", + "iphone/6.0..latest", + "android-browser/4.2..latest" + ] + } +} diff --git a/_extensions/d2/node_modules/character-entities/index.d.ts b/_extensions/d2/node_modules/character-entities/index.d.ts new file mode 100644 index 00000000..aa7e651a --- /dev/null +++ b/_extensions/d2/node_modules/character-entities/index.d.ts @@ -0,0 +1,6 @@ +/** + * Map of named character references. + * + * @type {Record} + */ +export const characterEntities: Record diff --git a/_extensions/d2/node_modules/character-entities/index.js b/_extensions/d2/node_modules/character-entities/index.js new file mode 100644 index 00000000..9222e7a7 --- /dev/null +++ b/_extensions/d2/node_modules/character-entities/index.js @@ -0,0 +1,2132 @@ +/** + * Map of named character references. 
+ * + * @type {Record} + */ +export const characterEntities = { + AElig: 'Æ', + AMP: '&', + Aacute: 'Á', + Abreve: 'Ă', + Acirc: 'Â', + Acy: 'А', + Afr: '𝔄', + Agrave: 'À', + Alpha: 'Α', + Amacr: 'Ā', + And: '⩓', + Aogon: 'Ą', + Aopf: '𝔸', + ApplyFunction: '⁡', + Aring: 'Å', + Ascr: '𝒜', + Assign: '≔', + Atilde: 'Ã', + Auml: 'Ä', + Backslash: '∖', + Barv: '⫧', + Barwed: '⌆', + Bcy: 'Б', + Because: '∵', + Bernoullis: 'ℬ', + Beta: 'Β', + Bfr: '𝔅', + Bopf: '𝔹', + Breve: '˘', + Bscr: 'ℬ', + Bumpeq: '≎', + CHcy: 'Ч', + COPY: '©', + Cacute: 'Ć', + Cap: '⋒', + CapitalDifferentialD: 'ⅅ', + Cayleys: 'ℭ', + Ccaron: 'Č', + Ccedil: 'Ç', + Ccirc: 'Ĉ', + Cconint: '∰', + Cdot: 'Ċ', + Cedilla: '¸', + CenterDot: '·', + Cfr: 'ℭ', + Chi: 'Χ', + CircleDot: '⊙', + CircleMinus: '⊖', + CirclePlus: '⊕', + CircleTimes: '⊗', + ClockwiseContourIntegral: '∲', + CloseCurlyDoubleQuote: '”', + CloseCurlyQuote: '’', + Colon: '∷', + Colone: '⩴', + Congruent: '≡', + Conint: '∯', + ContourIntegral: '∮', + Copf: 'ℂ', + Coproduct: '∐', + CounterClockwiseContourIntegral: '∳', + Cross: '⨯', + Cscr: '𝒞', + Cup: '⋓', + CupCap: '≍', + DD: 'ⅅ', + DDotrahd: '⤑', + DJcy: 'Ђ', + DScy: 'Ѕ', + DZcy: 'Џ', + Dagger: '‡', + Darr: '↡', + Dashv: '⫤', + Dcaron: 'Ď', + Dcy: 'Д', + Del: '∇', + Delta: 'Δ', + Dfr: '𝔇', + DiacriticalAcute: '´', + DiacriticalDot: '˙', + DiacriticalDoubleAcute: '˝', + DiacriticalGrave: '`', + DiacriticalTilde: '˜', + Diamond: '⋄', + DifferentialD: 'ⅆ', + Dopf: '𝔻', + Dot: '¨', + DotDot: '⃜', + DotEqual: '≐', + DoubleContourIntegral: '∯', + DoubleDot: '¨', + DoubleDownArrow: '⇓', + DoubleLeftArrow: '⇐', + DoubleLeftRightArrow: '⇔', + DoubleLeftTee: '⫤', + DoubleLongLeftArrow: '⟸', + DoubleLongLeftRightArrow: '⟺', + DoubleLongRightArrow: '⟹', + DoubleRightArrow: '⇒', + DoubleRightTee: '⊨', + DoubleUpArrow: '⇑', + DoubleUpDownArrow: '⇕', + DoubleVerticalBar: '∥', + DownArrow: '↓', + DownArrowBar: '⤓', + DownArrowUpArrow: '⇵', + DownBreve: '̑', + DownLeftRightVector: '⥐', + DownLeftTeeVector: '⥞', + DownLeftVector: '↽', + DownLeftVectorBar: '⥖', + DownRightTeeVector: '⥟', + DownRightVector: '⇁', + DownRightVectorBar: '⥗', + DownTee: '⊤', + DownTeeArrow: '↧', + Downarrow: '⇓', + Dscr: '𝒟', + Dstrok: 'Đ', + ENG: 'Ŋ', + ETH: 'Ð', + Eacute: 'É', + Ecaron: 'Ě', + Ecirc: 'Ê', + Ecy: 'Э', + Edot: 'Ė', + Efr: '𝔈', + Egrave: 'È', + Element: '∈', + Emacr: 'Ē', + EmptySmallSquare: '◻', + EmptyVerySmallSquare: '▫', + Eogon: 'Ę', + Eopf: '𝔼', + Epsilon: 'Ε', + Equal: '⩵', + EqualTilde: '≂', + Equilibrium: '⇌', + Escr: 'ℰ', + Esim: '⩳', + Eta: 'Η', + Euml: 'Ë', + Exists: '∃', + ExponentialE: 'ⅇ', + Fcy: 'Ф', + Ffr: '𝔉', + FilledSmallSquare: '◼', + FilledVerySmallSquare: '▪', + Fopf: '𝔽', + ForAll: '∀', + Fouriertrf: 'ℱ', + Fscr: 'ℱ', + GJcy: 'Ѓ', + GT: '>', + Gamma: 'Γ', + Gammad: 'Ϝ', + Gbreve: 'Ğ', + Gcedil: 'Ģ', + Gcirc: 'Ĝ', + Gcy: 'Г', + Gdot: 'Ġ', + Gfr: '𝔊', + Gg: '⋙', + Gopf: '𝔾', + GreaterEqual: '≥', + GreaterEqualLess: '⋛', + GreaterFullEqual: '≧', + GreaterGreater: '⪢', + GreaterLess: '≷', + GreaterSlantEqual: '⩾', + GreaterTilde: '≳', + Gscr: '𝒢', + Gt: '≫', + HARDcy: 'Ъ', + Hacek: 'ˇ', + Hat: '^', + Hcirc: 'Ĥ', + Hfr: 'ℌ', + HilbertSpace: 'ℋ', + Hopf: 'ℍ', + HorizontalLine: '─', + Hscr: 'ℋ', + Hstrok: 'Ħ', + HumpDownHump: '≎', + HumpEqual: '≏', + IEcy: 'Е', + IJlig: 'IJ', + IOcy: 'Ё', + Iacute: 'Í', + Icirc: 'Î', + Icy: 'И', + Idot: 'İ', + Ifr: 'ℑ', + Igrave: 'Ì', + Im: 'ℑ', + Imacr: 'Ī', + ImaginaryI: 'ⅈ', + Implies: '⇒', + Int: '∬', + Integral: '∫', + Intersection: '⋂', + InvisibleComma: '⁣', + InvisibleTimes: '⁢', + 
Iogon: 'Į', + Iopf: '𝕀', + Iota: 'Ι', + Iscr: 'ℐ', + Itilde: 'Ĩ', + Iukcy: 'І', + Iuml: 'Ï', + Jcirc: 'Ĵ', + Jcy: 'Й', + Jfr: '𝔍', + Jopf: '𝕁', + Jscr: '𝒥', + Jsercy: 'Ј', + Jukcy: 'Є', + KHcy: 'Х', + KJcy: 'Ќ', + Kappa: 'Κ', + Kcedil: 'Ķ', + Kcy: 'К', + Kfr: '𝔎', + Kopf: '𝕂', + Kscr: '𝒦', + LJcy: 'Љ', + LT: '<', + Lacute: 'Ĺ', + Lambda: 'Λ', + Lang: '⟪', + Laplacetrf: 'ℒ', + Larr: '↞', + Lcaron: 'Ľ', + Lcedil: 'Ļ', + Lcy: 'Л', + LeftAngleBracket: '⟨', + LeftArrow: '←', + LeftArrowBar: '⇤', + LeftArrowRightArrow: '⇆', + LeftCeiling: '⌈', + LeftDoubleBracket: '⟦', + LeftDownTeeVector: '⥡', + LeftDownVector: '⇃', + LeftDownVectorBar: '⥙', + LeftFloor: '⌊', + LeftRightArrow: '↔', + LeftRightVector: '⥎', + LeftTee: '⊣', + LeftTeeArrow: '↤', + LeftTeeVector: '⥚', + LeftTriangle: '⊲', + LeftTriangleBar: '⧏', + LeftTriangleEqual: '⊴', + LeftUpDownVector: '⥑', + LeftUpTeeVector: '⥠', + LeftUpVector: '↿', + LeftUpVectorBar: '⥘', + LeftVector: '↼', + LeftVectorBar: '⥒', + Leftarrow: '⇐', + Leftrightarrow: '⇔', + LessEqualGreater: '⋚', + LessFullEqual: '≦', + LessGreater: '≶', + LessLess: '⪡', + LessSlantEqual: '⩽', + LessTilde: '≲', + Lfr: '𝔏', + Ll: '⋘', + Lleftarrow: '⇚', + Lmidot: 'Ŀ', + LongLeftArrow: '⟵', + LongLeftRightArrow: '⟷', + LongRightArrow: '⟶', + Longleftarrow: '⟸', + Longleftrightarrow: '⟺', + Longrightarrow: '⟹', + Lopf: '𝕃', + LowerLeftArrow: '↙', + LowerRightArrow: '↘', + Lscr: 'ℒ', + Lsh: '↰', + Lstrok: 'Ł', + Lt: '≪', + Map: '⤅', + Mcy: 'М', + MediumSpace: ' ', + Mellintrf: 'ℳ', + Mfr: '𝔐', + MinusPlus: '∓', + Mopf: '𝕄', + Mscr: 'ℳ', + Mu: 'Μ', + NJcy: 'Њ', + Nacute: 'Ń', + Ncaron: 'Ň', + Ncedil: 'Ņ', + Ncy: 'Н', + NegativeMediumSpace: '​', + NegativeThickSpace: '​', + NegativeThinSpace: '​', + NegativeVeryThinSpace: '​', + NestedGreaterGreater: '≫', + NestedLessLess: '≪', + NewLine: '\n', + Nfr: '𝔑', + NoBreak: '⁠', + NonBreakingSpace: ' ', + Nopf: 'ℕ', + Not: '⫬', + NotCongruent: '≢', + NotCupCap: '≭', + NotDoubleVerticalBar: '∦', + NotElement: '∉', + NotEqual: '≠', + NotEqualTilde: '≂̸', + NotExists: '∄', + NotGreater: '≯', + NotGreaterEqual: '≱', + NotGreaterFullEqual: '≧̸', + NotGreaterGreater: '≫̸', + NotGreaterLess: '≹', + NotGreaterSlantEqual: '⩾̸', + NotGreaterTilde: '≵', + NotHumpDownHump: '≎̸', + NotHumpEqual: '≏̸', + NotLeftTriangle: '⋪', + NotLeftTriangleBar: '⧏̸', + NotLeftTriangleEqual: '⋬', + NotLess: '≮', + NotLessEqual: '≰', + NotLessGreater: '≸', + NotLessLess: '≪̸', + NotLessSlantEqual: '⩽̸', + NotLessTilde: '≴', + NotNestedGreaterGreater: '⪢̸', + NotNestedLessLess: '⪡̸', + NotPrecedes: '⊀', + NotPrecedesEqual: '⪯̸', + NotPrecedesSlantEqual: '⋠', + NotReverseElement: '∌', + NotRightTriangle: '⋫', + NotRightTriangleBar: '⧐̸', + NotRightTriangleEqual: '⋭', + NotSquareSubset: '⊏̸', + NotSquareSubsetEqual: '⋢', + NotSquareSuperset: '⊐̸', + NotSquareSupersetEqual: '⋣', + NotSubset: '⊂⃒', + NotSubsetEqual: '⊈', + NotSucceeds: '⊁', + NotSucceedsEqual: '⪰̸', + NotSucceedsSlantEqual: '⋡', + NotSucceedsTilde: '≿̸', + NotSuperset: '⊃⃒', + NotSupersetEqual: '⊉', + NotTilde: '≁', + NotTildeEqual: '≄', + NotTildeFullEqual: '≇', + NotTildeTilde: '≉', + NotVerticalBar: '∤', + Nscr: '𝒩', + Ntilde: 'Ñ', + Nu: 'Ν', + OElig: 'Œ', + Oacute: 'Ó', + Ocirc: 'Ô', + Ocy: 'О', + Odblac: 'Ő', + Ofr: '𝔒', + Ograve: 'Ò', + Omacr: 'Ō', + Omega: 'Ω', + Omicron: 'Ο', + Oopf: '𝕆', + OpenCurlyDoubleQuote: '“', + OpenCurlyQuote: '‘', + Or: '⩔', + Oscr: '𝒪', + Oslash: 'Ø', + Otilde: 'Õ', + Otimes: '⨷', + Ouml: 'Ö', + OverBar: '‾', + OverBrace: '⏞', + OverBracket: '⎴', + OverParenthesis: '⏜', + 
PartialD: '∂', + Pcy: 'П', + Pfr: '𝔓', + Phi: 'Φ', + Pi: 'Π', + PlusMinus: '±', + Poincareplane: 'ℌ', + Popf: 'ℙ', + Pr: '⪻', + Precedes: '≺', + PrecedesEqual: '⪯', + PrecedesSlantEqual: '≼', + PrecedesTilde: '≾', + Prime: '″', + Product: '∏', + Proportion: '∷', + Proportional: '∝', + Pscr: '𝒫', + Psi: 'Ψ', + QUOT: '"', + Qfr: '𝔔', + Qopf: 'ℚ', + Qscr: '𝒬', + RBarr: '⤐', + REG: '®', + Racute: 'Ŕ', + Rang: '⟫', + Rarr: '↠', + Rarrtl: '⤖', + Rcaron: 'Ř', + Rcedil: 'Ŗ', + Rcy: 'Р', + Re: 'ℜ', + ReverseElement: '∋', + ReverseEquilibrium: '⇋', + ReverseUpEquilibrium: '⥯', + Rfr: 'ℜ', + Rho: 'Ρ', + RightAngleBracket: '⟩', + RightArrow: '→', + RightArrowBar: '⇥', + RightArrowLeftArrow: '⇄', + RightCeiling: '⌉', + RightDoubleBracket: '⟧', + RightDownTeeVector: '⥝', + RightDownVector: '⇂', + RightDownVectorBar: '⥕', + RightFloor: '⌋', + RightTee: '⊢', + RightTeeArrow: '↦', + RightTeeVector: '⥛', + RightTriangle: '⊳', + RightTriangleBar: '⧐', + RightTriangleEqual: '⊵', + RightUpDownVector: '⥏', + RightUpTeeVector: '⥜', + RightUpVector: '↾', + RightUpVectorBar: '⥔', + RightVector: '⇀', + RightVectorBar: '⥓', + Rightarrow: '⇒', + Ropf: 'ℝ', + RoundImplies: '⥰', + Rrightarrow: '⇛', + Rscr: 'ℛ', + Rsh: '↱', + RuleDelayed: '⧴', + SHCHcy: 'Щ', + SHcy: 'Ш', + SOFTcy: 'Ь', + Sacute: 'Ś', + Sc: '⪼', + Scaron: 'Š', + Scedil: 'Ş', + Scirc: 'Ŝ', + Scy: 'С', + Sfr: '𝔖', + ShortDownArrow: '↓', + ShortLeftArrow: '←', + ShortRightArrow: '→', + ShortUpArrow: '↑', + Sigma: 'Σ', + SmallCircle: '∘', + Sopf: '𝕊', + Sqrt: '√', + Square: '□', + SquareIntersection: '⊓', + SquareSubset: '⊏', + SquareSubsetEqual: '⊑', + SquareSuperset: '⊐', + SquareSupersetEqual: '⊒', + SquareUnion: '⊔', + Sscr: '𝒮', + Star: '⋆', + Sub: '⋐', + Subset: '⋐', + SubsetEqual: '⊆', + Succeeds: '≻', + SucceedsEqual: '⪰', + SucceedsSlantEqual: '≽', + SucceedsTilde: '≿', + SuchThat: '∋', + Sum: '∑', + Sup: '⋑', + Superset: '⊃', + SupersetEqual: '⊇', + Supset: '⋑', + THORN: 'Þ', + TRADE: '™', + TSHcy: 'Ћ', + TScy: 'Ц', + Tab: '\t', + Tau: 'Τ', + Tcaron: 'Ť', + Tcedil: 'Ţ', + Tcy: 'Т', + Tfr: '𝔗', + Therefore: '∴', + Theta: 'Θ', + ThickSpace: '  ', + ThinSpace: ' ', + Tilde: '∼', + TildeEqual: '≃', + TildeFullEqual: '≅', + TildeTilde: '≈', + Topf: '𝕋', + TripleDot: '⃛', + Tscr: '𝒯', + Tstrok: 'Ŧ', + Uacute: 'Ú', + Uarr: '↟', + Uarrocir: '⥉', + Ubrcy: 'Ў', + Ubreve: 'Ŭ', + Ucirc: 'Û', + Ucy: 'У', + Udblac: 'Ű', + Ufr: '𝔘', + Ugrave: 'Ù', + Umacr: 'Ū', + UnderBar: '_', + UnderBrace: '⏟', + UnderBracket: '⎵', + UnderParenthesis: '⏝', + Union: '⋃', + UnionPlus: '⊎', + Uogon: 'Ų', + Uopf: '𝕌', + UpArrow: '↑', + UpArrowBar: '⤒', + UpArrowDownArrow: '⇅', + UpDownArrow: '↕', + UpEquilibrium: '⥮', + UpTee: '⊥', + UpTeeArrow: '↥', + Uparrow: '⇑', + Updownarrow: '⇕', + UpperLeftArrow: '↖', + UpperRightArrow: '↗', + Upsi: 'ϒ', + Upsilon: 'Υ', + Uring: 'Ů', + Uscr: '𝒰', + Utilde: 'Ũ', + Uuml: 'Ü', + VDash: '⊫', + Vbar: '⫫', + Vcy: 'В', + Vdash: '⊩', + Vdashl: '⫦', + Vee: '⋁', + Verbar: '‖', + Vert: '‖', + VerticalBar: '∣', + VerticalLine: '|', + VerticalSeparator: '❘', + VerticalTilde: '≀', + VeryThinSpace: ' ', + Vfr: '𝔙', + Vopf: '𝕍', + Vscr: '𝒱', + Vvdash: '⊪', + Wcirc: 'Ŵ', + Wedge: '⋀', + Wfr: '𝔚', + Wopf: '𝕎', + Wscr: '𝒲', + Xfr: '𝔛', + Xi: 'Ξ', + Xopf: '𝕏', + Xscr: '𝒳', + YAcy: 'Я', + YIcy: 'Ї', + YUcy: 'Ю', + Yacute: 'Ý', + Ycirc: 'Ŷ', + Ycy: 'Ы', + Yfr: '𝔜', + Yopf: '𝕐', + Yscr: '𝒴', + Yuml: 'Ÿ', + ZHcy: 'Ж', + Zacute: 'Ź', + Zcaron: 'Ž', + Zcy: 'З', + Zdot: 'Ż', + ZeroWidthSpace: '​', + Zeta: 'Ζ', + Zfr: 'ℨ', + Zopf: 'ℤ', + Zscr: '𝒵', + aacute: 'á', + 
abreve: 'ă', + ac: '∾', + acE: '∾̳', + acd: '∿', + acirc: 'â', + acute: '´', + acy: 'а', + aelig: 'æ', + af: '⁡', + afr: '𝔞', + agrave: 'à', + alefsym: 'ℵ', + aleph: 'ℵ', + alpha: 'α', + amacr: 'ā', + amalg: '⨿', + amp: '&', + and: '∧', + andand: '⩕', + andd: '⩜', + andslope: '⩘', + andv: '⩚', + ang: '∠', + ange: '⦤', + angle: '∠', + angmsd: '∡', + angmsdaa: '⦨', + angmsdab: '⦩', + angmsdac: '⦪', + angmsdad: '⦫', + angmsdae: '⦬', + angmsdaf: '⦭', + angmsdag: '⦮', + angmsdah: '⦯', + angrt: '∟', + angrtvb: '⊾', + angrtvbd: '⦝', + angsph: '∢', + angst: 'Å', + angzarr: '⍼', + aogon: 'ą', + aopf: '𝕒', + ap: '≈', + apE: '⩰', + apacir: '⩯', + ape: '≊', + apid: '≋', + apos: "'", + approx: '≈', + approxeq: '≊', + aring: 'å', + ascr: '𝒶', + ast: '*', + asymp: '≈', + asympeq: '≍', + atilde: 'ã', + auml: 'ä', + awconint: '∳', + awint: '⨑', + bNot: '⫭', + backcong: '≌', + backepsilon: '϶', + backprime: '‵', + backsim: '∽', + backsimeq: '⋍', + barvee: '⊽', + barwed: '⌅', + barwedge: '⌅', + bbrk: '⎵', + bbrktbrk: '⎶', + bcong: '≌', + bcy: 'б', + bdquo: '„', + becaus: '∵', + because: '∵', + bemptyv: '⦰', + bepsi: '϶', + bernou: 'ℬ', + beta: 'β', + beth: 'ℶ', + between: '≬', + bfr: '𝔟', + bigcap: '⋂', + bigcirc: '◯', + bigcup: '⋃', + bigodot: '⨀', + bigoplus: '⨁', + bigotimes: '⨂', + bigsqcup: '⨆', + bigstar: '★', + bigtriangledown: '▽', + bigtriangleup: '△', + biguplus: '⨄', + bigvee: '⋁', + bigwedge: '⋀', + bkarow: '⤍', + blacklozenge: '⧫', + blacksquare: '▪', + blacktriangle: '▴', + blacktriangledown: '▾', + blacktriangleleft: '◂', + blacktriangleright: '▸', + blank: '␣', + blk12: '▒', + blk14: '░', + blk34: '▓', + block: '█', + bne: '=⃥', + bnequiv: '≡⃥', + bnot: '⌐', + bopf: '𝕓', + bot: '⊥', + bottom: '⊥', + bowtie: '⋈', + boxDL: '╗', + boxDR: '╔', + boxDl: '╖', + boxDr: '╓', + boxH: '═', + boxHD: '╦', + boxHU: '╩', + boxHd: '╤', + boxHu: '╧', + boxUL: '╝', + boxUR: '╚', + boxUl: '╜', + boxUr: '╙', + boxV: '║', + boxVH: '╬', + boxVL: '╣', + boxVR: '╠', + boxVh: '╫', + boxVl: '╢', + boxVr: '╟', + boxbox: '⧉', + boxdL: '╕', + boxdR: '╒', + boxdl: '┐', + boxdr: '┌', + boxh: '─', + boxhD: '╥', + boxhU: '╨', + boxhd: '┬', + boxhu: '┴', + boxminus: '⊟', + boxplus: '⊞', + boxtimes: '⊠', + boxuL: '╛', + boxuR: '╘', + boxul: '┘', + boxur: '└', + boxv: '│', + boxvH: '╪', + boxvL: '╡', + boxvR: '╞', + boxvh: '┼', + boxvl: '┤', + boxvr: '├', + bprime: '‵', + breve: '˘', + brvbar: '¦', + bscr: '𝒷', + bsemi: '⁏', + bsim: '∽', + bsime: '⋍', + bsol: '\\', + bsolb: '⧅', + bsolhsub: '⟈', + bull: '•', + bullet: '•', + bump: '≎', + bumpE: '⪮', + bumpe: '≏', + bumpeq: '≏', + cacute: 'ć', + cap: '∩', + capand: '⩄', + capbrcup: '⩉', + capcap: '⩋', + capcup: '⩇', + capdot: '⩀', + caps: '∩︀', + caret: '⁁', + caron: 'ˇ', + ccaps: '⩍', + ccaron: 'č', + ccedil: 'ç', + ccirc: 'ĉ', + ccups: '⩌', + ccupssm: '⩐', + cdot: 'ċ', + cedil: '¸', + cemptyv: '⦲', + cent: '¢', + centerdot: '·', + cfr: '𝔠', + chcy: 'ч', + check: '✓', + checkmark: '✓', + chi: 'χ', + cir: '○', + cirE: '⧃', + circ: 'ˆ', + circeq: '≗', + circlearrowleft: '↺', + circlearrowright: '↻', + circledR: '®', + circledS: 'Ⓢ', + circledast: '⊛', + circledcirc: '⊚', + circleddash: '⊝', + cire: '≗', + cirfnint: '⨐', + cirmid: '⫯', + cirscir: '⧂', + clubs: '♣', + clubsuit: '♣', + colon: ':', + colone: '≔', + coloneq: '≔', + comma: ',', + commat: '@', + comp: '∁', + compfn: '∘', + complement: '∁', + complexes: 'ℂ', + cong: '≅', + congdot: '⩭', + conint: '∮', + copf: '𝕔', + coprod: '∐', + copy: '©', + copysr: '℗', + crarr: '↵', + cross: '✗', + cscr: '𝒸', + csub: '⫏', + csube: 
'⫑', + csup: '⫐', + csupe: '⫒', + ctdot: '⋯', + cudarrl: '⤸', + cudarrr: '⤵', + cuepr: '⋞', + cuesc: '⋟', + cularr: '↶', + cularrp: '⤽', + cup: '∪', + cupbrcap: '⩈', + cupcap: '⩆', + cupcup: '⩊', + cupdot: '⊍', + cupor: '⩅', + cups: '∪︀', + curarr: '↷', + curarrm: '⤼', + curlyeqprec: '⋞', + curlyeqsucc: '⋟', + curlyvee: '⋎', + curlywedge: '⋏', + curren: '¤', + curvearrowleft: '↶', + curvearrowright: '↷', + cuvee: '⋎', + cuwed: '⋏', + cwconint: '∲', + cwint: '∱', + cylcty: '⌭', + dArr: '⇓', + dHar: '⥥', + dagger: '†', + daleth: 'ℸ', + darr: '↓', + dash: '‐', + dashv: '⊣', + dbkarow: '⤏', + dblac: '˝', + dcaron: 'ď', + dcy: 'д', + dd: 'ⅆ', + ddagger: '‡', + ddarr: '⇊', + ddotseq: '⩷', + deg: '°', + delta: 'δ', + demptyv: '⦱', + dfisht: '⥿', + dfr: '𝔡', + dharl: '⇃', + dharr: '⇂', + diam: '⋄', + diamond: '⋄', + diamondsuit: '♦', + diams: '♦', + die: '¨', + digamma: 'ϝ', + disin: '⋲', + div: '÷', + divide: '÷', + divideontimes: '⋇', + divonx: '⋇', + djcy: 'ђ', + dlcorn: '⌞', + dlcrop: '⌍', + dollar: '$', + dopf: '𝕕', + dot: '˙', + doteq: '≐', + doteqdot: '≑', + dotminus: '∸', + dotplus: '∔', + dotsquare: '⊡', + doublebarwedge: '⌆', + downarrow: '↓', + downdownarrows: '⇊', + downharpoonleft: '⇃', + downharpoonright: '⇂', + drbkarow: '⤐', + drcorn: '⌟', + drcrop: '⌌', + dscr: '𝒹', + dscy: 'ѕ', + dsol: '⧶', + dstrok: 'đ', + dtdot: '⋱', + dtri: '▿', + dtrif: '▾', + duarr: '⇵', + duhar: '⥯', + dwangle: '⦦', + dzcy: 'џ', + dzigrarr: '⟿', + eDDot: '⩷', + eDot: '≑', + eacute: 'é', + easter: '⩮', + ecaron: 'ě', + ecir: '≖', + ecirc: 'ê', + ecolon: '≕', + ecy: 'э', + edot: 'ė', + ee: 'ⅇ', + efDot: '≒', + efr: '𝔢', + eg: '⪚', + egrave: 'è', + egs: '⪖', + egsdot: '⪘', + el: '⪙', + elinters: '⏧', + ell: 'ℓ', + els: '⪕', + elsdot: '⪗', + emacr: 'ē', + empty: '∅', + emptyset: '∅', + emptyv: '∅', + emsp13: ' ', + emsp14: ' ', + emsp: ' ', + eng: 'ŋ', + ensp: ' ', + eogon: 'ę', + eopf: '𝕖', + epar: '⋕', + eparsl: '⧣', + eplus: '⩱', + epsi: 'ε', + epsilon: 'ε', + epsiv: 'ϵ', + eqcirc: '≖', + eqcolon: '≕', + eqsim: '≂', + eqslantgtr: '⪖', + eqslantless: '⪕', + equals: '=', + equest: '≟', + equiv: '≡', + equivDD: '⩸', + eqvparsl: '⧥', + erDot: '≓', + erarr: '⥱', + escr: 'ℯ', + esdot: '≐', + esim: '≂', + eta: 'η', + eth: 'ð', + euml: 'ë', + euro: '€', + excl: '!', + exist: '∃', + expectation: 'ℰ', + exponentiale: 'ⅇ', + fallingdotseq: '≒', + fcy: 'ф', + female: '♀', + ffilig: 'ffi', + fflig: 'ff', + ffllig: 'ffl', + ffr: '𝔣', + filig: 'fi', + fjlig: 'fj', + flat: '♭', + fllig: 'fl', + fltns: '▱', + fnof: 'ƒ', + fopf: '𝕗', + forall: '∀', + fork: '⋔', + forkv: '⫙', + fpartint: '⨍', + frac12: '½', + frac13: '⅓', + frac14: '¼', + frac15: '⅕', + frac16: '⅙', + frac18: '⅛', + frac23: '⅔', + frac25: '⅖', + frac34: '¾', + frac35: '⅗', + frac38: '⅜', + frac45: '⅘', + frac56: '⅚', + frac58: '⅝', + frac78: '⅞', + frasl: '⁄', + frown: '⌢', + fscr: '𝒻', + gE: '≧', + gEl: '⪌', + gacute: 'ǵ', + gamma: 'γ', + gammad: 'ϝ', + gap: '⪆', + gbreve: 'ğ', + gcirc: 'ĝ', + gcy: 'г', + gdot: 'ġ', + ge: '≥', + gel: '⋛', + geq: '≥', + geqq: '≧', + geqslant: '⩾', + ges: '⩾', + gescc: '⪩', + gesdot: '⪀', + gesdoto: '⪂', + gesdotol: '⪄', + gesl: '⋛︀', + gesles: '⪔', + gfr: '𝔤', + gg: '≫', + ggg: '⋙', + gimel: 'ℷ', + gjcy: 'ѓ', + gl: '≷', + glE: '⪒', + gla: '⪥', + glj: '⪤', + gnE: '≩', + gnap: '⪊', + gnapprox: '⪊', + gne: '⪈', + gneq: '⪈', + gneqq: '≩', + gnsim: '⋧', + gopf: '𝕘', + grave: '`', + gscr: 'ℊ', + gsim: '≳', + gsime: '⪎', + gsiml: '⪐', + gt: '>', + gtcc: '⪧', + gtcir: '⩺', + gtdot: '⋗', + gtlPar: '⦕', + gtquest: '⩼', + gtrapprox: '⪆', 
+ gtrarr: '⥸', + gtrdot: '⋗', + gtreqless: '⋛', + gtreqqless: '⪌', + gtrless: '≷', + gtrsim: '≳', + gvertneqq: '≩︀', + gvnE: '≩︀', + hArr: '⇔', + hairsp: ' ', + half: '½', + hamilt: 'ℋ', + hardcy: 'ъ', + harr: '↔', + harrcir: '⥈', + harrw: '↭', + hbar: 'ℏ', + hcirc: 'ĥ', + hearts: '♥', + heartsuit: '♥', + hellip: '…', + hercon: '⊹', + hfr: '𝔥', + hksearow: '⤥', + hkswarow: '⤦', + hoarr: '⇿', + homtht: '∻', + hookleftarrow: '↩', + hookrightarrow: '↪', + hopf: '𝕙', + horbar: '―', + hscr: '𝒽', + hslash: 'ℏ', + hstrok: 'ħ', + hybull: '⁃', + hyphen: '‐', + iacute: 'í', + ic: '⁣', + icirc: 'î', + icy: 'и', + iecy: 'е', + iexcl: '¡', + iff: '⇔', + ifr: '𝔦', + igrave: 'ì', + ii: 'ⅈ', + iiiint: '⨌', + iiint: '∭', + iinfin: '⧜', + iiota: '℩', + ijlig: 'ij', + imacr: 'ī', + image: 'ℑ', + imagline: 'ℐ', + imagpart: 'ℑ', + imath: 'ı', + imof: '⊷', + imped: 'Ƶ', + in: '∈', + incare: '℅', + infin: '∞', + infintie: '⧝', + inodot: 'ı', + int: '∫', + intcal: '⊺', + integers: 'ℤ', + intercal: '⊺', + intlarhk: '⨗', + intprod: '⨼', + iocy: 'ё', + iogon: 'į', + iopf: '𝕚', + iota: 'ι', + iprod: '⨼', + iquest: '¿', + iscr: '𝒾', + isin: '∈', + isinE: '⋹', + isindot: '⋵', + isins: '⋴', + isinsv: '⋳', + isinv: '∈', + it: '⁢', + itilde: 'ĩ', + iukcy: 'і', + iuml: 'ï', + jcirc: 'ĵ', + jcy: 'й', + jfr: '𝔧', + jmath: 'ȷ', + jopf: '𝕛', + jscr: '𝒿', + jsercy: 'ј', + jukcy: 'є', + kappa: 'κ', + kappav: 'ϰ', + kcedil: 'ķ', + kcy: 'к', + kfr: '𝔨', + kgreen: 'ĸ', + khcy: 'х', + kjcy: 'ќ', + kopf: '𝕜', + kscr: '𝓀', + lAarr: '⇚', + lArr: '⇐', + lAtail: '⤛', + lBarr: '⤎', + lE: '≦', + lEg: '⪋', + lHar: '⥢', + lacute: 'ĺ', + laemptyv: '⦴', + lagran: 'ℒ', + lambda: 'λ', + lang: '⟨', + langd: '⦑', + langle: '⟨', + lap: '⪅', + laquo: '«', + larr: '←', + larrb: '⇤', + larrbfs: '⤟', + larrfs: '⤝', + larrhk: '↩', + larrlp: '↫', + larrpl: '⤹', + larrsim: '⥳', + larrtl: '↢', + lat: '⪫', + latail: '⤙', + late: '⪭', + lates: '⪭︀', + lbarr: '⤌', + lbbrk: '❲', + lbrace: '{', + lbrack: '[', + lbrke: '⦋', + lbrksld: '⦏', + lbrkslu: '⦍', + lcaron: 'ľ', + lcedil: 'ļ', + lceil: '⌈', + lcub: '{', + lcy: 'л', + ldca: '⤶', + ldquo: '“', + ldquor: '„', + ldrdhar: '⥧', + ldrushar: '⥋', + ldsh: '↲', + le: '≤', + leftarrow: '←', + leftarrowtail: '↢', + leftharpoondown: '↽', + leftharpoonup: '↼', + leftleftarrows: '⇇', + leftrightarrow: '↔', + leftrightarrows: '⇆', + leftrightharpoons: '⇋', + leftrightsquigarrow: '↭', + leftthreetimes: '⋋', + leg: '⋚', + leq: '≤', + leqq: '≦', + leqslant: '⩽', + les: '⩽', + lescc: '⪨', + lesdot: '⩿', + lesdoto: '⪁', + lesdotor: '⪃', + lesg: '⋚︀', + lesges: '⪓', + lessapprox: '⪅', + lessdot: '⋖', + lesseqgtr: '⋚', + lesseqqgtr: '⪋', + lessgtr: '≶', + lesssim: '≲', + lfisht: '⥼', + lfloor: '⌊', + lfr: '𝔩', + lg: '≶', + lgE: '⪑', + lhard: '↽', + lharu: '↼', + lharul: '⥪', + lhblk: '▄', + ljcy: 'љ', + ll: '≪', + llarr: '⇇', + llcorner: '⌞', + llhard: '⥫', + lltri: '◺', + lmidot: 'ŀ', + lmoust: '⎰', + lmoustache: '⎰', + lnE: '≨', + lnap: '⪉', + lnapprox: '⪉', + lne: '⪇', + lneq: '⪇', + lneqq: '≨', + lnsim: '⋦', + loang: '⟬', + loarr: '⇽', + lobrk: '⟦', + longleftarrow: '⟵', + longleftrightarrow: '⟷', + longmapsto: '⟼', + longrightarrow: '⟶', + looparrowleft: '↫', + looparrowright: '↬', + lopar: '⦅', + lopf: '𝕝', + loplus: '⨭', + lotimes: '⨴', + lowast: '∗', + lowbar: '_', + loz: '◊', + lozenge: '◊', + lozf: '⧫', + lpar: '(', + lparlt: '⦓', + lrarr: '⇆', + lrcorner: '⌟', + lrhar: '⇋', + lrhard: '⥭', + lrm: '‎', + lrtri: '⊿', + lsaquo: '‹', + lscr: '𝓁', + lsh: '↰', + lsim: '≲', + lsime: '⪍', + lsimg: '⪏', + lsqb: '[', + lsquo: 
'‘', + lsquor: '‚', + lstrok: 'ł', + lt: '<', + ltcc: '⪦', + ltcir: '⩹', + ltdot: '⋖', + lthree: '⋋', + ltimes: '⋉', + ltlarr: '⥶', + ltquest: '⩻', + ltrPar: '⦖', + ltri: '◃', + ltrie: '⊴', + ltrif: '◂', + lurdshar: '⥊', + luruhar: '⥦', + lvertneqq: '≨︀', + lvnE: '≨︀', + mDDot: '∺', + macr: '¯', + male: '♂', + malt: '✠', + maltese: '✠', + map: '↦', + mapsto: '↦', + mapstodown: '↧', + mapstoleft: '↤', + mapstoup: '↥', + marker: '▮', + mcomma: '⨩', + mcy: 'м', + mdash: '—', + measuredangle: '∡', + mfr: '𝔪', + mho: '℧', + micro: 'µ', + mid: '∣', + midast: '*', + midcir: '⫰', + middot: '·', + minus: '−', + minusb: '⊟', + minusd: '∸', + minusdu: '⨪', + mlcp: '⫛', + mldr: '…', + mnplus: '∓', + models: '⊧', + mopf: '𝕞', + mp: '∓', + mscr: '𝓂', + mstpos: '∾', + mu: 'μ', + multimap: '⊸', + mumap: '⊸', + nGg: '⋙̸', + nGt: '≫⃒', + nGtv: '≫̸', + nLeftarrow: '⇍', + nLeftrightarrow: '⇎', + nLl: '⋘̸', + nLt: '≪⃒', + nLtv: '≪̸', + nRightarrow: '⇏', + nVDash: '⊯', + nVdash: '⊮', + nabla: '∇', + nacute: 'ń', + nang: '∠⃒', + nap: '≉', + napE: '⩰̸', + napid: '≋̸', + napos: 'ʼn', + napprox: '≉', + natur: '♮', + natural: '♮', + naturals: 'ℕ', + nbsp: ' ', + nbump: '≎̸', + nbumpe: '≏̸', + ncap: '⩃', + ncaron: 'ň', + ncedil: 'ņ', + ncong: '≇', + ncongdot: '⩭̸', + ncup: '⩂', + ncy: 'н', + ndash: '–', + ne: '≠', + neArr: '⇗', + nearhk: '⤤', + nearr: '↗', + nearrow: '↗', + nedot: '≐̸', + nequiv: '≢', + nesear: '⤨', + nesim: '≂̸', + nexist: '∄', + nexists: '∄', + nfr: '𝔫', + ngE: '≧̸', + nge: '≱', + ngeq: '≱', + ngeqq: '≧̸', + ngeqslant: '⩾̸', + nges: '⩾̸', + ngsim: '≵', + ngt: '≯', + ngtr: '≯', + nhArr: '⇎', + nharr: '↮', + nhpar: '⫲', + ni: '∋', + nis: '⋼', + nisd: '⋺', + niv: '∋', + njcy: 'њ', + nlArr: '⇍', + nlE: '≦̸', + nlarr: '↚', + nldr: '‥', + nle: '≰', + nleftarrow: '↚', + nleftrightarrow: '↮', + nleq: '≰', + nleqq: '≦̸', + nleqslant: '⩽̸', + nles: '⩽̸', + nless: '≮', + nlsim: '≴', + nlt: '≮', + nltri: '⋪', + nltrie: '⋬', + nmid: '∤', + nopf: '𝕟', + not: '¬', + notin: '∉', + notinE: '⋹̸', + notindot: '⋵̸', + notinva: '∉', + notinvb: '⋷', + notinvc: '⋶', + notni: '∌', + notniva: '∌', + notnivb: '⋾', + notnivc: '⋽', + npar: '∦', + nparallel: '∦', + nparsl: '⫽⃥', + npart: '∂̸', + npolint: '⨔', + npr: '⊀', + nprcue: '⋠', + npre: '⪯̸', + nprec: '⊀', + npreceq: '⪯̸', + nrArr: '⇏', + nrarr: '↛', + nrarrc: '⤳̸', + nrarrw: '↝̸', + nrightarrow: '↛', + nrtri: '⋫', + nrtrie: '⋭', + nsc: '⊁', + nsccue: '⋡', + nsce: '⪰̸', + nscr: '𝓃', + nshortmid: '∤', + nshortparallel: '∦', + nsim: '≁', + nsime: '≄', + nsimeq: '≄', + nsmid: '∤', + nspar: '∦', + nsqsube: '⋢', + nsqsupe: '⋣', + nsub: '⊄', + nsubE: '⫅̸', + nsube: '⊈', + nsubset: '⊂⃒', + nsubseteq: '⊈', + nsubseteqq: '⫅̸', + nsucc: '⊁', + nsucceq: '⪰̸', + nsup: '⊅', + nsupE: '⫆̸', + nsupe: '⊉', + nsupset: '⊃⃒', + nsupseteq: '⊉', + nsupseteqq: '⫆̸', + ntgl: '≹', + ntilde: 'ñ', + ntlg: '≸', + ntriangleleft: '⋪', + ntrianglelefteq: '⋬', + ntriangleright: '⋫', + ntrianglerighteq: '⋭', + nu: 'ν', + num: '#', + numero: '№', + numsp: ' ', + nvDash: '⊭', + nvHarr: '⤄', + nvap: '≍⃒', + nvdash: '⊬', + nvge: '≥⃒', + nvgt: '>⃒', + nvinfin: '⧞', + nvlArr: '⤂', + nvle: '≤⃒', + nvlt: '<⃒', + nvltrie: '⊴⃒', + nvrArr: '⤃', + nvrtrie: '⊵⃒', + nvsim: '∼⃒', + nwArr: '⇖', + nwarhk: '⤣', + nwarr: '↖', + nwarrow: '↖', + nwnear: '⤧', + oS: 'Ⓢ', + oacute: 'ó', + oast: '⊛', + ocir: '⊚', + ocirc: 'ô', + ocy: 'о', + odash: '⊝', + odblac: 'ő', + odiv: '⨸', + odot: '⊙', + odsold: '⦼', + oelig: 'œ', + ofcir: '⦿', + ofr: '𝔬', + ogon: '˛', + ograve: 'ò', + ogt: '⧁', + ohbar: '⦵', + ohm: 'Ω', + oint: '∮', + 
olarr: '↺', + olcir: '⦾', + olcross: '⦻', + oline: '‾', + olt: '⧀', + omacr: 'ō', + omega: 'ω', + omicron: 'ο', + omid: '⦶', + ominus: '⊖', + oopf: '𝕠', + opar: '⦷', + operp: '⦹', + oplus: '⊕', + or: '∨', + orarr: '↻', + ord: '⩝', + order: 'ℴ', + orderof: 'ℴ', + ordf: 'ª', + ordm: 'º', + origof: '⊶', + oror: '⩖', + orslope: '⩗', + orv: '⩛', + oscr: 'ℴ', + oslash: 'ø', + osol: '⊘', + otilde: 'õ', + otimes: '⊗', + otimesas: '⨶', + ouml: 'ö', + ovbar: '⌽', + par: '∥', + para: '¶', + parallel: '∥', + parsim: '⫳', + parsl: '⫽', + part: '∂', + pcy: 'п', + percnt: '%', + period: '.', + permil: '‰', + perp: '⊥', + pertenk: '‱', + pfr: '𝔭', + phi: 'φ', + phiv: 'ϕ', + phmmat: 'ℳ', + phone: '☎', + pi: 'π', + pitchfork: '⋔', + piv: 'ϖ', + planck: 'ℏ', + planckh: 'ℎ', + plankv: 'ℏ', + plus: '+', + plusacir: '⨣', + plusb: '⊞', + pluscir: '⨢', + plusdo: '∔', + plusdu: '⨥', + pluse: '⩲', + plusmn: '±', + plussim: '⨦', + plustwo: '⨧', + pm: '±', + pointint: '⨕', + popf: '𝕡', + pound: '£', + pr: '≺', + prE: '⪳', + prap: '⪷', + prcue: '≼', + pre: '⪯', + prec: '≺', + precapprox: '⪷', + preccurlyeq: '≼', + preceq: '⪯', + precnapprox: '⪹', + precneqq: '⪵', + precnsim: '⋨', + precsim: '≾', + prime: '′', + primes: 'ℙ', + prnE: '⪵', + prnap: '⪹', + prnsim: '⋨', + prod: '∏', + profalar: '⌮', + profline: '⌒', + profsurf: '⌓', + prop: '∝', + propto: '∝', + prsim: '≾', + prurel: '⊰', + pscr: '𝓅', + psi: 'ψ', + puncsp: ' ', + qfr: '𝔮', + qint: '⨌', + qopf: '𝕢', + qprime: '⁗', + qscr: '𝓆', + quaternions: 'ℍ', + quatint: '⨖', + quest: '?', + questeq: '≟', + quot: '"', + rAarr: '⇛', + rArr: '⇒', + rAtail: '⤜', + rBarr: '⤏', + rHar: '⥤', + race: '∽̱', + racute: 'ŕ', + radic: '√', + raemptyv: '⦳', + rang: '⟩', + rangd: '⦒', + range: '⦥', + rangle: '⟩', + raquo: '»', + rarr: '→', + rarrap: '⥵', + rarrb: '⇥', + rarrbfs: '⤠', + rarrc: '⤳', + rarrfs: '⤞', + rarrhk: '↪', + rarrlp: '↬', + rarrpl: '⥅', + rarrsim: '⥴', + rarrtl: '↣', + rarrw: '↝', + ratail: '⤚', + ratio: '∶', + rationals: 'ℚ', + rbarr: '⤍', + rbbrk: '❳', + rbrace: '}', + rbrack: ']', + rbrke: '⦌', + rbrksld: '⦎', + rbrkslu: '⦐', + rcaron: 'ř', + rcedil: 'ŗ', + rceil: '⌉', + rcub: '}', + rcy: 'р', + rdca: '⤷', + rdldhar: '⥩', + rdquo: '”', + rdquor: '”', + rdsh: '↳', + real: 'ℜ', + realine: 'ℛ', + realpart: 'ℜ', + reals: 'ℝ', + rect: '▭', + reg: '®', + rfisht: '⥽', + rfloor: '⌋', + rfr: '𝔯', + rhard: '⇁', + rharu: '⇀', + rharul: '⥬', + rho: 'ρ', + rhov: 'ϱ', + rightarrow: '→', + rightarrowtail: '↣', + rightharpoondown: '⇁', + rightharpoonup: '⇀', + rightleftarrows: '⇄', + rightleftharpoons: '⇌', + rightrightarrows: '⇉', + rightsquigarrow: '↝', + rightthreetimes: '⋌', + ring: '˚', + risingdotseq: '≓', + rlarr: '⇄', + rlhar: '⇌', + rlm: '‏', + rmoust: '⎱', + rmoustache: '⎱', + rnmid: '⫮', + roang: '⟭', + roarr: '⇾', + robrk: '⟧', + ropar: '⦆', + ropf: '𝕣', + roplus: '⨮', + rotimes: '⨵', + rpar: ')', + rpargt: '⦔', + rppolint: '⨒', + rrarr: '⇉', + rsaquo: '›', + rscr: '𝓇', + rsh: '↱', + rsqb: ']', + rsquo: '’', + rsquor: '’', + rthree: '⋌', + rtimes: '⋊', + rtri: '▹', + rtrie: '⊵', + rtrif: '▸', + rtriltri: '⧎', + ruluhar: '⥨', + rx: '℞', + sacute: 'ś', + sbquo: '‚', + sc: '≻', + scE: '⪴', + scap: '⪸', + scaron: 'š', + sccue: '≽', + sce: '⪰', + scedil: 'ş', + scirc: 'ŝ', + scnE: '⪶', + scnap: '⪺', + scnsim: '⋩', + scpolint: '⨓', + scsim: '≿', + scy: 'с', + sdot: '⋅', + sdotb: '⊡', + sdote: '⩦', + seArr: '⇘', + searhk: '⤥', + searr: '↘', + searrow: '↘', + sect: '§', + semi: ';', + seswar: '⤩', + setminus: '∖', + setmn: '∖', + sext: '✶', + sfr: '𝔰', + sfrown: '⌢', + 
sharp: '♯', + shchcy: 'щ', + shcy: 'ш', + shortmid: '∣', + shortparallel: '∥', + shy: '­', + sigma: 'σ', + sigmaf: 'ς', + sigmav: 'ς', + sim: '∼', + simdot: '⩪', + sime: '≃', + simeq: '≃', + simg: '⪞', + simgE: '⪠', + siml: '⪝', + simlE: '⪟', + simne: '≆', + simplus: '⨤', + simrarr: '⥲', + slarr: '←', + smallsetminus: '∖', + smashp: '⨳', + smeparsl: '⧤', + smid: '∣', + smile: '⌣', + smt: '⪪', + smte: '⪬', + smtes: '⪬︀', + softcy: 'ь', + sol: '/', + solb: '⧄', + solbar: '⌿', + sopf: '𝕤', + spades: '♠', + spadesuit: '♠', + spar: '∥', + sqcap: '⊓', + sqcaps: '⊓︀', + sqcup: '⊔', + sqcups: '⊔︀', + sqsub: '⊏', + sqsube: '⊑', + sqsubset: '⊏', + sqsubseteq: '⊑', + sqsup: '⊐', + sqsupe: '⊒', + sqsupset: '⊐', + sqsupseteq: '⊒', + squ: '□', + square: '□', + squarf: '▪', + squf: '▪', + srarr: '→', + sscr: '𝓈', + ssetmn: '∖', + ssmile: '⌣', + sstarf: '⋆', + star: '☆', + starf: '★', + straightepsilon: 'ϵ', + straightphi: 'ϕ', + strns: '¯', + sub: '⊂', + subE: '⫅', + subdot: '⪽', + sube: '⊆', + subedot: '⫃', + submult: '⫁', + subnE: '⫋', + subne: '⊊', + subplus: '⪿', + subrarr: '⥹', + subset: '⊂', + subseteq: '⊆', + subseteqq: '⫅', + subsetneq: '⊊', + subsetneqq: '⫋', + subsim: '⫇', + subsub: '⫕', + subsup: '⫓', + succ: '≻', + succapprox: '⪸', + succcurlyeq: '≽', + succeq: '⪰', + succnapprox: '⪺', + succneqq: '⪶', + succnsim: '⋩', + succsim: '≿', + sum: '∑', + sung: '♪', + sup1: '¹', + sup2: '²', + sup3: '³', + sup: '⊃', + supE: '⫆', + supdot: '⪾', + supdsub: '⫘', + supe: '⊇', + supedot: '⫄', + suphsol: '⟉', + suphsub: '⫗', + suplarr: '⥻', + supmult: '⫂', + supnE: '⫌', + supne: '⊋', + supplus: '⫀', + supset: '⊃', + supseteq: '⊇', + supseteqq: '⫆', + supsetneq: '⊋', + supsetneqq: '⫌', + supsim: '⫈', + supsub: '⫔', + supsup: '⫖', + swArr: '⇙', + swarhk: '⤦', + swarr: '↙', + swarrow: '↙', + swnwar: '⤪', + szlig: 'ß', + target: '⌖', + tau: 'τ', + tbrk: '⎴', + tcaron: 'ť', + tcedil: 'ţ', + tcy: 'т', + tdot: '⃛', + telrec: '⌕', + tfr: '𝔱', + there4: '∴', + therefore: '∴', + theta: 'θ', + thetasym: 'ϑ', + thetav: 'ϑ', + thickapprox: '≈', + thicksim: '∼', + thinsp: ' ', + thkap: '≈', + thksim: '∼', + thorn: 'þ', + tilde: '˜', + times: '×', + timesb: '⊠', + timesbar: '⨱', + timesd: '⨰', + tint: '∭', + toea: '⤨', + top: '⊤', + topbot: '⌶', + topcir: '⫱', + topf: '𝕥', + topfork: '⫚', + tosa: '⤩', + tprime: '‴', + trade: '™', + triangle: '▵', + triangledown: '▿', + triangleleft: '◃', + trianglelefteq: '⊴', + triangleq: '≜', + triangleright: '▹', + trianglerighteq: '⊵', + tridot: '◬', + trie: '≜', + triminus: '⨺', + triplus: '⨹', + trisb: '⧍', + tritime: '⨻', + trpezium: '⏢', + tscr: '𝓉', + tscy: 'ц', + tshcy: 'ћ', + tstrok: 'ŧ', + twixt: '≬', + twoheadleftarrow: '↞', + twoheadrightarrow: '↠', + uArr: '⇑', + uHar: '⥣', + uacute: 'ú', + uarr: '↑', + ubrcy: 'ў', + ubreve: 'ŭ', + ucirc: 'û', + ucy: 'у', + udarr: '⇅', + udblac: 'ű', + udhar: '⥮', + ufisht: '⥾', + ufr: '𝔲', + ugrave: 'ù', + uharl: '↿', + uharr: '↾', + uhblk: '▀', + ulcorn: '⌜', + ulcorner: '⌜', + ulcrop: '⌏', + ultri: '◸', + umacr: 'ū', + uml: '¨', + uogon: 'ų', + uopf: '𝕦', + uparrow: '↑', + updownarrow: '↕', + upharpoonleft: '↿', + upharpoonright: '↾', + uplus: '⊎', + upsi: 'υ', + upsih: 'ϒ', + upsilon: 'υ', + upuparrows: '⇈', + urcorn: '⌝', + urcorner: '⌝', + urcrop: '⌎', + uring: 'ů', + urtri: '◹', + uscr: '𝓊', + utdot: '⋰', + utilde: 'ũ', + utri: '▵', + utrif: '▴', + uuarr: '⇈', + uuml: 'ü', + uwangle: '⦧', + vArr: '⇕', + vBar: '⫨', + vBarv: '⫩', + vDash: '⊨', + vangrt: '⦜', + varepsilon: 'ϵ', + varkappa: 'ϰ', + varnothing: '∅', + varphi: 'ϕ', + 
varpi: 'ϖ', + varpropto: '∝', + varr: '↕', + varrho: 'ϱ', + varsigma: 'ς', + varsubsetneq: '⊊︀', + varsubsetneqq: '⫋︀', + varsupsetneq: '⊋︀', + varsupsetneqq: '⫌︀', + vartheta: 'ϑ', + vartriangleleft: '⊲', + vartriangleright: '⊳', + vcy: 'в', + vdash: '⊢', + vee: '∨', + veebar: '⊻', + veeeq: '≚', + vellip: '⋮', + verbar: '|', + vert: '|', + vfr: '𝔳', + vltri: '⊲', + vnsub: '⊂⃒', + vnsup: '⊃⃒', + vopf: '𝕧', + vprop: '∝', + vrtri: '⊳', + vscr: '𝓋', + vsubnE: '⫋︀', + vsubne: '⊊︀', + vsupnE: '⫌︀', + vsupne: '⊋︀', + vzigzag: '⦚', + wcirc: 'ŵ', + wedbar: '⩟', + wedge: '∧', + wedgeq: '≙', + weierp: '℘', + wfr: '𝔴', + wopf: '𝕨', + wp: '℘', + wr: '≀', + wreath: '≀', + wscr: '𝓌', + xcap: '⋂', + xcirc: '◯', + xcup: '⋃', + xdtri: '▽', + xfr: '𝔵', + xhArr: '⟺', + xharr: '⟷', + xi: 'ξ', + xlArr: '⟸', + xlarr: '⟵', + xmap: '⟼', + xnis: '⋻', + xodot: '⨀', + xopf: '𝕩', + xoplus: '⨁', + xotime: '⨂', + xrArr: '⟹', + xrarr: '⟶', + xscr: '𝓍', + xsqcup: '⨆', + xuplus: '⨄', + xutri: '△', + xvee: '⋁', + xwedge: '⋀', + yacute: 'ý', + yacy: 'я', + ycirc: 'ŷ', + ycy: 'ы', + yen: '¥', + yfr: '𝔶', + yicy: 'ї', + yopf: '𝕪', + yscr: '𝓎', + yucy: 'ю', + yuml: 'ÿ', + zacute: 'ź', + zcaron: 'ž', + zcy: 'з', + zdot: 'ż', + zeetrf: 'ℨ', + zeta: 'ζ', + zfr: '𝔷', + zhcy: 'ж', + zigrarr: '⇝', + zopf: '𝕫', + zscr: '𝓏', + zwj: '‍', + zwnj: '‌' +} diff --git a/_extensions/d2/node_modules/character-entities/license b/_extensions/d2/node_modules/character-entities/license new file mode 100644 index 00000000..32e7a3d9 --- /dev/null +++ b/_extensions/d2/node_modules/character-entities/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2015 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/_extensions/d2/node_modules/character-entities/package.json b/_extensions/d2/node_modules/character-entities/package.json new file mode 100644 index 00000000..30f6a539 --- /dev/null +++ b/_extensions/d2/node_modules/character-entities/package.json @@ -0,0 +1,78 @@ +{ + "name": "character-entities", + "version": "2.0.2", + "description": "Map of named character references", + "license": "MIT", + "keywords": [ + "html", + "entity", + "entities", + "character", + "reference", + "name", + "replacement" + ], + "repository": "wooorm/character-entities", + "bugs": "https://github.com/wooorm/character-entities/issues", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "index.d.ts", + "index.js" + ], + "devDependencies": { + "@types/tape": "^4.0.0", + "bail": "^2.0.0", + "c8": "^7.0.0", + "concat-stream": "^2.0.0", + "prettier": "^2.0.0", + "remark-cli": "^10.0.0", + "remark-preset-wooorm": "^9.0.0", + "rimraf": "^3.0.0", + "tape": "^5.0.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.50.0" + }, + "scripts": { + "generate": "node build", + "prepublishOnly": "npm run build && npm run format", + "build": "rimraf \"*.d.ts\" && tsc && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --branches 100 --functions 100 --lines 100 --statements 100 --reporter lcov npm run test-api", + "test": "npm run generate && npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/character-entities/readme.md b/_extensions/d2/node_modules/character-entities/readme.md new file mode 100644 index 00000000..16889ca1 --- /dev/null +++ b/_extensions/d2/node_modules/character-entities/readme.md @@ -0,0 +1,152 @@ +# character-entities + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] + +Map of named character references. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [characterEntities](#characterentities) +* [Types](#types) +* [Compatibility](#compatibility) +* [Security](#security) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This is a map of named character references in HTML (latest) to the characters +they represent. + +## When should I use this? + +Maybe when you’re writing an HTML parser or minifier, but otherwise probably +never! +Even then, it might be better to use [`parse-entities`][parse-entities] or +[`stringify-entities`][stringify-entities]. + +## Install + +This package is [ESM only][esm]. 
+In Node.js (version 12.20+, 14.14+, 16.0+, 18.0+), install with [npm][]: + +```sh +npm install character-entities +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {characterEntities} from 'https://esm.sh/character-entities@2' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html +<script type="module"> +  import {characterEntities} from 'https://esm.sh/character-entities@2' +</script> +``` + +## Use + +```js +import {characterEntities} from 'character-entities' + +console.log(characterEntities.AElig) // => 'Æ' +console.log(characterEntities.aelig) // => 'æ' +console.log(characterEntities.amp) // => '&' +``` + +## API + +This package exports the identifier `characterEntities`. +There is no default export. + +### characterEntities + +Mapping between (case-sensitive) character entity names and replacements. +See [`html.spec.whatwg.org`][html] for more info. + +## Types + +This package is fully typed with [TypeScript][]. + +## Compatibility + +This package is at least compatible with all maintained versions of Node.js. +As of now, that is Node.js 12.20+, 14.14+, 16.0+, and 18.0+. +It also works in Deno and modern browsers. + +## Security + +This package is safe. + +## Related + +* [`wooorm/parse-entities`](https://github.com/wooorm/parse-entities) + — parse (decode) character references +* [`wooorm/stringify-entities`](https://github.com/wooorm/stringify-entities) + — serialize (encode) character references +* [`wooorm/character-entities-html4`](https://github.com/wooorm/character-entities-html4) + — info on named character references in HTML 4 +* [`character-reference-invalid`](https://github.com/wooorm/character-reference-invalid) + — info on invalid numeric character references +* [`character-entities-legacy`](https://github.com/wooorm/character-entities-legacy) + — info on legacy named character references + +## Contribute + +Yes please! +See [How to Contribute to Open Source][contribute].
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/wooorm/character-entities/workflows/main/badge.svg + +[build]: https://github.com/wooorm/character-entities/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/wooorm/character-entities.svg + +[coverage]: https://codecov.io/github/wooorm/character-entities + +[downloads-badge]: https://img.shields.io/npm/dm/character-entities.svg + +[downloads]: https://www.npmjs.com/package/character-entities + +[size-badge]: https://img.shields.io/bundlephobia/minzip/character-entities.svg + +[size]: https://bundlephobia.com/result?p=character-entities + +[npm]: https://docs.npmjs.com/cli/install + +[esmsh]: https://esm.sh + +[license]: license + +[author]: https://wooorm.com + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[typescript]: https://www.typescriptlang.org + +[contribute]: https://opensource.guide/how-to-contribute/ + +[parse-entities]: https://github.com/wooorm/parse-entities + +[stringify-entities]: https://github.com/wooorm/stringify-entities + +[html]: https://html.spec.whatwg.org/multipage/syntax.html#named-character-references diff --git a/_extensions/d2/node_modules/concat-map/.travis.yml b/_extensions/d2/node_modules/concat-map/.travis.yml new file mode 100644 index 00000000..f1d0f13c --- /dev/null +++ b/_extensions/d2/node_modules/concat-map/.travis.yml @@ -0,0 +1,4 @@ +language: node_js +node_js: + - 0.4 + - 0.6 diff --git a/_extensions/d2/node_modules/concat-map/LICENSE b/_extensions/d2/node_modules/concat-map/LICENSE new file mode 100644 index 00000000..ee27ba4b --- /dev/null +++ b/_extensions/d2/node_modules/concat-map/LICENSE @@ -0,0 +1,18 @@ +This software is released under the MIT license: + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/_extensions/d2/node_modules/concat-map/README.markdown b/_extensions/d2/node_modules/concat-map/README.markdown new file mode 100644 index 00000000..408f70a1 --- /dev/null +++ b/_extensions/d2/node_modules/concat-map/README.markdown @@ -0,0 +1,62 @@ +concat-map +========== + +Concatenative mapdashery. + +[![browser support](http://ci.testling.com/substack/node-concat-map.png)](http://ci.testling.com/substack/node-concat-map) + +[![build status](https://secure.travis-ci.org/substack/node-concat-map.png)](http://travis-ci.org/substack/node-concat-map) + +example +======= + +``` js +var concatMap = require('concat-map'); +var xs = [ 1, 2, 3, 4, 5, 6 ]; +var ys = concatMap(xs, function (x) { + return x % 2 ? 
[ x - 0.1, x, x + 0.1 ] : []; +}); +console.dir(ys); +``` + +*** + +``` +[ 0.9, 1, 1.1, 2.9, 3, 3.1, 4.9, 5, 5.1 ] +``` + +methods +======= + +``` js +var concatMap = require('concat-map') +``` + +concatMap(xs, fn) +----------------- + +Return an array of concatenated elements by calling `fn(x, i)` for each element +`x` and each index `i` in the array `xs`. + +When `fn(x, i)` returns an array, its result will be concatenated with the +result array. If `fn(x, i)` returns anything else, that value will be pushed +onto the end of the result array. + +install +======= + +With [npm](http://npmjs.org) do: + +``` +npm install concat-map +``` + +license +======= + +MIT + +notes +===== + +This module was written while sitting high above the ground in a tree. diff --git a/_extensions/d2/node_modules/concat-map/example/map.js b/_extensions/d2/node_modules/concat-map/example/map.js new file mode 100644 index 00000000..33656217 --- /dev/null +++ b/_extensions/d2/node_modules/concat-map/example/map.js @@ -0,0 +1,6 @@ +var concatMap = require('../'); +var xs = [ 1, 2, 3, 4, 5, 6 ]; +var ys = concatMap(xs, function (x) { + return x % 2 ? [ x - 0.1, x, x + 0.1 ] : []; +}); +console.dir(ys); diff --git a/_extensions/d2/node_modules/concat-map/index.js b/_extensions/d2/node_modules/concat-map/index.js new file mode 100644 index 00000000..b29a7812 --- /dev/null +++ b/_extensions/d2/node_modules/concat-map/index.js @@ -0,0 +1,13 @@ +module.exports = function (xs, fn) { + var res = []; + for (var i = 0; i < xs.length; i++) { + var x = fn(xs[i], i); + if (isArray(x)) res.push.apply(res, x); + else res.push(x); + } + return res; +}; + +var isArray = Array.isArray || function (xs) { + return Object.prototype.toString.call(xs) === '[object Array]'; +}; diff --git a/_extensions/d2/node_modules/concat-map/package.json b/_extensions/d2/node_modules/concat-map/package.json new file mode 100644 index 00000000..d3640e6b --- /dev/null +++ b/_extensions/d2/node_modules/concat-map/package.json @@ -0,0 +1,43 @@ +{ + "name" : "concat-map", + "description" : "concatenative mapdashery", + "version" : "0.0.1", + "repository" : { + "type" : "git", + "url" : "git://github.com/substack/node-concat-map.git" + }, + "main" : "index.js", + "keywords" : [ + "concat", + "concatMap", + "map", + "functional", + "higher-order" + ], + "directories" : { + "example" : "example", + "test" : "test" + }, + "scripts" : { + "test" : "tape test/*.js" + }, + "devDependencies" : { + "tape" : "~2.4.0" + }, + "license" : "MIT", + "author" : { + "name" : "James Halliday", + "email" : "mail@substack.net", + "url" : "http://substack.net" + }, + "testling" : { + "files" : "test/*.js", + "browsers" : { + "ie" : [ 6, 7, 8, 9 ], + "ff" : [ 3.5, 10, 15.0 ], + "chrome" : [ 10, 22 ], + "safari" : [ 5.1 ], + "opera" : [ 12 ] + } + } +} diff --git a/_extensions/d2/node_modules/concat-map/test/map.js b/_extensions/d2/node_modules/concat-map/test/map.js new file mode 100644 index 00000000..fdbd7022 --- /dev/null +++ b/_extensions/d2/node_modules/concat-map/test/map.js @@ -0,0 +1,39 @@ +var concatMap = require('../'); +var test = require('tape'); + +test('empty or not', function (t) { + var xs = [ 1, 2, 3, 4, 5, 6 ]; + var ixes = []; + var ys = concatMap(xs, function (x, ix) { + ixes.push(ix); + return x % 2 ? 
[ x - 0.1, x, x + 0.1 ] : []; + }); + t.same(ys, [ 0.9, 1, 1.1, 2.9, 3, 3.1, 4.9, 5, 5.1 ]); + t.same(ixes, [ 0, 1, 2, 3, 4, 5 ]); + t.end(); +}); + +test('always something', function (t) { + var xs = [ 'a', 'b', 'c', 'd' ]; + var ys = concatMap(xs, function (x) { + return x === 'b' ? [ 'B', 'B', 'B' ] : [ x ]; + }); + t.same(ys, [ 'a', 'B', 'B', 'B', 'c', 'd' ]); + t.end(); +}); + +test('scalars', function (t) { + var xs = [ 'a', 'b', 'c', 'd' ]; + var ys = concatMap(xs, function (x) { + return x === 'b' ? [ 'B', 'B', 'B' ] : x; + }); + t.same(ys, [ 'a', 'B', 'B', 'B', 'c', 'd' ]); + t.end(); +}); + +test('undefs', function (t) { + var xs = [ 'a', 'b', 'c', 'd' ]; + var ys = concatMap(xs, function () {}); + t.same(ys, [ undefined, undefined, undefined, undefined ]); + t.end(); +}); diff --git a/_extensions/d2/node_modules/d2-filter/LICENSE b/_extensions/d2/node_modules/d2-filter/LICENSE new file mode 100644 index 00000000..79c7d8cf --- /dev/null +++ b/_extensions/d2/node_modules/d2-filter/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Omar Zeghouani + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/_extensions/d2/node_modules/d2-filter/README.md b/_extensions/d2/node_modules/d2-filter/README.md new file mode 100644 index 00000000..31dee15d --- /dev/null +++ b/_extensions/d2/node_modules/d2-filter/README.md @@ -0,0 +1,73 @@ +# d2-filter + +`d2-filter` is a pandoc filter that adds [D2](https://d2lang.com) syntax +diagrams in markdown documents. + +## Example + +``` +~~~{.d2 pad=20} +x -> y +~~~ +``` +![Output](https://user-images.githubusercontent.com/59267627/230503069-51bb0a62-68ee-429a-84a8-b42342659268.png) + +## Usage + +```bash +# Installation +npm i d2-filter + +# Unix +pandoc -F d2-filter test.md -o test.pdf +# Windows +pandoc -F d2-filter.cmd test.md -o test.pdf +``` + +## Configuration + +Using attributes of the fenced code block, you can specify: + +- Theme + - Example: `{.d2 theme="Grape soda"}`. + - Default: `0` or `Neutral default` + - Values: Run `d2 themes` for possible options. +- Layout engine + - Example: `{.d2 layout=elk}`. + - Default: `dagre` + - Values: Run `d2 layout` for possible options. 
+- Image format + - Example: `{.d2 format=png}` + - Default: `svg` + - Values: `svg`, `png`, `pdf` +- Sketch + - Example: `{.d2 sketch=true}` + - Default: `false` +- Image padding + - Example: `{.d2 pad=0}` + - Default: `100` +- Folder + - Example: `{.d2 folder=img}` + - Default: no folder as image is encoded to data URI on `img` tag +- Filename + - Example: `{.d2 filename="test"}` + - Default: `diagram-N` + - `folder` attribute is required to save file locally +- Pandoc caption + - Example: `{.d2 caption="This is a test image"}` + - Default: empty string +- Pandoc image attributes + - Example: `{.d2 width=30 height=20px}` + - Default: empty array + - Values: See pandoc's [`link_attributes`](https://pandoc.org/MANUAL.html#extension-link_attributes) extension + - Note that `#id` and `.class` attributes are not supported, but any + key-value attributes should work + +## Credits + +- [mermaid-filter](https://github.com/raghur/mermaid-filter) +- [mathjax-pandoc-filter](https://github.com/lierdakil/mathjax-pandoc-filter) + +## License + +MIT diff --git a/_extensions/d2/node_modules/d2-filter/filter-shim.js b/_extensions/d2/node_modules/d2-filter/filter-shim.js new file mode 100644 index 00000000..fd3f832e --- /dev/null +++ b/_extensions/d2/node_modules/d2-filter/filter-shim.js @@ -0,0 +1,12 @@ +#!/usr/bin/env node + +var pandoc = require("pandoc-filter"); +var which = require("which") +var filter = require("./lib/filter") + +var resolvedOrNull = which.sync("d2", { nothrow: true }); +if (resolvedOrNull === null) { + console.error("d2 is not installed"); + return; +} +pandoc.stdio(filter.action); diff --git a/_extensions/d2/node_modules/d2-filter/lib/filter.js b/_extensions/d2/node_modules/d2-filter/lib/filter.js new file mode 100644 index 00000000..9b0477d3 --- /dev/null +++ b/_extensions/d2/node_modules/d2-filter/lib/filter.js @@ -0,0 +1,188 @@ +"use strict"; +var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) { + function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); } + return new (P || (P = Promise))(function (resolve, reject) { + function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } + function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } + function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } + step((generator = generator.apply(thisArg, _arguments || [])).next()); + }); +}; +var __generator = (this && this.__generator) || function (thisArg, body) { + var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g; + return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; + function verb(n) { return function (v) { return step([n, v]); }; } + function step(op) { + if (f) throw new TypeError("Generator is already executing."); + while (g && (g = 0, op[0] && (_ = 0)), _) try { + if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? 
y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; + if (y = 0, t) op = [op[0] & 2, t.value]; + switch (op[0]) { + case 0: case 1: t = op; break; + case 4: _.label++; return { value: op[1], done: false }; + case 5: _.label++; y = op[1]; op = [0]; continue; + case 7: op = _.ops.pop(); _.trys.pop(); continue; + default: + if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } + if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } + if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } + if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } + if (t[2]) _.ops.pop(); + _.trys.pop(); continue; + } + op = body.call(thisArg, _); + } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } + if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true }; + } +}; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.action = void 0; +var fs_1 = require("fs"); +var pandoc = require("pandoc-filter"); +var path_1 = require("path"); +var tmp_1 = require("tmp"); +var exec = require("child_process").execSync; +var counter = 0; +var folder = process.cwd(); +var D2Theme; +(function (D2Theme) { + D2Theme[D2Theme["NeutralDefault"] = 0] = "NeutralDefault"; + D2Theme[D2Theme["NeutralGrey"] = 1] = "NeutralGrey"; + D2Theme[D2Theme["FlagshipTerrastruct"] = 3] = "FlagshipTerrastruct"; + D2Theme[D2Theme["CoolClassics"] = 4] = "CoolClassics"; + D2Theme[D2Theme["MixedBerryBlue"] = 5] = "MixedBerryBlue"; + D2Theme[D2Theme["GrapeSoda"] = 6] = "GrapeSoda"; + D2Theme[D2Theme["Aubergine"] = 7] = "Aubergine"; + D2Theme[D2Theme["ColorblindClear"] = 8] = "ColorblindClear"; + D2Theme[D2Theme["VanillaNitroCola"] = 100] = "VanillaNitroCola"; + D2Theme[D2Theme["OrangeCreamsicle"] = 101] = "OrangeCreamsicle"; + D2Theme[D2Theme["ShirelyTemple"] = 102] = "ShirelyTemple"; + D2Theme[D2Theme["EarthTones"] = 103] = "EarthTones"; + D2Theme[D2Theme["EvergladeGreen"] = 104] = "EvergladeGreen"; + D2Theme[D2Theme["ButteredToast"] = 105] = "ButteredToast"; + D2Theme[D2Theme["DarkMauve"] = 200] = "DarkMauve"; + D2Theme[D2Theme["Terminal"] = 300] = "Terminal"; + D2Theme[D2Theme["TerminalGrayscale"] = 301] = "TerminalGrayscale"; + D2Theme[D2Theme["Origami"] = 302] = "Origami"; +})(D2Theme || (D2Theme = {})); +var D2Layout; +(function (D2Layout) { + D2Layout["dagre"] = "dagre"; + D2Layout["elk"] = "elk"; +})(D2Layout || (D2Layout = {})); +var D2Format; +(function (D2Format) { + D2Format["svg"] = "svg"; + D2Format["png"] = "png"; + D2Format["pdf"] = "pdf"; +})(D2Format || (D2Format = {})); +var action = function (elt, _format) { + var _a, _b, _c; + return __awaiter(this, void 0, void 0, function () { + var attrs, content, id, classes, options, imageAttrs, tmpFile, outDir, savePath, newPath, fullCmd, data, data, imageFolder, readStream, writeStream, fig; + return __generator(this, function (_d) { + if (elt.t != "CodeBlock") + return [2 /*return*/, undefined]; + attrs = elt.c[0]; + content = elt.c[1]; + id = attrs[0]; + classes = attrs[1]; + options = { + theme: D2Theme.NeutralDefault, + layout: D2Layout.dagre, + format: D2Format.svg, + sketch: false, + pad: 100, + }; + imageAttrs = []; + if (classes.indexOf("d2") < 0) + return [2 /*return*/, undefined]; + attrs[2].map(function (item) { + switch (item[0]) { + case "theme": + if (+item[1] in D2Theme) { + options.theme = +item[1]; + } + else { + var themeNamePascal = item[1] + .split(" ") + 
.map(function (word) { return word.charAt(0).toUpperCase() + word.substring(1); }) + .join(""); + if (themeNamePascal in D2Theme) { + options.theme = D2Theme[themeNamePascal]; + } + } + break; + case "sketch": + options.sketch = item[1] === "true"; + break; + case "layout": + if (item[1] in D2Layout) + options.layout = item[1]; + break; + case "format": + if (item[1] in D2Format) + options.format = item[1]; + break; + case "pad": + options.pad = +item[1]; + break; + case "folder": + case "filename": + case "caption": + options[item[0]] = item[1]; + break; + default: + imageAttrs.push(item); + break; + } + }); + counter++; + tmpFile = (0, tmp_1.fileSync)(); + (0, fs_1.writeFileSync)(tmpFile.name, content); + outDir = (_a = options.folder) !== null && _a !== void 0 ? _a : ""; + if (options.caption && !options.filename) { + options.filename = (_b = options.caption) === null || _b === void 0 ? void 0 : _b.replace(/(?:^|\s|["'([{])+\S/g, function (match) { return match.toUpperCase(); }).replace(/\s+/g, "").replace(/\//g, "-"); + } + if (!options.filename) { + options.filename = "diagram-".concat(counter); + } + savePath = tmpFile.name + "." + options.format; + newPath = (0, path_1.join)(outDir, "".concat(options.filename, ".").concat(options.format)); + fullCmd = "d2 --theme=".concat(options.theme, " --layout=").concat(options.layout, " --sketch=").concat(options.sketch, " --pad=").concat(options.pad, " ").concat(tmpFile.name, " ").concat(savePath); + exec(fullCmd); + if (!options.folder) { + if (options.format === "svg") { + data = (0, fs_1.readFileSync)(savePath, "utf8"); + newPath = + "data:image/svg+xml;base64," + Buffer.from(data).toString("base64"); + } + else if (options.format === "pdf") { + newPath = savePath; + } + else { + data = (0, fs_1.readFileSync)(savePath); + newPath = "data:image/png;base64," + Buffer.from(data).toString("base64"); + } + } + else { + imageFolder = (0, path_1.join)(folder, outDir); + if (!(0, fs_1.existsSync)(imageFolder)) { + (0, fs_1.mkdirSync)(imageFolder); + } + readStream = (0, fs_1.createReadStream)(savePath); + writeStream = (0, fs_1.createWriteStream)(newPath); + readStream.on("close", function () { + (0, fs_1.unlinkSync)(savePath); + }); + readStream.pipe(writeStream); + } + fig = options.caption ? "fig:" : ""; + return [2 /*return*/, pandoc.Para([ + pandoc.Image([id, [], imageAttrs], [pandoc.Str((_c = options.caption) !== null && _c !== void 0 ? 
_c : "")], [newPath, fig]), + ])]; + }); + }); +}; +exports.action = action; +//# sourceMappingURL=filter.js.map \ No newline at end of file diff --git a/_extensions/d2/node_modules/d2-filter/lib/filter.js.map b/_extensions/d2/node_modules/d2-filter/lib/filter.js.map new file mode 100644 index 00000000..d502df8e --- /dev/null +++ b/_extensions/d2/node_modules/d2-filter/lib/filter.js.map @@ -0,0 +1 @@ +{"version":3,"file":"filter.js","sourceRoot":"","sources":["../src/filter.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA,yBAQY;AACZ,sCAAwC;AACxC,6BAA4B;AAC5B,2BAA+B;AAC/B,IAAI,IAAI,GAAG,OAAO,CAAC,eAAe,CAAC,CAAC,QAAQ,CAAC;AAE7C,IAAI,OAAO,GAAG,CAAC,CAAC;AAChB,IAAM,MAAM,GAAG,OAAO,CAAC,GAAG,EAAE,CAAC;AAE7B,IAAK,OAmBJ;AAnBD,WAAK,OAAO;IACV,yDAAkB,CAAA;IAClB,mDAAe,CAAA;IACf,mEAAuB,CAAA;IACvB,qDAAgB,CAAA;IAChB,yDAAkB,CAAA;IAClB,+CAAa,CAAA;IACb,+CAAa,CAAA;IACb,2DAAmB,CAAA;IACnB,+DAAsB,CAAA;IACtB,+DAAsB,CAAA;IACtB,yDAAmB,CAAA;IACnB,mDAAgB,CAAA;IAChB,2DAAoB,CAAA;IACpB,yDAAmB,CAAA;IACnB,iDAAe,CAAA;IACf,+CAAc,CAAA;IACd,iEAAuB,CAAA;IACvB,6CAAa,CAAA;AACf,CAAC,EAnBI,OAAO,KAAP,OAAO,QAmBX;AAED,IAAK,QAGJ;AAHD,WAAK,QAAQ;IACX,2BAAe,CAAA;IACf,uBAAW,CAAA;AACb,CAAC,EAHI,QAAQ,KAAR,QAAQ,QAGZ;AAED,IAAK,QAIJ;AAJD,WAAK,QAAQ;IACX,uBAAW,CAAA;IACX,uBAAW,CAAA;IACX,uBAAW,CAAA;AACb,CAAC,EAJI,QAAQ,KAAR,QAAQ,QAIZ;AAaM,IAAM,MAAM,GAAmC,UACpD,GAAG,EACH,OAAO;;;;;YAEP,IAAI,GAAG,CAAC,CAAC,IAAI,WAAW;gBAAE,sBAAO,SAAS,EAAC;YACrC,KAAK,GAAG,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;YACjB,OAAO,GAAG,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC,CAAC;YACnB,EAAE,GAAG,KAAK,CAAC,CAAC,CAAC,CAAC;YACd,OAAO,GAAG,KAAK,CAAC,CAAC,CAAC,CAAC;YACnB,OAAO,GAAkB;gBAC7B,KAAK,EAAE,OAAO,CAAC,cAAc;gBAC7B,MAAM,EAAE,QAAQ,CAAC,KAAK;gBACtB,MAAM,EAAE,QAAQ,CAAC,GAAG;gBACpB,MAAM,EAAE,KAAK;gBACb,GAAG,EAAE,GAAG;aACT,CAAC;YACI,UAAU,GAAoB,EAAE,CAAC;YAEvC,IAAI,OAAO,CAAC,OAAO,CAAC,IAAI,CAAC,GAAG,CAAC;gBAAE,sBAAO,SAAS,EAAC;YAEhD,KAAK,CAAC,CAAC,CAAC,CAAC,GAAG,CAAC,UAAC,IAAI;gBAChB,QAAQ,IAAI,CAAC,CAAC,CAAC,EAAE;oBACf,KAAK,OAAO;wBACV,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,OAAO,EAAE;4BACvB,OAAO,CAAC,KAAK,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;yBAC1B;6BAAM;4BACL,IAAM,eAAe,GAAG,IAAI,CAAC,CAAC,CAAC;iCAC5B,KAAK,CAAC,GAAG,CAAC;iCACV,GAAG,CAAC,UAAC,IAAI,IAAK,OAAA,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC,WAAW,EAAE,GAAG,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC,EAAhD,CAAgD,CAAC;iCAC/D,IAAI,CAAC,EAAE,CAAC,CAAC;4BACZ,IAAI,eAAe,IAAI,OAAO,EAAE;gCAC9B,OAAO,CAAC,KAAK,GAAG,OAAO,CAAC,eAAuC,CAAC,CAAC;6BAClE;yBACF;wBACD,MAAM;oBACR,KAAK,QAAQ;wBACX,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC,CAAC,KAAK,MAAM,CAAC;wBACpC,MAAM;oBACR,KAAK,QAAQ;wBACX,IAAI,IAAI,CAAC,CAAC,CAAC,IAAI,QAAQ;4BAAE,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC,CAAa,CAAC;wBAC9D,MAAM;oBACR,KAAK,QAAQ;wBACX,IAAI,IAAI,CAAC,CAAC,CAAC,IAAI,QAAQ;4BAAE,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC,CAAa,CAAC;wBAC9D,MAAM;oBACR,KAAK,KAAK;wBACR,OAAO,CAAC,GAAG,GAAG,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;wBACvB,MAAM;oBACR,KAAK,QAAQ,CAAC;oBACd,KAAK,UAAU,CAAC;oBAChB,KAAK,SAAS;wBACZ,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC,CAAC,CAAC;wBAC3B,MAAM;oBACR;wBACE,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;wBACtB,MAAM;iBACT;YACH,CAAC,CAAC,CAAC;YAEH,OAAO,EAAE,CAAC;YAEJ,OAAO,GAAG,IAAA,cAAQ,GAAE,CAAC;YAC3B,IAAA,kBAAa,EAAC,OAAO,CAAC,IAAI,EAAE,OAAO,CAAC,CAAC;YAC/B,MAAM,GAAG,MAAA,OAAO,CAAC,MAAM,mCAAI,EAAE,CAAC;YAEpC,IAAI,OAAO,CAAC,OAAO,IAAI,CAAC,OAAO,CAAC,QAAQ,EAAE;gBACxC,OAAO,CAAC,QAAQ,GAAG,MAAA,OAAO,CAAC,OAAO,0CAC9B,OAAO,CAAC,sBAAsB,EAAE,UAAC,KAAK,IAAK,OAAA,KAAK,CAAC,WAAW,EAAE,EAAnB,CAAmB,EAC/D,OAAO,CAAC,MAAM,EAAE,EAAE,EAClB,OAAO,CAAC,KAAK,EAAE,GAAG,CAAC,CAAC;aACxB;YAED,IAAI,CAAC,OAAO,CAAC,QAAQ,EAAE;gBACrB,OAAO,CAAC,
QAAQ,GAAG,kBAAW,OAAO,CAAE,CAAC;aACzC;YAEK,QAAQ,GAAG,OAAO,CAAC,IAAI,GAAG,GAAG,GAAG,OAAO,CAAC,MAAM,CAAC;YACjD,OAAO,GAAG,IAAA,WAAI,EAAC,MAAM,EAAE,UAAG,OAAO,CAAC,QAAQ,cAAI,OAAO,CAAC,MAAM,CAAE,CAAC,CAAC;YAC9D,OAAO,GAAG,qBAAc,OAAO,CAAC,KAAK,uBAAa,OAAO,CAAC,MAAM,uBAAa,OAAO,CAAC,MAAM,oBAAU,OAAO,CAAC,GAAG,cAAI,OAAO,CAAC,IAAI,cAAI,QAAQ,CAAE,CAAC;YACrJ,IAAI,CAAC,OAAO,CAAC,CAAC;YAEd,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE;gBACnB,IAAI,OAAO,CAAC,MAAM,KAAK,KAAK,EAAE;oBACtB,IAAI,GAAG,IAAA,iBAAY,EAAC,QAAQ,EAAE,MAAM,CAAC,CAAC;oBAC5C,OAAO;wBACL,4BAA4B,GAAG,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;iBACvE;qBAAM,IAAI,OAAO,CAAC,MAAM,KAAK,KAAK,EAAE;oBACnC,OAAO,GAAG,QAAQ,CAAC;iBACpB;qBAAM;oBACC,IAAI,GAAG,IAAA,iBAAY,EAAC,QAAQ,CAAC,CAAC;oBACpC,OAAO,GAAG,wBAAwB,GAAG,MAAM,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;iBAC3E;aACF;iBAAM;gBACC,WAAW,GAAG,IAAA,WAAI,EAAC,MAAM,EAAE,MAAM,CAAC,CAAC;gBACzC,IAAI,CAAC,IAAA,eAAU,EAAC,WAAW,CAAC,EAAE;oBAC5B,IAAA,cAAS,EAAC,WAAW,CAAC,CAAC;iBACxB;gBACK,UAAU,GAAG,IAAA,qBAAgB,EAAC,QAAQ,CAAC,CAAC;gBACxC,WAAW,GAAG,IAAA,sBAAiB,EAAC,OAAO,CAAC,CAAC;gBAC/C,UAAU,CAAC,EAAE,CAAC,OAAO,EAAE;oBACrB,IAAA,eAAU,EAAC,QAAQ,CAAC,CAAC;gBACvB,CAAC,CAAC,CAAC;gBACH,UAAU,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;aAC9B;YAEK,GAAG,GAAG,OAAO,CAAC,OAAO,CAAC,CAAC,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,CAAC;YAC1C,sBAAO,MAAM,CAAC,IAAI,CAAC;oBACjB,MAAM,CAAC,KAAK,CACV,CAAC,EAAE,EAAE,EAAE,EAAE,UAAU,CAAC,EACpB,CAAC,MAAM,CAAC,GAAG,CAAC,MAAA,OAAO,CAAC,OAAO,mCAAI,EAAE,CAAC,CAAC,EACnC,CAAC,OAAO,EAAE,GAAG,CAAC,CACf;iBACF,CAAC,EAAC;;;CACJ,CAAC;AAhHW,QAAA,MAAM,UAgHjB"} \ No newline at end of file diff --git a/_extensions/d2/node_modules/d2-filter/package.json b/_extensions/d2/node_modules/d2-filter/package.json new file mode 100644 index 00000000..e26233b8 --- /dev/null +++ b/_extensions/d2/node_modules/d2-filter/package.json @@ -0,0 +1,46 @@ +{ + "name": "d2-filter", + "version": "1.4.0", + "description": "Pandoc filter for D2", + "main": "dist/filter", + "files": [ + "lib", + "filter-shim.js" + ], + "scripts": { + "prepare": "tsc --project .", + "test": "vitest", + "test:run": "vitest run", + "coverage": "vitest run --coverage" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/ram02z/d2-filter.git" + }, + "keywords": [ + "pandoc", + "D2" + ], + "bin": { + "d2-filter": "filter-shim.js" + }, + "author": "Omar Zeghouani", + "license": "MIT", + "bugs": { + "url": "https://github.com/ram02z/d2-filter/issues" + }, + "homepage": "https://github.com/ram02z/d2-filter#readme", + "dependencies": { + "pandoc-filter": "^2.1.0", + "tmp": "^0.2.1", + "which": "^3.0.0" + }, + "devDependencies": { + "@types/node": "^18.15.11", + "@types/tmp": "^0.2.3", + "@types/which": "^3.0.0", + "@vitest/coverage-c8": "^0.30.1", + "typescript": "^5.0.3", + "vitest": "^0.30.1" + } +} diff --git a/_extensions/d2/node_modules/debug/LICENSE b/_extensions/d2/node_modules/debug/LICENSE new file mode 100644 index 00000000..1a9820e2 --- /dev/null +++ b/_extensions/d2/node_modules/debug/LICENSE @@ -0,0 +1,20 @@ +(The MIT License) + +Copyright (c) 2014-2017 TJ Holowaychuk +Copyright (c) 2018-2021 Josh Junon + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software +and associated documentation files (the 'Software'), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The 
above copyright notice and this permission notice shall be included in all copies or substantial +portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT +LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/_extensions/d2/node_modules/debug/README.md b/_extensions/d2/node_modules/debug/README.md new file mode 100644 index 00000000..e9c3e047 --- /dev/null +++ b/_extensions/d2/node_modules/debug/README.md @@ -0,0 +1,481 @@ +# debug +[![Build Status](https://travis-ci.org/debug-js/debug.svg?branch=master)](https://travis-ci.org/debug-js/debug) [![Coverage Status](https://coveralls.io/repos/github/debug-js/debug/badge.svg?branch=master)](https://coveralls.io/github/debug-js/debug?branch=master) [![Slack](https://visionmedia-community-slackin.now.sh/badge.svg)](https://visionmedia-community-slackin.now.sh/) [![OpenCollective](https://opencollective.com/debug/backers/badge.svg)](#backers) +[![OpenCollective](https://opencollective.com/debug/sponsors/badge.svg)](#sponsors) + + + +A tiny JavaScript debugging utility modelled after Node.js core's debugging +technique. Works in Node.js and web browsers. + +## Installation + +```bash +$ npm install debug +``` + +## Usage + +`debug` exposes a function; simply pass this function the name of your module, and it will return a decorated version of `console.error` for you to pass debug statements to. This will allow you to toggle the debug output for different parts of your module as well as the module as a whole. + +Example [_app.js_](./examples/node/app.js): + +```js +var debug = require('debug')('http') + , http = require('http') + , name = 'My App'; + +// fake app + +debug('booting %o', name); + +http.createServer(function(req, res){ + debug(req.method + ' ' + req.url); + res.end('hello\n'); +}).listen(3000, function(){ + debug('listening'); +}); + +// fake worker of some kind + +require('./worker'); +``` + +Example [_worker.js_](./examples/node/worker.js): + +```js +var a = require('debug')('worker:a') + , b = require('debug')('worker:b'); + +function work() { + a('doing lots of uninteresting work'); + setTimeout(work, Math.random() * 1000); +} + +work(); + +function workb() { + b('doing some work'); + setTimeout(workb, Math.random() * 2000); +} + +workb(); +``` + +The `DEBUG` environment variable is then used to enable these based on space or +comma-delimited names. + +Here are some examples: + +screen shot 2017-08-08 at 12 53 04 pm +screen shot 2017-08-08 at 12 53 38 pm +screen shot 2017-08-08 at 12 53 25 pm + +#### Windows command prompt notes + +##### CMD + +On Windows the environment variable is set using the `set` command. + +```cmd +set DEBUG=*,-not_this +``` + +Example: + +```cmd +set DEBUG=* & node app.js +``` + +##### PowerShell (VS Code default) + +PowerShell uses different syntax to set environment variables. + +```cmd +$env:DEBUG = "*,-not_this" +``` + +Example: + +```cmd +$env:DEBUG='app';node app.js +``` + +Then, run the program to be debugged as usual. 
+ +npm script example: +```js + "windowsDebug": "@powershell -Command $env:DEBUG='*';node app.js", +``` + +## Namespace Colors + +Every debug instance has a color generated for it based on its namespace name. +This helps when visually parsing the debug output to identify which debug instance +a debug line belongs to. + +#### Node.js + +In Node.js, colors are enabled when stderr is a TTY. You also _should_ install +the [`supports-color`](https://npmjs.org/supports-color) module alongside debug, +otherwise debug will only use a small handful of basic colors. + + + +#### Web Browser + +Colors are also enabled on "Web Inspectors" that understand the `%c` formatting +option. These are WebKit web inspectors, Firefox ([since version +31](https://hacks.mozilla.org/2014/05/editable-box-model-multiple-selection-sublime-text-keys-much-more-firefox-developer-tools-episode-31/)) +and the Firebug plugin for Firefox (any version). + + + + +## Millisecond diff + +When actively developing an application it can be useful to see when the time spent between one `debug()` call and the next. Suppose for example you invoke `debug()` before requesting a resource, and after as well, the "+NNNms" will show you how much time was spent between calls. + + + +When stdout is not a TTY, `Date#toISOString()` is used, making it more useful for logging the debug information as shown below: + + + + +## Conventions + +If you're using this in one or more of your libraries, you _should_ use the name of your library so that developers may toggle debugging as desired without guessing names. If you have more than one debuggers you _should_ prefix them with your library name and use ":" to separate features. For example "bodyParser" from Connect would then be "connect:bodyParser". If you append a "*" to the end of your name, it will always be enabled regardless of the setting of the DEBUG environment variable. You can then use it for normal output as well as debug output. + +## Wildcards + +The `*` character may be used as a wildcard. Suppose for example your library has +debuggers named "connect:bodyParser", "connect:compress", "connect:session", +instead of listing all three with +`DEBUG=connect:bodyParser,connect:compress,connect:session`, you may simply do +`DEBUG=connect:*`, or to run everything using this module simply use `DEBUG=*`. + +You can also exclude specific debuggers by prefixing them with a "-" character. +For example, `DEBUG=*,-connect:*` would include all debuggers except those +starting with "connect:". + +## Environment Variables + +When running through Node.js, you can set a few environment variables that will +change the behavior of the debug logging: + +| Name | Purpose | +|-----------|-------------------------------------------------| +| `DEBUG` | Enables/disables specific debugging namespaces. | +| `DEBUG_HIDE_DATE` | Hide date from debug output (non-TTY). | +| `DEBUG_COLORS`| Whether or not to use colors in the debug output. | +| `DEBUG_DEPTH` | Object inspection depth. | +| `DEBUG_SHOW_HIDDEN` | Shows hidden properties on inspected objects. | + + +__Note:__ The environment variables beginning with `DEBUG_` end up being +converted into an Options object that gets used with `%o`/`%O` formatters. +See the Node.js documentation for +[`util.inspect()`](https://nodejs.org/api/util.html#util_util_inspect_object_options) +for the complete list. + +## Formatters + +Debug uses [printf-style](https://wikipedia.org/wiki/Printf_format_string) formatting. 
+Below are the officially supported formatters: + +| Formatter | Representation | +|-----------|----------------| +| `%O` | Pretty-print an Object on multiple lines. | +| `%o` | Pretty-print an Object all on a single line. | +| `%s` | String. | +| `%d` | Number (both integer and float). | +| `%j` | JSON. Replaced with the string '[Circular]' if the argument contains circular references. | +| `%%` | Single percent sign ('%'). This does not consume an argument. | + + +### Custom formatters + +You can add custom formatters by extending the `debug.formatters` object. +For example, if you wanted to add support for rendering a Buffer as hex with +`%h`, you could do something like: + +```js +const createDebug = require('debug') +createDebug.formatters.h = (v) => { + return v.toString('hex') +} + +// …elsewhere +const debug = createDebug('foo') +debug('this is hex: %h', new Buffer('hello world')) +// foo this is hex: 68656c6c6f20776f726c6421 +0ms +``` + + +## Browser Support + +You can build a browser-ready script using [browserify](https://github.com/substack/node-browserify), +or just use the [browserify-as-a-service](https://wzrd.in/) [build](https://wzrd.in/standalone/debug@latest), +if you don't want to build it yourself. + +Debug's enable state is currently persisted by `localStorage`. +Consider the situation shown below where you have `worker:a` and `worker:b`, +and wish to debug both. You can enable this using `localStorage.debug`: + +```js +localStorage.debug = 'worker:*' +``` + +And then refresh the page. + +```js +a = debug('worker:a'); +b = debug('worker:b'); + +setInterval(function(){ + a('doing some work'); +}, 1000); + +setInterval(function(){ + b('doing some work'); +}, 1200); +``` + +In Chromium-based web browsers (e.g. Brave, Chrome, and Electron), the JavaScript console will—by default—only show messages logged by `debug` if the "Verbose" log level is _enabled_. + + + +## Output streams + + By default `debug` will log to stderr, however this can be configured per-namespace by overriding the `log` method: + +Example [_stdout.js_](./examples/node/stdout.js): + +```js +var debug = require('debug'); +var error = debug('app:error'); + +// by default stderr is used +error('goes to stderr!'); + +var log = debug('app:log'); +// set this namespace to log via console.log +log.log = console.log.bind(console); // don't forget to bind to console! +log('goes to stdout'); +error('still goes to stderr!'); + +// set all output to go via console.info +// overrides all per-namespace log settings +debug.log = console.info.bind(console); +error('now goes to stdout via console.info'); +log('still goes to stdout, but via console.info now'); +``` + +## Extend +You can simply extend debugger +```js +const log = require('debug')('auth'); + +//creates new debug instance with extended namespace +const logSign = log.extend('sign'); +const logLogin = log.extend('login'); + +log('hello'); // auth hello +logSign('hello'); //auth:sign hello +logLogin('hello'); //auth:login hello +``` + +## Set dynamically + +You can also enable debug dynamically by calling the `enable()` method : + +```js +let debug = require('debug'); + +console.log(1, debug.enabled('test')); + +debug.enable('test'); +console.log(2, debug.enabled('test')); + +debug.disable(); +console.log(3, debug.enabled('test')); + +``` + +print : +``` +1 false +2 true +3 false +``` + +Usage : +`enable(namespaces)` +`namespaces` can include modes separated by a colon and wildcards. 
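+
+For illustration, a minimal sketch combining the wildcard and `-` exclusion syntax described in the Wildcards section with `enable()` and `enabled()`; the `worker:a` and `worker:noisy` namespaces here are hypothetical:
+
+```js
+let debug = require('debug');
+
+// Enable every "worker:" namespace except the hypothetical "worker:noisy".
+debug.enable('worker:*,-worker:noisy');
+
+console.log(debug.enabled('worker:a'));     // true
+console.log(debug.enabled('worker:noisy')); // false
+```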
+ +Note that calling `enable()` completely overrides previously set DEBUG variable : + +``` +$ DEBUG=foo node -e 'var dbg = require("debug"); dbg.enable("bar"); console.log(dbg.enabled("foo"))' +=> false +``` + +`disable()` + +Will disable all namespaces. The functions returns the namespaces currently +enabled (and skipped). This can be useful if you want to disable debugging +temporarily without knowing what was enabled to begin with. + +For example: + +```js +let debug = require('debug'); +debug.enable('foo:*,-foo:bar'); +let namespaces = debug.disable(); +debug.enable(namespaces); +``` + +Note: There is no guarantee that the string will be identical to the initial +enable string, but semantically they will be identical. + +## Checking whether a debug target is enabled + +After you've created a debug instance, you can determine whether or not it is +enabled by checking the `enabled` property: + +```javascript +const debug = require('debug')('http'); + +if (debug.enabled) { + // do stuff... +} +``` + +You can also manually toggle this property to force the debug instance to be +enabled or disabled. + +## Usage in child processes + +Due to the way `debug` detects if the output is a TTY or not, colors are not shown in child processes when `stderr` is piped. A solution is to pass the `DEBUG_COLORS=1` environment variable to the child process. +For example: + +```javascript +worker = fork(WORKER_WRAP_PATH, [workerPath], { + stdio: [ + /* stdin: */ 0, + /* stdout: */ 'pipe', + /* stderr: */ 'pipe', + 'ipc', + ], + env: Object.assign({}, process.env, { + DEBUG_COLORS: 1 // without this settings, colors won't be shown + }), +}); + +worker.stderr.pipe(process.stderr, { end: false }); +``` + + +## Authors + + - TJ Holowaychuk + - Nathan Rajlich + - Andrew Rhyne + - Josh Junon + +## Backers + +Support us with a monthly donation and help us continue our activities. [[Become a backer](https://opencollective.com/debug#backer)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Sponsors + +Become a sponsor and get your logo on our README on Github with a link to your site. [[Become a sponsor](https://opencollective.com/debug#sponsor)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## License + +(The MIT License) + +Copyright (c) 2014-2017 TJ Holowaychuk <tj@vision-media.ca> +Copyright (c) 2018-2021 Josh Junon + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/_extensions/d2/node_modules/debug/package.json b/_extensions/d2/node_modules/debug/package.json new file mode 100644 index 00000000..3bcdc242 --- /dev/null +++ b/_extensions/d2/node_modules/debug/package.json @@ -0,0 +1,59 @@ +{ + "name": "debug", + "version": "4.3.4", + "repository": { + "type": "git", + "url": "git://github.com/debug-js/debug.git" + }, + "description": "Lightweight debugging utility for Node.js and the browser", + "keywords": [ + "debug", + "log", + "debugger" + ], + "files": [ + "src", + "LICENSE", + "README.md" + ], + "author": "Josh Junon ", + "contributors": [ + "TJ Holowaychuk ", + "Nathan Rajlich (http://n8.io)", + "Andrew Rhyne " + ], + "license": "MIT", + "scripts": { + "lint": "xo", + "test": "npm run test:node && npm run test:browser && npm run lint", + "test:node": "istanbul cover _mocha -- test.js", + "test:browser": "karma start --single-run", + "test:coverage": "cat ./coverage/lcov.info | coveralls" + }, + "dependencies": { + "ms": "2.1.2" + }, + "devDependencies": { + "brfs": "^2.0.1", + "browserify": "^16.2.3", + "coveralls": "^3.0.2", + "istanbul": "^0.4.5", + "karma": "^3.1.4", + "karma-browserify": "^6.0.0", + "karma-chrome-launcher": "^2.2.0", + "karma-mocha": "^1.3.0", + "mocha": "^5.2.0", + "mocha-lcov-reporter": "^1.2.0", + "xo": "^0.23.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + }, + "main": "./src/index.js", + "browser": "./src/browser.js", + "engines": { + "node": ">=6.0" + } +} diff --git a/_extensions/d2/node_modules/debug/src/browser.js b/_extensions/d2/node_modules/debug/src/browser.js new file mode 100644 index 00000000..cd0fc35d --- /dev/null +++ b/_extensions/d2/node_modules/debug/src/browser.js @@ -0,0 +1,269 @@ +/* eslint-env browser */ + +/** + * This is the web browser implementation of `debug()`. + */ + +exports.formatArgs = formatArgs; +exports.save = save; +exports.load = load; +exports.useColors = useColors; +exports.storage = localstorage(); +exports.destroy = (() => { + let warned = false; + + return () => { + if (!warned) { + warned = true; + console.warn('Instance method `debug.destroy()` is deprecated and no longer does anything. It will be removed in the next major version of `debug`.'); + } + }; +})(); + +/** + * Colors. + */ + +exports.colors = [ + '#0000CC', + '#0000FF', + '#0033CC', + '#0033FF', + '#0066CC', + '#0066FF', + '#0099CC', + '#0099FF', + '#00CC00', + '#00CC33', + '#00CC66', + '#00CC99', + '#00CCCC', + '#00CCFF', + '#3300CC', + '#3300FF', + '#3333CC', + '#3333FF', + '#3366CC', + '#3366FF', + '#3399CC', + '#3399FF', + '#33CC00', + '#33CC33', + '#33CC66', + '#33CC99', + '#33CCCC', + '#33CCFF', + '#6600CC', + '#6600FF', + '#6633CC', + '#6633FF', + '#66CC00', + '#66CC33', + '#9900CC', + '#9900FF', + '#9933CC', + '#9933FF', + '#99CC00', + '#99CC33', + '#CC0000', + '#CC0033', + '#CC0066', + '#CC0099', + '#CC00CC', + '#CC00FF', + '#CC3300', + '#CC3333', + '#CC3366', + '#CC3399', + '#CC33CC', + '#CC33FF', + '#CC6600', + '#CC6633', + '#CC9900', + '#CC9933', + '#CCCC00', + '#CCCC33', + '#FF0000', + '#FF0033', + '#FF0066', + '#FF0099', + '#FF00CC', + '#FF00FF', + '#FF3300', + '#FF3333', + '#FF3366', + '#FF3399', + '#FF33CC', + '#FF33FF', + '#FF6600', + '#FF6633', + '#FF9900', + '#FF9933', + '#FFCC00', + '#FFCC33' +]; + +/** + * Currently only WebKit-based Web Inspectors, Firefox >= v31, + * and the Firebug extension (any Firefox version) are known + * to support "%c" CSS customizations. 
+ * + * TODO: add a `localStorage` variable to explicitly enable/disable colors + */ + +// eslint-disable-next-line complexity +function useColors() { + // NB: In an Electron preload script, document will be defined but not fully + // initialized. Since we know we're in Chrome, we'll just detect this case + // explicitly + if (typeof window !== 'undefined' && window.process && (window.process.type === 'renderer' || window.process.__nwjs)) { + return true; + } + + // Internet Explorer and Edge do not support colors. + if (typeof navigator !== 'undefined' && navigator.userAgent && navigator.userAgent.toLowerCase().match(/(edge|trident)\/(\d+)/)) { + return false; + } + + // Is webkit? http://stackoverflow.com/a/16459606/376773 + // document is undefined in react-native: https://github.com/facebook/react-native/pull/1632 + return (typeof document !== 'undefined' && document.documentElement && document.documentElement.style && document.documentElement.style.WebkitAppearance) || + // Is firebug? http://stackoverflow.com/a/398120/376773 + (typeof window !== 'undefined' && window.console && (window.console.firebug || (window.console.exception && window.console.table))) || + // Is firefox >= v31? + // https://developer.mozilla.org/en-US/docs/Tools/Web_Console#Styling_messages + (typeof navigator !== 'undefined' && navigator.userAgent && navigator.userAgent.toLowerCase().match(/firefox\/(\d+)/) && parseInt(RegExp.$1, 10) >= 31) || + // Double check webkit in userAgent just in case we are in a worker + (typeof navigator !== 'undefined' && navigator.userAgent && navigator.userAgent.toLowerCase().match(/applewebkit\/(\d+)/)); +} + +/** + * Colorize log arguments if enabled. + * + * @api public + */ + +function formatArgs(args) { + args[0] = (this.useColors ? '%c' : '') + + this.namespace + + (this.useColors ? ' %c' : ' ') + + args[0] + + (this.useColors ? '%c ' : ' ') + + '+' + module.exports.humanize(this.diff); + + if (!this.useColors) { + return; + } + + const c = 'color: ' + this.color; + args.splice(1, 0, c, 'color: inherit'); + + // The final "%c" is somewhat tricky, because there could be other + // arguments passed either before or after the %c, so we need to + // figure out the correct index to insert the CSS into + let index = 0; + let lastC = 0; + args[0].replace(/%[a-zA-Z%]/g, match => { + if (match === '%%') { + return; + } + index++; + if (match === '%c') { + // We only are interested in the *last* %c + // (the user may have provided their own) + lastC = index; + } + }); + + args.splice(lastC, 0, c); +} + +/** + * Invokes `console.debug()` when available. + * No-op when `console.debug` is not a "function". + * If `console.debug` is not available, falls back + * to `console.log`. + * + * @api public + */ +exports.log = console.debug || console.log || (() => {}); + +/** + * Save `namespaces`. + * + * @param {String} namespaces + * @api private + */ +function save(namespaces) { + try { + if (namespaces) { + exports.storage.setItem('debug', namespaces); + } else { + exports.storage.removeItem('debug'); + } + } catch (error) { + // Swallow + // XXX (@Qix-) should we be logging these? + } +} + +/** + * Load `namespaces`. + * + * @return {String} returns the previously persisted debug modes + * @api private + */ +function load() { + let r; + try { + r = exports.storage.getItem('debug'); + } catch (error) { + // Swallow + // XXX (@Qix-) should we be logging these? 
+ } + + // If debug isn't set in LS, and we're in Electron, try to load $DEBUG + if (!r && typeof process !== 'undefined' && 'env' in process) { + r = process.env.DEBUG; + } + + return r; +} + +/** + * Localstorage attempts to return the localstorage. + * + * This is necessary because safari throws + * when a user disables cookies/localstorage + * and you attempt to access it. + * + * @return {LocalStorage} + * @api private + */ + +function localstorage() { + try { + // TVMLKit (Apple TV JS Runtime) does not have a window object, just localStorage in the global context + // The Browser also has localStorage in the global context. + return localStorage; + } catch (error) { + // Swallow + // XXX (@Qix-) should we be logging these? + } +} + +module.exports = require('./common')(exports); + +const {formatters} = module.exports; + +/** + * Map %j to `JSON.stringify()`, since no Web Inspectors do that by default. + */ + +formatters.j = function (v) { + try { + return JSON.stringify(v); + } catch (error) { + return '[UnexpectedJSONParseError]: ' + error.message; + } +}; diff --git a/_extensions/d2/node_modules/debug/src/common.js b/_extensions/d2/node_modules/debug/src/common.js new file mode 100644 index 00000000..e3291b20 --- /dev/null +++ b/_extensions/d2/node_modules/debug/src/common.js @@ -0,0 +1,274 @@ + +/** + * This is the common logic for both the Node.js and web browser + * implementations of `debug()`. + */ + +function setup(env) { + createDebug.debug = createDebug; + createDebug.default = createDebug; + createDebug.coerce = coerce; + createDebug.disable = disable; + createDebug.enable = enable; + createDebug.enabled = enabled; + createDebug.humanize = require('ms'); + createDebug.destroy = destroy; + + Object.keys(env).forEach(key => { + createDebug[key] = env[key]; + }); + + /** + * The currently active debug mode names, and names to skip. + */ + + createDebug.names = []; + createDebug.skips = []; + + /** + * Map of special "%n" handling functions, for the debug "format" argument. + * + * Valid key names are a single, lower or upper-case letter, i.e. "n" and "N". + */ + createDebug.formatters = {}; + + /** + * Selects a color for a debug namespace + * @param {String} namespace The namespace string for the debug instance to be colored + * @return {Number|String} An ANSI color code for the given namespace + * @api private + */ + function selectColor(namespace) { + let hash = 0; + + for (let i = 0; i < namespace.length; i++) { + hash = ((hash << 5) - hash) + namespace.charCodeAt(i); + hash |= 0; // Convert to 32bit integer + } + + return createDebug.colors[Math.abs(hash) % createDebug.colors.length]; + } + createDebug.selectColor = selectColor; + + /** + * Create a debugger with the given `namespace`. + * + * @param {String} namespace + * @return {Function} + * @api public + */ + function createDebug(namespace) { + let prevTime; + let enableOverride = null; + let namespacesCache; + let enabledCache; + + function debug(...args) { + // Disabled? 
+ if (!debug.enabled) { + return; + } + + const self = debug; + + // Set `diff` timestamp + const curr = Number(new Date()); + const ms = curr - (prevTime || curr); + self.diff = ms; + self.prev = prevTime; + self.curr = curr; + prevTime = curr; + + args[0] = createDebug.coerce(args[0]); + + if (typeof args[0] !== 'string') { + // Anything else let's inspect with %O + args.unshift('%O'); + } + + // Apply any `formatters` transformations + let index = 0; + args[0] = args[0].replace(/%([a-zA-Z%])/g, (match, format) => { + // If we encounter an escaped % then don't increase the array index + if (match === '%%') { + return '%'; + } + index++; + const formatter = createDebug.formatters[format]; + if (typeof formatter === 'function') { + const val = args[index]; + match = formatter.call(self, val); + + // Now we need to remove `args[index]` since it's inlined in the `format` + args.splice(index, 1); + index--; + } + return match; + }); + + // Apply env-specific formatting (colors, etc.) + createDebug.formatArgs.call(self, args); + + const logFn = self.log || createDebug.log; + logFn.apply(self, args); + } + + debug.namespace = namespace; + debug.useColors = createDebug.useColors(); + debug.color = createDebug.selectColor(namespace); + debug.extend = extend; + debug.destroy = createDebug.destroy; // XXX Temporary. Will be removed in the next major release. + + Object.defineProperty(debug, 'enabled', { + enumerable: true, + configurable: false, + get: () => { + if (enableOverride !== null) { + return enableOverride; + } + if (namespacesCache !== createDebug.namespaces) { + namespacesCache = createDebug.namespaces; + enabledCache = createDebug.enabled(namespace); + } + + return enabledCache; + }, + set: v => { + enableOverride = v; + } + }); + + // Env-specific initialization logic for debug instances + if (typeof createDebug.init === 'function') { + createDebug.init(debug); + } + + return debug; + } + + function extend(namespace, delimiter) { + const newDebug = createDebug(this.namespace + (typeof delimiter === 'undefined' ? ':' : delimiter) + namespace); + newDebug.log = this.log; + return newDebug; + } + + /** + * Enables a debug mode by namespaces. This can include modes + * separated by a colon and wildcards. + * + * @param {String} namespaces + * @api public + */ + function enable(namespaces) { + createDebug.save(namespaces); + createDebug.namespaces = namespaces; + + createDebug.names = []; + createDebug.skips = []; + + let i; + const split = (typeof namespaces === 'string' ? namespaces : '').split(/[\s,]+/); + const len = split.length; + + for (i = 0; i < len; i++) { + if (!split[i]) { + // ignore empty strings + continue; + } + + namespaces = split[i].replace(/\*/g, '.*?'); + + if (namespaces[0] === '-') { + createDebug.skips.push(new RegExp('^' + namespaces.slice(1) + '$')); + } else { + createDebug.names.push(new RegExp('^' + namespaces + '$')); + } + } + } + + /** + * Disable debug output. + * + * @return {String} namespaces + * @api public + */ + function disable() { + const namespaces = [ + ...createDebug.names.map(toNamespace), + ...createDebug.skips.map(toNamespace).map(namespace => '-' + namespace) + ].join(','); + createDebug.enable(''); + return namespaces; + } + + /** + * Returns true if the given mode name is enabled, false otherwise. 
+ * + * @param {String} name + * @return {Boolean} + * @api public + */ + function enabled(name) { + if (name[name.length - 1] === '*') { + return true; + } + + let i; + let len; + + for (i = 0, len = createDebug.skips.length; i < len; i++) { + if (createDebug.skips[i].test(name)) { + return false; + } + } + + for (i = 0, len = createDebug.names.length; i < len; i++) { + if (createDebug.names[i].test(name)) { + return true; + } + } + + return false; + } + + /** + * Convert regexp to namespace + * + * @param {RegExp} regxep + * @return {String} namespace + * @api private + */ + function toNamespace(regexp) { + return regexp.toString() + .substring(2, regexp.toString().length - 2) + .replace(/\.\*\?$/, '*'); + } + + /** + * Coerce `val`. + * + * @param {Mixed} val + * @return {Mixed} + * @api private + */ + function coerce(val) { + if (val instanceof Error) { + return val.stack || val.message; + } + return val; + } + + /** + * XXX DO NOT USE. This is a temporary stub function. + * XXX It WILL be removed in the next major release. + */ + function destroy() { + console.warn('Instance method `debug.destroy()` is deprecated and no longer does anything. It will be removed in the next major version of `debug`.'); + } + + createDebug.enable(createDebug.load()); + + return createDebug; +} + +module.exports = setup; diff --git a/_extensions/d2/node_modules/debug/src/index.js b/_extensions/d2/node_modules/debug/src/index.js new file mode 100644 index 00000000..bf4c57f2 --- /dev/null +++ b/_extensions/d2/node_modules/debug/src/index.js @@ -0,0 +1,10 @@ +/** + * Detect Electron renderer / nwjs process, which is node, but we should + * treat as a browser. + */ + +if (typeof process === 'undefined' || process.type === 'renderer' || process.browser === true || process.__nwjs) { + module.exports = require('./browser.js'); +} else { + module.exports = require('./node.js'); +} diff --git a/_extensions/d2/node_modules/debug/src/node.js b/_extensions/d2/node_modules/debug/src/node.js new file mode 100644 index 00000000..79bc085c --- /dev/null +++ b/_extensions/d2/node_modules/debug/src/node.js @@ -0,0 +1,263 @@ +/** + * Module dependencies. + */ + +const tty = require('tty'); +const util = require('util'); + +/** + * This is the Node.js implementation of `debug()`. + */ + +exports.init = init; +exports.log = log; +exports.formatArgs = formatArgs; +exports.save = save; +exports.load = load; +exports.useColors = useColors; +exports.destroy = util.deprecate( + () => {}, + 'Instance method `debug.destroy()` is deprecated and no longer does anything. It will be removed in the next major version of `debug`.' +); + +/** + * Colors. 
+ */ + +exports.colors = [6, 2, 3, 4, 5, 1]; + +try { + // Optional dependency (as in, doesn't need to be installed, NOT like optionalDependencies in package.json) + // eslint-disable-next-line import/no-extraneous-dependencies + const supportsColor = require('supports-color'); + + if (supportsColor && (supportsColor.stderr || supportsColor).level >= 2) { + exports.colors = [ + 20, + 21, + 26, + 27, + 32, + 33, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 56, + 57, + 62, + 63, + 68, + 69, + 74, + 75, + 76, + 77, + 78, + 79, + 80, + 81, + 92, + 93, + 98, + 99, + 112, + 113, + 128, + 129, + 134, + 135, + 148, + 149, + 160, + 161, + 162, + 163, + 164, + 165, + 166, + 167, + 168, + 169, + 170, + 171, + 172, + 173, + 178, + 179, + 184, + 185, + 196, + 197, + 198, + 199, + 200, + 201, + 202, + 203, + 204, + 205, + 206, + 207, + 208, + 209, + 214, + 215, + 220, + 221 + ]; + } +} catch (error) { + // Swallow - we only care if `supports-color` is available; it doesn't have to be. +} + +/** + * Build up the default `inspectOpts` object from the environment variables. + * + * $ DEBUG_COLORS=no DEBUG_DEPTH=10 DEBUG_SHOW_HIDDEN=enabled node script.js + */ + +exports.inspectOpts = Object.keys(process.env).filter(key => { + return /^debug_/i.test(key); +}).reduce((obj, key) => { + // Camel-case + const prop = key + .substring(6) + .toLowerCase() + .replace(/_([a-z])/g, (_, k) => { + return k.toUpperCase(); + }); + + // Coerce string value into JS value + let val = process.env[key]; + if (/^(yes|on|true|enabled)$/i.test(val)) { + val = true; + } else if (/^(no|off|false|disabled)$/i.test(val)) { + val = false; + } else if (val === 'null') { + val = null; + } else { + val = Number(val); + } + + obj[prop] = val; + return obj; +}, {}); + +/** + * Is stdout a TTY? Colored output is enabled when `true`. + */ + +function useColors() { + return 'colors' in exports.inspectOpts ? + Boolean(exports.inspectOpts.colors) : + tty.isatty(process.stderr.fd); +} + +/** + * Adds ANSI color escape codes if enabled. + * + * @api public + */ + +function formatArgs(args) { + const {namespace: name, useColors} = this; + + if (useColors) { + const c = this.color; + const colorCode = '\u001B[3' + (c < 8 ? c : '8;5;' + c); + const prefix = ` ${colorCode};1m${name} \u001B[0m`; + + args[0] = prefix + args[0].split('\n').join('\n' + prefix); + args.push(colorCode + 'm+' + module.exports.humanize(this.diff) + '\u001B[0m'); + } else { + args[0] = getDate() + name + ' ' + args[0]; + } +} + +function getDate() { + if (exports.inspectOpts.hideDate) { + return ''; + } + return new Date().toISOString() + ' '; +} + +/** + * Invokes `util.format()` with the specified arguments and writes to stderr. + */ + +function log(...args) { + return process.stderr.write(util.format(...args) + '\n'); +} + +/** + * Save `namespaces`. + * + * @param {String} namespaces + * @api private + */ +function save(namespaces) { + if (namespaces) { + process.env.DEBUG = namespaces; + } else { + // If you set a process.env field to null or undefined, it gets cast to the + // string 'null' or 'undefined'. Just delete instead. + delete process.env.DEBUG; + } +} + +/** + * Load `namespaces`. + * + * @return {String} returns the previously persisted debug modes + * @api private + */ + +function load() { + return process.env.DEBUG; +} + +/** + * Init logic for `debug` instances. + * + * Create a new `inspectOpts` object in case `useColors` is set + * differently for a particular `debug` instance. 
+ */ + +function init(debug) { + debug.inspectOpts = {}; + + const keys = Object.keys(exports.inspectOpts); + for (let i = 0; i < keys.length; i++) { + debug.inspectOpts[keys[i]] = exports.inspectOpts[keys[i]]; + } +} + +module.exports = require('./common')(exports); + +const {formatters} = module.exports; + +/** + * Map %o to `util.inspect()`, all on a single line. + */ + +formatters.o = function (v) { + this.inspectOpts.colors = this.useColors; + return util.inspect(v, this.inspectOpts) + .split('\n') + .map(str => str.trim()) + .join(' '); +}; + +/** + * Map %O to `util.inspect()`, allowing multiple lines if needed. + */ + +formatters.O = function (v) { + this.inspectOpts.colors = this.useColors; + return util.inspect(v, this.inspectOpts); +}; diff --git a/_extensions/d2/node_modules/decode-named-character-reference/index.d.ts b/_extensions/d2/node_modules/decode-named-character-reference/index.d.ts new file mode 100644 index 00000000..f560043f --- /dev/null +++ b/_extensions/d2/node_modules/decode-named-character-reference/index.d.ts @@ -0,0 +1,12 @@ +/** + * Decode a single character reference (without the `&` or `;`). + * You probably only need this when you’re building parsers yourself that follow + * different rules compared to HTML. + * This is optimized to be tiny in browsers. + * + * @param {string} value + * `notin` (named), `#123` (deci), `#x123` (hexa). + * @returns {string|false} + * Decoded reference. + */ +export function decodeNamedCharacterReference(value: string): string | false diff --git a/_extensions/d2/node_modules/decode-named-character-reference/index.dom.d.ts b/_extensions/d2/node_modules/decode-named-character-reference/index.dom.d.ts new file mode 100644 index 00000000..83b9debe --- /dev/null +++ b/_extensions/d2/node_modules/decode-named-character-reference/index.dom.d.ts @@ -0,0 +1,6 @@ +/// +/** + * @param {string} value + * @returns {string|false} + */ +export function decodeNamedCharacterReference(value: string): string | false diff --git a/_extensions/d2/node_modules/decode-named-character-reference/index.dom.js b/_extensions/d2/node_modules/decode-named-character-reference/index.dom.js new file mode 100644 index 00000000..5d0abe28 --- /dev/null +++ b/_extensions/d2/node_modules/decode-named-character-reference/index.dom.js @@ -0,0 +1,33 @@ +/// + +/* eslint-env browser */ + +const element = document.createElement('i') + +/** + * @param {string} value + * @returns {string|false} + */ +export function decodeNamedCharacterReference(value) { + const characterReference = '&' + value + ';' + element.innerHTML = characterReference + const char = element.textContent + + // Some named character references do not require the closing semicolon + // (`¬`, for instance), which leads to situations where parsing the assumed + // named reference of `¬it;` will result in the string `¬it;`. + // When we encounter a trailing semicolon after parsing, and the character + // reference to decode was not a semicolon (`;`), we can assume that the + // matching was not complete. + // @ts-expect-error: TypeScript is wrong that `textContent` on elements can + // yield `null`. + if (char.charCodeAt(char.length - 1) === 59 /* `;` */ && value !== 'semi') { + return false + } + + // If the decoded string is equal to the input, the character reference was + // not valid. + // @ts-expect-error: TypeScript is wrong that `textContent` on elements can + // yield `null`. + return char === characterReference ? 
false : char +} diff --git a/_extensions/d2/node_modules/decode-named-character-reference/index.js b/_extensions/d2/node_modules/decode-named-character-reference/index.js new file mode 100644 index 00000000..ee3efd5d --- /dev/null +++ b/_extensions/d2/node_modules/decode-named-character-reference/index.js @@ -0,0 +1,18 @@ +import {characterEntities} from 'character-entities' + +const own = {}.hasOwnProperty + +/** + * Decode a single character reference (without the `&` or `;`). + * You probably only need this when you’re building parsers yourself that follow + * different rules compared to HTML. + * This is optimized to be tiny in browsers. + * + * @param {string} value + * `notin` (named), `#123` (deci), `#x123` (hexa). + * @returns {string|false} + * Decoded reference. + */ +export function decodeNamedCharacterReference(value) { + return own.call(characterEntities, value) ? characterEntities[value] : false +} diff --git a/_extensions/d2/node_modules/decode-named-character-reference/license b/_extensions/d2/node_modules/decode-named-character-reference/license new file mode 100644 index 00000000..f4fb31fe --- /dev/null +++ b/_extensions/d2/node_modules/decode-named-character-reference/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2021 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/_extensions/d2/node_modules/decode-named-character-reference/package.json b/_extensions/d2/node_modules/decode-named-character-reference/package.json new file mode 100644 index 00000000..5fef2811 --- /dev/null +++ b/_extensions/d2/node_modules/decode-named-character-reference/package.json @@ -0,0 +1,89 @@ +{ + "name": "decode-named-character-reference", + "version": "1.0.2", + "description": "Decode named character references", + "license": "MIT", + "keywords": [ + "decode", + "named", + "character", + "references" + ], + "repository": "wooorm/decode-named-character-reference", + "bugs": "https://github.com/wooorm/decode-named-character-reference/issues", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "index.js", + "index.d.ts", + "index.dom.js", + "index.dom.d.ts" + ], + "exports": { + ".": { + "deno": "./index.js", + "react-native": "./index.js", + "worker": "./index.js", + "browser": "./index.dom.js", + "default": "./index.js" + } + }, + "dependencies": { + "character-entities": "^2.0.0" + }, + "devDependencies": { + "@types/tape": "^4.0.0", + "c8": "^7.0.0", + "prettier": "^2.0.0", + "remark-cli": "^10.0.0", + "remark-preset-wooorm": "^9.0.0", + "rimraf": "^3.0.0", + "tape": "^5.0.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.49.0" + }, + "scripts": { + "prepublishOnly": "npm run build && npm run format", + "build": "rimraf \"*.d.ts\" && tsc && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --branches 100 --functions 100 --lines 100 --statements 100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true, + "rules": { + "unicorn/prefer-code-point": "off" + } + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/decode-named-character-reference/readme.md b/_extensions/d2/node_modules/decode-named-character-reference/readme.md new file mode 100644 index 00000000..fc0d91b9 --- /dev/null +++ b/_extensions/d2/node_modules/decode-named-character-reference/readme.md @@ -0,0 +1,135 @@ +# decode-named-character-reference + +[![Build Status][build-badge]][build] +[![Coverage Status][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] + +Decode named character references. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`decodeNamedCharacterReference(value)`](#decodenamedcharacterreferencevalue) +* [Types](#types) +* [Compatibility](#compatibility) +* [Security](#security) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +A workaround for webpack. + +## When should I use this? + +Never use this. +Use [`parse-entities`][parse-entities]. +It uses this. 
+ +## Install + +This package is [ESM only][esm]. +In Node.js (version 12.20+, 14.14+, or 16.0+), install with [npm][]: + +```sh +npm install decode-named-character-reference +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {decodeNamedCharacterReference} from 'https://esm.sh/decode-named-character-reference@1' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {decodeNamedCharacterReference} from 'decode-named-character-reference' + +decodeNamedCharacterReference('amp') //=> '&' +``` + +## API + +This package exports the following identifier: `decodeNamedCharacterReference`. +There is no default export. + +### `decodeNamedCharacterReference(value)` + +Again, use [`parse-entities`][parse-entities]. + +## Types + +This package is fully typed with [TypeScript][]. + +## Compatibility + +This package is at least compatible with all maintained versions of Node.js. +As of now, that is Node.js 12.20+, 14.14+, and 16.0+. +It also works in Deno and modern browsers. + +## Security + +This package is safe. + +## Related + +* [`parse-entities`][parse-entities] + — parse (decode) HTML character references + +## Contribute + +Yes please! +See [How to Contribute to Open Source][contribute]. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/wooorm/decode-named-character-reference/workflows/main/badge.svg + +[build]: https://github.com/wooorm/decode-named-character-reference/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/wooorm/decode-named-character-reference.svg + +[coverage]: https://codecov.io/github/wooorm/decode-named-character-reference + +[downloads-badge]: https://img.shields.io/npm/dm/decode-named-character-reference.svg + +[downloads]: https://www.npmjs.com/package/decode-named-character-reference + +[size-badge]: https://img.shields.io/bundlephobia/minzip/decode-named-character-reference.svg + +[size]: https://bundlephobia.com/result?p=decode-named-character-reference + +[npm]: https://docs.npmjs.com/cli/install + +[esmsh]: https://esm.sh + +[license]: license + +[author]: https://wooorm.com + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[typescript]: https://www.typescriptlang.org + +[contribute]: https://opensource.guide/how-to-contribute/ + +[parse-entities]: https://github.com/wooorm/parse-entities diff --git a/_extensions/d2/node_modules/dequal/dist/index.js b/_extensions/d2/node_modules/dequal/dist/index.js new file mode 100644 index 00000000..7cbd2e7a --- /dev/null +++ b/_extensions/d2/node_modules/dequal/dist/index.js @@ -0,0 +1,86 @@ +var has = Object.prototype.hasOwnProperty; + +function find(iter, tar, key) { + for (key of iter.keys()) { + if (dequal(key, tar)) return key; + } +} + +function dequal(foo, bar) { + var ctor, len, tmp; + if (foo === bar) return true; + + if (foo && bar && (ctor=foo.constructor) === bar.constructor) { + if (ctor === Date) return foo.getTime() === bar.getTime(); + if (ctor === RegExp) return foo.toString() === bar.toString(); + + if (ctor === Array) { + if ((len=foo.length) === bar.length) { + while (len-- && dequal(foo[len], bar[len])); + } + return len === -1; + } + + if (ctor === Set) { + if (foo.size !== bar.size) { + return false; + } + for (len of foo) { + tmp = len; + if (tmp && typeof tmp === 'object') { + tmp = find(bar, tmp); + if (!tmp) return false; + } + if (!bar.has(tmp)) return false; + } + return true; + } + + if (ctor === Map) { + if (foo.size !== bar.size) { + return false; + } + for (len 
of foo) { + tmp = len[0]; + if (tmp && typeof tmp === 'object') { + tmp = find(bar, tmp); + if (!tmp) return false; + } + if (!dequal(len[1], bar.get(tmp))) { + return false; + } + } + return true; + } + + if (ctor === ArrayBuffer) { + foo = new Uint8Array(foo); + bar = new Uint8Array(bar); + } else if (ctor === DataView) { + if ((len=foo.byteLength) === bar.byteLength) { + while (len-- && foo.getInt8(len) === bar.getInt8(len)); + } + return len === -1; + } + + if (ArrayBuffer.isView(foo)) { + if ((len=foo.byteLength) === bar.byteLength) { + while (len-- && foo[len] === bar[len]); + } + return len === -1; + } + + if (!ctor || typeof foo === 'object') { + len = 0; + for (ctor in foo) { + if (has.call(foo, ctor) && ++len && !has.call(bar, ctor)) return false; + if (!(ctor in bar) || !dequal(foo[ctor], bar[ctor])) return false; + } + return Object.keys(bar).length === len; + } + } + + return foo !== foo && bar !== bar; +} + +exports.dequal = dequal; \ No newline at end of file diff --git a/_extensions/d2/node_modules/dequal/dist/index.min.js b/_extensions/d2/node_modules/dequal/dist/index.min.js new file mode 100644 index 00000000..0149a23c --- /dev/null +++ b/_extensions/d2/node_modules/dequal/dist/index.min.js @@ -0,0 +1 @@ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t(e.dequal={})}(this,(function(e){var t=Object.prototype.hasOwnProperty;function r(e,t,r){for(r of e.keys())if(n(r,t))return r}function n(e,f){var i,o,u;if(e===f)return!0;if(e&&f&&(i=e.constructor)===f.constructor){if(i===Date)return e.getTime()===f.getTime();if(i===RegExp)return e.toString()===f.toString();if(i===Array){if((o=e.length)===f.length)for(;o--&&n(e[o],f[o]););return-1===o}if(i===Set){if(e.size!==f.size)return!1;for(o of e){if((u=o)&&"object"==typeof u&&!(u=r(f,u)))return!1;if(!f.has(u))return!1}return!0}if(i===Map){if(e.size!==f.size)return!1;for(o of e){if((u=o[0])&&"object"==typeof u&&!(u=r(f,u)))return!1;if(!n(o[1],f.get(u)))return!1}return!0}if(i===ArrayBuffer)e=new Uint8Array(e),f=new Uint8Array(f);else if(i===DataView){if((o=e.byteLength)===f.byteLength)for(;o--&&e.getInt8(o)===f.getInt8(o););return-1===o}if(ArrayBuffer.isView(e)){if((o=e.byteLength)===f.byteLength)for(;o--&&e[o]===f[o];);return-1===o}if(!i||"object"==typeof e){for(i in o=0,e){if(t.call(e,i)&&++o&&!t.call(f,i))return!1;if(!(i in f)||!n(e[i],f[i]))return!1}return Object.keys(f).length===o}}return e!=e&&f!=f}e.dequal=n})); \ No newline at end of file diff --git a/_extensions/d2/node_modules/dequal/dist/index.mjs b/_extensions/d2/node_modules/dequal/dist/index.mjs new file mode 100644 index 00000000..d0b1e2db --- /dev/null +++ b/_extensions/d2/node_modules/dequal/dist/index.mjs @@ -0,0 +1,84 @@ +var has = Object.prototype.hasOwnProperty; + +function find(iter, tar, key) { + for (key of iter.keys()) { + if (dequal(key, tar)) return key; + } +} + +export function dequal(foo, bar) { + var ctor, len, tmp; + if (foo === bar) return true; + + if (foo && bar && (ctor=foo.constructor) === bar.constructor) { + if (ctor === Date) return foo.getTime() === bar.getTime(); + if (ctor === RegExp) return foo.toString() === bar.toString(); + + if (ctor === Array) { + if ((len=foo.length) === bar.length) { + while (len-- && dequal(foo[len], bar[len])); + } + return len === -1; + } + + if (ctor === Set) { + if (foo.size !== bar.size) { + return false; + } + for (len of foo) { + tmp = len; + if (tmp && typeof tmp === 'object') { + tmp = find(bar, tmp); + if (!tmp) return 
false; + } + if (!bar.has(tmp)) return false; + } + return true; + } + + if (ctor === Map) { + if (foo.size !== bar.size) { + return false; + } + for (len of foo) { + tmp = len[0]; + if (tmp && typeof tmp === 'object') { + tmp = find(bar, tmp); + if (!tmp) return false; + } + if (!dequal(len[1], bar.get(tmp))) { + return false; + } + } + return true; + } + + if (ctor === ArrayBuffer) { + foo = new Uint8Array(foo); + bar = new Uint8Array(bar); + } else if (ctor === DataView) { + if ((len=foo.byteLength) === bar.byteLength) { + while (len-- && foo.getInt8(len) === bar.getInt8(len)); + } + return len === -1; + } + + if (ArrayBuffer.isView(foo)) { + if ((len=foo.byteLength) === bar.byteLength) { + while (len-- && foo[len] === bar[len]); + } + return len === -1; + } + + if (!ctor || typeof foo === 'object') { + len = 0; + for (ctor in foo) { + if (has.call(foo, ctor) && ++len && !has.call(bar, ctor)) return false; + if (!(ctor in bar) || !dequal(foo[ctor], bar[ctor])) return false; + } + return Object.keys(bar).length === len; + } + } + + return foo !== foo && bar !== bar; +} diff --git a/_extensions/d2/node_modules/dequal/index.d.ts b/_extensions/d2/node_modules/dequal/index.d.ts new file mode 100644 index 00000000..a9aea5d5 --- /dev/null +++ b/_extensions/d2/node_modules/dequal/index.d.ts @@ -0,0 +1 @@ +export function dequal(foo: any, bar: any): boolean; \ No newline at end of file diff --git a/_extensions/d2/node_modules/dequal/license b/_extensions/d2/node_modules/dequal/license new file mode 100644 index 00000000..a3f96f82 --- /dev/null +++ b/_extensions/d2/node_modules/dequal/license @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Luke Edwards (lukeed.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
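+
+The `find` helper above is what gives `Set` and `Map` members value (rather than reference) equality; a small sketch of that behaviour, assuming the package is installed as `dequal`:
+
+```js
+import { dequal } from 'dequal';
+
+// Distinct but structurally equal objects still match as Set members.
+dequal(new Set([{ a: 1 }]), new Set([{ a: 1 }])); // => true
+
+// Map keys and values are compared by value too.
+dequal(new Map([[{ k: 1 }, 'x']]), new Map([[{ k: 1 }, 'x']])); // => true
+
+// NaN equals NaN via the final `foo !== foo && bar !== bar` check.
+dequal(NaN, NaN); // => true
+```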
diff --git a/_extensions/d2/node_modules/dequal/lite/index.d.ts b/_extensions/d2/node_modules/dequal/lite/index.d.ts new file mode 100644 index 00000000..a9aea5d5 --- /dev/null +++ b/_extensions/d2/node_modules/dequal/lite/index.d.ts @@ -0,0 +1 @@ +export function dequal(foo: any, bar: any): boolean; \ No newline at end of file diff --git a/_extensions/d2/node_modules/dequal/lite/index.js b/_extensions/d2/node_modules/dequal/lite/index.js new file mode 100644 index 00000000..ac3eb6b8 --- /dev/null +++ b/_extensions/d2/node_modules/dequal/lite/index.js @@ -0,0 +1,31 @@ +var has = Object.prototype.hasOwnProperty; + +function dequal(foo, bar) { + var ctor, len; + if (foo === bar) return true; + + if (foo && bar && (ctor=foo.constructor) === bar.constructor) { + if (ctor === Date) return foo.getTime() === bar.getTime(); + if (ctor === RegExp) return foo.toString() === bar.toString(); + + if (ctor === Array) { + if ((len=foo.length) === bar.length) { + while (len-- && dequal(foo[len], bar[len])); + } + return len === -1; + } + + if (!ctor || typeof foo === 'object') { + len = 0; + for (ctor in foo) { + if (has.call(foo, ctor) && ++len && !has.call(bar, ctor)) return false; + if (!(ctor in bar) || !dequal(foo[ctor], bar[ctor])) return false; + } + return Object.keys(bar).length === len; + } + } + + return foo !== foo && bar !== bar; +} + +exports.dequal = dequal; \ No newline at end of file diff --git a/_extensions/d2/node_modules/dequal/lite/index.min.js b/_extensions/d2/node_modules/dequal/lite/index.min.js new file mode 100644 index 00000000..2eaa55fd --- /dev/null +++ b/_extensions/d2/node_modules/dequal/lite/index.min.js @@ -0,0 +1 @@ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t(e.dequal={})}(this,(function(e){var t=Object.prototype.hasOwnProperty;e.dequal=function e(r,n){var o,i;if(r===n)return!0;if(r&&n&&(o=r.constructor)===n.constructor){if(o===Date)return r.getTime()===n.getTime();if(o===RegExp)return r.toString()===n.toString();if(o===Array){if((i=r.length)===n.length)for(;i--&&e(r[i],n[i]););return-1===i}if(!o||"object"==typeof r){for(o in i=0,r){if(t.call(r,o)&&++i&&!t.call(n,o))return!1;if(!(o in n)||!e(r[o],n[o]))return!1}return Object.keys(n).length===i}}return r!=r&&n!=n}})); \ No newline at end of file diff --git a/_extensions/d2/node_modules/dequal/lite/index.mjs b/_extensions/d2/node_modules/dequal/lite/index.mjs new file mode 100644 index 00000000..5820d674 --- /dev/null +++ b/_extensions/d2/node_modules/dequal/lite/index.mjs @@ -0,0 +1,29 @@ +var has = Object.prototype.hasOwnProperty; + +export function dequal(foo, bar) { + var ctor, len; + if (foo === bar) return true; + + if (foo && bar && (ctor=foo.constructor) === bar.constructor) { + if (ctor === Date) return foo.getTime() === bar.getTime(); + if (ctor === RegExp) return foo.toString() === bar.toString(); + + if (ctor === Array) { + if ((len=foo.length) === bar.length) { + while (len-- && dequal(foo[len], bar[len])); + } + return len === -1; + } + + if (!ctor || typeof foo === 'object') { + len = 0; + for (ctor in foo) { + if (has.call(foo, ctor) && ++len && !has.call(bar, ctor)) return false; + if (!(ctor in bar) || !dequal(foo[ctor], bar[ctor])) return false; + } + return Object.keys(bar).length === len; + } + } + + return foo !== foo && bar !== bar; +} diff --git a/_extensions/d2/node_modules/dequal/package.json b/_extensions/d2/node_modules/dequal/package.json new file mode 100644 index 00000000..df1cb29c --- /dev/null 
+++ b/_extensions/d2/node_modules/dequal/package.json @@ -0,0 +1,57 @@ +{ + "name": "dequal", + "version": "2.0.3", + "repository": "lukeed/dequal", + "description": "A tiny (304B to 489B) utility for check for deep equality", + "unpkg": "dist/index.min.js", + "module": "dist/index.mjs", + "main": "dist/index.js", + "types": "index.d.ts", + "license": "MIT", + "author": { + "name": "Luke Edwards", + "email": "luke.edwards05@gmail.com", + "url": "https://lukeed.com" + }, + "engines": { + "node": ">=6" + }, + "scripts": { + "build": "bundt", + "pretest": "npm run build", + "postbuild": "echo \"lite\" | xargs -n1 cp -v index.d.ts", + "test": "uvu -r esm test" + }, + "files": [ + "*.d.ts", + "dist", + "lite" + ], + "exports": { + ".": { + "types": "./index.d.ts", + "import": "./dist/index.mjs", + "require": "./dist/index.js" + }, + "./lite": { + "types": "./index.d.ts", + "import": "./lite/index.mjs", + "require": "./lite/index.js" + }, + "./package.json": "./package.json" + }, + "modes": { + "lite": "src/lite.js", + "default": "src/index.js" + }, + "keywords": [ + "deep", + "deep-equal", + "equality" + ], + "devDependencies": { + "bundt": "1.0.2", + "esm": "3.2.25", + "uvu": "0.3.2" + } +} diff --git a/_extensions/d2/node_modules/dequal/readme.md b/_extensions/d2/node_modules/dequal/readme.md new file mode 100644 index 00000000..e3341ef4 --- /dev/null +++ b/_extensions/d2/node_modules/dequal/readme.md @@ -0,0 +1,112 @@ +# dequal [![CI](https://github.com/lukeed/dequal/workflows/CI/badge.svg)](https://github.com/lukeed/dequal/actions) + +> A tiny (304B to 489B) utility to check for deep equality + +This module supports comparison of all types, including `Function`, `RegExp`, `Date`, `Set`, `Map`, `TypedArray`s, `DataView`, `null`, `undefined`, and `NaN` values. Complex values (eg, Objects, Arrays, Sets, Maps, etc) are traversed recursively. + +> **Important:** +> * key order **within Objects** does not matter +> * value order **within Arrays** _does_ matter +> * values **within Sets and Maps** use value equality +> * keys **within Maps** use value equality + + +## Install + +``` +$ npm install --save dequal +``` + +## Modes + +There are two "versions" of `dequal` available: + +#### `dequal` +> **Size (gzip):** 489 bytes
+> **Availability:** [CommonJS](https://unpkg.com/dequal/dist/index.js), [ES Module](https://unpkg.com/dequal/dist/index.mjs), [UMD](https://unpkg.com/dequal/dist/index.min.js) + +#### `dequal/lite` +> **Size (gzip):** 304 bytes
+> **Availability:** [CommonJS](https://unpkg.com/dequal/lite/index.js), [ES Module](https://unpkg.com/dequal/lite/index.mjs) + +| | IE9+ | Number | String | Date | RegExp | Object | Array | Class | Set | Map | ArrayBuffer | [TypedArray](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray#TypedArray_objects) | [DataView](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/DataView) | +|-|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:| +| `dequal` | :x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| `dequal/lite` | :+1: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | + +> **Note:** Table scrolls horizontally! + +## Usage + +```js +import { dequal } from 'dequal'; + +dequal(1, 1); //=> true +dequal({}, {}); //=> true +dequal('foo', 'foo'); //=> true +dequal([1, 2, 3], [1, 2, 3]); //=> true +dequal(dequal, dequal); //=> true +dequal(/foo/, /foo/); //=> true +dequal(null, null); //=> true +dequal(NaN, NaN); //=> true +dequal([], []); //=> true +dequal( + [{ a:1 }, [{ b:{ c:[1] } }]], + [{ a:1 }, [{ b:{ c:[1] } }]] +); //=> true + +dequal(1, '1'); //=> false +dequal(null, undefined); //=> false +dequal({ a:1, b:[2,3] }, { a:1, b:[2,5] }); //=> false +dequal(/foo/i, /bar/g); //=> false +``` + +## API + +### dequal(foo, bar) +Returns: `Boolean` + +Both `foo` and `bar` can be of any type.
+A `Boolean` is returned indicating if the two were deeply equal. + + +## Benchmarks + +> Running Node v10.13.0 + +The benchmarks can be found in the [`/bench`](/bench) directory. They are separated into two categories: + +* `basic` – compares an object comprised of `String`, `Number`, `Date`, `Array`, and `Object` values. +* `complex` – like `basic`, but adds `RegExp`, `Map`, `Set`, and `Uint8Array` values. + +> **Note:** Only candidates that pass validation step(s) are listed.
For example, `fast-deep-equal/es6` handles `Set` and `Map` values, but uses _referential equality_ while those listed use _value equality_. + +``` +Load times: + assert 0.109ms + util 0.006ms + fast-deep-equal 0.479ms + lodash/isequal 22.826ms + nano-equal 0.417ms + dequal 0.396ms + dequal/lite 0.264ms + +Benchmark :: basic + assert.deepStrictEqual x 325,262 ops/sec ±0.57% (94 runs sampled) + util.isDeepStrictEqual x 318,812 ops/sec ±0.87% (94 runs sampled) + fast-deep-equal x 1,332,393 ops/sec ±0.36% (93 runs sampled) + lodash.isEqual x 269,129 ops/sec ±0.59% (95 runs sampled) + nano-equal x 1,122,053 ops/sec ±0.36% (96 runs sampled) + dequal/lite x 1,700,972 ops/sec ±0.31% (94 runs sampled) + dequal x 1,698,972 ops/sec ±0.63% (97 runs sampled) + +Benchmark :: complex + assert.deepStrictEqual x 124,518 ops/sec ±0.64% (96 runs sampled) + util.isDeepStrictEqual x 125,113 ops/sec ±0.24% (96 runs sampled) + lodash.isEqual x 58,677 ops/sec ±0.49% (96 runs sampled) + dequal x 345,386 ops/sec ±0.27% (96 runs sampled) +``` + +## License + +MIT © [Luke Edwards](https://lukeed.com) diff --git a/_extensions/d2/node_modules/diff/CONTRIBUTING.md b/_extensions/d2/node_modules/diff/CONTRIBUTING.md new file mode 100644 index 00000000..c8c4fe6c --- /dev/null +++ b/_extensions/d2/node_modules/diff/CONTRIBUTING.md @@ -0,0 +1,39 @@ +# How to Contribute + +## Pull Requests + +We also accept [pull requests][pull-request]! + +Generally we like to see pull requests that + +- Maintain the existing code style +- Are focused on a single change (i.e. avoid large refactoring or style adjustments in untouched code if not the primary goal of the pull request) +- Have [good commit messages](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html) +- Have tests +- Don't decrease the current code coverage (see coverage/lcov-report/index.html) + +## Building + +``` +npm install +npm test +``` + +The `npm test -- dev` implements watching for tests within Node and `karma start` may be used for manual testing in browsers. + +If you notice any problems, please report them to the GitHub issue tracker at +[http://github.com/kpdecker/jsdiff/issues](http://github.com/kpdecker/jsdiff/issues). + +## Releasing + +JsDiff utilizes the [release yeoman generator][generator-release] to perform most release tasks. + +A full release may be completed with the following: + +``` +yo release +npm publish +``` + +[generator-release]: https://github.com/walmartlabs/generator-release +[pull-request]: https://github.com/kpdecker/jsdiff/pull/new/master diff --git a/_extensions/d2/node_modules/diff/LICENSE b/_extensions/d2/node_modules/diff/LICENSE new file mode 100644 index 00000000..4e7146ed --- /dev/null +++ b/_extensions/d2/node_modules/diff/LICENSE @@ -0,0 +1,31 @@ +Software License Agreement (BSD License) + +Copyright (c) 2009-2015, Kevin Decker + +All rights reserved. + +Redistribution and use of this software in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + +* Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. 
+ +* Neither the name of Kevin Decker nor the names of its + contributors may be used to endorse or promote products + derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/_extensions/d2/node_modules/diff/README.md b/_extensions/d2/node_modules/diff/README.md new file mode 100644 index 00000000..ec2ba840 --- /dev/null +++ b/_extensions/d2/node_modules/diff/README.md @@ -0,0 +1,211 @@ +# jsdiff + +[![Build Status](https://secure.travis-ci.org/kpdecker/jsdiff.svg)](http://travis-ci.org/kpdecker/jsdiff) +[![Sauce Test Status](https://saucelabs.com/buildstatus/jsdiff)](https://saucelabs.com/u/jsdiff) + +A javascript text differencing implementation. + +Based on the algorithm proposed in +["An O(ND) Difference Algorithm and its Variations" (Myers, 1986)](http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.4.6927). + +## Installation +```bash +npm install diff --save +``` + +## API + +* `Diff.diffChars(oldStr, newStr[, options])` - diffs two blocks of text, comparing character by character. + + Returns a list of change objects (See below). + + Options + * `ignoreCase`: `true` to ignore casing difference. Defaults to `false`. + +* `Diff.diffWords(oldStr, newStr[, options])` - diffs two blocks of text, comparing word by word, ignoring whitespace. + + Returns a list of change objects (See below). + + Options + * `ignoreCase`: Same as in `diffChars`. + +* `Diff.diffWordsWithSpace(oldStr, newStr[, options])` - diffs two blocks of text, comparing word by word, treating whitespace as significant. + + Returns a list of change objects (See below). + +* `Diff.diffLines(oldStr, newStr[, options])` - diffs two blocks of text, comparing line by line. + + Options + * `ignoreWhitespace`: `true` to ignore leading and trailing whitespace. This is the same as `diffTrimmedLines` + * `newlineIsToken`: `true` to treat newline characters as separate tokens. This allows for changes to the newline structure to occur independently of the line content and to be treated as such. In general this is the more human friendly form of `diffLines` and `diffLines` is better suited for patches and other computer friendly output. + + Returns a list of change objects (See below). + +* `Diff.diffTrimmedLines(oldStr, newStr[, options])` - diffs two blocks of text, comparing line by line, ignoring leading and trailing whitespace. + + Returns a list of change objects (See below). + +* `Diff.diffSentences(oldStr, newStr[, options])` - diffs two blocks of text, comparing sentence by sentence. + + Returns a list of change objects (See below). + +* `Diff.diffCss(oldStr, newStr[, options])` - diffs two blocks of text, comparing CSS tokens. + + Returns a list of change objects (See below). 
+ +* `Diff.diffJson(oldObj, newObj[, options])` - diffs two JSON objects, comparing the fields defined on each. The order of fields, etc does not matter in this comparison. + + Returns a list of change objects (See below). + +* `Diff.diffArrays(oldArr, newArr[, options])` - diffs two arrays, comparing each item for strict equality (===). + + Options + * `comparator`: `function(left, right)` for custom equality checks + + Returns a list of change objects (See below). + +* `Diff.createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader)` - creates a unified diff patch. + + Parameters: + * `oldFileName` : String to be output in the filename section of the patch for the removals + * `newFileName` : String to be output in the filename section of the patch for the additions + * `oldStr` : Original string value + * `newStr` : New string value + * `oldHeader` : Additional information to include in the old file header + * `newHeader` : Additional information to include in the new file header + * `options` : An object with options. + - `context` describes how many lines of context should be included. + - `ignoreWhitespace`: `true` to ignore leading and trailing whitespace. + - `newlineIsToken`: `true` to treat newline characters as separate tokens. This allows for changes to the newline structure to occur independently of the line content and to be treated as such. In general this is the more human friendly form of `diffLines` and `diffLines` is better suited for patches and other computer friendly output. + +* `Diff.createPatch(fileName, oldStr, newStr, oldHeader, newHeader)` - creates a unified diff patch. + + Just like Diff.createTwoFilesPatch, but with oldFileName being equal to newFileName. + + +* `Diff.structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options)` - returns an object with an array of hunk objects. + + This method is similar to createTwoFilesPatch, but returns a data structure + suitable for further processing. Parameters are the same as createTwoFilesPatch. The data structure returned may look like this: + + ```js + { + oldFileName: 'oldfile', newFileName: 'newfile', + oldHeader: 'header1', newHeader: 'header2', + hunks: [{ + oldStart: 1, oldLines: 3, newStart: 1, newLines: 3, + lines: [' line2', ' line3', '-line4', '+line5', '\\ No newline at end of file'], + }] + } + ``` + +* `Diff.applyPatch(source, patch[, options])` - applies a unified diff patch. + + Return a string containing new version of provided data. `patch` may be a string diff or the output from the `parsePatch` or `structuredPatch` methods. + + The optional `options` object may have the following keys: + + - `fuzzFactor`: Number of lines that are allowed to differ before rejecting a patch. Defaults to 0. + - `compareLine(lineNumber, line, operation, patchContent)`: Callback used to compare to given lines to determine if they should be considered equal when patching. Defaults to strict equality but may be overridden to provide fuzzier comparison. Should return false if the lines should be rejected. + +* `Diff.applyPatches(patch, options)` - applies one or more patches. + + This method will iterate over the contents of the patch and apply to data provided through callbacks. The general flow for each patch index is: + + - `options.loadFile(index, callback)` is called. The caller should then load the contents of the file and then pass that to the `callback(err, data)` callback. Passing an `err` will terminate further patch execution. 
+ - `options.patched(index, content, callback)` is called once the patch has been applied. `content` will be the return value from `applyPatch`. When it's ready, the caller should call `callback(err)` callback. Passing an `err` will terminate further patch execution. + + Once all patches have been applied or an error occurs, the `options.complete(err)` callback is made. + +* `Diff.parsePatch(diffStr)` - Parses a patch into structured data + + Return a JSON object representation of the a patch, suitable for use with the `applyPatch` method. This parses to the same structure returned by `Diff.structuredPatch`. + +* `convertChangesToXML(changes)` - converts a list of changes to a serialized XML format + + +All methods above which accept the optional `callback` method will run in sync mode when that parameter is omitted and in async mode when supplied. This allows for larger diffs without blocking the event loop. This may be passed either directly as the final parameter or as the `callback` field in the `options` object. + +### Change Objects +Many of the methods above return change objects. These objects consist of the following fields: + +* `value`: Text content +* `added`: True if the value was inserted into the new string +* `removed`: True if the value was removed from the old string + +Note that some cases may omit a particular flag field. Comparison on the flag fields should always be done in a truthy or falsy manner. + +## Examples + +Basic example in Node + +```js +require('colors'); +const Diff = require('diff'); + +const one = 'beep boop'; +const other = 'beep boob blah'; + +const diff = Diff.diffChars(one, other); + +diff.forEach((part) => { + // green for additions, red for deletions + // grey for common parts + const color = part.added ? 'green' : + part.removed ? 'red' : 'grey'; + process.stderr.write(part.value[color]); +}); + +console.log(); +``` +Running the above program should yield + +Node Example + +Basic example in a web page + +```html +

+<pre id="display"></pre>
+<script src="diff.js"></script>
+<script>
+const diff = Diff.diffChars('beep boop', 'beep boob blah');
+const display = document.getElementById('display');
+
+diff.forEach((part) => {
+  // green for additions, red for deletions, grey for common parts
+  const color = part.added ? 'green' : part.removed ? 'red' : 'grey';
+  const span = document.createElement('span');
+  span.style.color = color;
+  span.appendChild(document.createTextNode(part.value));
+  display.appendChild(span);
+});
+</script>
+```
+
+Open the above .html file in a browser and you should see
+
+*(screenshot: "Node Example")*
+
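+
+The other `diff*` helpers described above return the same change-object shape; for example, a brief sketch with `Diff.diffLines` (inputs illustrative):
+
+```js
+const Diff = require('diff');
+
+const changes = Diff.diffLines('old line\nshared line\n', 'new line\nshared line\n');
+
+changes.forEach((part) => {
+  // Each change object carries `value` plus truthy `added`/`removed` flags.
+  const prefix = part.added ? '+' : part.removed ? '-' : ' ';
+  console.log(prefix + part.value.trimEnd());
+});
+// -old line
+// +new line
+//  shared line
+```
+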
+**[Full online demo](https://kpdecker.github.io/jsdiff)**
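+
+The patch helpers described above (`createPatch`, `parsePatch`, `applyPatch`) compose into a round trip; a minimal Node sketch (file name and strings illustrative):
+
+```js
+const Diff = require('diff');
+
+const oldStr = 'line1\nline2\nline3\n';
+const newStr = 'line1\nline2 changed\nline3\n';
+
+// Create a unified diff (oldFileName === newFileName here).
+const patch = Diff.createPatch('example.txt', oldStr, newStr, 'old header', 'new header');
+
+// Parse it back into the structured form shown for `structuredPatch`.
+const [parsed] = Diff.parsePatch(patch);
+console.log(parsed.hunks[0].lines); // context/removed/added lines prefixed with ' ', '-', '+'
+
+// Apply the patch to the original string to recover the new one.
+console.log(Diff.applyPatch(oldStr, patch) === newStr); // => true
+```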
+
+## Compatibility
+
+[![Sauce Test Status](https://saucelabs.com/browser-matrix/jsdiff.svg)](https://saucelabs.com/u/jsdiff)
+
+jsdiff supports all ES3 environments with some known issues on IE8 and below. Under these browsers some diff algorithms such as word diff and others may fail due to lack of support for capturing groups in the `split` operation.
+
+## License
+
+See [LICENSE](https://github.com/kpdecker/jsdiff/blob/master/LICENSE).
diff --git a/_extensions/d2/node_modules/diff/dist/diff.js b/_extensions/d2/node_modules/diff/dist/diff.js
new file mode 100644
index 00000000..7fa3a556
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/dist/diff.js
@@ -0,0 +1,1627 @@
+/*!
+
+ diff v5.1.0
+
+Software License Agreement (BSD License)
+
+Copyright (c) 2009-2015, Kevin Decker 
+
+All rights reserved.
+
+Redistribution and use of this software in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above
+  copyright notice, this list of conditions and the
+  following disclaimer.
+
+* Redistributions in binary form must reproduce the above
+  copyright notice, this list of conditions and the
+  following disclaimer in the documentation and/or other
+  materials provided with the distribution.
+
+* Neither the name of Kevin Decker nor the names of its
+  contributors may be used to endorse or promote products
+  derived from this software without specific prior
+  written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+@license
+*/
+(function (global, factory) {
+  typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
+  typeof define === 'function' && define.amd ? define(['exports'], factory) :
+  (global = global || self, factory(global.Diff = {}));
+}(this, (function (exports) { 'use strict';
+
+  function Diff() {}
+  Diff.prototype = {
+    diff: function diff(oldString, newString) {
+      var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
+      var callback = options.callback;
+
+      if (typeof options === 'function') {
+        callback = options;
+        options = {};
+      }
+
+      this.options = options;
+      var self = this;
+
+      function done(value) {
+        if (callback) {
+          setTimeout(function () {
+            callback(undefined, value);
+          }, 0);
+          return true;
+        } else {
+          return value;
+        }
+      } // Allow subclasses to massage the input prior to running
+
+
+      oldString = this.castInput(oldString);
+      newString = this.castInput(newString);
+      oldString = this.removeEmpty(this.tokenize(oldString));
+      newString = this.removeEmpty(this.tokenize(newString));
+      var newLen = newString.length,
+          oldLen = oldString.length;
+      var editLength = 1;
+      var maxEditLength = newLen + oldLen;
+
+      if (options.maxEditLength) {
+        maxEditLength = Math.min(maxEditLength, options.maxEditLength);
+      }
+
+      var bestPath = [{
+        newPos: -1,
+        components: []
+      }]; // Seed editLength = 0, i.e. the content starts with the same values
+
+      var oldPos = this.extractCommon(bestPath[0], newString, oldString, 0);
+
+      if (bestPath[0].newPos + 1 >= newLen && oldPos + 1 >= oldLen) {
+        // Identity per the equality and tokenizer
+        return done([{
+          value: this.join(newString),
+          count: newString.length
+        }]);
+      } // Main worker method. checks all permutations of a given edit length for acceptance.
+
+
+      function execEditLength() {
+        for (var diagonalPath = -1 * editLength; diagonalPath <= editLength; diagonalPath += 2) {
+          var basePath = void 0;
+
+          var addPath = bestPath[diagonalPath - 1],
+              removePath = bestPath[diagonalPath + 1],
+              _oldPos = (removePath ? removePath.newPos : 0) - diagonalPath;
+
+          if (addPath) {
+            // No one else is going to attempt to use this value, clear it
+            bestPath[diagonalPath - 1] = undefined;
+          }
+
+          var canAdd = addPath && addPath.newPos + 1 < newLen,
+              canRemove = removePath && 0 <= _oldPos && _oldPos < oldLen;
+
+          if (!canAdd && !canRemove) {
+            // If this path is a terminal then prune
+            bestPath[diagonalPath] = undefined;
+            continue;
+          } // Select the diagonal that we want to branch from. We select the prior
+          // path whose position in the new string is the farthest from the origin
+          // and does not pass the bounds of the diff graph
+
+
+          if (!canAdd || canRemove && addPath.newPos < removePath.newPos) {
+            basePath = clonePath(removePath);
+            self.pushComponent(basePath.components, undefined, true);
+          } else {
+            basePath = addPath; // No need to clone, we've pulled it from the list
+
+            basePath.newPos++;
+            self.pushComponent(basePath.components, true, undefined);
+          }
+
+          _oldPos = self.extractCommon(basePath, newString, oldString, diagonalPath); // If we have hit the end of both strings, then we are done
+
+          if (basePath.newPos + 1 >= newLen && _oldPos + 1 >= oldLen) {
+            return done(buildValues(self, basePath.components, newString, oldString, self.useLongestToken));
+          } else {
+            // Otherwise track this path as a potential candidate and continue.
+            bestPath[diagonalPath] = basePath;
+          }
+        }
+
+        editLength++;
+      } // Performs the length of edit iteration. Is a bit fugly as this has to support the
+      // sync and async mode which is never fun. Loops over execEditLength until a value
+      // is produced, or until the edit length exceeds options.maxEditLength (if given),
+      // in which case it will return undefined.
+
+
+      if (callback) {
+        (function exec() {
+          setTimeout(function () {
+            if (editLength > maxEditLength) {
+              return callback();
+            }
+
+            if (!execEditLength()) {
+              exec();
+            }
+          }, 0);
+        })();
+      } else {
+        while (editLength <= maxEditLength) {
+          var ret = execEditLength();
+
+          if (ret) {
+            return ret;
+          }
+        }
+      }
+    },
+    pushComponent: function pushComponent(components, added, removed) {
+      var last = components[components.length - 1];
+
+      if (last && last.added === added && last.removed === removed) {
+        // We need to clone here as the component clone operation is just
+        // as shallow array clone
+        components[components.length - 1] = {
+          count: last.count + 1,
+          added: added,
+          removed: removed
+        };
+      } else {
+        components.push({
+          count: 1,
+          added: added,
+          removed: removed
+        });
+      }
+    },
+    extractCommon: function extractCommon(basePath, newString, oldString, diagonalPath) {
+      var newLen = newString.length,
+          oldLen = oldString.length,
+          newPos = basePath.newPos,
+          oldPos = newPos - diagonalPath,
+          commonCount = 0;
+
+      while (newPos + 1 < newLen && oldPos + 1 < oldLen && this.equals(newString[newPos + 1], oldString[oldPos + 1])) {
+        newPos++;
+        oldPos++;
+        commonCount++;
+      }
+
+      if (commonCount) {
+        basePath.components.push({
+          count: commonCount
+        });
+      }
+
+      basePath.newPos = newPos;
+      return oldPos;
+    },
+    equals: function equals(left, right) {
+      if (this.options.comparator) {
+        return this.options.comparator(left, right);
+      } else {
+        return left === right || this.options.ignoreCase && left.toLowerCase() === right.toLowerCase();
+      }
+    },
+    removeEmpty: function removeEmpty(array) {
+      var ret = [];
+
+      for (var i = 0; i < array.length; i++) {
+        if (array[i]) {
+          ret.push(array[i]);
+        }
+      }
+
+      return ret;
+    },
+    castInput: function castInput(value) {
+      return value;
+    },
+    tokenize: function tokenize(value) {
+      return value.split('');
+    },
+    join: function join(chars) {
+      return chars.join('');
+    }
+  };
+
+  function buildValues(diff, components, newString, oldString, useLongestToken) {
+    var componentPos = 0,
+        componentLen = components.length,
+        newPos = 0,
+        oldPos = 0;
+
+    for (; componentPos < componentLen; componentPos++) {
+      var component = components[componentPos];
+
+      if (!component.removed) {
+        if (!component.added && useLongestToken) {
+          var value = newString.slice(newPos, newPos + component.count);
+          value = value.map(function (value, i) {
+            var oldValue = oldString[oldPos + i];
+            return oldValue.length > value.length ? oldValue : value;
+          });
+          component.value = diff.join(value);
+        } else {
+          component.value = diff.join(newString.slice(newPos, newPos + component.count));
+        }
+
+        newPos += component.count; // Common case
+
+        if (!component.added) {
+          oldPos += component.count;
+        }
+      } else {
+        component.value = diff.join(oldString.slice(oldPos, oldPos + component.count));
+        oldPos += component.count; // Reverse add and remove so removes are output first to match common convention
+        // The diffing algorithm is tied to add then remove output and this is the simplest
+        // route to get the desired output with minimal overhead.
+
+        if (componentPos && components[componentPos - 1].added) {
+          var tmp = components[componentPos - 1];
+          components[componentPos - 1] = components[componentPos];
+          components[componentPos] = tmp;
+        }
+      }
+    } // Special case handle for when one terminal is ignored (i.e. whitespace).
+    // For this case we merge the terminal into the prior string and drop the change.
+    // This is only available for string mode.
+
+
+    var lastComponent = components[componentLen - 1];
+
+    if (componentLen > 1 && typeof lastComponent.value === 'string' && (lastComponent.added || lastComponent.removed) && diff.equals('', lastComponent.value)) {
+      components[componentLen - 2].value += lastComponent.value;
+      components.pop();
+    }
+
+    return components;
+  }
+
+  function clonePath(path) {
+    return {
+      newPos: path.newPos,
+      components: path.components.slice(0)
+    };
+  }
+
+  var characterDiff = new Diff();
+  function diffChars(oldStr, newStr, options) {
+    return characterDiff.diff(oldStr, newStr, options);
+  }
+
+  function generateOptions(options, defaults) {
+    if (typeof options === 'function') {
+      defaults.callback = options;
+    } else if (options) {
+      for (var name in options) {
+        /* istanbul ignore else */
+        if (options.hasOwnProperty(name)) {
+          defaults[name] = options[name];
+        }
+      }
+    }
+
+    return defaults;
+  }
+
+  //
+  // Ranges and exceptions:
+  // Latin-1 Supplement, 0080–00FF
+  //  - U+00D7  × Multiplication sign
+  //  - U+00F7  ÷ Division sign
+  // Latin Extended-A, 0100–017F
+  // Latin Extended-B, 0180–024F
+  // IPA Extensions, 0250–02AF
+  // Spacing Modifier Letters, 02B0–02FF
+  //  - U+02C7  ˇ ˇ  Caron
+  //  - U+02D8  ˘ ˘  Breve
+  //  - U+02D9  ˙ ˙  Dot Above
+  //  - U+02DA  ˚ ˚  Ring Above
+  //  - U+02DB  ˛ ˛  Ogonek
+  //  - U+02DC  ˜ ˜  Small Tilde
+  //  - U+02DD  ˝ ˝  Double Acute Accent
+  // Latin Extended Additional, 1E00–1EFF
+
+  var extendedWordChars = /^[A-Za-z\xC0-\u02C6\u02C8-\u02D7\u02DE-\u02FF\u1E00-\u1EFF]+$/;
+  var reWhitespace = /\S/;
+  var wordDiff = new Diff();
+
+  wordDiff.equals = function (left, right) {
+    if (this.options.ignoreCase) {
+      left = left.toLowerCase();
+      right = right.toLowerCase();
+    }
+
+    return left === right || this.options.ignoreWhitespace && !reWhitespace.test(left) && !reWhitespace.test(right);
+  };
+
+  wordDiff.tokenize = function (value) {
+    // All whitespace symbols except newline group into one token, each newline - in separate token
+    var tokens = value.split(/([^\S\r\n]+|[()[\]{}'"\r\n]|\b)/); // Join the boundary splits that we do not consider to be boundaries. This is primarily the extended Latin character set.
+
+    for (var i = 0; i < tokens.length - 1; i++) {
+      // If we have an empty string in the next field and we have only word chars before and after, merge
+      if (!tokens[i + 1] && tokens[i + 2] && extendedWordChars.test(tokens[i]) && extendedWordChars.test(tokens[i + 2])) {
+        tokens[i] += tokens[i + 2];
+        tokens.splice(i + 1, 2);
+        i--;
+      }
+    }
+
+    return tokens;
+  };
+
+  function diffWords(oldStr, newStr, options) {
+    options = generateOptions(options, {
+      ignoreWhitespace: true
+    });
+    return wordDiff.diff(oldStr, newStr, options);
+  }
+  function diffWordsWithSpace(oldStr, newStr, options) {
+    return wordDiff.diff(oldStr, newStr, options);
+  }
+
+  var lineDiff = new Diff();
+
+  lineDiff.tokenize = function (value) {
+    var retLines = [],
+        linesAndNewlines = value.split(/(\n|\r\n)/); // Ignore the final empty token that occurs if the string ends with a new line
+
+    if (!linesAndNewlines[linesAndNewlines.length - 1]) {
+      linesAndNewlines.pop();
+    } // Merge the content and line separators into single tokens
+
+
+    for (var i = 0; i < linesAndNewlines.length; i++) {
+      var line = linesAndNewlines[i];
+
+      if (i % 2 && !this.options.newlineIsToken) {
+        retLines[retLines.length - 1] += line;
+      } else {
+        if (this.options.ignoreWhitespace) {
+          line = line.trim();
+        }
+
+        retLines.push(line);
+      }
+    }
+
+    return retLines;
+  };
+
+  function diffLines(oldStr, newStr, callback) {
+    return lineDiff.diff(oldStr, newStr, callback);
+  }
+  function diffTrimmedLines(oldStr, newStr, callback) {
+    var options = generateOptions(callback, {
+      ignoreWhitespace: true
+    });
+    return lineDiff.diff(oldStr, newStr, options);
+  }
+
+  var sentenceDiff = new Diff();
+
+  sentenceDiff.tokenize = function (value) {
+    return value.split(/(\S.+?[.!?])(?=\s+|$)/);
+  };
+
+  function diffSentences(oldStr, newStr, callback) {
+    return sentenceDiff.diff(oldStr, newStr, callback);
+  }
+
+  var cssDiff = new Diff();
+
+  cssDiff.tokenize = function (value) {
+    return value.split(/([{}:;,]|\s+)/);
+  };
+
+  function diffCss(oldStr, newStr, callback) {
+    return cssDiff.diff(oldStr, newStr, callback);
+  }
+
+  function _typeof(obj) {
+    "@babel/helpers - typeof";
+
+    if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") {
+      _typeof = function (obj) {
+        return typeof obj;
+      };
+    } else {
+      _typeof = function (obj) {
+        return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj;
+      };
+    }
+
+    return _typeof(obj);
+  }
+
+  function _toConsumableArray(arr) {
+    return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread();
+  }
+
+  function _arrayWithoutHoles(arr) {
+    if (Array.isArray(arr)) return _arrayLikeToArray(arr);
+  }
+
+  function _iterableToArray(iter) {
+    if (typeof Symbol !== "undefined" && Symbol.iterator in Object(iter)) return Array.from(iter);
+  }
+
+  function _unsupportedIterableToArray(o, minLen) {
+    if (!o) return;
+    if (typeof o === "string") return _arrayLikeToArray(o, minLen);
+    var n = Object.prototype.toString.call(o).slice(8, -1);
+    if (n === "Object" && o.constructor) n = o.constructor.name;
+    if (n === "Map" || n === "Set") return Array.from(o);
+    if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen);
+  }
+
+  function _arrayLikeToArray(arr, len) {
+    if (len == null || len > arr.length) len = arr.length;
+
+    for (var i = 0, arr2 = new Array(len); i < len; i++) arr2[i] = arr[i];
+
+    return arr2;
+  }
+
+  function _nonIterableSpread() {
+    throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
+  }
+
+  var objectPrototypeToString = Object.prototype.toString;
+  var jsonDiff = new Diff(); // Discriminate between two lines of pretty-printed, serialized JSON where one of them has a
+  // dangling comma and the other doesn't. Turns out including the dangling comma yields the nicest output:
+
+  jsonDiff.useLongestToken = true;
+  jsonDiff.tokenize = lineDiff.tokenize;
+
+  jsonDiff.castInput = function (value) {
+    var _this$options = this.options,
+        undefinedReplacement = _this$options.undefinedReplacement,
+        _this$options$stringi = _this$options.stringifyReplacer,
+        stringifyReplacer = _this$options$stringi === void 0 ? function (k, v) {
+      return typeof v === 'undefined' ? undefinedReplacement : v;
+    } : _this$options$stringi;
+    return typeof value === 'string' ? value : JSON.stringify(canonicalize(value, null, null, stringifyReplacer), stringifyReplacer, '  ');
+  };
+
+  jsonDiff.equals = function (left, right) {
+    return Diff.prototype.equals.call(jsonDiff, left.replace(/,([\r\n])/g, '$1'), right.replace(/,([\r\n])/g, '$1'));
+  };
+
+  function diffJson(oldObj, newObj, options) {
+    return jsonDiff.diff(oldObj, newObj, options);
+  } // This function handles the presence of circular references by bailing out when encountering an
+  // object that is already on the "stack" of items being processed. Accepts an optional replacer
+
+  function canonicalize(obj, stack, replacementStack, replacer, key) {
+    stack = stack || [];
+    replacementStack = replacementStack || [];
+
+    if (replacer) {
+      obj = replacer(key, obj);
+    }
+
+    var i;
+
+    for (i = 0; i < stack.length; i += 1) {
+      if (stack[i] === obj) {
+        return replacementStack[i];
+      }
+    }
+
+    var canonicalizedObj;
+
+    if ('[object Array]' === objectPrototypeToString.call(obj)) {
+      stack.push(obj);
+      canonicalizedObj = new Array(obj.length);
+      replacementStack.push(canonicalizedObj);
+
+      for (i = 0; i < obj.length; i += 1) {
+        canonicalizedObj[i] = canonicalize(obj[i], stack, replacementStack, replacer, key);
+      }
+
+      stack.pop();
+      replacementStack.pop();
+      return canonicalizedObj;
+    }
+
+    if (obj && obj.toJSON) {
+      obj = obj.toJSON();
+    }
+
+    if (_typeof(obj) === 'object' && obj !== null) {
+      stack.push(obj);
+      canonicalizedObj = {};
+      replacementStack.push(canonicalizedObj);
+
+      var sortedKeys = [],
+          _key;
+
+      for (_key in obj) {
+        /* istanbul ignore else */
+        if (obj.hasOwnProperty(_key)) {
+          sortedKeys.push(_key);
+        }
+      }
+
+      sortedKeys.sort();
+
+      for (i = 0; i < sortedKeys.length; i += 1) {
+        _key = sortedKeys[i];
+        canonicalizedObj[_key] = canonicalize(obj[_key], stack, replacementStack, replacer, _key);
+      }
+
+      stack.pop();
+      replacementStack.pop();
+    } else {
+      canonicalizedObj = obj;
+    }
+
+    return canonicalizedObj;
+  }
+
+  var arrayDiff = new Diff();
+
+  arrayDiff.tokenize = function (value) {
+    return value.slice();
+  };
+
+  arrayDiff.join = arrayDiff.removeEmpty = function (value) {
+    return value;
+  };
+
+  function diffArrays(oldArr, newArr, callback) {
+    return arrayDiff.diff(oldArr, newArr, callback);
+  }
+
+  function parsePatch(uniDiff) {
+    var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
+    var diffstr = uniDiff.split(/\r\n|[\n\v\f\r\x85]/),
+        delimiters = uniDiff.match(/\r\n|[\n\v\f\r\x85]/g) || [],
+        list = [],
+        i = 0;
+
+    function parseIndex() {
+      var index = {};
+      list.push(index); // Parse diff metadata
+
+      while (i < diffstr.length) {
+        var line = diffstr[i]; // File header found, end parsing diff metadata
+
+        if (/^(\-\-\-|\+\+\+|@@)\s/.test(line)) {
+          break;
+        } // Diff index
+
+
+        var header = /^(?:Index:|diff(?: -r \w+)+)\s+(.+?)\s*$/.exec(line);
+
+        if (header) {
+          index.index = header[1];
+        }
+
+        i++;
+      } // Parse file headers if they are defined. Unified diff requires them, but
+      // there's no technical issues to have an isolated hunk without file header
+
+
+      parseFileHeader(index);
+      parseFileHeader(index); // Parse hunks
+
+      index.hunks = [];
+
+      while (i < diffstr.length) {
+        var _line = diffstr[i];
+
+        if (/^(Index:|diff|\-\-\-|\+\+\+)\s/.test(_line)) {
+          break;
+        } else if (/^@@/.test(_line)) {
+          index.hunks.push(parseHunk());
+        } else if (_line && options.strict) {
+          // Ignore unexpected content unless in strict mode
+          throw new Error('Unknown line ' + (i + 1) + ' ' + JSON.stringify(_line));
+        } else {
+          i++;
+        }
+      }
+    } // Parses the --- and +++ headers, if none are found, no lines
+    // are consumed.
+
+
+    function parseFileHeader(index) {
+      var fileHeader = /^(---|\+\+\+)\s+(.*)$/.exec(diffstr[i]);
+
+      if (fileHeader) {
+        var keyPrefix = fileHeader[1] === '---' ? 'old' : 'new';
+        var data = fileHeader[2].split('\t', 2);
+        var fileName = data[0].replace(/\\\\/g, '\\');
+
+        if (/^".*"$/.test(fileName)) {
+          fileName = fileName.substr(1, fileName.length - 2);
+        }
+
+        index[keyPrefix + 'FileName'] = fileName;
+        index[keyPrefix + 'Header'] = (data[1] || '').trim();
+        i++;
+      }
+    } // Parses a hunk
+    // This assumes that we are at the start of a hunk.
+
+
+    function parseHunk() {
+      var chunkHeaderIndex = i,
+          chunkHeaderLine = diffstr[i++],
+          chunkHeader = chunkHeaderLine.split(/@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/);
+      var hunk = {
+        oldStart: +chunkHeader[1],
+        oldLines: typeof chunkHeader[2] === 'undefined' ? 1 : +chunkHeader[2],
+        newStart: +chunkHeader[3],
+        newLines: typeof chunkHeader[4] === 'undefined' ? 1 : +chunkHeader[4],
+        lines: [],
+        linedelimiters: []
+      }; // Unified Diff Format quirk: If the chunk size is 0,
+      // the first number is one lower than one would expect.
+      // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
+
+      if (hunk.oldLines === 0) {
+        hunk.oldStart += 1;
+      }
+
+      if (hunk.newLines === 0) {
+        hunk.newStart += 1;
+      }
+
+      var addCount = 0,
+          removeCount = 0;
+
+      for (; i < diffstr.length; i++) {
+        // Lines starting with '---' could be mistaken for the "remove line" operation
+        // But they could be the header for the next file. Therefore prune such cases out.
+        if (diffstr[i].indexOf('--- ') === 0 && i + 2 < diffstr.length && diffstr[i + 1].indexOf('+++ ') === 0 && diffstr[i + 2].indexOf('@@') === 0) {
+          break;
+        }
+
+        var operation = diffstr[i].length == 0 && i != diffstr.length - 1 ? ' ' : diffstr[i][0];
+
+        if (operation === '+' || operation === '-' || operation === ' ' || operation === '\\') {
+          hunk.lines.push(diffstr[i]);
+          hunk.linedelimiters.push(delimiters[i] || '\n');
+
+          if (operation === '+') {
+            addCount++;
+          } else if (operation === '-') {
+            removeCount++;
+          } else if (operation === ' ') {
+            addCount++;
+            removeCount++;
+          }
+        } else {
+          break;
+        }
+      } // Handle the empty block count case
+
+
+      if (!addCount && hunk.newLines === 1) {
+        hunk.newLines = 0;
+      }
+
+      if (!removeCount && hunk.oldLines === 1) {
+        hunk.oldLines = 0;
+      } // Perform optional sanity checking
+
+
+      if (options.strict) {
+        if (addCount !== hunk.newLines) {
+          throw new Error('Added line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
+        }
+
+        if (removeCount !== hunk.oldLines) {
+          throw new Error('Removed line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
+        }
+      }
+
+      return hunk;
+    }
+
+    while (i < diffstr.length) {
+      parseIndex();
+    }
+
+    return list;
+  }
+
+  // Iterator that traverses in the range of [min, max], stepping
+  // by distance from a given start position. I.e. for [0, 4], with
+  // start of 2, this will iterate 2, 3, 1, 4, 0.
+  function distanceIterator (start, minLine, maxLine) {
+    var wantForward = true,
+        backwardExhausted = false,
+        forwardExhausted = false,
+        localOffset = 1;
+    return function iterator() {
+      if (wantForward && !forwardExhausted) {
+        if (backwardExhausted) {
+          localOffset++;
+        } else {
+          wantForward = false;
+        } // Check if trying to fit beyond text length, and if not, check it fits
+        // after offset location (or desired location on first iteration)
+
+
+        if (start + localOffset <= maxLine) {
+          return localOffset;
+        }
+
+        forwardExhausted = true;
+      }
+
+      if (!backwardExhausted) {
+        if (!forwardExhausted) {
+          wantForward = true;
+        } // Check if trying to fit before text beginning, and if not, check it fits
+        // before offset location
+
+
+        if (minLine <= start - localOffset) {
+          return -localOffset++;
+        }
+
+        backwardExhausted = true;
+        return iterator();
+      } // We tried to fit hunk before text beginning and beyond text length, then
+      // hunk can't fit on the text. Return undefined
+
+    };
+  }
+
+  function applyPatch(source, uniDiff) {
+    var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
+
+    if (typeof uniDiff === 'string') {
+      uniDiff = parsePatch(uniDiff);
+    }
+
+    if (Array.isArray(uniDiff)) {
+      if (uniDiff.length > 1) {
+        throw new Error('applyPatch only works with a single input.');
+      }
+
+      uniDiff = uniDiff[0];
+    } // Apply the diff to the input
+
+
+    var lines = source.split(/\r\n|[\n\v\f\r\x85]/),
+        delimiters = source.match(/\r\n|[\n\v\f\r\x85]/g) || [],
+        hunks = uniDiff.hunks,
+        compareLine = options.compareLine || function (lineNumber, line, operation, patchContent) {
+      return line === patchContent;
+    },
+        errorCount = 0,
+        fuzzFactor = options.fuzzFactor || 0,
+        minLine = 0,
+        offset = 0,
+        removeEOFNL,
+        addEOFNL;
+    /**
+     * Checks if the hunk exactly fits on the provided location
+     */
+
+
+    function hunkFits(hunk, toPos) {
+      for (var j = 0; j < hunk.lines.length; j++) {
+        var line = hunk.lines[j],
+            operation = line.length > 0 ? line[0] : ' ',
+            content = line.length > 0 ? line.substr(1) : line;
+
+        if (operation === ' ' || operation === '-') {
+          // Context sanity check
+          if (!compareLine(toPos + 1, lines[toPos], operation, content)) {
+            errorCount++;
+
+            if (errorCount > fuzzFactor) {
+              return false;
+            }
+          }
+
+          toPos++;
+        }
+      }
+
+      return true;
+    } // Search best fit offsets for each hunk based on the previous ones
+
+
+    for (var i = 0; i < hunks.length; i++) {
+      var hunk = hunks[i],
+          maxLine = lines.length - hunk.oldLines,
+          localOffset = 0,
+          toPos = offset + hunk.oldStart - 1;
+      var iterator = distanceIterator(toPos, minLine, maxLine);
+
+      for (; localOffset !== undefined; localOffset = iterator()) {
+        if (hunkFits(hunk, toPos + localOffset)) {
+          hunk.offset = offset += localOffset;
+          break;
+        }
+      }
+
+      if (localOffset === undefined) {
+        return false;
+      } // Set lower text limit to end of the current hunk, so next ones don't try
+      // to fit over already patched text
+
+
+      minLine = hunk.offset + hunk.oldStart + hunk.oldLines;
+    } // Apply patch hunks
+
+
+    var diffOffset = 0;
+
+    for (var _i = 0; _i < hunks.length; _i++) {
+      var _hunk = hunks[_i],
+          _toPos = _hunk.oldStart + _hunk.offset + diffOffset - 1;
+
+      diffOffset += _hunk.newLines - _hunk.oldLines;
+
+      for (var j = 0; j < _hunk.lines.length; j++) {
+        var line = _hunk.lines[j],
+            operation = line.length > 0 ? line[0] : ' ',
+            content = line.length > 0 ? line.substr(1) : line,
+            delimiter = _hunk.linedelimiters[j];
+
+        if (operation === ' ') {
+          _toPos++;
+        } else if (operation === '-') {
+          lines.splice(_toPos, 1);
+          delimiters.splice(_toPos, 1);
+          /* istanbul ignore else */
+        } else if (operation === '+') {
+          lines.splice(_toPos, 0, content);
+          delimiters.splice(_toPos, 0, delimiter);
+          _toPos++;
+        } else if (operation === '\\') {
+          var previousOperation = _hunk.lines[j - 1] ? _hunk.lines[j - 1][0] : null;
+
+          if (previousOperation === '+') {
+            removeEOFNL = true;
+          } else if (previousOperation === '-') {
+            addEOFNL = true;
+          }
+        }
+      }
+    } // Handle EOFNL insertion/removal
+
+
+    if (removeEOFNL) {
+      while (!lines[lines.length - 1]) {
+        lines.pop();
+        delimiters.pop();
+      }
+    } else if (addEOFNL) {
+      lines.push('');
+      delimiters.push('\n');
+    }
+
+    for (var _k = 0; _k < lines.length - 1; _k++) {
+      lines[_k] = lines[_k] + delimiters[_k];
+    }
+
+    return lines.join('');
+  } // Wrapper that supports multiple file patches via callbacks.
+
+  function applyPatches(uniDiff, options) {
+    if (typeof uniDiff === 'string') {
+      uniDiff = parsePatch(uniDiff);
+    }
+
+    var currentIndex = 0;
+
+    function processIndex() {
+      var index = uniDiff[currentIndex++];
+
+      if (!index) {
+        return options.complete();
+      }
+
+      options.loadFile(index, function (err, data) {
+        if (err) {
+          return options.complete(err);
+        }
+
+        var updatedContent = applyPatch(data, index, options);
+        options.patched(index, updatedContent, function (err) {
+          if (err) {
+            return options.complete(err);
+          }
+
+          processIndex();
+        });
+      });
+    }
+
+    processIndex();
+  }
+
+  function structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
+    if (!options) {
+      options = {};
+    }
+
+    if (typeof options.context === 'undefined') {
+      options.context = 4;
+    }
+
+    var diff = diffLines(oldStr, newStr, options);
+
+    if (!diff) {
+      return;
+    }
+
+    diff.push({
+      value: '',
+      lines: []
+    }); // Append an empty value to make cleanup easier
+
+    function contextLines(lines) {
+      return lines.map(function (entry) {
+        return ' ' + entry;
+      });
+    }
+
+    var hunks = [];
+    var oldRangeStart = 0,
+        newRangeStart = 0,
+        curRange = [],
+        oldLine = 1,
+        newLine = 1;
+
+    var _loop = function _loop(i) {
+      var current = diff[i],
+          lines = current.lines || current.value.replace(/\n$/, '').split('\n');
+      current.lines = lines;
+
+      if (current.added || current.removed) {
+        var _curRange;
+
+        // If we have previous context, start with that
+        if (!oldRangeStart) {
+          var prev = diff[i - 1];
+          oldRangeStart = oldLine;
+          newRangeStart = newLine;
+
+          if (prev) {
+            curRange = options.context > 0 ? contextLines(prev.lines.slice(-options.context)) : [];
+            oldRangeStart -= curRange.length;
+            newRangeStart -= curRange.length;
+          }
+        } // Output our changes
+
+
+        (_curRange = curRange).push.apply(_curRange, _toConsumableArray(lines.map(function (entry) {
+          return (current.added ? '+' : '-') + entry;
+        }))); // Track the updated file position
+
+
+        if (current.added) {
+          newLine += lines.length;
+        } else {
+          oldLine += lines.length;
+        }
+      } else {
+        // Identical context lines. Track line changes
+        if (oldRangeStart) {
+          // Close out any changes that have been output (or join overlapping)
+          if (lines.length <= options.context * 2 && i < diff.length - 2) {
+            var _curRange2;
+
+            // Overlapping
+            (_curRange2 = curRange).push.apply(_curRange2, _toConsumableArray(contextLines(lines)));
+          } else {
+            var _curRange3;
+
+            // end the range and output
+            var contextSize = Math.min(lines.length, options.context);
+
+            (_curRange3 = curRange).push.apply(_curRange3, _toConsumableArray(contextLines(lines.slice(0, contextSize))));
+
+            var hunk = {
+              oldStart: oldRangeStart,
+              oldLines: oldLine - oldRangeStart + contextSize,
+              newStart: newRangeStart,
+              newLines: newLine - newRangeStart + contextSize,
+              lines: curRange
+            };
+
+            if (i >= diff.length - 2 && lines.length <= options.context) {
+              // EOF is inside this hunk
+              var oldEOFNewline = /\n$/.test(oldStr);
+              var newEOFNewline = /\n$/.test(newStr);
+              var noNlBeforeAdds = lines.length == 0 && curRange.length > hunk.oldLines;
+
+              if (!oldEOFNewline && noNlBeforeAdds && oldStr.length > 0) {
+                // special case: old has no eol and no trailing context; no-nl can end up before adds
+                // however, if the old file is empty, do not output the no-nl line
+                curRange.splice(hunk.oldLines, 0, '\\ No newline at end of file');
+              }
+
+              if (!oldEOFNewline && !noNlBeforeAdds || !newEOFNewline) {
+                curRange.push('\\ No newline at end of file');
+              }
+            }
+
+            hunks.push(hunk);
+            oldRangeStart = 0;
+            newRangeStart = 0;
+            curRange = [];
+          }
+        }
+
+        oldLine += lines.length;
+        newLine += lines.length;
+      }
+    };
+
+    for (var i = 0; i < diff.length; i++) {
+      _loop(i);
+    }
+
+    return {
+      oldFileName: oldFileName,
+      newFileName: newFileName,
+      oldHeader: oldHeader,
+      newHeader: newHeader,
+      hunks: hunks
+    };
+  }
+  function formatPatch(diff) {
+    var ret = [];
+
+    if (diff.oldFileName == diff.newFileName) {
+      ret.push('Index: ' + diff.oldFileName);
+    }
+
+    ret.push('===================================================================');
+    ret.push('--- ' + diff.oldFileName + (typeof diff.oldHeader === 'undefined' ? '' : '\t' + diff.oldHeader));
+    ret.push('+++ ' + diff.newFileName + (typeof diff.newHeader === 'undefined' ? '' : '\t' + diff.newHeader));
+
+    for (var i = 0; i < diff.hunks.length; i++) {
+      var hunk = diff.hunks[i]; // Unified Diff Format quirk: If the chunk size is 0,
+      // the first number is one lower than one would expect.
+      // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
+
+      if (hunk.oldLines === 0) {
+        hunk.oldStart -= 1;
+      }
+
+      if (hunk.newLines === 0) {
+        hunk.newStart -= 1;
+      }
+
+      ret.push('@@ -' + hunk.oldStart + ',' + hunk.oldLines + ' +' + hunk.newStart + ',' + hunk.newLines + ' @@');
+      ret.push.apply(ret, hunk.lines);
+    }
+
+    return ret.join('\n') + '\n';
+  }
+  function createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
+    return formatPatch(structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options));
+  }
+  function createPatch(fileName, oldStr, newStr, oldHeader, newHeader, options) {
+    return createTwoFilesPatch(fileName, fileName, oldStr, newStr, oldHeader, newHeader, options);
+  }
+
+  function arrayEqual(a, b) {
+    if (a.length !== b.length) {
+      return false;
+    }
+
+    return arrayStartsWith(a, b);
+  }
+  function arrayStartsWith(array, start) {
+    if (start.length > array.length) {
+      return false;
+    }
+
+    for (var i = 0; i < start.length; i++) {
+      if (start[i] !== array[i]) {
+        return false;
+      }
+    }
+
+    return true;
+  }
+
+  function calcLineCount(hunk) {
+    var _calcOldNewLineCount = calcOldNewLineCount(hunk.lines),
+        oldLines = _calcOldNewLineCount.oldLines,
+        newLines = _calcOldNewLineCount.newLines;
+
+    if (oldLines !== undefined) {
+      hunk.oldLines = oldLines;
+    } else {
+      delete hunk.oldLines;
+    }
+
+    if (newLines !== undefined) {
+      hunk.newLines = newLines;
+    } else {
+      delete hunk.newLines;
+    }
+  }
+  function merge(mine, theirs, base) {
+    mine = loadPatch(mine, base);
+    theirs = loadPatch(theirs, base);
+    var ret = {}; // For index we just let it pass through as it doesn't have any necessary meaning.
+    // Leaving sanity checks on this to the API consumer that may know more about the
+    // meaning in their own context.
+
+    if (mine.index || theirs.index) {
+      ret.index = mine.index || theirs.index;
+    }
+
+    if (mine.newFileName || theirs.newFileName) {
+      if (!fileNameChanged(mine)) {
+        // No header or no change in ours, use theirs (and ours if theirs does not exist)
+        ret.oldFileName = theirs.oldFileName || mine.oldFileName;
+        ret.newFileName = theirs.newFileName || mine.newFileName;
+        ret.oldHeader = theirs.oldHeader || mine.oldHeader;
+        ret.newHeader = theirs.newHeader || mine.newHeader;
+      } else if (!fileNameChanged(theirs)) {
+        // No header or no change in theirs, use ours
+        ret.oldFileName = mine.oldFileName;
+        ret.newFileName = mine.newFileName;
+        ret.oldHeader = mine.oldHeader;
+        ret.newHeader = mine.newHeader;
+      } else {
+        // Both changed... figure it out
+        ret.oldFileName = selectField(ret, mine.oldFileName, theirs.oldFileName);
+        ret.newFileName = selectField(ret, mine.newFileName, theirs.newFileName);
+        ret.oldHeader = selectField(ret, mine.oldHeader, theirs.oldHeader);
+        ret.newHeader = selectField(ret, mine.newHeader, theirs.newHeader);
+      }
+    }
+
+    ret.hunks = [];
+    var mineIndex = 0,
+        theirsIndex = 0,
+        mineOffset = 0,
+        theirsOffset = 0;
+
+    while (mineIndex < mine.hunks.length || theirsIndex < theirs.hunks.length) {
+      var mineCurrent = mine.hunks[mineIndex] || {
+        oldStart: Infinity
+      },
+          theirsCurrent = theirs.hunks[theirsIndex] || {
+        oldStart: Infinity
+      };
+
+      if (hunkBefore(mineCurrent, theirsCurrent)) {
+        // This patch does not overlap with any of the others, yay.
+        ret.hunks.push(cloneHunk(mineCurrent, mineOffset));
+        mineIndex++;
+        theirsOffset += mineCurrent.newLines - mineCurrent.oldLines;
+      } else if (hunkBefore(theirsCurrent, mineCurrent)) {
+        // This patch does not overlap with any of the others, yay.
+        ret.hunks.push(cloneHunk(theirsCurrent, theirsOffset));
+        theirsIndex++;
+        mineOffset += theirsCurrent.newLines - theirsCurrent.oldLines;
+      } else {
+        // Overlap, merge as best we can
+        var mergedHunk = {
+          oldStart: Math.min(mineCurrent.oldStart, theirsCurrent.oldStart),
+          oldLines: 0,
+          newStart: Math.min(mineCurrent.newStart + mineOffset, theirsCurrent.oldStart + theirsOffset),
+          newLines: 0,
+          lines: []
+        };
+        mergeLines(mergedHunk, mineCurrent.oldStart, mineCurrent.lines, theirsCurrent.oldStart, theirsCurrent.lines);
+        theirsIndex++;
+        mineIndex++;
+        ret.hunks.push(mergedHunk);
+      }
+    }
+
+    return ret;
+  }
+
+  function loadPatch(param, base) {
+    if (typeof param === 'string') {
+      if (/^@@/m.test(param) || /^Index:/m.test(param)) {
+        return parsePatch(param)[0];
+      }
+
+      if (!base) {
+        throw new Error('Must provide a base reference or pass in a patch');
+      }
+
+      return structuredPatch(undefined, undefined, base, param);
+    }
+
+    return param;
+  }
+
+  function fileNameChanged(patch) {
+    return patch.newFileName && patch.newFileName !== patch.oldFileName;
+  }
+
+  function selectField(index, mine, theirs) {
+    if (mine === theirs) {
+      return mine;
+    } else {
+      index.conflict = true;
+      return {
+        mine: mine,
+        theirs: theirs
+      };
+    }
+  }
+
+  function hunkBefore(test, check) {
+    return test.oldStart < check.oldStart && test.oldStart + test.oldLines < check.oldStart;
+  }
+
+  function cloneHunk(hunk, offset) {
+    return {
+      oldStart: hunk.oldStart,
+      oldLines: hunk.oldLines,
+      newStart: hunk.newStart + offset,
+      newLines: hunk.newLines,
+      lines: hunk.lines
+    };
+  }
+
+  function mergeLines(hunk, mineOffset, mineLines, theirOffset, theirLines) {
+    // This will generally result in a conflicted hunk, but there are cases where the context
+    // is the only overlap where we can successfully merge the content here.
+    var mine = {
+      offset: mineOffset,
+      lines: mineLines,
+      index: 0
+    },
+        their = {
+      offset: theirOffset,
+      lines: theirLines,
+      index: 0
+    }; // Handle any leading content
+
+    insertLeading(hunk, mine, their);
+    insertLeading(hunk, their, mine); // Now in the overlap content. Scan through and select the best changes from each.
+
+    while (mine.index < mine.lines.length && their.index < their.lines.length) {
+      var mineCurrent = mine.lines[mine.index],
+          theirCurrent = their.lines[their.index];
+
+      if ((mineCurrent[0] === '-' || mineCurrent[0] === '+') && (theirCurrent[0] === '-' || theirCurrent[0] === '+')) {
+        // Both modified ...
+        mutualChange(hunk, mine, their);
+      } else if (mineCurrent[0] === '+' && theirCurrent[0] === ' ') {
+        var _hunk$lines;
+
+        // Mine inserted
+        (_hunk$lines = hunk.lines).push.apply(_hunk$lines, _toConsumableArray(collectChange(mine)));
+      } else if (theirCurrent[0] === '+' && mineCurrent[0] === ' ') {
+        var _hunk$lines2;
+
+        // Theirs inserted
+        (_hunk$lines2 = hunk.lines).push.apply(_hunk$lines2, _toConsumableArray(collectChange(their)));
+      } else if (mineCurrent[0] === '-' && theirCurrent[0] === ' ') {
+        // Mine removed or edited
+        removal(hunk, mine, their);
+      } else if (theirCurrent[0] === '-' && mineCurrent[0] === ' ') {
+        // Their removed or edited
+        removal(hunk, their, mine, true);
+      } else if (mineCurrent === theirCurrent) {
+        // Context identity
+        hunk.lines.push(mineCurrent);
+        mine.index++;
+        their.index++;
+      } else {
+        // Context mismatch
+        conflict(hunk, collectChange(mine), collectChange(their));
+      }
+    } // Now push anything that may be remaining
+
+
+    insertTrailing(hunk, mine);
+    insertTrailing(hunk, their);
+    calcLineCount(hunk);
+  }
+
+  function mutualChange(hunk, mine, their) {
+    var myChanges = collectChange(mine),
+        theirChanges = collectChange(their);
+
+    if (allRemoves(myChanges) && allRemoves(theirChanges)) {
+      // Special case for remove changes that are supersets of one another
+      if (arrayStartsWith(myChanges, theirChanges) && skipRemoveSuperset(their, myChanges, myChanges.length - theirChanges.length)) {
+        var _hunk$lines3;
+
+        (_hunk$lines3 = hunk.lines).push.apply(_hunk$lines3, _toConsumableArray(myChanges));
+
+        return;
+      } else if (arrayStartsWith(theirChanges, myChanges) && skipRemoveSuperset(mine, theirChanges, theirChanges.length - myChanges.length)) {
+        var _hunk$lines4;
+
+        (_hunk$lines4 = hunk.lines).push.apply(_hunk$lines4, _toConsumableArray(theirChanges));
+
+        return;
+      }
+    } else if (arrayEqual(myChanges, theirChanges)) {
+      var _hunk$lines5;
+
+      (_hunk$lines5 = hunk.lines).push.apply(_hunk$lines5, _toConsumableArray(myChanges));
+
+      return;
+    }
+
+    conflict(hunk, myChanges, theirChanges);
+  }
+
+  function removal(hunk, mine, their, swap) {
+    var myChanges = collectChange(mine),
+        theirChanges = collectContext(their, myChanges);
+
+    if (theirChanges.merged) {
+      var _hunk$lines6;
+
+      (_hunk$lines6 = hunk.lines).push.apply(_hunk$lines6, _toConsumableArray(theirChanges.merged));
+    } else {
+      conflict(hunk, swap ? theirChanges : myChanges, swap ? myChanges : theirChanges);
+    }
+  }
+
+  function conflict(hunk, mine, their) {
+    hunk.conflict = true;
+    hunk.lines.push({
+      conflict: true,
+      mine: mine,
+      theirs: their
+    });
+  }
+
+  function insertLeading(hunk, insert, their) {
+    while (insert.offset < their.offset && insert.index < insert.lines.length) {
+      var line = insert.lines[insert.index++];
+      hunk.lines.push(line);
+      insert.offset++;
+    }
+  }
+
+  function insertTrailing(hunk, insert) {
+    while (insert.index < insert.lines.length) {
+      var line = insert.lines[insert.index++];
+      hunk.lines.push(line);
+    }
+  }
+
+  function collectChange(state) {
+    var ret = [],
+        operation = state.lines[state.index][0];
+
+    while (state.index < state.lines.length) {
+      var line = state.lines[state.index]; // Group additions that are immediately after subtractions and treat them as one "atomic" modify change.
+
+      if (operation === '-' && line[0] === '+') {
+        operation = '+';
+      }
+
+      if (operation === line[0]) {
+        ret.push(line);
+        state.index++;
+      } else {
+        break;
+      }
+    }
+
+    return ret;
+  }
+
+  function collectContext(state, matchChanges) {
+    var changes = [],
+        merged = [],
+        matchIndex = 0,
+        contextChanges = false,
+        conflicted = false;
+
+    while (matchIndex < matchChanges.length && state.index < state.lines.length) {
+      var change = state.lines[state.index],
+          match = matchChanges[matchIndex]; // Once we've hit our add, then we are done
+
+      if (match[0] === '+') {
+        break;
+      }
+
+      contextChanges = contextChanges || change[0] !== ' ';
+      merged.push(match);
+      matchIndex++; // Consume any additions in the other block as a conflict to attempt
+      // to pull in the remaining context after this
+
+      if (change[0] === '+') {
+        conflicted = true;
+
+        while (change[0] === '+') {
+          changes.push(change);
+          change = state.lines[++state.index];
+        }
+      }
+
+      if (match.substr(1) === change.substr(1)) {
+        changes.push(change);
+        state.index++;
+      } else {
+        conflicted = true;
+      }
+    }
+
+    if ((matchChanges[matchIndex] || '')[0] === '+' && contextChanges) {
+      conflicted = true;
+    }
+
+    if (conflicted) {
+      return changes;
+    }
+
+    while (matchIndex < matchChanges.length) {
+      merged.push(matchChanges[matchIndex++]);
+    }
+
+    return {
+      merged: merged,
+      changes: changes
+    };
+  }
+
+  function allRemoves(changes) {
+    return changes.reduce(function (prev, change) {
+      return prev && change[0] === '-';
+    }, true);
+  }
+
+  function skipRemoveSuperset(state, removeChanges, delta) {
+    for (var i = 0; i < delta; i++) {
+      var changeContent = removeChanges[removeChanges.length - delta + i].substr(1);
+
+      if (state.lines[state.index + i] !== ' ' + changeContent) {
+        return false;
+      }
+    }
+
+    state.index += delta;
+    return true;
+  }
+
+  function calcOldNewLineCount(lines) {
+    var oldLines = 0;
+    var newLines = 0;
+    lines.forEach(function (line) {
+      if (typeof line !== 'string') {
+        var myCount = calcOldNewLineCount(line.mine);
+        var theirCount = calcOldNewLineCount(line.theirs);
+
+        if (oldLines !== undefined) {
+          if (myCount.oldLines === theirCount.oldLines) {
+            oldLines += myCount.oldLines;
+          } else {
+            oldLines = undefined;
+          }
+        }
+
+        if (newLines !== undefined) {
+          if (myCount.newLines === theirCount.newLines) {
+            newLines += myCount.newLines;
+          } else {
+            newLines = undefined;
+          }
+        }
+      } else {
+        if (newLines !== undefined && (line[0] === '+' || line[0] === ' ')) {
+          newLines++;
+        }
+
+        if (oldLines !== undefined && (line[0] === '-' || line[0] === ' ')) {
+          oldLines++;
+        }
+      }
+    });
+    return {
+      oldLines: oldLines,
+      newLines: newLines
+    };
+  }
+
+  // See: http://code.google.com/p/google-diff-match-patch/wiki/API
+  function convertChangesToDMP(changes) {
+    var ret = [],
+        change,
+        operation;
+
+    for (var i = 0; i < changes.length; i++) {
+      change = changes[i];
+
+      if (change.added) {
+        operation = 1;
+      } else if (change.removed) {
+        operation = -1;
+      } else {
+        operation = 0;
+      }
+
+      ret.push([operation, change.value]);
+    }
+
+    return ret;
+  }
+
+  function convertChangesToXML(changes) {
+    var ret = [];
+
+    for (var i = 0; i < changes.length; i++) {
+      var change = changes[i];
+
+      if (change.added) {
+        ret.push('<ins>');
+      } else if (change.removed) {
+        ret.push('<del>');
+      }
+
+      ret.push(escapeHTML(change.value));
+
+      if (change.added) {
+        ret.push('</ins>');
+      } else if (change.removed) {
+        ret.push('</del>');
+      }
+    }
+
+    return ret.join('');
+  }
+
+  function escapeHTML(s) {
+    var n = s;
+    n = n.replace(/&/g, '&amp;');
+    n = n.replace(/</g, '&lt;');
+    n = n.replace(/>/g, '&gt;');
+    n = n.replace(/"/g, '&quot;');
+    return n;
+  }
+
+  exports.Diff = Diff;
+  exports.applyPatch = applyPatch;
+  exports.applyPatches = applyPatches;
+  exports.canonicalize = canonicalize;
+  exports.convertChangesToDMP = convertChangesToDMP;
+  exports.convertChangesToXML = convertChangesToXML;
+  exports.createPatch = createPatch;
+  exports.createTwoFilesPatch = createTwoFilesPatch;
+  exports.diffArrays = diffArrays;
+  exports.diffChars = diffChars;
+  exports.diffCss = diffCss;
+  exports.diffJson = diffJson;
+  exports.diffLines = diffLines;
+  exports.diffSentences = diffSentences;
+  exports.diffTrimmedLines = diffTrimmedLines;
+  exports.diffWords = diffWords;
+  exports.diffWordsWithSpace = diffWordsWithSpace;
+  exports.merge = merge;
+  exports.parsePatch = parsePatch;
+  exports.structuredPatch = structuredPatch;
+
+  Object.defineProperty(exports, '__esModule', { value: true });
+
+})));
diff --git a/_extensions/d2/node_modules/diff/dist/diff.min.js b/_extensions/d2/node_modules/diff/dist/diff.min.js
new file mode 100644
index 00000000..80c20de5
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/dist/diff.min.js
@@ -0,0 +1,38 @@
+/*!
+
+ diff v5.1.0
+
+Software License Agreement (BSD License)
+
+Copyright (c) 2009-2015, Kevin Decker <kpdecker@gmail.com>
+
+All rights reserved.
+
+Redistribution and use of this software in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above
+  copyright notice, this list of conditions and the
+  following disclaimer.
+
+* Redistributions in binary form must reproduce the above
+  copyright notice, this list of conditions and the
+  following disclaimer in the documentation and/or other
+  materials provided with the distribution.
+
+* Neither the name of Kevin Decker nor the names of its
+  contributors may be used to endorse or promote products
+  derived from this software without specific prior
+  written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+@license
+*/
+!function(e,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n((e=e||self).Diff={})}(this,function(e){"use strict";function t(){}t.prototype={diff:function(u,a,e){var n=2=c&&h<=i+1)return d([{value:this.join(a),count:a.length}]);function o(){for(var e,n=-1*p;n<=p;n+=2){var t=void 0,r=v[n-1],i=v[n+1],o=(i?i.newPos:0)-n;r&&(v[n-1]=void 0);var l=r&&r.newPos+1=c&&h<=o+1)return d(function(e,n,t,r,i){for(var o=0,l=n.length,s=0,u=0;oe.length?t:e}),d.value=e.join(f)):d.value=e.join(t.slice(s,s+d.count)),s+=d.count,d.added||(u+=d.count))}var c=n[l-1];1e.length)&&(n=e.length);for(var t=0,r=new Array(n);t=c.length-2&&u.length<=d.context&&(i=/\n$/.test(a),o=/\n$/.test(f),l=0==u.length&&g.length>r.oldLines,!i&&l&&0e.length)return!1;for(var t=0;t"):i.removed&&t.push(""),t.push((n=i.value,n.replace(/&/g,"&").replace(//g,">").replace(/"/g,"""))),i.added?t.push(""):i.removed&&t.push("")}return t.join("")},e.createPatch=function(e,n,t,r,i,o){return y(e,e,n,t,r,i,o)},e.createTwoFilesPatch=y,e.diffArrays=function(e,n,t){return g.diff(e,n,t)},e.diffChars=function(e,n,t){return r.diff(e,n,t)},e.diffCss=function(e,n,t){return f.diff(e,n,t)},e.diffJson=function(e,n,t){return p.diff(e,n,t)},e.diffLines=L,e.diffSentences=function(e,n,t){return a.diff(e,n,t)},e.diffTrimmedLines=function(e,n,t){var r=i(t,{ignoreWhitespace:!0});return u.diff(e,n,r)},e.diffWords=function(e,n,t){return t=i(t,{ignoreWhitespace:!0}),s.diff(e,n,t)},e.diffWordsWithSpace=function(e,n,t){return s.diff(e,n,t)},e.merge=function(e,n,t){e=b(e,t),n=b(n,t);var r={};(e.index||n.index)&&(r.index=e.index||n.index),(e.newFileName||n.newFileName)&&(F(e)?F(n)?(r.oldFileName=N(r,e.oldFileName,n.oldFileName),r.newFileName=N(r,e.newFileName,n.newFileName),r.oldHeader=N(r,e.oldHeader,n.oldHeader),r.newHeader=N(r,e.newHeader,n.newHeader)):(r.oldFileName=e.oldFileName,r.newFileName=e.newFileName,r.oldHeader=e.oldHeader,r.newHeader=e.newHeader):(r.oldFileName=n.oldFileName||e.oldFileName,r.newFileName=n.newFileName||e.newFileName,r.oldHeader=n.oldHeader||e.oldHeader,r.newHeader=n.newHeader||e.newHeader)),r.hunks=[];for(var i=0,o=0,l=0,s=0;i');
+    } else if (change.removed) {
+      ret.push('<del>');
+    }
+
+    ret.push(escapeHTML(change.value));
+
+    if (change.added) {
+      ret.push('</ins>');
+    } else if (change.removed) {
+      ret.push('</del>');
+    }
+  }
+
+  return ret.join('');
+}
+
+function escapeHTML(s) {
+  var n = s;
+  n = n.replace(/&/g, '&amp;');
+  n = n.replace(/</g, '&lt;');
+  n = n.replace(/>/g, '&gt;');
+  n = n.replace(/"/g, '&quot;');
+  return n;
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy9jb252ZXJ0L3htbC5qcyJdLCJuYW1lcyI6WyJjb252ZXJ0Q2hhbmdlc1RvWE1MIiwiY2hhbmdlcyIsInJldCIsImkiLCJsZW5ndGgiLCJjaGFuZ2UiLCJhZGRlZCIsInB1c2giLCJyZW1vdmVkIiwiZXNjYXBlSFRNTCIsInZhbHVlIiwiam9pbiIsInMiLCJuIiwicmVwbGFjZSJdLCJtYXBwaW5ncyI6Ijs7Ozs7Ozs7O0FBQU8sU0FBU0EsbUJBQVQsQ0FBNkJDLE9BQTdCLEVBQXNDO0FBQzNDLE1BQUlDLEdBQUcsR0FBRyxFQUFWOztBQUNBLE9BQUssSUFBSUMsQ0FBQyxHQUFHLENBQWIsRUFBZ0JBLENBQUMsR0FBR0YsT0FBTyxDQUFDRyxNQUE1QixFQUFvQ0QsQ0FBQyxFQUFyQyxFQUF5QztBQUN2QyxRQUFJRSxNQUFNLEdBQUdKLE9BQU8sQ0FBQ0UsQ0FBRCxDQUFwQjs7QUFDQSxRQUFJRSxNQUFNLENBQUNDLEtBQVgsRUFBa0I7QUFDaEJKLE1BQUFBLEdBQUcsQ0FBQ0ssSUFBSixDQUFTLE9BQVQ7QUFDRCxLQUZELE1BRU8sSUFBSUYsTUFBTSxDQUFDRyxPQUFYLEVBQW9CO0FBQ3pCTixNQUFBQSxHQUFHLENBQUNLLElBQUosQ0FBUyxPQUFUO0FBQ0Q7O0FBRURMLElBQUFBLEdBQUcsQ0FBQ0ssSUFBSixDQUFTRSxVQUFVLENBQUNKLE1BQU0sQ0FBQ0ssS0FBUixDQUFuQjs7QUFFQSxRQUFJTCxNQUFNLENBQUNDLEtBQVgsRUFBa0I7QUFDaEJKLE1BQUFBLEdBQUcsQ0FBQ0ssSUFBSixDQUFTLFFBQVQ7QUFDRCxLQUZELE1BRU8sSUFBSUYsTUFBTSxDQUFDRyxPQUFYLEVBQW9CO0FBQ3pCTixNQUFBQSxHQUFHLENBQUNLLElBQUosQ0FBUyxRQUFUO0FBQ0Q7QUFDRjs7QUFDRCxTQUFPTCxHQUFHLENBQUNTLElBQUosQ0FBUyxFQUFULENBQVA7QUFDRDs7QUFFRCxTQUFTRixVQUFULENBQW9CRyxDQUFwQixFQUF1QjtBQUNyQixNQUFJQyxDQUFDLEdBQUdELENBQVI7QUFDQUMsRUFBQUEsQ0FBQyxHQUFHQSxDQUFDLENBQUNDLE9BQUYsQ0FBVSxJQUFWLEVBQWdCLE9BQWhCLENBQUo7QUFDQUQsRUFBQUEsQ0FBQyxHQUFHQSxDQUFDLENBQUNDLE9BQUYsQ0FBVSxJQUFWLEVBQWdCLE1BQWhCLENBQUo7QUFDQUQsRUFBQUEsQ0FBQyxHQUFHQSxDQUFDLENBQUNDLE9BQUYsQ0FBVSxJQUFWLEVBQWdCLE1BQWhCLENBQUo7QUFDQUQsRUFBQUEsQ0FBQyxHQUFHQSxDQUFDLENBQUNDLE9BQUYsQ0FBVSxJQUFWLEVBQWdCLFFBQWhCLENBQUo7QUFFQSxTQUFPRCxDQUFQO0FBQ0QiLCJzb3VyY2VzQ29udGVudCI6WyJleHBvcnQgZnVuY3Rpb24gY29udmVydENoYW5nZXNUb1hNTChjaGFuZ2VzKSB7XG4gIGxldCByZXQgPSBbXTtcbiAgZm9yIChsZXQgaSA9IDA7IGkgPCBjaGFuZ2VzLmxlbmd0aDsgaSsrKSB7XG4gICAgbGV0IGNoYW5nZSA9IGNoYW5nZXNbaV07XG4gICAgaWYgKGNoYW5nZS5hZGRlZCkge1xuICAgICAgcmV0LnB1c2goJzxpbnM+Jyk7XG4gICAgfSBlbHNlIGlmIChjaGFuZ2UucmVtb3ZlZCkge1xuICAgICAgcmV0LnB1c2goJzxkZWw+Jyk7XG4gICAgfVxuXG4gICAgcmV0LnB1c2goZXNjYXBlSFRNTChjaGFuZ2UudmFsdWUpKTtcblxuICAgIGlmIChjaGFuZ2UuYWRkZWQpIHtcbiAgICAgIHJldC5wdXNoKCc8L2lucz4nKTtcbiAgICB9IGVsc2UgaWYgKGNoYW5nZS5yZW1vdmVkKSB7XG4gICAgICByZXQucHVzaCgnPC9kZWw+Jyk7XG4gICAgfVxuICB9XG4gIHJldHVybiByZXQuam9pbignJyk7XG59XG5cbmZ1bmN0aW9uIGVzY2FwZUhUTUwocykge1xuICBsZXQgbiA9IHM7XG4gIG4gPSBuLnJlcGxhY2UoLyYvZywgJyZhbXA7Jyk7XG4gIG4gPSBuLnJlcGxhY2UoLzwvZywgJyZsdDsnKTtcbiAgbiA9IG4ucmVwbGFjZSgvPi9nLCAnJmd0OycpO1xuICBuID0gbi5yZXBsYWNlKC9cIi9nLCAnJnF1b3Q7Jyk7XG5cbiAgcmV0dXJuIG47XG59XG4iXX0=
diff --git a/_extensions/d2/node_modules/diff/lib/diff/array.js b/_extensions/d2/node_modules/diff/lib/diff/array.js
new file mode 100644
index 00000000..19e36809
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/diff/array.js
@@ -0,0 +1,45 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.diffArrays = diffArrays;
+exports.arrayDiff = void 0;
+
+/*istanbul ignore end*/
+var
+/*istanbul ignore start*/
+_base = _interopRequireDefault(require("./base"))
+/*istanbul ignore end*/
+;
+
+/*istanbul ignore start*/ function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
+
+/*istanbul ignore end*/
+var arrayDiff = new
+/*istanbul ignore start*/
+_base
+/*istanbul ignore end*/
+[
+/*istanbul ignore start*/
+"default"
+/*istanbul ignore end*/
+]();
+
+/*istanbul ignore start*/
+exports.arrayDiff = arrayDiff;
+
+/*istanbul ignore end*/
+arrayDiff.tokenize = function (value) {
+  return value.slice();
+};
+
+arrayDiff.join = arrayDiff.removeEmpty = function (value) {
+  return value;
+};
+
+function diffArrays(oldArr, newArr, callback) {
+  return arrayDiff.diff(oldArr, newArr, callback);
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy9kaWZmL2FycmF5LmpzIl0sIm5hbWVzIjpbImFycmF5RGlmZiIsIkRpZmYiLCJ0b2tlbml6ZSIsInZhbHVlIiwic2xpY2UiLCJqb2luIiwicmVtb3ZlRW1wdHkiLCJkaWZmQXJyYXlzIiwib2xkQXJyIiwibmV3QXJyIiwiY2FsbGJhY2siLCJkaWZmIl0sIm1hcHBpbmdzIjoiOzs7Ozs7Ozs7O0FBQUE7QUFBQTtBQUFBO0FBQUE7QUFBQTs7Ozs7QUFFTyxJQUFNQSxTQUFTLEdBQUc7QUFBSUM7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUEsQ0FBSixFQUFsQjs7Ozs7O0FBQ1BELFNBQVMsQ0FBQ0UsUUFBVixHQUFxQixVQUFTQyxLQUFULEVBQWdCO0FBQ25DLFNBQU9BLEtBQUssQ0FBQ0MsS0FBTixFQUFQO0FBQ0QsQ0FGRDs7QUFHQUosU0FBUyxDQUFDSyxJQUFWLEdBQWlCTCxTQUFTLENBQUNNLFdBQVYsR0FBd0IsVUFBU0gsS0FBVCxFQUFnQjtBQUN2RCxTQUFPQSxLQUFQO0FBQ0QsQ0FGRDs7QUFJTyxTQUFTSSxVQUFULENBQW9CQyxNQUFwQixFQUE0QkMsTUFBNUIsRUFBb0NDLFFBQXBDLEVBQThDO0FBQUUsU0FBT1YsU0FBUyxDQUFDVyxJQUFWLENBQWVILE1BQWYsRUFBdUJDLE1BQXZCLEVBQStCQyxRQUEvQixDQUFQO0FBQWtEIiwic291cmNlc0NvbnRlbnQiOlsiaW1wb3J0IERpZmYgZnJvbSAnLi9iYXNlJztcblxuZXhwb3J0IGNvbnN0IGFycmF5RGlmZiA9IG5ldyBEaWZmKCk7XG5hcnJheURpZmYudG9rZW5pemUgPSBmdW5jdGlvbih2YWx1ZSkge1xuICByZXR1cm4gdmFsdWUuc2xpY2UoKTtcbn07XG5hcnJheURpZmYuam9pbiA9IGFycmF5RGlmZi5yZW1vdmVFbXB0eSA9IGZ1bmN0aW9uKHZhbHVlKSB7XG4gIHJldHVybiB2YWx1ZTtcbn07XG5cbmV4cG9ydCBmdW5jdGlvbiBkaWZmQXJyYXlzKG9sZEFyciwgbmV3QXJyLCBjYWxsYmFjaykgeyByZXR1cm4gYXJyYXlEaWZmLmRpZmYob2xkQXJyLCBuZXdBcnIsIGNhbGxiYWNrKTsgfVxuIl19
diff --git a/_extensions/d2/node_modules/diff/lib/diff/base.js b/_extensions/d2/node_modules/diff/lib/diff/base.js
new file mode 100644
index 00000000..521da8a6
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/diff/base.js
@@ -0,0 +1,307 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports["default"] = Diff;
+
+/*istanbul ignore end*/
+function Diff() {}
+
+Diff.prototype = {
+  /*istanbul ignore start*/
+
+  /*istanbul ignore end*/
+  diff: function diff(oldString, newString) {
+    /*istanbul ignore start*/
+    var
+    /*istanbul ignore end*/
+    options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
+    var callback = options.callback;
+
+    if (typeof options === 'function') {
+      callback = options;
+      options = {};
+    }
+
+    this.options = options;
+    var self = this;
+
+    function done(value) {
+      if (callback) {
+        setTimeout(function () {
+          callback(undefined, value);
+        }, 0);
+        return true;
+      } else {
+        return value;
+      }
+    } // Allow subclasses to massage the input prior to running
+
+
+    oldString = this.castInput(oldString);
+    newString = this.castInput(newString);
+    oldString = this.removeEmpty(this.tokenize(oldString));
+    newString = this.removeEmpty(this.tokenize(newString));
+    var newLen = newString.length,
+        oldLen = oldString.length;
+    var editLength = 1;
+    var maxEditLength = newLen + oldLen;
+
+    if (options.maxEditLength) {
+      maxEditLength = Math.min(maxEditLength, options.maxEditLength);
+    }
+
+    var bestPath = [{
+      newPos: -1,
+      components: []
+    }]; // Seed editLength = 0, i.e. the content starts with the same values
+
+    var oldPos = this.extractCommon(bestPath[0], newString, oldString, 0);
+
+    if (bestPath[0].newPos + 1 >= newLen && oldPos + 1 >= oldLen) {
+      // Identity per the equality and tokenizer
+      return done([{
+        value: this.join(newString),
+        count: newString.length
+      }]);
+    } // Main worker method. checks all permutations of a given edit length for acceptance.
+
+
+    function execEditLength() {
+      for (var diagonalPath = -1 * editLength; diagonalPath <= editLength; diagonalPath += 2) {
+        var basePath =
+        /*istanbul ignore start*/
+        void 0
+        /*istanbul ignore end*/
+        ;
+
+        var addPath = bestPath[diagonalPath - 1],
+            removePath = bestPath[diagonalPath + 1],
+            _oldPos = (removePath ? removePath.newPos : 0) - diagonalPath;
+
+        if (addPath) {
+          // No one else is going to attempt to use this value, clear it
+          bestPath[diagonalPath - 1] = undefined;
+        }
+
+        var canAdd = addPath && addPath.newPos + 1 < newLen,
+            canRemove = removePath && 0 <= _oldPos && _oldPos < oldLen;
+
+        if (!canAdd && !canRemove) {
+          // If this path is a terminal then prune
+          bestPath[diagonalPath] = undefined;
+          continue;
+        } // Select the diagonal that we want to branch from. We select the prior
+        // path whose position in the new string is the farthest from the origin
+        // and does not pass the bounds of the diff graph
+
+
+        if (!canAdd || canRemove && addPath.newPos < removePath.newPos) {
+          basePath = clonePath(removePath);
+          self.pushComponent(basePath.components, undefined, true);
+        } else {
+          basePath = addPath; // No need to clone, we've pulled it from the list
+
+          basePath.newPos++;
+          self.pushComponent(basePath.components, true, undefined);
+        }
+
+        _oldPos = self.extractCommon(basePath, newString, oldString, diagonalPath); // If we have hit the end of both strings, then we are done
+
+        if (basePath.newPos + 1 >= newLen && _oldPos + 1 >= oldLen) {
+          return done(buildValues(self, basePath.components, newString, oldString, self.useLongestToken));
+        } else {
+          // Otherwise track this path as a potential candidate and continue.
+          bestPath[diagonalPath] = basePath;
+        }
+      }
+
+      editLength++;
+    } // Performs the length of edit iteration. Is a bit fugly as this has to support the
+    // sync and async mode which is never fun. Loops over execEditLength until a value
+    // is produced, or until the edit length exceeds options.maxEditLength (if given),
+    // in which case it will return undefined.
+
+
+    if (callback) {
+      (function exec() {
+        setTimeout(function () {
+          if (editLength > maxEditLength) {
+            return callback();
+          }
+
+          if (!execEditLength()) {
+            exec();
+          }
+        }, 0);
+      })();
+    } else {
+      while (editLength <= maxEditLength) {
+        var ret = execEditLength();
+
+        if (ret) {
+          return ret;
+        }
+      }
+    }
+  },
+
+  /*istanbul ignore start*/
+
+  /*istanbul ignore end*/
+  pushComponent: function pushComponent(components, added, removed) {
+    var last = components[components.length - 1];
+
+    if (last && last.added === added && last.removed === removed) {
+      // We need to clone here as the component clone operation is just
+      // as shallow array clone
+      components[components.length - 1] = {
+        count: last.count + 1,
+        added: added,
+        removed: removed
+      };
+    } else {
+      components.push({
+        count: 1,
+        added: added,
+        removed: removed
+      });
+    }
+  },
+
+  /*istanbul ignore start*/
+
+  /*istanbul ignore end*/
+  extractCommon: function extractCommon(basePath, newString, oldString, diagonalPath) {
+    var newLen = newString.length,
+        oldLen = oldString.length,
+        newPos = basePath.newPos,
+        oldPos = newPos - diagonalPath,
+        commonCount = 0;
+
+    while (newPos + 1 < newLen && oldPos + 1 < oldLen && this.equals(newString[newPos + 1], oldString[oldPos + 1])) {
+      newPos++;
+      oldPos++;
+      commonCount++;
+    }
+
+    if (commonCount) {
+      basePath.components.push({
+        count: commonCount
+      });
+    }
+
+    basePath.newPos = newPos;
+    return oldPos;
+  },
+
+  /*istanbul ignore start*/
+
+  /*istanbul ignore end*/
+  equals: function equals(left, right) {
+    if (this.options.comparator) {
+      return this.options.comparator(left, right);
+    } else {
+      return left === right || this.options.ignoreCase && left.toLowerCase() === right.toLowerCase();
+    }
+  },
+
+  /*istanbul ignore start*/
+
+  /*istanbul ignore end*/
+  removeEmpty: function removeEmpty(array) {
+    var ret = [];
+
+    for (var i = 0; i < array.length; i++) {
+      if (array[i]) {
+        ret.push(array[i]);
+      }
+    }
+
+    return ret;
+  },
+
+  /*istanbul ignore start*/
+
+  /*istanbul ignore end*/
+  castInput: function castInput(value) {
+    return value;
+  },
+
+  /*istanbul ignore start*/
+
+  /*istanbul ignore end*/
+  tokenize: function tokenize(value) {
+    return value.split('');
+  },
+
+  /*istanbul ignore start*/
+
+  /*istanbul ignore end*/
+  join: function join(chars) {
+    return chars.join('');
+  }
+};
+
+function buildValues(diff, components, newString, oldString, useLongestToken) {
+  var componentPos = 0,
+      componentLen = components.length,
+      newPos = 0,
+      oldPos = 0;
+
+  for (; componentPos < componentLen; componentPos++) {
+    var component = components[componentPos];
+
+    if (!component.removed) {
+      if (!component.added && useLongestToken) {
+        var value = newString.slice(newPos, newPos + component.count);
+        value = value.map(function (value, i) {
+          var oldValue = oldString[oldPos + i];
+          return oldValue.length > value.length ? oldValue : value;
+        });
+        component.value = diff.join(value);
+      } else {
+        component.value = diff.join(newString.slice(newPos, newPos + component.count));
+      }
+
+      newPos += component.count; // Common case
+
+      if (!component.added) {
+        oldPos += component.count;
+      }
+    } else {
+      component.value = diff.join(oldString.slice(oldPos, oldPos + component.count));
+      oldPos += component.count; // Reverse add and remove so removes are output first to match common convention
+      // The diffing algorithm is tied to add then remove output and this is the simplest
+      // route to get the desired output with minimal overhead.
+
+      if (componentPos && components[componentPos - 1].added) {
+        var tmp = components[componentPos - 1];
+        components[componentPos - 1] = components[componentPos];
+        components[componentPos] = tmp;
+      }
+    }
+  } // Special case handle for when one terminal is ignored (i.e. whitespace).
+  // For this case we merge the terminal into the prior string and drop the change.
+  // This is only available for string mode.
+
+
+  var lastComponent = components[componentLen - 1];
+
+  if (componentLen > 1 && typeof lastComponent.value === 'string' && (lastComponent.added || lastComponent.removed) && diff.equals('', lastComponent.value)) {
+    components[componentLen - 2].value += lastComponent.value;
+    components.pop();
+  }
+
+  return components;
+}
+
+function clonePath(path) {
+  return {
+    newPos: path.newPos,
+    components: path.components.slice(0)
+  };
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"sources":["../../src/diff/base.js"],"names":["Diff","prototype","diff","oldString","newString","options","callback","self","done","value","setTimeout","undefined","castInput","removeEmpty","tokenize","newLen","length","oldLen","editLength","maxEditLength","Math","min","bestPath","newPos","components","oldPos","extractCommon","join","count","execEditLength","diagonalPath","basePath","addPath","removePath","canAdd","canRemove","clonePath","pushComponent","buildValues","useLongestToken","exec","ret","added","removed","last","push","commonCount","equals","left","right","comparator","ignoreCase","toLowerCase","array","i","split","chars","componentPos","componentLen","component","slice","map","oldValue","tmp","lastComponent","pop","path"],"mappings":";;;;;;;;;AAAe,SAASA,IAAT,GAAgB,CAAE;;AAEjCA,IAAI,CAACC,SAAL,GAAiB;AAAA;;AAAA;AACfC,EAAAA,IADe,gBACVC,SADU,EACCC,SADD,EAC0B;AAAA;AAAA;AAAA;AAAdC,IAAAA,OAAc,uEAAJ,EAAI;AACvC,QAAIC,QAAQ,GAAGD,OAAO,CAACC,QAAvB;;AACA,QAAI,OAAOD,OAAP,KAAmB,UAAvB,EAAmC;AACjCC,MAAAA,QAAQ,GAAGD,OAAX;AACAA,MAAAA,OAAO,GAAG,EAAV;AACD;;AACD,SAAKA,OAAL,GAAeA,OAAf;AAEA,QAAIE,IAAI,GAAG,IAAX;;AAEA,aAASC,IAAT,CAAcC,KAAd,EAAqB;AACnB,UAAIH,QAAJ,EAAc;AACZI,QAAAA,UAAU,CAAC,YAAW;AAAEJ,UAAAA,QAAQ,CAACK,SAAD,EAAYF,KAAZ,CAAR;AAA6B,SAA3C,EAA6C,CAA7C,CAAV;AACA,eAAO,IAAP;AACD,OAHD,MAGO;AACL,eAAOA,KAAP;AACD;AACF,KAjBsC,CAmBvC;;;AACAN,IAAAA,SAAS,GAAG,KAAKS,SAAL,CAAeT,SAAf,CAAZ;AACAC,IAAAA,SAAS,GAAG,KAAKQ,SAAL,CAAeR,SAAf,CAAZ;AAEAD,IAAAA,SAAS,GAAG,KAAKU,WAAL,CAAiB,KAAKC,QAAL,CAAcX,SAAd,CAAjB,CAAZ;AACAC,IAAAA,SAAS,GAAG,KAAKS,WAAL,CAAiB,KAAKC,QAAL,CAAcV,SAAd,CAAjB,CAAZ;AAEA,QAAIW,MAAM,GAAGX,SAAS,CAACY,MAAvB;AAAA,QAA+BC,MAAM,GAAGd,SAAS,CAACa,MAAlD;AACA,QAAIE,UAAU,GAAG,CAAjB;AACA,QAAIC,aAAa,GAAGJ,MAAM,GAAGE,MAA7B;;AACA,QAAGZ,OAAO,CAACc,aAAX,EAA0B;AACxBA,MAAAA,aAAa,GAAGC,IAAI,CAACC,GAAL,CAASF,aAAT,EAAwBd,OAAO,CAACc,aAAhC,CAAhB;AACD;;AAED,QAAIG,QAAQ,GAAG,CAAC;AAAEC,MAAAA,MAAM,EAAE,CAAC,CAAX;AAAcC,MAAAA,UAAU,EAAE;AAA1B,KAAD,CAAf,CAjCuC,CAmCvC;;AACA,QAAIC,MAAM,GAAG,KAAKC,aAAL,CAAmBJ,QAAQ,CAAC,CAAD,CAA3B,EAAgClB,SAAhC,EAA2CD,SAA3C,EAAsD,CAAtD,CAAb;;AACA,QAAImB,QAAQ,CAAC,CAAD,CAAR,CAAYC,MAAZ,GAAqB,CAArB,IAA0BR,MAA1B,IAAoCU,MAAM,GAAG,CAAT,IAAcR,MAAtD,EAA8D;AAC5D;AACA,aAAOT,IAAI,CAAC,CAAC;AAACC,QAAAA,KAAK,EAAE,KAAKkB,IAAL,CAAUvB,SAAV,CAAR;AAA8BwB,QAAAA,KAAK,EAAExB,SAAS,CAACY;AAA/C,OAAD,CAAD,CAAX;AACD,KAxCsC,CA0CvC;;;AACA,aAASa,cAAT,GAA0B;AACxB,WAAK,IAAIC,YAAY,GAAG,CAAC,CAAD,GAAKZ,UAA7B,EAAyCY,YAAY,IAAIZ,UAAzD,EAAqEY,YAAY,IAAI,CAArF,EAAwF;AACtF,YAAIC,QAAQ;AAAA;AAAA;AAAZ;AAAA;;AACA,YAAIC,OAAO,GAAGV,QAAQ,CAACQ,YAAY,GAAG,CAAhB,CAAtB;AAAA,YACIG,UAAU,GAAGX,QAAQ,CAACQ,YAAY,GAAG,CAAhB,CADzB;AAAA,YAEIL,OAAM,GAAG,CAACQ,UAAU,GAAGA,UAAU,CAACV,MAAd,GAAuB,CAAlC,IAAuCO,YAFpD;;AAGA,YAAIE,OAAJ,EAAa;AACX;AACAV,UAAAA,QAAQ,CAACQ,YAAY,GAAG,CAAhB,CAAR,GAA6BnB,SAA7B;AACD;;AAED,YAAIuB,MAAM,GAAGF,OAAO,IAAIA,OAAO,CAACT,MAAR,GAAiB,CAAjB,GAAqBR,MAA7C;AAAA,YACIoB,SAAS,GAAGF,UAAU,IAAI,KAAKR,OAAnB,IAA6BA,OAAM,GAAGR,MADtD;;AAEA,YAAI,CAACiB,MAAD,IAAW,CAACC,SAAhB,EAA2B;AACzB;AACAb,UAAAA,QAAQ,CAACQ,YAAD,CAAR,GAAyBnB,SAAzB;AACA;AACD,SAhBqF,CAkBtF;AACA;AACA;;;AACA,YAAI,CAACuB,MAAD,IAAYC,SAAS,IAAIH,OAAO,CAACT,MAAR,GAAiBU,UAAU,CAACV,MAAzD,EAAkE;AAChEQ,UAAAA,QAAQ,GAAGK,SAAS,CAACH,UAAD,CAApB;AACA1B,UAAAA,IAAI,CAAC8B,aAAL,CAAmBN,QAAQ,CAACP,UAA5B,EAAwCb,SAAxC,EAAmD,IAAnD;AACD,SAHD,MAGO;AACLoB,UAAAA,QAAQ,GAAGC,OAAX,CADK,CACe;;AACpBD,UAAAA,QAAQ,CAACR,MAAT;AACAhB,UAAAA,IAAI,CAAC8B,aAAL,CAAmBN,QAAQ,CAACP,UAA5B,EAAwC,IAAxC,EAA8Cb,SAA9C;AACD;;AAEDc,QAAAA,OAAM,GAAGlB,IAAI,CAACmB,aA
AL,CAAmBK,QAAnB,EAA6B3B,SAA7B,EAAwCD,SAAxC,EAAmD2B,YAAnD,CAAT,CA9BsF,CAgCtF;;AACA,YAAIC,QAAQ,CAACR,MAAT,GAAkB,CAAlB,IAAuBR,MAAvB,IAAiCU,OAAM,GAAG,CAAT,IAAcR,MAAnD,EAA2D;AACzD,iBAAOT,IAAI,CAAC8B,WAAW,CAAC/B,IAAD,EAAOwB,QAAQ,CAACP,UAAhB,EAA4BpB,SAA5B,EAAuCD,SAAvC,EAAkDI,IAAI,CAACgC,eAAvD,CAAZ,CAAX;AACD,SAFD,MAEO;AACL;AACAjB,UAAAA,QAAQ,CAACQ,YAAD,CAAR,GAAyBC,QAAzB;AACD;AACF;;AAEDb,MAAAA,UAAU;AACX,KAtFsC,CAwFvC;AACA;AACA;AACA;;;AACA,QAAIZ,QAAJ,EAAc;AACX,gBAASkC,IAAT,GAAgB;AACf9B,QAAAA,UAAU,CAAC,YAAW;AACpB,cAAIQ,UAAU,GAAGC,aAAjB,EAAgC;AAC9B,mBAAOb,QAAQ,EAAf;AACD;;AAED,cAAI,CAACuB,cAAc,EAAnB,EAAuB;AACrBW,YAAAA,IAAI;AACL;AACF,SARS,EAQP,CARO,CAAV;AASD,OAVA,GAAD;AAWD,KAZD,MAYO;AACL,aAAOtB,UAAU,IAAIC,aAArB,EAAoC;AAClC,YAAIsB,GAAG,GAAGZ,cAAc,EAAxB;;AACA,YAAIY,GAAJ,EAAS;AACP,iBAAOA,GAAP;AACD;AACF;AACF;AACF,GAjHc;;AAAA;;AAAA;AAmHfJ,EAAAA,aAnHe,yBAmHDb,UAnHC,EAmHWkB,KAnHX,EAmHkBC,OAnHlB,EAmH2B;AACxC,QAAIC,IAAI,GAAGpB,UAAU,CAACA,UAAU,CAACR,MAAX,GAAoB,CAArB,CAArB;;AACA,QAAI4B,IAAI,IAAIA,IAAI,CAACF,KAAL,KAAeA,KAAvB,IAAgCE,IAAI,CAACD,OAAL,KAAiBA,OAArD,EAA8D;AAC5D;AACA;AACAnB,MAAAA,UAAU,CAACA,UAAU,CAACR,MAAX,GAAoB,CAArB,CAAV,GAAoC;AAACY,QAAAA,KAAK,EAAEgB,IAAI,CAAChB,KAAL,GAAa,CAArB;AAAwBc,QAAAA,KAAK,EAAEA,KAA/B;AAAsCC,QAAAA,OAAO,EAAEA;AAA/C,OAApC;AACD,KAJD,MAIO;AACLnB,MAAAA,UAAU,CAACqB,IAAX,CAAgB;AAACjB,QAAAA,KAAK,EAAE,CAAR;AAAWc,QAAAA,KAAK,EAAEA,KAAlB;AAAyBC,QAAAA,OAAO,EAAEA;AAAlC,OAAhB;AACD;AACF,GA5Hc;;AAAA;;AAAA;AA6HfjB,EAAAA,aA7He,yBA6HDK,QA7HC,EA6HS3B,SA7HT,EA6HoBD,SA7HpB,EA6H+B2B,YA7H/B,EA6H6C;AAC1D,QAAIf,MAAM,GAAGX,SAAS,CAACY,MAAvB;AAAA,QACIC,MAAM,GAAGd,SAAS,CAACa,MADvB;AAAA,QAEIO,MAAM,GAAGQ,QAAQ,CAACR,MAFtB;AAAA,QAGIE,MAAM,GAAGF,MAAM,GAAGO,YAHtB;AAAA,QAKIgB,WAAW,GAAG,CALlB;;AAMA,WAAOvB,MAAM,GAAG,CAAT,GAAaR,MAAb,IAAuBU,MAAM,GAAG,CAAT,GAAaR,MAApC,IAA8C,KAAK8B,MAAL,CAAY3C,SAAS,CAACmB,MAAM,GAAG,CAAV,CAArB,EAAmCpB,SAAS,CAACsB,MAAM,GAAG,CAAV,CAA5C,CAArD,EAAgH;AAC9GF,MAAAA,MAAM;AACNE,MAAAA,MAAM;AACNqB,MAAAA,WAAW;AACZ;;AAED,QAAIA,WAAJ,EAAiB;AACff,MAAAA,QAAQ,CAACP,UAAT,CAAoBqB,IAApB,CAAyB;AAACjB,QAAAA,KAAK,EAAEkB;AAAR,OAAzB;AACD;;AAEDf,IAAAA,QAAQ,CAACR,MAAT,GAAkBA,MAAlB;AACA,WAAOE,MAAP;AACD,GAhJc;;AAAA;;AAAA;AAkJfsB,EAAAA,MAlJe,kBAkJRC,IAlJQ,EAkJFC,KAlJE,EAkJK;AAClB,QAAI,KAAK5C,OAAL,CAAa6C,UAAjB,EAA6B;AAC3B,aAAO,KAAK7C,OAAL,CAAa6C,UAAb,CAAwBF,IAAxB,EAA8BC,KAA9B,CAAP;AACD,KAFD,MAEO;AACL,aAAOD,IAAI,KAAKC,KAAT,IACD,KAAK5C,OAAL,CAAa8C,UAAb,IAA2BH,IAAI,CAACI,WAAL,OAAuBH,KAAK,CAACG,WAAN,EADxD;AAED;AACF,GAzJc;;AAAA;;AAAA;AA0JfvC,EAAAA,WA1Je,uBA0JHwC,KA1JG,EA0JI;AACjB,QAAIZ,GAAG,GAAG,EAAV;;AACA,SAAK,IAAIa,CAAC,GAAG,CAAb,EAAgBA,CAAC,GAAGD,KAAK,CAACrC,MAA1B,EAAkCsC,CAAC,EAAnC,EAAuC;AACrC,UAAID,KAAK,CAACC,CAAD,CAAT,EAAc;AACZb,QAAAA,GAAG,CAACI,IAAJ,CAASQ,KAAK,CAACC,CAAD,CAAd;AACD;AACF;;AACD,WAAOb,GAAP;AACD,GAlKc;;AAAA;;AAAA;AAmKf7B,EAAAA,SAnKe,qBAmKLH,KAnKK,EAmKE;AACf,WAAOA,KAAP;AACD,GArKc;;AAAA;;AAAA;AAsKfK,EAAAA,QAtKe,oBAsKNL,KAtKM,EAsKC;AACd,WAAOA,KAAK,CAAC8C,KAAN,CAAY,EAAZ,CAAP;AACD,GAxKc;;AAAA;;AAAA;AAyKf5B,EAAAA,IAzKe,gBAyKV6B,KAzKU,EAyKH;AACV,WAAOA,KAAK,CAAC7B,IAAN,CAAW,EAAX,CAAP;AACD;AA3Kc,CAAjB;;AA8KA,SAASW,WAAT,CAAqBpC,IAArB,EAA2BsB,UAA3B,EAAuCpB,SAAvC,EAAkDD,SAAlD,EAA6DoC,eAA7D,EAA8E;AAC5E,MAAIkB,YAAY,GAAG,CAAnB;AAAA,MACIC,YAAY,GAAGlC,UAAU,CAACR,MAD9B;AAAA,MAEIO,MAAM,GAAG,CAFb;AAAA,MAGIE,MAAM,GAAG,CAHb;;AAKA,SAAOgC,YAAY,GAAGC,YAAtB,EAAoCD,YAAY,EAAhD,EAAoD;AAClD,QAAIE,SAAS,GAAGnC,UAAU,CAACiC,YAAD,CAA1B;;AACA,QAAI,CAACE,SAAS,CAAChB,OAAf,EAAwB;AACtB,UAAI,CAACgB,SAAS,CAACjB,KAAX,IAAoBH,eAAxB,EAAyC;AACvC,YAAI9B,KAAK,GAAGL,SAAS,CAACwD,KAAV,CAAgBrC,MAAhB,EAAwBA,MAAM,GAAGoC,SAAS,CAAC/B,KAA3C,CAAZ;AACAnB,QAAAA,KAAK,GAAG
A,KAAK,CAACoD,GAAN,CAAU,UAASpD,KAAT,EAAgB6C,CAAhB,EAAmB;AACnC,cAAIQ,QAAQ,GAAG3D,SAAS,CAACsB,MAAM,GAAG6B,CAAV,CAAxB;AACA,iBAAOQ,QAAQ,CAAC9C,MAAT,GAAkBP,KAAK,CAACO,MAAxB,GAAiC8C,QAAjC,GAA4CrD,KAAnD;AACD,SAHO,CAAR;AAKAkD,QAAAA,SAAS,CAAClD,KAAV,GAAkBP,IAAI,CAACyB,IAAL,CAAUlB,KAAV,CAAlB;AACD,OARD,MAQO;AACLkD,QAAAA,SAAS,CAAClD,KAAV,GAAkBP,IAAI,CAACyB,IAAL,CAAUvB,SAAS,CAACwD,KAAV,CAAgBrC,MAAhB,EAAwBA,MAAM,GAAGoC,SAAS,CAAC/B,KAA3C,CAAV,CAAlB;AACD;;AACDL,MAAAA,MAAM,IAAIoC,SAAS,CAAC/B,KAApB,CAZsB,CActB;;AACA,UAAI,CAAC+B,SAAS,CAACjB,KAAf,EAAsB;AACpBjB,QAAAA,MAAM,IAAIkC,SAAS,CAAC/B,KAApB;AACD;AACF,KAlBD,MAkBO;AACL+B,MAAAA,SAAS,CAAClD,KAAV,GAAkBP,IAAI,CAACyB,IAAL,CAAUxB,SAAS,CAACyD,KAAV,CAAgBnC,MAAhB,EAAwBA,MAAM,GAAGkC,SAAS,CAAC/B,KAA3C,CAAV,CAAlB;AACAH,MAAAA,MAAM,IAAIkC,SAAS,CAAC/B,KAApB,CAFK,CAIL;AACA;AACA;;AACA,UAAI6B,YAAY,IAAIjC,UAAU,CAACiC,YAAY,GAAG,CAAhB,CAAV,CAA6Bf,KAAjD,EAAwD;AACtD,YAAIqB,GAAG,GAAGvC,UAAU,CAACiC,YAAY,GAAG,CAAhB,CAApB;AACAjC,QAAAA,UAAU,CAACiC,YAAY,GAAG,CAAhB,CAAV,GAA+BjC,UAAU,CAACiC,YAAD,CAAzC;AACAjC,QAAAA,UAAU,CAACiC,YAAD,CAAV,GAA2BM,GAA3B;AACD;AACF;AACF,GAvC2E,CAyC5E;AACA;AACA;;;AACA,MAAIC,aAAa,GAAGxC,UAAU,CAACkC,YAAY,GAAG,CAAhB,CAA9B;;AACA,MAAIA,YAAY,GAAG,CAAf,IACG,OAAOM,aAAa,CAACvD,KAArB,KAA+B,QADlC,KAEIuD,aAAa,CAACtB,KAAd,IAAuBsB,aAAa,CAACrB,OAFzC,KAGGzC,IAAI,CAAC6C,MAAL,CAAY,EAAZ,EAAgBiB,aAAa,CAACvD,KAA9B,CAHP,EAG6C;AAC3Ce,IAAAA,UAAU,CAACkC,YAAY,GAAG,CAAhB,CAAV,CAA6BjD,KAA7B,IAAsCuD,aAAa,CAACvD,KAApD;AACAe,IAAAA,UAAU,CAACyC,GAAX;AACD;;AAED,SAAOzC,UAAP;AACD;;AAED,SAASY,SAAT,CAAmB8B,IAAnB,EAAyB;AACvB,SAAO;AAAE3C,IAAAA,MAAM,EAAE2C,IAAI,CAAC3C,MAAf;AAAuBC,IAAAA,UAAU,EAAE0C,IAAI,CAAC1C,UAAL,CAAgBoC,KAAhB,CAAsB,CAAtB;AAAnC,GAAP;AACD","sourcesContent":["export default function Diff() {}\n\nDiff.prototype = {\n  diff(oldString, newString, options = {}) {\n    let callback = options.callback;\n    if (typeof options === 'function') {\n      callback = options;\n      options = {};\n    }\n    this.options = options;\n\n    let self = this;\n\n    function done(value) {\n      if (callback) {\n        setTimeout(function() { callback(undefined, value); }, 0);\n        return true;\n      } else {\n        return value;\n      }\n    }\n\n    // Allow subclasses to massage the input prior to running\n    oldString = this.castInput(oldString);\n    newString = this.castInput(newString);\n\n    oldString = this.removeEmpty(this.tokenize(oldString));\n    newString = this.removeEmpty(this.tokenize(newString));\n\n    let newLen = newString.length, oldLen = oldString.length;\n    let editLength = 1;\n    let maxEditLength = newLen + oldLen;\n    if(options.maxEditLength) {\n      maxEditLength = Math.min(maxEditLength, options.maxEditLength);\n    }\n\n    let bestPath = [{ newPos: -1, components: [] }];\n\n    // Seed editLength = 0, i.e. the content starts with the same values\n    let oldPos = this.extractCommon(bestPath[0], newString, oldString, 0);\n    if (bestPath[0].newPos + 1 >= newLen && oldPos + 1 >= oldLen) {\n      // Identity per the equality and tokenizer\n      return done([{value: this.join(newString), count: newString.length}]);\n    }\n\n    // Main worker method. checks all permutations of a given edit length for acceptance.\n    function execEditLength() {\n      for (let diagonalPath = -1 * editLength; diagonalPath <= editLength; diagonalPath += 2) {\n        let basePath;\n        let addPath = bestPath[diagonalPath - 1],\n            removePath = bestPath[diagonalPath + 1],\n            oldPos = (removePath ? 
removePath.newPos : 0) - diagonalPath;\n        if (addPath) {\n          // No one else is going to attempt to use this value, clear it\n          bestPath[diagonalPath - 1] = undefined;\n        }\n\n        let canAdd = addPath && addPath.newPos + 1 < newLen,\n            canRemove = removePath && 0 <= oldPos && oldPos < oldLen;\n        if (!canAdd && !canRemove) {\n          // If this path is a terminal then prune\n          bestPath[diagonalPath] = undefined;\n          continue;\n        }\n\n        // Select the diagonal that we want to branch from. We select the prior\n        // path whose position in the new string is the farthest from the origin\n        // and does not pass the bounds of the diff graph\n        if (!canAdd || (canRemove && addPath.newPos < removePath.newPos)) {\n          basePath = clonePath(removePath);\n          self.pushComponent(basePath.components, undefined, true);\n        } else {\n          basePath = addPath; // No need to clone, we've pulled it from the list\n          basePath.newPos++;\n          self.pushComponent(basePath.components, true, undefined);\n        }\n\n        oldPos = self.extractCommon(basePath, newString, oldString, diagonalPath);\n\n        // If we have hit the end of both strings, then we are done\n        if (basePath.newPos + 1 >= newLen && oldPos + 1 >= oldLen) {\n          return done(buildValues(self, basePath.components, newString, oldString, self.useLongestToken));\n        } else {\n          // Otherwise track this path as a potential candidate and continue.\n          bestPath[diagonalPath] = basePath;\n        }\n      }\n\n      editLength++;\n    }\n\n    // Performs the length of edit iteration. Is a bit fugly as this has to support the\n    // sync and async mode which is never fun. 
Loops over execEditLength until a value\n    // is produced, or until the edit length exceeds options.maxEditLength (if given),\n    // in which case it will return undefined.\n    if (callback) {\n      (function exec() {\n        setTimeout(function() {\n          if (editLength > maxEditLength) {\n            return callback();\n          }\n\n          if (!execEditLength()) {\n            exec();\n          }\n        }, 0);\n      }());\n    } else {\n      while (editLength <= maxEditLength) {\n        let ret = execEditLength();\n        if (ret) {\n          return ret;\n        }\n      }\n    }\n  },\n\n  pushComponent(components, added, removed) {\n    let last = components[components.length - 1];\n    if (last && last.added === added && last.removed === removed) {\n      // We need to clone here as the component clone operation is just\n      // as shallow array clone\n      components[components.length - 1] = {count: last.count + 1, added: added, removed: removed };\n    } else {\n      components.push({count: 1, added: added, removed: removed });\n    }\n  },\n  extractCommon(basePath, newString, oldString, diagonalPath) {\n    let newLen = newString.length,\n        oldLen = oldString.length,\n        newPos = basePath.newPos,\n        oldPos = newPos - diagonalPath,\n\n        commonCount = 0;\n    while (newPos + 1 < newLen && oldPos + 1 < oldLen && this.equals(newString[newPos + 1], oldString[oldPos + 1])) {\n      newPos++;\n      oldPos++;\n      commonCount++;\n    }\n\n    if (commonCount) {\n      basePath.components.push({count: commonCount});\n    }\n\n    basePath.newPos = newPos;\n    return oldPos;\n  },\n\n  equals(left, right) {\n    if (this.options.comparator) {\n      return this.options.comparator(left, right);\n    } else {\n      return left === right\n        || (this.options.ignoreCase && left.toLowerCase() === right.toLowerCase());\n    }\n  },\n  removeEmpty(array) {\n    let ret = [];\n    for (let i = 0; i < array.length; i++) {\n      if (array[i]) {\n        ret.push(array[i]);\n      }\n    }\n    return ret;\n  },\n  castInput(value) {\n    return value;\n  },\n  tokenize(value) {\n    return value.split('');\n  },\n  join(chars) {\n    return chars.join('');\n  }\n};\n\nfunction buildValues(diff, components, newString, oldString, useLongestToken) {\n  let componentPos = 0,\n      componentLen = components.length,\n      newPos = 0,\n      oldPos = 0;\n\n  for (; componentPos < componentLen; componentPos++) {\n    let component = components[componentPos];\n    if (!component.removed) {\n      if (!component.added && useLongestToken) {\n        let value = newString.slice(newPos, newPos + component.count);\n        value = value.map(function(value, i) {\n          let oldValue = oldString[oldPos + i];\n          return oldValue.length > value.length ? 
oldValue : value;\n        });\n\n        component.value = diff.join(value);\n      } else {\n        component.value = diff.join(newString.slice(newPos, newPos + component.count));\n      }\n      newPos += component.count;\n\n      // Common case\n      if (!component.added) {\n        oldPos += component.count;\n      }\n    } else {\n      component.value = diff.join(oldString.slice(oldPos, oldPos + component.count));\n      oldPos += component.count;\n\n      // Reverse add and remove so removes are output first to match common convention\n      // The diffing algorithm is tied to add then remove output and this is the simplest\n      // route to get the desired output with minimal overhead.\n      if (componentPos && components[componentPos - 1].added) {\n        let tmp = components[componentPos - 1];\n        components[componentPos - 1] = components[componentPos];\n        components[componentPos] = tmp;\n      }\n    }\n  }\n\n  // Special case handle for when one terminal is ignored (i.e. whitespace).\n  // For this case we merge the terminal into the prior string and drop the change.\n  // This is only available for string mode.\n  let lastComponent = components[componentLen - 1];\n  if (componentLen > 1\n      && typeof lastComponent.value === 'string'\n      && (lastComponent.added || lastComponent.removed)\n      && diff.equals('', lastComponent.value)) {\n    components[componentLen - 2].value += lastComponent.value;\n    components.pop();\n  }\n\n  return components;\n}\n\nfunction clonePath(path) {\n  return { newPos: path.newPos, components: path.components.slice(0) };\n}\n"]}
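The base class whose source map ends here drives an edit-length search over tokens and leaves castInput/tokenize/join/equals as override points; each of the files added below creates one Diff instance and swaps in its own tokenizer. A minimal sketch of that pattern follows, assuming Node resolves the vendored files under diff/lib/ as laid out in this patch; the tokenizer and sample strings are invented for illustration.

    // Sketch: reuse the vendored base Diff with a custom comma tokenizer.
    var Diff = require('diff/lib/diff/base')['default']; // compiled CommonJS default export

    var csvDiff = new Diff();
    csvDiff.tokenize = function (value) {
      return value.split(/(,)/); // keep the separators as their own tokens
    };

    // diff() returns change objects of the form { value, count, added?, removed? }.
    // Pass { maxEditLength: n } to cap the search (undefined when exceeded),
    // or a function as the options argument to run in async callback mode.
    console.log(csvDiff.diff('a,b,c', 'a,x,c'));
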
diff --git a/_extensions/d2/node_modules/diff/lib/diff/character.js b/_extensions/d2/node_modules/diff/lib/diff/character.js
new file mode 100644
index 00000000..7ddfa205
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/diff/character.js
@@ -0,0 +1,37 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.diffChars = diffChars;
+exports.characterDiff = void 0;
+
+/*istanbul ignore end*/
+var
+/*istanbul ignore start*/
+_base = _interopRequireDefault(require("./base"))
+/*istanbul ignore end*/
+;
+
+/*istanbul ignore start*/ function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
+
+/*istanbul ignore end*/
+var characterDiff = new
+/*istanbul ignore start*/
+_base
+/*istanbul ignore end*/
+[
+/*istanbul ignore start*/
+"default"
+/*istanbul ignore end*/
+]();
+
+/*istanbul ignore start*/
+exports.characterDiff = characterDiff;
+
+/*istanbul ignore end*/
+function diffChars(oldStr, newStr, options) {
+  return characterDiff.diff(oldStr, newStr, options);
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy9kaWZmL2NoYXJhY3Rlci5qcyJdLCJuYW1lcyI6WyJjaGFyYWN0ZXJEaWZmIiwiRGlmZiIsImRpZmZDaGFycyIsIm9sZFN0ciIsIm5ld1N0ciIsIm9wdGlvbnMiLCJkaWZmIl0sIm1hcHBpbmdzIjoiOzs7Ozs7Ozs7O0FBQUE7QUFBQTtBQUFBO0FBQUE7QUFBQTs7Ozs7QUFFTyxJQUFNQSxhQUFhLEdBQUc7QUFBSUM7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUEsQ0FBSixFQUF0Qjs7Ozs7O0FBQ0EsU0FBU0MsU0FBVCxDQUFtQkMsTUFBbkIsRUFBMkJDLE1BQTNCLEVBQW1DQyxPQUFuQyxFQUE0QztBQUFFLFNBQU9MLGFBQWEsQ0FBQ00sSUFBZCxDQUFtQkgsTUFBbkIsRUFBMkJDLE1BQTNCLEVBQW1DQyxPQUFuQyxDQUFQO0FBQXFEIiwic291cmNlc0NvbnRlbnQiOlsiaW1wb3J0IERpZmYgZnJvbSAnLi9iYXNlJztcblxuZXhwb3J0IGNvbnN0IGNoYXJhY3RlckRpZmYgPSBuZXcgRGlmZigpO1xuZXhwb3J0IGZ1bmN0aW9uIGRpZmZDaGFycyhvbGRTdHIsIG5ld1N0ciwgb3B0aW9ucykgeyByZXR1cm4gY2hhcmFjdGVyRGlmZi5kaWZmKG9sZFN0ciwgbmV3U3RyLCBvcHRpb25zKTsgfVxuIl19
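For illustration, a minimal use of the diffChars export this file provides; the require path follows the vendored layout and the input strings are arbitrary.

    var diffChars = require('diff/lib/diff/character').diffChars;

    // Character-level diff; each part carries value plus optional added/removed flags.
    diffChars('kitten', 'sitting').forEach(function (part) {
      var mark = part.added ? '+' : part.removed ? '-' : ' ';
      console.log(mark + ' ' + part.value);
    });

    // The base equals() honours ignoreCase: diffChars('ABC', 'abc', { ignoreCase: true })
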
diff --git a/_extensions/d2/node_modules/diff/lib/diff/css.js b/_extensions/d2/node_modules/diff/lib/diff/css.js
new file mode 100644
index 00000000..e3ad1fcb
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/diff/css.js
@@ -0,0 +1,41 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.diffCss = diffCss;
+exports.cssDiff = void 0;
+
+/*istanbul ignore end*/
+var
+/*istanbul ignore start*/
+_base = _interopRequireDefault(require("./base"))
+/*istanbul ignore end*/
+;
+
+/*istanbul ignore start*/ function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
+
+/*istanbul ignore end*/
+var cssDiff = new
+/*istanbul ignore start*/
+_base
+/*istanbul ignore end*/
+[
+/*istanbul ignore start*/
+"default"
+/*istanbul ignore end*/
+]();
+
+/*istanbul ignore start*/
+exports.cssDiff = cssDiff;
+
+/*istanbul ignore end*/
+cssDiff.tokenize = function (value) {
+  return value.split(/([{}:;,]|\s+)/);
+};
+
+function diffCss(oldStr, newStr, callback) {
+  return cssDiff.diff(oldStr, newStr, callback);
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy9kaWZmL2Nzcy5qcyJdLCJuYW1lcyI6WyJjc3NEaWZmIiwiRGlmZiIsInRva2VuaXplIiwidmFsdWUiLCJzcGxpdCIsImRpZmZDc3MiLCJvbGRTdHIiLCJuZXdTdHIiLCJjYWxsYmFjayIsImRpZmYiXSwibWFwcGluZ3MiOiI7Ozs7Ozs7Ozs7QUFBQTtBQUFBO0FBQUE7QUFBQTtBQUFBOzs7OztBQUVPLElBQU1BLE9BQU8sR0FBRztBQUFJQztBQUFBQTtBQUFBQTtBQUFBQTtBQUFBQTtBQUFBQTtBQUFBQTtBQUFBQSxDQUFKLEVBQWhCOzs7Ozs7QUFDUEQsT0FBTyxDQUFDRSxRQUFSLEdBQW1CLFVBQVNDLEtBQVQsRUFBZ0I7QUFDakMsU0FBT0EsS0FBSyxDQUFDQyxLQUFOLENBQVksZUFBWixDQUFQO0FBQ0QsQ0FGRDs7QUFJTyxTQUFTQyxPQUFULENBQWlCQyxNQUFqQixFQUF5QkMsTUFBekIsRUFBaUNDLFFBQWpDLEVBQTJDO0FBQUUsU0FBT1IsT0FBTyxDQUFDUyxJQUFSLENBQWFILE1BQWIsRUFBcUJDLE1BQXJCLEVBQTZCQyxRQUE3QixDQUFQO0FBQWdEIiwic291cmNlc0NvbnRlbnQiOlsiaW1wb3J0IERpZmYgZnJvbSAnLi9iYXNlJztcblxuZXhwb3J0IGNvbnN0IGNzc0RpZmYgPSBuZXcgRGlmZigpO1xuY3NzRGlmZi50b2tlbml6ZSA9IGZ1bmN0aW9uKHZhbHVlKSB7XG4gIHJldHVybiB2YWx1ZS5zcGxpdCgvKFt7fTo7LF18XFxzKykvKTtcbn07XG5cbmV4cG9ydCBmdW5jdGlvbiBkaWZmQ3NzKG9sZFN0ciwgbmV3U3RyLCBjYWxsYmFjaykgeyByZXR1cm4gY3NzRGlmZi5kaWZmKG9sZFN0ciwgbmV3U3RyLCBjYWxsYmFjayk7IH1cbiJdfQ==
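A short usage sketch of diffCss as exported above, which tokenizes on braces, colons, semicolons, commas and whitespace runs; the sample rules are made up.

    var diffCss = require('diff/lib/diff/css').diffCss;

    // Because punctuation and whitespace are their own tokens, only the changed value surfaces.
    var parts = diffCss('a { color: red; }', 'a { color: blue; }');
    parts.forEach(function (p) {
      if (p.added || p.removed) console.log((p.added ? '+' : '-') + p.value);
    });
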
diff --git a/_extensions/d2/node_modules/diff/lib/diff/json.js b/_extensions/d2/node_modules/diff/lib/diff/json.js
new file mode 100644
index 00000000..67c2f175
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/diff/json.js
@@ -0,0 +1,163 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.diffJson = diffJson;
+exports.canonicalize = canonicalize;
+exports.jsonDiff = void 0;
+
+/*istanbul ignore end*/
+var
+/*istanbul ignore start*/
+_base = _interopRequireDefault(require("./base"))
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_line = require("./line")
+/*istanbul ignore end*/
+;
+
+/*istanbul ignore start*/ function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
+
+function _typeof(obj) { "@babel/helpers - typeof"; if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; } return _typeof(obj); }
+
+/*istanbul ignore end*/
+var objectPrototypeToString = Object.prototype.toString;
+var jsonDiff = new
+/*istanbul ignore start*/
+_base
+/*istanbul ignore end*/
+[
+/*istanbul ignore start*/
+"default"
+/*istanbul ignore end*/
+](); // Discriminate between two lines of pretty-printed, serialized JSON where one of them has a
+// dangling comma and the other doesn't. Turns out including the dangling comma yields the nicest output:
+
+/*istanbul ignore start*/
+exports.jsonDiff = jsonDiff;
+
+/*istanbul ignore end*/
+jsonDiff.useLongestToken = true;
+jsonDiff.tokenize =
+/*istanbul ignore start*/
+_line
+/*istanbul ignore end*/
+.
+/*istanbul ignore start*/
+lineDiff
+/*istanbul ignore end*/
+.tokenize;
+
+jsonDiff.castInput = function (value) {
+  /*istanbul ignore start*/
+  var _this$options =
+  /*istanbul ignore end*/
+  this.options,
+      undefinedReplacement = _this$options.undefinedReplacement,
+      _this$options$stringi = _this$options.stringifyReplacer,
+      stringifyReplacer = _this$options$stringi === void 0 ? function (k, v)
+  /*istanbul ignore start*/
+  {
+    return (
+      /*istanbul ignore end*/
+      typeof v === 'undefined' ? undefinedReplacement : v
+    );
+  } : _this$options$stringi;
+  return typeof value === 'string' ? value : JSON.stringify(canonicalize(value, null, null, stringifyReplacer), stringifyReplacer, '  ');
+};
+
+jsonDiff.equals = function (left, right) {
+  return (
+    /*istanbul ignore start*/
+    _base
+    /*istanbul ignore end*/
+    [
+    /*istanbul ignore start*/
+    "default"
+    /*istanbul ignore end*/
+    ].prototype.equals.call(jsonDiff, left.replace(/,([\r\n])/g, '$1'), right.replace(/,([\r\n])/g, '$1'))
+  );
+};
+
+function diffJson(oldObj, newObj, options) {
+  return jsonDiff.diff(oldObj, newObj, options);
+} // This function handles the presence of circular references by bailing out when encountering an
+// object that is already on the "stack" of items being processed. Accepts an optional replacer
+
+
+function canonicalize(obj, stack, replacementStack, replacer, key) {
+  stack = stack || [];
+  replacementStack = replacementStack || [];
+
+  if (replacer) {
+    obj = replacer(key, obj);
+  }
+
+  var i;
+
+  for (i = 0; i < stack.length; i += 1) {
+    if (stack[i] === obj) {
+      return replacementStack[i];
+    }
+  }
+
+  var canonicalizedObj;
+
+  if ('[object Array]' === objectPrototypeToString.call(obj)) {
+    stack.push(obj);
+    canonicalizedObj = new Array(obj.length);
+    replacementStack.push(canonicalizedObj);
+
+    for (i = 0; i < obj.length; i += 1) {
+      canonicalizedObj[i] = canonicalize(obj[i], stack, replacementStack, replacer, key);
+    }
+
+    stack.pop();
+    replacementStack.pop();
+    return canonicalizedObj;
+  }
+
+  if (obj && obj.toJSON) {
+    obj = obj.toJSON();
+  }
+
+  if (
+  /*istanbul ignore start*/
+  _typeof(
+  /*istanbul ignore end*/
+  obj) === 'object' && obj !== null) {
+    stack.push(obj);
+    canonicalizedObj = {};
+    replacementStack.push(canonicalizedObj);
+
+    var sortedKeys = [],
+        _key;
+
+    for (_key in obj) {
+      /* istanbul ignore else */
+      if (obj.hasOwnProperty(_key)) {
+        sortedKeys.push(_key);
+      }
+    }
+
+    sortedKeys.sort();
+
+    for (i = 0; i < sortedKeys.length; i += 1) {
+      _key = sortedKeys[i];
+      canonicalizedObj[_key] = canonicalize(obj[_key], stack, replacementStack, replacer, _key);
+    }
+
+    stack.pop();
+    replacementStack.pop();
+  } else {
+    canonicalizedObj = obj;
+  }
+
+  return canonicalizedObj;
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"sources":["../../src/diff/json.js"],"names":["objectPrototypeToString","Object","prototype","toString","jsonDiff","Diff","useLongestToken","tokenize","lineDiff","castInput","value","options","undefinedReplacement","stringifyReplacer","k","v","JSON","stringify","canonicalize","equals","left","right","call","replace","diffJson","oldObj","newObj","diff","obj","stack","replacementStack","replacer","key","i","length","canonicalizedObj","push","Array","pop","toJSON","sortedKeys","hasOwnProperty","sort"],"mappings":";;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;;AACA;AAAA;AAAA;AAAA;AAAA;;;;;;;AAEA,IAAMA,uBAAuB,GAAGC,MAAM,CAACC,SAAP,CAAiBC,QAAjD;AAGO,IAAMC,QAAQ,GAAG;AAAIC;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA,CAAJ,EAAjB,C,CACP;AACA;;;;;;AACAD,QAAQ,CAACE,eAAT,GAA2B,IAA3B;AAEAF,QAAQ,CAACG,QAAT;AAAoBC;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAA;AAAA,CAASD,QAA7B;;AACAH,QAAQ,CAACK,SAAT,GAAqB,UAASC,KAAT,EAAgB;AAAA;AAAA;AAAA;AAC+E,OAAKC,OADpF;AAAA,MAC5BC,oBAD4B,iBAC5BA,oBAD4B;AAAA,4CACNC,iBADM;AAAA,MACNA,iBADM,sCACc,UAACC,CAAD,EAAIC,CAAJ;AAAA;AAAA;AAAA;AAAA;AAAU,aAAOA,CAAP,KAAa,WAAb,GAA2BH,oBAA3B,GAAkDG;AAA5D;AAAA,GADd;AAGnC,SAAO,OAAOL,KAAP,KAAiB,QAAjB,GAA4BA,KAA5B,GAAoCM,IAAI,CAACC,SAAL,CAAeC,YAAY,CAACR,KAAD,EAAQ,IAAR,EAAc,IAAd,EAAoBG,iBAApB,CAA3B,EAAmEA,iBAAnE,EAAsF,IAAtF,CAA3C;AACD,CAJD;;AAKAT,QAAQ,CAACe,MAAT,GAAkB,UAASC,IAAT,EAAeC,KAAf,EAAsB;AACtC,SAAOhB;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA,MAAKH,SAAL,CAAeiB,MAAf,CAAsBG,IAAtB,CAA2BlB,QAA3B,EAAqCgB,IAAI,CAACG,OAAL,CAAa,YAAb,EAA2B,IAA3B,CAArC,EAAuEF,KAAK,CAACE,OAAN,CAAc,YAAd,EAA4B,IAA5B,CAAvE;AAAP;AACD,CAFD;;AAIO,SAASC,QAAT,CAAkBC,MAAlB,EAA0BC,MAA1B,EAAkCf,OAAlC,EAA2C;AAAE,SAAOP,QAAQ,CAACuB,IAAT,CAAcF,MAAd,EAAsBC,MAAtB,EAA8Bf,OAA9B,CAAP;AAAgD,C,CAEpG;AACA;;;AACO,SAASO,YAAT,CAAsBU,GAAtB,EAA2BC,KAA3B,EAAkCC,gBAAlC,EAAoDC,QAApD,EAA8DC,GAA9D,EAAmE;AACxEH,EAAAA,KAAK,GAAGA,KAAK,IAAI,EAAjB;AACAC,EAAAA,gBAAgB,GAAGA,gBAAgB,IAAI,EAAvC;;AAEA,MAAIC,QAAJ,EAAc;AACZH,IAAAA,GAAG,GAAGG,QAAQ,CAACC,GAAD,EAAMJ,GAAN,CAAd;AACD;;AAED,MAAIK,CAAJ;;AAEA,OAAKA,CAAC,GAAG,CAAT,EAAYA,CAAC,GAAGJ,KAAK,CAACK,MAAtB,EAA8BD,CAAC,IAAI,CAAnC,EAAsC;AACpC,QAAIJ,KAAK,CAACI,CAAD,CAAL,KAAaL,GAAjB,EAAsB;AACpB,aAAOE,gBAAgB,CAACG,CAAD,CAAvB;AACD;AACF;;AAED,MAAIE,gBAAJ;;AAEA,MAAI,qBAAqBnC,uBAAuB,CAACsB,IAAxB,CAA6BM,GAA7B,CAAzB,EAA4D;AAC1DC,IAAAA,KAAK,CAACO,IAAN,CAAWR,GAAX;AACAO,IAAAA,gBAAgB,GAAG,IAAIE,KAAJ,CAAUT,GAAG,CAACM,MAAd,CAAnB;AACAJ,IAAAA,gBAAgB,CAACM,IAAjB,CAAsBD,gBAAtB;;AACA,SAAKF,CAAC,GAAG,CAAT,EAAYA,CAAC,GAAGL,GAAG,CAACM,MAApB,EAA4BD,CAAC,IAAI,CAAjC,EAAoC;AAClCE,MAAAA,gBAAgB,CAACF,CAAD,CAAhB,GAAsBf,YAAY,CAACU,GAAG,CAACK,CAAD,CAAJ,EAASJ,KAAT,EAAgBC,gBAAhB,EAAkCC,QAAlC,EAA4CC,GAA5C,CAAlC;AACD;;AACDH,IAAAA,KAAK,CAACS,GAAN;AACAR,IAAAA,gBAAgB,CAACQ,GAAjB;AACA,WAAOH,gBAAP;AACD;;AAED,MAAIP,GAAG,IAAIA,GAAG,CAACW,MAAf,EAAuB;AACrBX,IAAAA,GAAG,GAAGA,GAAG,CAACW,MAAJ,EAAN;AACD;;AAED;AAAI;AAAA;AAAA;AAAOX,EAAAA,GAAP,MAAe,QAAf,IAA2BA,GAAG,KAAK,IAAvC,EAA6C;AAC3CC,IAAAA,KAAK,CAACO,IAAN,CAAWR,GAAX;AACAO,IAAAA,gBAAgB,GAAG,EAAnB;AACAL,IAAAA,gBAAgB,CAACM,IAAjB,CAAsBD,gBAAtB;;AACA,QAAIK,UAAU,GAAG,EAAjB;AAAA,QACIR,IADJ;;AAEA,SAAKA,IAAL,IAAYJ,GAAZ,EAAiB;AACf;AACA,UAAIA,GAAG,CAACa,cAAJ,CAAmBT,IAAnB,CAAJ,EAA6B;AAC3BQ,QAAAA,UAAU,CAACJ,IAAX,CAAgBJ,IAAhB;AACD;AACF;;AACDQ,IAAAA,UAAU,CAACE,IAAX;;AACA,SAAKT,CAAC,GAAG,CAAT,EAAYA,CAAC,GAAGO,UAAU,CAACN,MAA3B,EAAmCD,CAAC,IAAI,CAAxC,EAA2C;AACzCD,MAAAA,IAAG,GAAGQ,UAAU,CAACP,CAAD,CAAhB;AACAE,MAAAA,gBAAgB,CAACH,IAAD,CAAhB,GAAwBd,YAAY,CAACU,GAAG,CAACI,IAAD,CAAJ,EAAWH,KAAX,EAAkBC,gBAAlB,EAAoCC,QAApC,EAA8CC,
IAA9C,CAApC;AACD;;AACDH,IAAAA,KAAK,CAACS,GAAN;AACAR,IAAAA,gBAAgB,CAACQ,GAAjB;AACD,GAnBD,MAmBO;AACLH,IAAAA,gBAAgB,GAAGP,GAAnB;AACD;;AACD,SAAOO,gBAAP;AACD","sourcesContent":["import Diff from './base';\nimport {lineDiff} from './line';\n\nconst objectPrototypeToString = Object.prototype.toString;\n\n\nexport const jsonDiff = new Diff();\n// Discriminate between two lines of pretty-printed, serialized JSON where one of them has a\n// dangling comma and the other doesn't. Turns out including the dangling comma yields the nicest output:\njsonDiff.useLongestToken = true;\n\njsonDiff.tokenize = lineDiff.tokenize;\njsonDiff.castInput = function(value) {\n  const {undefinedReplacement, stringifyReplacer = (k, v) => typeof v === 'undefined' ? undefinedReplacement : v} = this.options;\n\n  return typeof value === 'string' ? value : JSON.stringify(canonicalize(value, null, null, stringifyReplacer), stringifyReplacer, '  ');\n};\njsonDiff.equals = function(left, right) {\n  return Diff.prototype.equals.call(jsonDiff, left.replace(/,([\\r\\n])/g, '$1'), right.replace(/,([\\r\\n])/g, '$1'));\n};\n\nexport function diffJson(oldObj, newObj, options) { return jsonDiff.diff(oldObj, newObj, options); }\n\n// This function handles the presence of circular references by bailing out when encountering an\n// object that is already on the \"stack\" of items being processed. Accepts an optional replacer\nexport function canonicalize(obj, stack, replacementStack, replacer, key) {\n  stack = stack || [];\n  replacementStack = replacementStack || [];\n\n  if (replacer) {\n    obj = replacer(key, obj);\n  }\n\n  let i;\n\n  for (i = 0; i < stack.length; i += 1) {\n    if (stack[i] === obj) {\n      return replacementStack[i];\n    }\n  }\n\n  let canonicalizedObj;\n\n  if ('[object Array]' === objectPrototypeToString.call(obj)) {\n    stack.push(obj);\n    canonicalizedObj = new Array(obj.length);\n    replacementStack.push(canonicalizedObj);\n    for (i = 0; i < obj.length; i += 1) {\n      canonicalizedObj[i] = canonicalize(obj[i], stack, replacementStack, replacer, key);\n    }\n    stack.pop();\n    replacementStack.pop();\n    return canonicalizedObj;\n  }\n\n  if (obj && obj.toJSON) {\n    obj = obj.toJSON();\n  }\n\n  if (typeof obj === 'object' && obj !== null) {\n    stack.push(obj);\n    canonicalizedObj = {};\n    replacementStack.push(canonicalizedObj);\n    let sortedKeys = [],\n        key;\n    for (key in obj) {\n      /* istanbul ignore else */\n      if (obj.hasOwnProperty(key)) {\n        sortedKeys.push(key);\n      }\n    }\n    sortedKeys.sort();\n    for (i = 0; i < sortedKeys.length; i += 1) {\n      key = sortedKeys[i];\n      canonicalizedObj[key] = canonicalize(obj[key], stack, replacementStack, replacer, key);\n    }\n    stack.pop();\n    replacementStack.pop();\n  } else {\n    canonicalizedObj = obj;\n  }\n  return canonicalizedObj;\n}\n"]}
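A sketch of diffJson from the file above: inputs are canonicalized (keys sorted, circular references resolved via the stack), pretty-printed with two-space indentation, then diffed line by line. The objects here are sample data.

    var diffJson = require('diff/lib/diff/json').diffJson;

    // Non-string inputs are stringified via canonicalize(), so key order does not matter.
    var parts = diffJson({ b: 2, a: 1 }, { a: 1, b: 3 });
    parts.forEach(function (p) {
      if (p.added || p.removed) console.log((p.added ? '+' : '-') + p.value);
    });

    // { undefinedReplacement: null } substitutes undefined values before stringifying.
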
diff --git a/_extensions/d2/node_modules/diff/lib/diff/line.js b/_extensions/d2/node_modules/diff/lib/diff/line.js
new file mode 100644
index 00000000..855fe30b
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/diff/line.js
@@ -0,0 +1,89 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.diffLines = diffLines;
+exports.diffTrimmedLines = diffTrimmedLines;
+exports.lineDiff = void 0;
+
+/*istanbul ignore end*/
+var
+/*istanbul ignore start*/
+_base = _interopRequireDefault(require("./base"))
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_params = require("../util/params")
+/*istanbul ignore end*/
+;
+
+/*istanbul ignore start*/ function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
+
+/*istanbul ignore end*/
+var lineDiff = new
+/*istanbul ignore start*/
+_base
+/*istanbul ignore end*/
+[
+/*istanbul ignore start*/
+"default"
+/*istanbul ignore end*/
+]();
+
+/*istanbul ignore start*/
+exports.lineDiff = lineDiff;
+
+/*istanbul ignore end*/
+lineDiff.tokenize = function (value) {
+  var retLines = [],
+      linesAndNewlines = value.split(/(\n|\r\n)/); // Ignore the final empty token that occurs if the string ends with a new line
+
+  if (!linesAndNewlines[linesAndNewlines.length - 1]) {
+    linesAndNewlines.pop();
+  } // Merge the content and line separators into single tokens
+
+
+  for (var i = 0; i < linesAndNewlines.length; i++) {
+    var line = linesAndNewlines[i];
+
+    if (i % 2 && !this.options.newlineIsToken) {
+      retLines[retLines.length - 1] += line;
+    } else {
+      if (this.options.ignoreWhitespace) {
+        line = line.trim();
+      }
+
+      retLines.push(line);
+    }
+  }
+
+  return retLines;
+};
+
+function diffLines(oldStr, newStr, callback) {
+  return lineDiff.diff(oldStr, newStr, callback);
+}
+
+function diffTrimmedLines(oldStr, newStr, callback) {
+  var options =
+  /*istanbul ignore start*/
+  (0,
+  /*istanbul ignore end*/
+
+  /*istanbul ignore start*/
+  _params
+  /*istanbul ignore end*/
+  .
+  /*istanbul ignore start*/
+  generateOptions)
+  /*istanbul ignore end*/
+  (callback, {
+    ignoreWhitespace: true
+  });
+  return lineDiff.diff(oldStr, newStr, options);
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy9kaWZmL2xpbmUuanMiXSwibmFtZXMiOlsibGluZURpZmYiLCJEaWZmIiwidG9rZW5pemUiLCJ2YWx1ZSIsInJldExpbmVzIiwibGluZXNBbmROZXdsaW5lcyIsInNwbGl0IiwibGVuZ3RoIiwicG9wIiwiaSIsImxpbmUiLCJvcHRpb25zIiwibmV3bGluZUlzVG9rZW4iLCJpZ25vcmVXaGl0ZXNwYWNlIiwidHJpbSIsInB1c2giLCJkaWZmTGluZXMiLCJvbGRTdHIiLCJuZXdTdHIiLCJjYWxsYmFjayIsImRpZmYiLCJkaWZmVHJpbW1lZExpbmVzIiwiZ2VuZXJhdGVPcHRpb25zIl0sIm1hcHBpbmdzIjoiOzs7Ozs7Ozs7OztBQUFBO0FBQUE7QUFBQTtBQUFBO0FBQUE7O0FBQ0E7QUFBQTtBQUFBO0FBQUE7QUFBQTs7Ozs7QUFFTyxJQUFNQSxRQUFRLEdBQUc7QUFBSUM7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUEsQ0FBSixFQUFqQjs7Ozs7O0FBQ1BELFFBQVEsQ0FBQ0UsUUFBVCxHQUFvQixVQUFTQyxLQUFULEVBQWdCO0FBQ2xDLE1BQUlDLFFBQVEsR0FBRyxFQUFmO0FBQUEsTUFDSUMsZ0JBQWdCLEdBQUdGLEtBQUssQ0FBQ0csS0FBTixDQUFZLFdBQVosQ0FEdkIsQ0FEa0MsQ0FJbEM7O0FBQ0EsTUFBSSxDQUFDRCxnQkFBZ0IsQ0FBQ0EsZ0JBQWdCLENBQUNFLE1BQWpCLEdBQTBCLENBQTNCLENBQXJCLEVBQW9EO0FBQ2xERixJQUFBQSxnQkFBZ0IsQ0FBQ0csR0FBakI7QUFDRCxHQVBpQyxDQVNsQzs7O0FBQ0EsT0FBSyxJQUFJQyxDQUFDLEdBQUcsQ0FBYixFQUFnQkEsQ0FBQyxHQUFHSixnQkFBZ0IsQ0FBQ0UsTUFBckMsRUFBNkNFLENBQUMsRUFBOUMsRUFBa0Q7QUFDaEQsUUFBSUMsSUFBSSxHQUFHTCxnQkFBZ0IsQ0FBQ0ksQ0FBRCxDQUEzQjs7QUFFQSxRQUFJQSxDQUFDLEdBQUcsQ0FBSixJQUFTLENBQUMsS0FBS0UsT0FBTCxDQUFhQyxjQUEzQixFQUEyQztBQUN6Q1IsTUFBQUEsUUFBUSxDQUFDQSxRQUFRLENBQUNHLE1BQVQsR0FBa0IsQ0FBbkIsQ0FBUixJQUFpQ0csSUFBakM7QUFDRCxLQUZELE1BRU87QUFDTCxVQUFJLEtBQUtDLE9BQUwsQ0FBYUUsZ0JBQWpCLEVBQW1DO0FBQ2pDSCxRQUFBQSxJQUFJLEdBQUdBLElBQUksQ0FBQ0ksSUFBTCxFQUFQO0FBQ0Q7O0FBQ0RWLE1BQUFBLFFBQVEsQ0FBQ1csSUFBVCxDQUFjTCxJQUFkO0FBQ0Q7QUFDRjs7QUFFRCxTQUFPTixRQUFQO0FBQ0QsQ0F4QkQ7O0FBMEJPLFNBQVNZLFNBQVQsQ0FBbUJDLE1BQW5CLEVBQTJCQyxNQUEzQixFQUFtQ0MsUUFBbkMsRUFBNkM7QUFBRSxTQUFPbkIsUUFBUSxDQUFDb0IsSUFBVCxDQUFjSCxNQUFkLEVBQXNCQyxNQUF0QixFQUE4QkMsUUFBOUIsQ0FBUDtBQUFpRDs7QUFDaEcsU0FBU0UsZ0JBQVQsQ0FBMEJKLE1BQTFCLEVBQWtDQyxNQUFsQyxFQUEwQ0MsUUFBMUMsRUFBb0Q7QUFDekQsTUFBSVIsT0FBTztBQUFHO0FBQUE7QUFBQTs7QUFBQVc7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQTtBQUFBLEdBQWdCSCxRQUFoQixFQUEwQjtBQUFDTixJQUFBQSxnQkFBZ0IsRUFBRTtBQUFuQixHQUExQixDQUFkO0FBQ0EsU0FBT2IsUUFBUSxDQUFDb0IsSUFBVCxDQUFjSCxNQUFkLEVBQXNCQyxNQUF0QixFQUE4QlAsT0FBOUIsQ0FBUDtBQUNEIiwic291cmNlc0NvbnRlbnQiOlsiaW1wb3J0IERpZmYgZnJvbSAnLi9iYXNlJztcbmltcG9ydCB7Z2VuZXJhdGVPcHRpb25zfSBmcm9tICcuLi91dGlsL3BhcmFtcyc7XG5cbmV4cG9ydCBjb25zdCBsaW5lRGlmZiA9IG5ldyBEaWZmKCk7XG5saW5lRGlmZi50b2tlbml6ZSA9IGZ1bmN0aW9uKHZhbHVlKSB7XG4gIGxldCByZXRMaW5lcyA9IFtdLFxuICAgICAgbGluZXNBbmROZXdsaW5lcyA9IHZhbHVlLnNwbGl0KC8oXFxufFxcclxcbikvKTtcblxuICAvLyBJZ25vcmUgdGhlIGZpbmFsIGVtcHR5IHRva2VuIHRoYXQgb2NjdXJzIGlmIHRoZSBzdHJpbmcgZW5kcyB3aXRoIGEgbmV3IGxpbmVcbiAgaWYgKCFsaW5lc0FuZE5ld2xpbmVzW2xpbmVzQW5kTmV3bGluZXMubGVuZ3RoIC0gMV0pIHtcbiAgICBsaW5lc0FuZE5ld2xpbmVzLnBvcCgpO1xuICB9XG5cbiAgLy8gTWVyZ2UgdGhlIGNvbnRlbnQgYW5kIGxpbmUgc2VwYXJhdG9ycyBpbnRvIHNpbmdsZSB0b2tlbnNcbiAgZm9yIChsZXQgaSA9IDA7IGkgPCBsaW5lc0FuZE5ld2xpbmVzLmxlbmd0aDsgaSsrKSB7XG4gICAgbGV0IGxpbmUgPSBsaW5lc0FuZE5ld2xpbmVzW2ldO1xuXG4gICAgaWYgKGkgJSAyICYmICF0aGlzLm9wdGlvbnMubmV3bGluZUlzVG9rZW4pIHtcbiAgICAgIHJldExpbmVzW3JldExpbmVzLmxlbmd0aCAtIDFdICs9IGxpbmU7XG4gICAgfSBlbHNlIHtcbiAgICAgIGlmICh0aGlzLm9wdGlvbnMuaWdub3JlV2hpdGVzcGFjZSkge1xuICAgICAgICBsaW5lID0gbGluZS50cmltKCk7XG4gICAgICB9XG4gICAgICByZXRMaW5lcy5wdXNoKGxpbmUpO1xuICAgIH1cbiAgfVxuXG4gIHJldHVybiByZXRMaW5lcztcbn07XG5cbmV4cG9ydCBmdW5jdGlvbiBkaWZmTGluZXMob2xkU3RyLCBuZXdTdHIsIGNhbGxiYWNrKSB7IHJldHVybiBsaW5lRGlmZi5kaWZmKG9sZFN0ciwgbmV3U3RyLCBjYWxsYmFjayk7IH1cbmV4cG9ydCBmdW5jdGlvbiBkaWZmVHJpbW1lZExpbmVzKG9sZFN0ciwgbmV3U3RyLCBjYWxsYmFjay
kge1xuICBsZXQgb3B0aW9ucyA9IGdlbmVyYXRlT3B0aW9ucyhjYWxsYmFjaywge2lnbm9yZVdoaXRlc3BhY2U6IHRydWV9KTtcbiAgcmV0dXJuIGxpbmVEaWZmLmRpZmYob2xkU3RyLCBuZXdTdHIsIG9wdGlvbnMpO1xufVxuIl19
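For reference, a minimal call of the line-level API added above, using made-up text; diffTrimmedLines is the same call with ignoreWhitespace preset to true.

    var line = require('diff/lib/diff/line');

    // Line tokens keep their trailing newline unless newlineIsToken is set.
    var parts = line.diffLines('one\ntwo\nthree\n', 'one\n2\nthree\n');
    parts.forEach(function (p) {
      process.stdout.write((p.added ? '+' : p.removed ? '-' : ' ') + p.value);
    });

    line.diffTrimmedLines('  a\n', 'a\n'); // per-line leading/trailing whitespace is ignored
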
diff --git a/_extensions/d2/node_modules/diff/lib/diff/sentence.js b/_extensions/d2/node_modules/diff/lib/diff/sentence.js
new file mode 100644
index 00000000..95158d6f
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/diff/sentence.js
@@ -0,0 +1,41 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.diffSentences = diffSentences;
+exports.sentenceDiff = void 0;
+
+/*istanbul ignore end*/
+var
+/*istanbul ignore start*/
+_base = _interopRequireDefault(require("./base"))
+/*istanbul ignore end*/
+;
+
+/*istanbul ignore start*/ function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
+
+/*istanbul ignore end*/
+var sentenceDiff = new
+/*istanbul ignore start*/
+_base
+/*istanbul ignore end*/
+[
+/*istanbul ignore start*/
+"default"
+/*istanbul ignore end*/
+]();
+
+/*istanbul ignore start*/
+exports.sentenceDiff = sentenceDiff;
+
+/*istanbul ignore end*/
+sentenceDiff.tokenize = function (value) {
+  return value.split(/(\S.+?[.!?])(?=\s+|$)/);
+};
+
+function diffSentences(oldStr, newStr, callback) {
+  return sentenceDiff.diff(oldStr, newStr, callback);
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy9kaWZmL3NlbnRlbmNlLmpzIl0sIm5hbWVzIjpbInNlbnRlbmNlRGlmZiIsIkRpZmYiLCJ0b2tlbml6ZSIsInZhbHVlIiwic3BsaXQiLCJkaWZmU2VudGVuY2VzIiwib2xkU3RyIiwibmV3U3RyIiwiY2FsbGJhY2siLCJkaWZmIl0sIm1hcHBpbmdzIjoiOzs7Ozs7Ozs7O0FBQUE7QUFBQTtBQUFBO0FBQUE7QUFBQTs7Ozs7QUFHTyxJQUFNQSxZQUFZLEdBQUc7QUFBSUM7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUE7QUFBQUEsQ0FBSixFQUFyQjs7Ozs7O0FBQ1BELFlBQVksQ0FBQ0UsUUFBYixHQUF3QixVQUFTQyxLQUFULEVBQWdCO0FBQ3RDLFNBQU9BLEtBQUssQ0FBQ0MsS0FBTixDQUFZLHVCQUFaLENBQVA7QUFDRCxDQUZEOztBQUlPLFNBQVNDLGFBQVQsQ0FBdUJDLE1BQXZCLEVBQStCQyxNQUEvQixFQUF1Q0MsUUFBdkMsRUFBaUQ7QUFBRSxTQUFPUixZQUFZLENBQUNTLElBQWIsQ0FBa0JILE1BQWxCLEVBQTBCQyxNQUExQixFQUFrQ0MsUUFBbEMsQ0FBUDtBQUFxRCIsInNvdXJjZXNDb250ZW50IjpbImltcG9ydCBEaWZmIGZyb20gJy4vYmFzZSc7XG5cblxuZXhwb3J0IGNvbnN0IHNlbnRlbmNlRGlmZiA9IG5ldyBEaWZmKCk7XG5zZW50ZW5jZURpZmYudG9rZW5pemUgPSBmdW5jdGlvbih2YWx1ZSkge1xuICByZXR1cm4gdmFsdWUuc3BsaXQoLyhcXFMuKz9bLiE/XSkoPz1cXHMrfCQpLyk7XG59O1xuXG5leHBvcnQgZnVuY3Rpb24gZGlmZlNlbnRlbmNlcyhvbGRTdHIsIG5ld1N0ciwgY2FsbGJhY2spIHsgcmV0dXJuIHNlbnRlbmNlRGlmZi5kaWZmKG9sZFN0ciwgbmV3U3RyLCBjYWxsYmFjayk7IH1cbiJdfQ==
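A usage sketch for diffSentences as defined above, which splits on sentence-ending punctuation followed by whitespace; the example sentences are invented.

    var diffSentences = require('diff/lib/diff/sentence').diffSentences;

    var parts = diffSentences('One. Second sentence. Three.', 'One. Another sentence. Three.');
    // 'Second sentence.' is reported as removed and 'Another sentence.' as added;
    // the surrounding sentences and the whitespace tokens come back unchanged.
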
diff --git a/_extensions/d2/node_modules/diff/lib/diff/word.js b/_extensions/d2/node_modules/diff/lib/diff/word.js
new file mode 100644
index 00000000..cef7fe17
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/diff/word.js
@@ -0,0 +1,108 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.diffWords = diffWords;
+exports.diffWordsWithSpace = diffWordsWithSpace;
+exports.wordDiff = void 0;
+
+/*istanbul ignore end*/
+var
+/*istanbul ignore start*/
+_base = _interopRequireDefault(require("./base"))
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_params = require("../util/params")
+/*istanbul ignore end*/
+;
+
+/*istanbul ignore start*/ function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
+
+/*istanbul ignore end*/
+// Based on https://en.wikipedia.org/wiki/Latin_script_in_Unicode
+//
+// Ranges and exceptions:
+// Latin-1 Supplement, 0080–00FF
+//  - U+00D7  × Multiplication sign
+//  - U+00F7  ÷ Division sign
+// Latin Extended-A, 0100–017F
+// Latin Extended-B, 0180–024F
+// IPA Extensions, 0250–02AF
+// Spacing Modifier Letters, 02B0–02FF
+//  - U+02C7  ˇ ˇ  Caron
+//  - U+02D8  ˘ ˘  Breve
+//  - U+02D9  ˙ ˙  Dot Above
+//  - U+02DA  ˚ ˚  Ring Above
+//  - U+02DB  ˛ ˛  Ogonek
+//  - U+02DC  ˜ ˜  Small Tilde
+//  - U+02DD  ˝ ˝  Double Acute Accent
+// Latin Extended Additional, 1E00–1EFF
+var extendedWordChars = /^[A-Za-z\xC0-\u02C6\u02C8-\u02D7\u02DE-\u02FF\u1E00-\u1EFF]+$/;
+var reWhitespace = /\S/;
+var wordDiff = new
+/*istanbul ignore start*/
+_base
+/*istanbul ignore end*/
+[
+/*istanbul ignore start*/
+"default"
+/*istanbul ignore end*/
+]();
+
+/*istanbul ignore start*/
+exports.wordDiff = wordDiff;
+
+/*istanbul ignore end*/
+wordDiff.equals = function (left, right) {
+  if (this.options.ignoreCase) {
+    left = left.toLowerCase();
+    right = right.toLowerCase();
+  }
+
+  return left === right || this.options.ignoreWhitespace && !reWhitespace.test(left) && !reWhitespace.test(right);
+};
+
+wordDiff.tokenize = function (value) {
+  // All whitespace symbols except newline group into one token, each newline - in separate token
+  var tokens = value.split(/([^\S\r\n]+|[()[\]{}'"\r\n]|\b)/); // Join the boundary splits that we do not consider to be boundaries. This is primarily the extended Latin character set.
+
+  for (var i = 0; i < tokens.length - 1; i++) {
+    // If we have an empty string in the next field and we have only word chars before and after, merge
+    if (!tokens[i + 1] && tokens[i + 2] && extendedWordChars.test(tokens[i]) && extendedWordChars.test(tokens[i + 2])) {
+      tokens[i] += tokens[i + 2];
+      tokens.splice(i + 1, 2);
+      i--;
+    }
+  }
+
+  return tokens;
+};
+
+function diffWords(oldStr, newStr, options) {
+  options =
+  /*istanbul ignore start*/
+  (0,
+  /*istanbul ignore end*/
+
+  /*istanbul ignore start*/
+  _params
+  /*istanbul ignore end*/
+  .
+  /*istanbul ignore start*/
+  generateOptions)
+  /*istanbul ignore end*/
+  (options, {
+    ignoreWhitespace: true
+  });
+  return wordDiff.diff(oldStr, newStr, options);
+}
+
+function diffWordsWithSpace(oldStr, newStr, options) {
+  return wordDiff.diff(oldStr, newStr, options);
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy9kaWZmL3dvcmQuanMiXSwibmFtZXMiOlsiZXh0ZW5kZWRXb3JkQ2hhcnMiLCJyZVdoaXRlc3BhY2UiLCJ3b3JkRGlmZiIsIkRpZmYiLCJlcXVhbHMiLCJsZWZ0IiwicmlnaHQiLCJvcHRpb25zIiwiaWdub3JlQ2FzZSIsInRvTG93ZXJDYXNlIiwiaWdub3JlV2hpdGVzcGFjZSIsInRlc3QiLCJ0b2tlbml6ZSIsInZhbHVlIiwidG9rZW5zIiwic3BsaXQiLCJpIiwibGVuZ3RoIiwic3BsaWNlIiwiZGlmZldvcmRzIiwib2xkU3RyIiwibmV3U3RyIiwiZ2VuZXJhdGVPcHRpb25zIiwiZGlmZiIsImRpZmZXb3Jkc1dpdGhTcGFjZSJdLCJtYXBwaW5ncyI6Ijs7Ozs7Ozs7Ozs7QUFBQTtBQUFBO0FBQUE7QUFBQTtBQUFBOztBQUNBO0FBQUE7QUFBQTtBQUFBO0FBQUE7Ozs7O0FBRUE7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0E7QUFDQTtBQUNBO0FBQ0EsSUFBTUEsaUJBQWlCLEdBQUcsK0RBQTFCO0FBRUEsSUFBTUMsWUFBWSxHQUFHLElBQXJCO0FBRU8sSUFBTUMsUUFBUSxHQUFHO0FBQUlDO0FBQUFBO0FBQUFBO0FBQUFBO0FBQUFBO0FBQUFBO0FBQUFBO0FBQUFBLENBQUosRUFBakI7Ozs7OztBQUNQRCxRQUFRLENBQUNFLE1BQVQsR0FBa0IsVUFBU0MsSUFBVCxFQUFlQyxLQUFmLEVBQXNCO0FBQ3RDLE1BQUksS0FBS0MsT0FBTCxDQUFhQyxVQUFqQixFQUE2QjtBQUMzQkgsSUFBQUEsSUFBSSxHQUFHQSxJQUFJLENBQUNJLFdBQUwsRUFBUDtBQUNBSCxJQUFBQSxLQUFLLEdBQUdBLEtBQUssQ0FBQ0csV0FBTixFQUFSO0FBQ0Q7O0FBQ0QsU0FBT0osSUFBSSxLQUFLQyxLQUFULElBQW1CLEtBQUtDLE9BQUwsQ0FBYUcsZ0JBQWIsSUFBaUMsQ0FBQ1QsWUFBWSxDQUFDVSxJQUFiLENBQWtCTixJQUFsQixDQUFsQyxJQUE2RCxDQUFDSixZQUFZLENBQUNVLElBQWIsQ0FBa0JMLEtBQWxCLENBQXhGO0FBQ0QsQ0FORDs7QUFPQUosUUFBUSxDQUFDVSxRQUFULEdBQW9CLFVBQVNDLEtBQVQsRUFBZ0I7QUFDbEM7QUFDQSxNQUFJQyxNQUFNLEdBQUdELEtBQUssQ0FBQ0UsS0FBTixDQUFZLGlDQUFaLENBQWIsQ0FGa0MsQ0FJbEM7O0FBQ0EsT0FBSyxJQUFJQyxDQUFDLEdBQUcsQ0FBYixFQUFnQkEsQ0FBQyxHQUFHRixNQUFNLENBQUNHLE1BQVAsR0FBZ0IsQ0FBcEMsRUFBdUNELENBQUMsRUFBeEMsRUFBNEM7QUFDMUM7QUFDQSxRQUFJLENBQUNGLE1BQU0sQ0FBQ0UsQ0FBQyxHQUFHLENBQUwsQ0FBUCxJQUFrQkYsTUFBTSxDQUFDRSxDQUFDLEdBQUcsQ0FBTCxDQUF4QixJQUNLaEIsaUJBQWlCLENBQUNXLElBQWxCLENBQXVCRyxNQUFNLENBQUNFLENBQUQsQ0FBN0IsQ0FETCxJQUVLaEIsaUJBQWlCLENBQUNXLElBQWxCLENBQXVCRyxNQUFNLENBQUNFLENBQUMsR0FBRyxDQUFMLENBQTdCLENBRlQsRUFFZ0Q7QUFDOUNGLE1BQUFBLE1BQU0sQ0FBQ0UsQ0FBRCxDQUFOLElBQWFGLE1BQU0sQ0FBQ0UsQ0FBQyxHQUFHLENBQUwsQ0FBbkI7QUFDQUYsTUFBQUEsTUFBTSxDQUFDSSxNQUFQLENBQWNGLENBQUMsR0FBRyxDQUFsQixFQUFxQixDQUFyQjtBQUNBQSxNQUFBQSxDQUFDO0FBQ0Y7QUFDRjs7QUFFRCxTQUFPRixNQUFQO0FBQ0QsQ0FqQkQ7O0FBbUJPLFNBQVNLLFNBQVQsQ0FBbUJDLE1BQW5CLEVBQTJCQyxNQUEzQixFQUFtQ2QsT0FBbkMsRUFBNEM7QUFDakRBLEVBQUFBLE9BQU87QUFBRztBQUFBO0FBQUE7O0FBQUFlO0FBQUFBO0FBQUFBO0FBQUFBO0FBQUFBO0FBQUFBO0FBQUE7QUFBQSxHQUFnQmYsT0FBaEIsRUFBeUI7QUFBQ0csSUFBQUEsZ0JBQWdCLEVBQUU7QUFBbkIsR0FBekIsQ0FBVjtBQUNBLFNBQU9SLFFBQVEsQ0FBQ3FCLElBQVQsQ0FBY0gsTUFBZCxFQUFzQkMsTUFBdEIsRUFBOEJkLE9BQTlCLENBQVA7QUFDRDs7QUFFTSxTQUFTaUIsa0JBQVQsQ0FBNEJKLE1BQTVCLEVBQW9DQyxNQUFwQyxFQUE0Q2QsT0FBNUMsRUFBcUQ7QUFDMUQsU0FBT0wsUUFBUSxDQUFDcUIsSUFBVCxDQUFjSCxNQUFkLEVBQXNCQyxNQUF0QixFQUE4QmQsT0FBOUIsQ0FBUDtBQUNEIiwic291cmNlc0NvbnRlbnQiOlsiaW1wb3J0IERpZmYgZnJvbSAnLi9iYXNlJztcbmltcG9ydCB7Z2VuZXJhdGVPcHRpb25zfSBmcm9tICcuLi91dGlsL3BhcmFtcyc7XG5cbi8vIEJhc2VkIG9uIGh0dHBzOi8vZW4ud2lraXBlZGlhLm9yZy93aWtpL0xhdGluX3NjcmlwdF9pbl9Vbmljb2RlXG4vL1xuLy8gUmFuZ2VzIGFuZCBleGNlcHRpb25zOlxuLy8gTGF0aW4tMSBTdXBwbGVtZW50LCAwMDgw4oCTMDBGRlxuLy8gIC0gVSswMEQ3ICDDlyBNdWx0aXBsaWNhdGlvbiBzaWduXG4vLyAgLSBVKzAwRjcgIMO3IERpdmlzaW9uIHNpZ25cbi8vIExhdGluIEV4dGVuZGVkLUEsIDAxMDDigJMwMTdGXG4vLyBMYXRpbiBFeHRlbmRlZC1CLCAwMTgw4oCTMDI0RlxuLy8gSVBBIEV4dGVuc2lvbnMsIDAyNTDigJMwMkFGXG4vLyBTcGFjaW5nIE1vZGlmaWVyIExldHRlcnMsIDAyQjDigJMwMkZGXG4vLyAgLSBVKzAyQzcgIMuHICYjNzExOyAgQ2Fyb25cbi8vICAtIFUrMDJEOCAgy5ggJiM3Mjg7ICBCcmV2ZVxuLy8gIC0gVSswMkQ5ICDLmSAmIzcyOTsgIERvdCBBYm92ZVxuLy8gIC0gVSswMkRBICDLmiAmIzczMD
sgIFJpbmcgQWJvdmVcbi8vICAtIFUrMDJEQiAgy5sgJiM3MzE7ICBPZ29uZWtcbi8vICAtIFUrMDJEQyAgy5wgJiM3MzI7ICBTbWFsbCBUaWxkZVxuLy8gIC0gVSswMkREICDLnSAmIzczMzsgIERvdWJsZSBBY3V0ZSBBY2NlbnRcbi8vIExhdGluIEV4dGVuZGVkIEFkZGl0aW9uYWwsIDFFMDDigJMxRUZGXG5jb25zdCBleHRlbmRlZFdvcmRDaGFycyA9IC9eW2EtekEtWlxcdXtDMH0tXFx1e0ZGfVxcdXtEOH0tXFx1e0Y2fVxcdXtGOH0tXFx1ezJDNn1cXHV7MkM4fS1cXHV7MkQ3fVxcdXsyREV9LVxcdXsyRkZ9XFx1ezFFMDB9LVxcdXsxRUZGfV0rJC91O1xuXG5jb25zdCByZVdoaXRlc3BhY2UgPSAvXFxTLztcblxuZXhwb3J0IGNvbnN0IHdvcmREaWZmID0gbmV3IERpZmYoKTtcbndvcmREaWZmLmVxdWFscyA9IGZ1bmN0aW9uKGxlZnQsIHJpZ2h0KSB7XG4gIGlmICh0aGlzLm9wdGlvbnMuaWdub3JlQ2FzZSkge1xuICAgIGxlZnQgPSBsZWZ0LnRvTG93ZXJDYXNlKCk7XG4gICAgcmlnaHQgPSByaWdodC50b0xvd2VyQ2FzZSgpO1xuICB9XG4gIHJldHVybiBsZWZ0ID09PSByaWdodCB8fCAodGhpcy5vcHRpb25zLmlnbm9yZVdoaXRlc3BhY2UgJiYgIXJlV2hpdGVzcGFjZS50ZXN0KGxlZnQpICYmICFyZVdoaXRlc3BhY2UudGVzdChyaWdodCkpO1xufTtcbndvcmREaWZmLnRva2VuaXplID0gZnVuY3Rpb24odmFsdWUpIHtcbiAgLy8gQWxsIHdoaXRlc3BhY2Ugc3ltYm9scyBleGNlcHQgbmV3bGluZSBncm91cCBpbnRvIG9uZSB0b2tlbiwgZWFjaCBuZXdsaW5lIC0gaW4gc2VwYXJhdGUgdG9rZW5cbiAgbGV0IHRva2VucyA9IHZhbHVlLnNwbGl0KC8oW15cXFNcXHJcXG5dK3xbKClbXFxde30nXCJcXHJcXG5dfFxcYikvKTtcblxuICAvLyBKb2luIHRoZSBib3VuZGFyeSBzcGxpdHMgdGhhdCB3ZSBkbyBub3QgY29uc2lkZXIgdG8gYmUgYm91bmRhcmllcy4gVGhpcyBpcyBwcmltYXJpbHkgdGhlIGV4dGVuZGVkIExhdGluIGNoYXJhY3RlciBzZXQuXG4gIGZvciAobGV0IGkgPSAwOyBpIDwgdG9rZW5zLmxlbmd0aCAtIDE7IGkrKykge1xuICAgIC8vIElmIHdlIGhhdmUgYW4gZW1wdHkgc3RyaW5nIGluIHRoZSBuZXh0IGZpZWxkIGFuZCB3ZSBoYXZlIG9ubHkgd29yZCBjaGFycyBiZWZvcmUgYW5kIGFmdGVyLCBtZXJnZVxuICAgIGlmICghdG9rZW5zW2kgKyAxXSAmJiB0b2tlbnNbaSArIDJdXG4gICAgICAgICAgJiYgZXh0ZW5kZWRXb3JkQ2hhcnMudGVzdCh0b2tlbnNbaV0pXG4gICAgICAgICAgJiYgZXh0ZW5kZWRXb3JkQ2hhcnMudGVzdCh0b2tlbnNbaSArIDJdKSkge1xuICAgICAgdG9rZW5zW2ldICs9IHRva2Vuc1tpICsgMl07XG4gICAgICB0b2tlbnMuc3BsaWNlKGkgKyAxLCAyKTtcbiAgICAgIGktLTtcbiAgICB9XG4gIH1cblxuICByZXR1cm4gdG9rZW5zO1xufTtcblxuZXhwb3J0IGZ1bmN0aW9uIGRpZmZXb3JkcyhvbGRTdHIsIG5ld1N0ciwgb3B0aW9ucykge1xuICBvcHRpb25zID0gZ2VuZXJhdGVPcHRpb25zKG9wdGlvbnMsIHtpZ25vcmVXaGl0ZXNwYWNlOiB0cnVlfSk7XG4gIHJldHVybiB3b3JkRGlmZi5kaWZmKG9sZFN0ciwgbmV3U3RyLCBvcHRpb25zKTtcbn1cblxuZXhwb3J0IGZ1bmN0aW9uIGRpZmZXb3Jkc1dpdGhTcGFjZShvbGRTdHIsIG5ld1N0ciwgb3B0aW9ucykge1xuICByZXR1cm4gd29yZERpZmYuZGlmZihvbGRTdHIsIG5ld1N0ciwgb3B0aW9ucyk7XG59XG4iXX0=
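Finally, a sketch of the word-level exports above: diffWords presets ignoreWhitespace to true via generateOptions, while diffWordsWithSpace keeps whitespace significant. The phrases are sample data.

    var word = require('diff/lib/diff/word');

    // One unchanged component: the differing whitespace run is not treated as a change.
    word.diffWords('beep boop', 'beep   boop');

    // Here the whitespace run is reported as a removed/added pair.
    word.diffWordsWithSpace('beep boop', 'beep   boop');

    // ignoreCase is also respected: word.diffWords('Beep', 'beep', { ignoreCase: true })
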
diff --git a/_extensions/d2/node_modules/diff/lib/index.es6.js b/_extensions/d2/node_modules/diff/lib/index.es6.js
new file mode 100644
index 00000000..c2a00135
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/index.es6.js
@@ -0,0 +1,1561 @@
+function Diff() {}
+Diff.prototype = {
+  diff: function diff(oldString, newString) {
+    var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
+    var callback = options.callback;
+
+    if (typeof options === 'function') {
+      callback = options;
+      options = {};
+    }
+
+    this.options = options;
+    var self = this;
+
+    function done(value) {
+      if (callback) {
+        setTimeout(function () {
+          callback(undefined, value);
+        }, 0);
+        return true;
+      } else {
+        return value;
+      }
+    } // Allow subclasses to massage the input prior to running
+
+
+    oldString = this.castInput(oldString);
+    newString = this.castInput(newString);
+    oldString = this.removeEmpty(this.tokenize(oldString));
+    newString = this.removeEmpty(this.tokenize(newString));
+    var newLen = newString.length,
+        oldLen = oldString.length;
+    var editLength = 1;
+    var maxEditLength = newLen + oldLen;
+
+    if (options.maxEditLength) {
+      maxEditLength = Math.min(maxEditLength, options.maxEditLength);
+    }
+
+    var bestPath = [{
+      newPos: -1,
+      components: []
+    }]; // Seed editLength = 0, i.e. the content starts with the same values
+
+    var oldPos = this.extractCommon(bestPath[0], newString, oldString, 0);
+
+    if (bestPath[0].newPos + 1 >= newLen && oldPos + 1 >= oldLen) {
+      // Identity per the equality and tokenizer
+      return done([{
+        value: this.join(newString),
+        count: newString.length
+      }]);
+    } // Main worker method. checks all permutations of a given edit length for acceptance.
+
+
+    function execEditLength() {
+      for (var diagonalPath = -1 * editLength; diagonalPath <= editLength; diagonalPath += 2) {
+        var basePath = void 0;
+
+        var addPath = bestPath[diagonalPath - 1],
+            removePath = bestPath[diagonalPath + 1],
+            _oldPos = (removePath ? removePath.newPos : 0) - diagonalPath;
+
+        if (addPath) {
+          // No one else is going to attempt to use this value, clear it
+          bestPath[diagonalPath - 1] = undefined;
+        }
+
+        var canAdd = addPath && addPath.newPos + 1 < newLen,
+            canRemove = removePath && 0 <= _oldPos && _oldPos < oldLen;
+
+        if (!canAdd && !canRemove) {
+          // If this path is a terminal then prune
+          bestPath[diagonalPath] = undefined;
+          continue;
+        } // Select the diagonal that we want to branch from. We select the prior
+        // path whose position in the new string is the farthest from the origin
+        // and does not pass the bounds of the diff graph
+
+
+        if (!canAdd || canRemove && addPath.newPos < removePath.newPos) {
+          basePath = clonePath(removePath);
+          self.pushComponent(basePath.components, undefined, true);
+        } else {
+          basePath = addPath; // No need to clone, we've pulled it from the list
+
+          basePath.newPos++;
+          self.pushComponent(basePath.components, true, undefined);
+        }
+
+        _oldPos = self.extractCommon(basePath, newString, oldString, diagonalPath); // If we have hit the end of both strings, then we are done
+
+        if (basePath.newPos + 1 >= newLen && _oldPos + 1 >= oldLen) {
+          return done(buildValues(self, basePath.components, newString, oldString, self.useLongestToken));
+        } else {
+          // Otherwise track this path as a potential candidate and continue.
+          bestPath[diagonalPath] = basePath;
+        }
+      }
+
+      editLength++;
+    } // Performs the length of edit iteration. Is a bit fugly as this has to support the
+    // sync and async mode which is never fun. Loops over execEditLength until a value
+    // is produced, or until the edit length exceeds options.maxEditLength (if given),
+    // in which case it will return undefined.
+
+
+    if (callback) {
+      (function exec() {
+        setTimeout(function () {
+          if (editLength > maxEditLength) {
+            return callback();
+          }
+
+          if (!execEditLength()) {
+            exec();
+          }
+        }, 0);
+      })();
+    } else {
+      while (editLength <= maxEditLength) {
+        var ret = execEditLength();
+
+        if (ret) {
+          return ret;
+        }
+      }
+    }
+  },
+  pushComponent: function pushComponent(components, added, removed) {
+    var last = components[components.length - 1];
+
+    if (last && last.added === added && last.removed === removed) {
+      // We need to clone here as the component clone operation is just
+      // as shallow array clone
+      components[components.length - 1] = {
+        count: last.count + 1,
+        added: added,
+        removed: removed
+      };
+    } else {
+      components.push({
+        count: 1,
+        added: added,
+        removed: removed
+      });
+    }
+  },
+  extractCommon: function extractCommon(basePath, newString, oldString, diagonalPath) {
+    var newLen = newString.length,
+        oldLen = oldString.length,
+        newPos = basePath.newPos,
+        oldPos = newPos - diagonalPath,
+        commonCount = 0;
+
+    while (newPos + 1 < newLen && oldPos + 1 < oldLen && this.equals(newString[newPos + 1], oldString[oldPos + 1])) {
+      newPos++;
+      oldPos++;
+      commonCount++;
+    }
+
+    if (commonCount) {
+      basePath.components.push({
+        count: commonCount
+      });
+    }
+
+    basePath.newPos = newPos;
+    return oldPos;
+  },
+  equals: function equals(left, right) {
+    if (this.options.comparator) {
+      return this.options.comparator(left, right);
+    } else {
+      return left === right || this.options.ignoreCase && left.toLowerCase() === right.toLowerCase();
+    }
+  },
+  removeEmpty: function removeEmpty(array) {
+    var ret = [];
+
+    for (var i = 0; i < array.length; i++) {
+      if (array[i]) {
+        ret.push(array[i]);
+      }
+    }
+
+    return ret;
+  },
+  castInput: function castInput(value) {
+    return value;
+  },
+  tokenize: function tokenize(value) {
+    return value.split('');
+  },
+  join: function join(chars) {
+    return chars.join('');
+  }
+};
+
+function buildValues(diff, components, newString, oldString, useLongestToken) {
+  var componentPos = 0,
+      componentLen = components.length,
+      newPos = 0,
+      oldPos = 0;
+
+  for (; componentPos < componentLen; componentPos++) {
+    var component = components[componentPos];
+
+    if (!component.removed) {
+      if (!component.added && useLongestToken) {
+        var value = newString.slice(newPos, newPos + component.count);
+        value = value.map(function (value, i) {
+          var oldValue = oldString[oldPos + i];
+          return oldValue.length > value.length ? oldValue : value;
+        });
+        component.value = diff.join(value);
+      } else {
+        component.value = diff.join(newString.slice(newPos, newPos + component.count));
+      }
+
+      newPos += component.count; // Common case
+
+      if (!component.added) {
+        oldPos += component.count;
+      }
+    } else {
+      component.value = diff.join(oldString.slice(oldPos, oldPos + component.count));
+      oldPos += component.count; // Reverse add and remove so removes are output first to match common convention
+      // The diffing algorithm is tied to add then remove output and this is the simplest
+      // route to get the desired output with minimal overhead.
+
+      if (componentPos && components[componentPos - 1].added) {
+        var tmp = components[componentPos - 1];
+        components[componentPos - 1] = components[componentPos];
+        components[componentPos] = tmp;
+      }
+    }
+  } // Special case handle for when one terminal is ignored (i.e. whitespace).
+  // For this case we merge the terminal into the prior string and drop the change.
+  // This is only available for string mode.
+
+
+  var lastComponent = components[componentLen - 1];
+
+  if (componentLen > 1 && typeof lastComponent.value === 'string' && (lastComponent.added || lastComponent.removed) && diff.equals('', lastComponent.value)) {
+    components[componentLen - 2].value += lastComponent.value;
+    components.pop();
+  }
+
+  return components;
+}
+
+function clonePath(path) {
+  return {
+    newPos: path.newPos,
+    components: path.components.slice(0)
+  };
+}
+
+var characterDiff = new Diff();
+function diffChars(oldStr, newStr, options) {
+  return characterDiff.diff(oldStr, newStr, options);
+}
+
+function generateOptions(options, defaults) {
+  if (typeof options === 'function') {
+    defaults.callback = options;
+  } else if (options) {
+    for (var name in options) {
+      /* istanbul ignore else */
+      if (options.hasOwnProperty(name)) {
+        defaults[name] = options[name];
+      }
+    }
+  }
+
+  return defaults;
+}
+
+//
+// Ranges and exceptions:
+// Latin-1 Supplement, 0080–00FF
+//  - U+00D7  × Multiplication sign
+//  - U+00F7  ÷ Division sign
+// Latin Extended-A, 0100–017F
+// Latin Extended-B, 0180–024F
+// IPA Extensions, 0250–02AF
+// Spacing Modifier Letters, 02B0–02FF
+//  - U+02C7  ˇ ˇ  Caron
+//  - U+02D8  ˘ ˘  Breve
+//  - U+02D9  ˙ ˙  Dot Above
+//  - U+02DA  ˚ ˚  Ring Above
+//  - U+02DB  ˛ ˛  Ogonek
+//  - U+02DC  ˜ ˜  Small Tilde
+//  - U+02DD  ˝ ˝  Double Acute Accent
+// Latin Extended Additional, 1E00–1EFF
+
+var extendedWordChars = /^[A-Za-z\xC0-\u02C6\u02C8-\u02D7\u02DE-\u02FF\u1E00-\u1EFF]+$/;
+var reWhitespace = /\S/;
+var wordDiff = new Diff();
+
+wordDiff.equals = function (left, right) {
+  if (this.options.ignoreCase) {
+    left = left.toLowerCase();
+    right = right.toLowerCase();
+  }
+
+  return left === right || this.options.ignoreWhitespace && !reWhitespace.test(left) && !reWhitespace.test(right);
+};
+
+wordDiff.tokenize = function (value) {
+  // All whitespace symbols except newline group into one token, each newline - in separate token
+  var tokens = value.split(/([^\S\r\n]+|[()[\]{}'"\r\n]|\b)/); // Join the boundary splits that we do not consider to be boundaries. This is primarily the extended Latin character set.
+
+  for (var i = 0; i < tokens.length - 1; i++) {
+    // If we have an empty string in the next field and we have only word chars before and after, merge
+    if (!tokens[i + 1] && tokens[i + 2] && extendedWordChars.test(tokens[i]) && extendedWordChars.test(tokens[i + 2])) {
+      tokens[i] += tokens[i + 2];
+      tokens.splice(i + 1, 2);
+      i--;
+    }
+  }
+
+  return tokens;
+};
+
+function diffWords(oldStr, newStr, options) {
+  options = generateOptions(options, {
+    ignoreWhitespace: true
+  });
+  return wordDiff.diff(oldStr, newStr, options);
+}
+function diffWordsWithSpace(oldStr, newStr, options) {
+  return wordDiff.diff(oldStr, newStr, options);
+}
+
+var lineDiff = new Diff();
+
+lineDiff.tokenize = function (value) {
+  var retLines = [],
+      linesAndNewlines = value.split(/(\n|\r\n)/); // Ignore the final empty token that occurs if the string ends with a new line
+
+  if (!linesAndNewlines[linesAndNewlines.length - 1]) {
+    linesAndNewlines.pop();
+  } // Merge the content and line separators into single tokens
+
+
+  for (var i = 0; i < linesAndNewlines.length; i++) {
+    var line = linesAndNewlines[i];
+
+    if (i % 2 && !this.options.newlineIsToken) {
+      retLines[retLines.length - 1] += line;
+    } else {
+      if (this.options.ignoreWhitespace) {
+        line = line.trim();
+      }
+
+      retLines.push(line);
+    }
+  }
+
+  return retLines;
+};
+
+function diffLines(oldStr, newStr, callback) {
+  return lineDiff.diff(oldStr, newStr, callback);
+}
+function diffTrimmedLines(oldStr, newStr, callback) {
+  var options = generateOptions(callback, {
+    ignoreWhitespace: true
+  });
+  return lineDiff.diff(oldStr, newStr, options);
+}
+
+var sentenceDiff = new Diff();
+
+sentenceDiff.tokenize = function (value) {
+  return value.split(/(\S.+?[.!?])(?=\s+|$)/);
+};
+
+function diffSentences(oldStr, newStr, callback) {
+  return sentenceDiff.diff(oldStr, newStr, callback);
+}
+
+var cssDiff = new Diff();
+
+cssDiff.tokenize = function (value) {
+  return value.split(/([{}:;,]|\s+)/);
+};
+
+function diffCss(oldStr, newStr, callback) {
+  return cssDiff.diff(oldStr, newStr, callback);
+}
+
+function _typeof(obj) {
+  "@babel/helpers - typeof";
+
+  if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") {
+    _typeof = function (obj) {
+      return typeof obj;
+    };
+  } else {
+    _typeof = function (obj) {
+      return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj;
+    };
+  }
+
+  return _typeof(obj);
+}
+
+function _toConsumableArray(arr) {
+  return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread();
+}
+
+function _arrayWithoutHoles(arr) {
+  if (Array.isArray(arr)) return _arrayLikeToArray(arr);
+}
+
+function _iterableToArray(iter) {
+  if (typeof Symbol !== "undefined" && Symbol.iterator in Object(iter)) return Array.from(iter);
+}
+
+function _unsupportedIterableToArray(o, minLen) {
+  if (!o) return;
+  if (typeof o === "string") return _arrayLikeToArray(o, minLen);
+  var n = Object.prototype.toString.call(o).slice(8, -1);
+  if (n === "Object" && o.constructor) n = o.constructor.name;
+  if (n === "Map" || n === "Set") return Array.from(o);
+  if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen);
+}
+
+function _arrayLikeToArray(arr, len) {
+  if (len == null || len > arr.length) len = arr.length;
+
+  for (var i = 0, arr2 = new Array(len); i < len; i++) arr2[i] = arr[i];
+
+  return arr2;
+}
+
+function _nonIterableSpread() {
+  throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
+}
+
+var objectPrototypeToString = Object.prototype.toString;
+var jsonDiff = new Diff(); // Discriminate between two lines of pretty-printed, serialized JSON where one of them has a
+// dangling comma and the other doesn't. Turns out including the dangling comma yields the nicest output:
+
+jsonDiff.useLongestToken = true;
+jsonDiff.tokenize = lineDiff.tokenize;
+
+jsonDiff.castInput = function (value) {
+  var _this$options = this.options,
+      undefinedReplacement = _this$options.undefinedReplacement,
+      _this$options$stringi = _this$options.stringifyReplacer,
+      stringifyReplacer = _this$options$stringi === void 0 ? function (k, v) {
+    return typeof v === 'undefined' ? undefinedReplacement : v;
+  } : _this$options$stringi;
+  return typeof value === 'string' ? value : JSON.stringify(canonicalize(value, null, null, stringifyReplacer), stringifyReplacer, '  ');
+};
+
+jsonDiff.equals = function (left, right) {
+  return Diff.prototype.equals.call(jsonDiff, left.replace(/,([\r\n])/g, '$1'), right.replace(/,([\r\n])/g, '$1'));
+};
+
+function diffJson(oldObj, newObj, options) {
+  return jsonDiff.diff(oldObj, newObj, options);
+} // This function handles the presence of circular references by bailing out when encountering an
+// object that is already on the "stack" of items being processed. Accepts an optional replacer
+
+function canonicalize(obj, stack, replacementStack, replacer, key) {
+  stack = stack || [];
+  replacementStack = replacementStack || [];
+
+  if (replacer) {
+    obj = replacer(key, obj);
+  }
+
+  var i;
+
+  for (i = 0; i < stack.length; i += 1) {
+    if (stack[i] === obj) {
+      return replacementStack[i];
+    }
+  }
+
+  var canonicalizedObj;
+
+  if ('[object Array]' === objectPrototypeToString.call(obj)) {
+    stack.push(obj);
+    canonicalizedObj = new Array(obj.length);
+    replacementStack.push(canonicalizedObj);
+
+    for (i = 0; i < obj.length; i += 1) {
+      canonicalizedObj[i] = canonicalize(obj[i], stack, replacementStack, replacer, key);
+    }
+
+    stack.pop();
+    replacementStack.pop();
+    return canonicalizedObj;
+  }
+
+  if (obj && obj.toJSON) {
+    obj = obj.toJSON();
+  }
+
+  if (_typeof(obj) === 'object' && obj !== null) {
+    stack.push(obj);
+    canonicalizedObj = {};
+    replacementStack.push(canonicalizedObj);
+
+    var sortedKeys = [],
+        _key;
+
+    for (_key in obj) {
+      /* istanbul ignore else */
+      if (obj.hasOwnProperty(_key)) {
+        sortedKeys.push(_key);
+      }
+    }
+
+    sortedKeys.sort();
+
+    for (i = 0; i < sortedKeys.length; i += 1) {
+      _key = sortedKeys[i];
+      canonicalizedObj[_key] = canonicalize(obj[_key], stack, replacementStack, replacer, _key);
+    }
+
+    stack.pop();
+    replacementStack.pop();
+  } else {
+    canonicalizedObj = obj;
+  }
+
+  return canonicalizedObj;
+}
+
+var arrayDiff = new Diff();
+
+arrayDiff.tokenize = function (value) {
+  return value.slice();
+};
+
+arrayDiff.join = arrayDiff.removeEmpty = function (value) {
+  return value;
+};
+
+function diffArrays(oldArr, newArr, callback) {
+  return arrayDiff.diff(oldArr, newArr, callback);
+}
+
+function parsePatch(uniDiff) {
+  var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
+  var diffstr = uniDiff.split(/\r\n|[\n\v\f\r\x85]/),
+      delimiters = uniDiff.match(/\r\n|[\n\v\f\r\x85]/g) || [],
+      list = [],
+      i = 0;
+
+  function parseIndex() {
+    var index = {};
+    list.push(index); // Parse diff metadata
+
+    while (i < diffstr.length) {
+      var line = diffstr[i]; // File header found, end parsing diff metadata
+
+      if (/^(\-\-\-|\+\+\+|@@)\s/.test(line)) {
+        break;
+      } // Diff index
+
+
+      var header = /^(?:Index:|diff(?: -r \w+)+)\s+(.+?)\s*$/.exec(line);
+
+      if (header) {
+        index.index = header[1];
+      }
+
+      i++;
+    } // Parse file headers if they are defined. Unified diff requires them, but
+    // there's no technical issues to have an isolated hunk without file header
+
+
+    parseFileHeader(index);
+    parseFileHeader(index); // Parse hunks
+
+    index.hunks = [];
+
+    while (i < diffstr.length) {
+      var _line = diffstr[i];
+
+      if (/^(Index:|diff|\-\-\-|\+\+\+)\s/.test(_line)) {
+        break;
+      } else if (/^@@/.test(_line)) {
+        index.hunks.push(parseHunk());
+      } else if (_line && options.strict) {
+        // Ignore unexpected content unless in strict mode
+        throw new Error('Unknown line ' + (i + 1) + ' ' + JSON.stringify(_line));
+      } else {
+        i++;
+      }
+    }
+  } // Parses the --- and +++ headers, if none are found, no lines
+  // are consumed.
+
+
+  function parseFileHeader(index) {
+    var fileHeader = /^(---|\+\+\+)\s+(.*)$/.exec(diffstr[i]);
+
+    if (fileHeader) {
+      var keyPrefix = fileHeader[1] === '---' ? 'old' : 'new';
+      var data = fileHeader[2].split('\t', 2);
+      var fileName = data[0].replace(/\\\\/g, '\\');
+
+      if (/^".*"$/.test(fileName)) {
+        fileName = fileName.substr(1, fileName.length - 2);
+      }
+
+      index[keyPrefix + 'FileName'] = fileName;
+      index[keyPrefix + 'Header'] = (data[1] || '').trim();
+      i++;
+    }
+  } // Parses a hunk
+  // This assumes that we are at the start of a hunk.
+
+
+  function parseHunk() {
+    var chunkHeaderIndex = i,
+        chunkHeaderLine = diffstr[i++],
+        chunkHeader = chunkHeaderLine.split(/@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/);
+    var hunk = {
+      oldStart: +chunkHeader[1],
+      oldLines: typeof chunkHeader[2] === 'undefined' ? 1 : +chunkHeader[2],
+      newStart: +chunkHeader[3],
+      newLines: typeof chunkHeader[4] === 'undefined' ? 1 : +chunkHeader[4],
+      lines: [],
+      linedelimiters: []
+    }; // Unified Diff Format quirk: If the chunk size is 0,
+    // the first number is one lower than one would expect.
+    // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
+
+    if (hunk.oldLines === 0) {
+      hunk.oldStart += 1;
+    }
+
+    if (hunk.newLines === 0) {
+      hunk.newStart += 1;
+    }
+
+    var addCount = 0,
+        removeCount = 0;
+
+    for (; i < diffstr.length; i++) {
+      // Lines starting with '---' could be mistaken for the "remove line" operation
+      // But they could be the header for the next file. Therefore prune such cases out.
+      if (diffstr[i].indexOf('--- ') === 0 && i + 2 < diffstr.length && diffstr[i + 1].indexOf('+++ ') === 0 && diffstr[i + 2].indexOf('@@') === 0) {
+        break;
+      }
+
+      var operation = diffstr[i].length == 0 && i != diffstr.length - 1 ? ' ' : diffstr[i][0];
+
+      if (operation === '+' || operation === '-' || operation === ' ' || operation === '\\') {
+        hunk.lines.push(diffstr[i]);
+        hunk.linedelimiters.push(delimiters[i] || '\n');
+
+        if (operation === '+') {
+          addCount++;
+        } else if (operation === '-') {
+          removeCount++;
+        } else if (operation === ' ') {
+          addCount++;
+          removeCount++;
+        }
+      } else {
+        break;
+      }
+    } // Handle the empty block count case
+
+
+    if (!addCount && hunk.newLines === 1) {
+      hunk.newLines = 0;
+    }
+
+    if (!removeCount && hunk.oldLines === 1) {
+      hunk.oldLines = 0;
+    } // Perform optional sanity checking
+
+
+    if (options.strict) {
+      if (addCount !== hunk.newLines) {
+        throw new Error('Added line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
+      }
+
+      if (removeCount !== hunk.oldLines) {
+        throw new Error('Removed line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
+      }
+    }
+
+    return hunk;
+  }
+
+  while (i < diffstr.length) {
+    parseIndex();
+  }
+
+  return list;
+}
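+
+// Illustrative usage sketch (kept as a comment): parsePatch returns one entry
+// per file in the input, each with old/new file names and a hunks array:
+//   var parsed = parsePatch('--- a\n+++ b\n@@ -1,1 +1,1 @@\n-old\n+new\n');
+//   parsed[0].oldFileName;     // 'a'
+//   parsed[0].hunks[0].lines;  // ['-old', '+new']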
+
+// Iterator that traverses in the range of [min, max], stepping
+// by distance from a given start position. I.e. for [0, 4], with
+// start of 2, this will iterate 2, 3, 1, 4, 0.
+function distanceIterator (start, minLine, maxLine) {
+  var wantForward = true,
+      backwardExhausted = false,
+      forwardExhausted = false,
+      localOffset = 1;
+  return function iterator() {
+    if (wantForward && !forwardExhausted) {
+      if (backwardExhausted) {
+        localOffset++;
+      } else {
+        wantForward = false;
+      } // Check if trying to fit beyond text length, and if not, check it fits
+      // after offset location (or desired location on first iteration)
+
+
+      if (start + localOffset <= maxLine) {
+        return localOffset;
+      }
+
+      forwardExhausted = true;
+    }
+
+    if (!backwardExhausted) {
+      if (!forwardExhausted) {
+        wantForward = true;
+      } // Check if trying to fit before text beginning, and if not, check it fits
+      // before offset location
+
+
+      if (minLine <= start - localOffset) {
+        return -localOffset++;
+      }
+
+      backwardExhausted = true;
+      return iterator();
+    } // We tried to fit hunk before text beginning and beyond text length, then
+    // hunk can't fit on the text. Return undefined
+
+  };
+}
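+
+// Illustrative usage sketch (kept as a comment): the returned closure yields
+// offsets relative to `start`, alternating forward and backward until both
+// directions are exhausted, after which it yields undefined:
+//   var next = distanceIterator(2, 0, 4);
+//   [next(), next(), next(), next(), next()]; // [1, -1, 2, -2, undefined]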
+
+function applyPatch(source, uniDiff) {
+  var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
+
+  if (typeof uniDiff === 'string') {
+    uniDiff = parsePatch(uniDiff);
+  }
+
+  if (Array.isArray(uniDiff)) {
+    if (uniDiff.length > 1) {
+      throw new Error('applyPatch only works with a single input.');
+    }
+
+    uniDiff = uniDiff[0];
+  } // Apply the diff to the input
+
+
+  var lines = source.split(/\r\n|[\n\v\f\r\x85]/),
+      delimiters = source.match(/\r\n|[\n\v\f\r\x85]/g) || [],
+      hunks = uniDiff.hunks,
+      compareLine = options.compareLine || function (lineNumber, line, operation, patchContent) {
+    return line === patchContent;
+  },
+      errorCount = 0,
+      fuzzFactor = options.fuzzFactor || 0,
+      minLine = 0,
+      offset = 0,
+      removeEOFNL,
+      addEOFNL;
+  /**
+   * Checks if the hunk exactly fits on the provided location
+   */
+
+
+  function hunkFits(hunk, toPos) {
+    for (var j = 0; j < hunk.lines.length; j++) {
+      var line = hunk.lines[j],
+          operation = line.length > 0 ? line[0] : ' ',
+          content = line.length > 0 ? line.substr(1) : line;
+
+      if (operation === ' ' || operation === '-') {
+        // Context sanity check
+        if (!compareLine(toPos + 1, lines[toPos], operation, content)) {
+          errorCount++;
+
+          if (errorCount > fuzzFactor) {
+            return false;
+          }
+        }
+
+        toPos++;
+      }
+    }
+
+    return true;
+  } // Search best fit offsets for each hunk based on the previous ones
+
+
+  for (var i = 0; i < hunks.length; i++) {
+    var hunk = hunks[i],
+        maxLine = lines.length - hunk.oldLines,
+        localOffset = 0,
+        toPos = offset + hunk.oldStart - 1;
+    var iterator = distanceIterator(toPos, minLine, maxLine);
+
+    for (; localOffset !== undefined; localOffset = iterator()) {
+      if (hunkFits(hunk, toPos + localOffset)) {
+        hunk.offset = offset += localOffset;
+        break;
+      }
+    }
+
+    if (localOffset === undefined) {
+      return false;
+    } // Set lower text limit to end of the current hunk, so next ones don't try
+    // to fit over already patched text
+
+
+    minLine = hunk.offset + hunk.oldStart + hunk.oldLines;
+  } // Apply patch hunks
+
+
+  var diffOffset = 0;
+
+  for (var _i = 0; _i < hunks.length; _i++) {
+    var _hunk = hunks[_i],
+        _toPos = _hunk.oldStart + _hunk.offset + diffOffset - 1;
+
+    diffOffset += _hunk.newLines - _hunk.oldLines;
+
+    for (var j = 0; j < _hunk.lines.length; j++) {
+      var line = _hunk.lines[j],
+          operation = line.length > 0 ? line[0] : ' ',
+          content = line.length > 0 ? line.substr(1) : line,
+          delimiter = _hunk.linedelimiters[j];
+
+      if (operation === ' ') {
+        _toPos++;
+      } else if (operation === '-') {
+        lines.splice(_toPos, 1);
+        delimiters.splice(_toPos, 1);
+        /* istanbul ignore else */
+      } else if (operation === '+') {
+        lines.splice(_toPos, 0, content);
+        delimiters.splice(_toPos, 0, delimiter);
+        _toPos++;
+      } else if (operation === '\\') {
+        var previousOperation = _hunk.lines[j - 1] ? _hunk.lines[j - 1][0] : null;
+
+        if (previousOperation === '+') {
+          removeEOFNL = true;
+        } else if (previousOperation === '-') {
+          addEOFNL = true;
+        }
+      }
+    }
+  } // Handle EOFNL insertion/removal
+
+
+  if (removeEOFNL) {
+    while (!lines[lines.length - 1]) {
+      lines.pop();
+      delimiters.pop();
+    }
+  } else if (addEOFNL) {
+    lines.push('');
+    delimiters.push('\n');
+  }
+
+  for (var _k = 0; _k < lines.length - 1; _k++) {
+    lines[_k] = lines[_k] + delimiters[_k];
+  }
+
+  return lines.join('');
+} // Wrapper that supports multiple file patches via callbacks.
+
+function applyPatches(uniDiff, options) {
+  if (typeof uniDiff === 'string') {
+    uniDiff = parsePatch(uniDiff);
+  }
+
+  var currentIndex = 0;
+
+  function processIndex() {
+    var index = uniDiff[currentIndex++];
+
+    if (!index) {
+      return options.complete();
+    }
+
+    options.loadFile(index, function (err, data) {
+      if (err) {
+        return options.complete(err);
+      }
+
+      var updatedContent = applyPatch(data, index, options);
+      options.patched(index, updatedContent, function (err) {
+        if (err) {
+          return options.complete(err);
+        }
+
+        processIndex();
+      });
+    });
+  }
+
+  processIndex();
+}
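+
+// Illustrative usage sketch (kept as a comment): applyPatch accepts a patch
+// string or a single parsed index and returns the patched text, or false when
+// a hunk cannot be placed (options.fuzzFactor tolerates that many mismatches):
+//   applyPatch('old\n', '--- a\n+++ b\n@@ -1,1 +1,1 @@\n-old\n+new\n'); // 'new\n'
+// applyPatches drives the same logic over a multi-file patch through the
+// options.loadFile / options.patched / options.complete callbacks.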
+
+function structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
+  if (!options) {
+    options = {};
+  }
+
+  if (typeof options.context === 'undefined') {
+    options.context = 4;
+  }
+
+  var diff = diffLines(oldStr, newStr, options);
+
+  if (!diff) {
+    return;
+  }
+
+  diff.push({
+    value: '',
+    lines: []
+  }); // Append an empty value to make cleanup easier
+
+  function contextLines(lines) {
+    return lines.map(function (entry) {
+      return ' ' + entry;
+    });
+  }
+
+  var hunks = [];
+  var oldRangeStart = 0,
+      newRangeStart = 0,
+      curRange = [],
+      oldLine = 1,
+      newLine = 1;
+
+  var _loop = function _loop(i) {
+    var current = diff[i],
+        lines = current.lines || current.value.replace(/\n$/, '').split('\n');
+    current.lines = lines;
+
+    if (current.added || current.removed) {
+      var _curRange;
+
+      // If we have previous context, start with that
+      if (!oldRangeStart) {
+        var prev = diff[i - 1];
+        oldRangeStart = oldLine;
+        newRangeStart = newLine;
+
+        if (prev) {
+          curRange = options.context > 0 ? contextLines(prev.lines.slice(-options.context)) : [];
+          oldRangeStart -= curRange.length;
+          newRangeStart -= curRange.length;
+        }
+      } // Output our changes
+
+
+      (_curRange = curRange).push.apply(_curRange, _toConsumableArray(lines.map(function (entry) {
+        return (current.added ? '+' : '-') + entry;
+      }))); // Track the updated file position
+
+
+      if (current.added) {
+        newLine += lines.length;
+      } else {
+        oldLine += lines.length;
+      }
+    } else {
+      // Identical context lines. Track line changes
+      if (oldRangeStart) {
+        // Close out any changes that have been output (or join overlapping)
+        if (lines.length <= options.context * 2 && i < diff.length - 2) {
+          var _curRange2;
+
+          // Overlapping
+          (_curRange2 = curRange).push.apply(_curRange2, _toConsumableArray(contextLines(lines)));
+        } else {
+          var _curRange3;
+
+          // end the range and output
+          var contextSize = Math.min(lines.length, options.context);
+
+          (_curRange3 = curRange).push.apply(_curRange3, _toConsumableArray(contextLines(lines.slice(0, contextSize))));
+
+          var hunk = {
+            oldStart: oldRangeStart,
+            oldLines: oldLine - oldRangeStart + contextSize,
+            newStart: newRangeStart,
+            newLines: newLine - newRangeStart + contextSize,
+            lines: curRange
+          };
+
+          if (i >= diff.length - 2 && lines.length <= options.context) {
+            // EOF is inside this hunk
+            var oldEOFNewline = /\n$/.test(oldStr);
+            var newEOFNewline = /\n$/.test(newStr);
+            var noNlBeforeAdds = lines.length == 0 && curRange.length > hunk.oldLines;
+
+            if (!oldEOFNewline && noNlBeforeAdds && oldStr.length > 0) {
+              // special case: old has no eol and no trailing context; no-nl can end up before adds
+              // however, if the old file is empty, do not output the no-nl line
+              curRange.splice(hunk.oldLines, 0, '\\ No newline at end of file');
+            }
+
+            if (!oldEOFNewline && !noNlBeforeAdds || !newEOFNewline) {
+              curRange.push('\\ No newline at end of file');
+            }
+          }
+
+          hunks.push(hunk);
+          oldRangeStart = 0;
+          newRangeStart = 0;
+          curRange = [];
+        }
+      }
+
+      oldLine += lines.length;
+      newLine += lines.length;
+    }
+  };
+
+  for (var i = 0; i < diff.length; i++) {
+    _loop(i);
+  }
+
+  return {
+    oldFileName: oldFileName,
+    newFileName: newFileName,
+    oldHeader: oldHeader,
+    newHeader: newHeader,
+    hunks: hunks
+  };
+}
+function formatPatch(diff) {
+  var ret = [];
+
+  if (diff.oldFileName == diff.newFileName) {
+    ret.push('Index: ' + diff.oldFileName);
+  }
+
+  ret.push('===================================================================');
+  ret.push('--- ' + diff.oldFileName + (typeof diff.oldHeader === 'undefined' ? '' : '\t' + diff.oldHeader));
+  ret.push('+++ ' + diff.newFileName + (typeof diff.newHeader === 'undefined' ? '' : '\t' + diff.newHeader));
+
+  for (var i = 0; i < diff.hunks.length; i++) {
+    var hunk = diff.hunks[i]; // Unified Diff Format quirk: If the chunk size is 0,
+    // the first number is one lower than one would expect.
+    // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
+
+    if (hunk.oldLines === 0) {
+      hunk.oldStart -= 1;
+    }
+
+    if (hunk.newLines === 0) {
+      hunk.newStart -= 1;
+    }
+
+    ret.push('@@ -' + hunk.oldStart + ',' + hunk.oldLines + ' +' + hunk.newStart + ',' + hunk.newLines + ' @@');
+    ret.push.apply(ret, hunk.lines);
+  }
+
+  return ret.join('\n') + '\n';
+}
+function createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
+  return formatPatch(structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options));
+}
+function createPatch(fileName, oldStr, newStr, oldHeader, newHeader, options) {
+  return createTwoFilesPatch(fileName, fileName, oldStr, newStr, oldHeader, newHeader, options);
+}
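+
+// Illustrative usage sketch (kept as a comment): createPatch diffs one file
+// against itself by name, delegating to createTwoFilesPatch, which renders an
+// "Index:" line, the ---/+++ headers and the "@@ -s,n +s,n @@" hunks produced
+// by structuredPatch:
+//   createPatch('greeting.txt', 'hello\n', 'goodbye\n');
+//   // ends with '@@ -1,1 +1,1 @@\n-hello\n+goodbye\n'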
+
+function arrayEqual(a, b) {
+  if (a.length !== b.length) {
+    return false;
+  }
+
+  return arrayStartsWith(a, b);
+}
+function arrayStartsWith(array, start) {
+  if (start.length > array.length) {
+    return false;
+  }
+
+  for (var i = 0; i < start.length; i++) {
+    if (start[i] !== array[i]) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+function calcLineCount(hunk) {
+  var _calcOldNewLineCount = calcOldNewLineCount(hunk.lines),
+      oldLines = _calcOldNewLineCount.oldLines,
+      newLines = _calcOldNewLineCount.newLines;
+
+  if (oldLines !== undefined) {
+    hunk.oldLines = oldLines;
+  } else {
+    delete hunk.oldLines;
+  }
+
+  if (newLines !== undefined) {
+    hunk.newLines = newLines;
+  } else {
+    delete hunk.newLines;
+  }
+}
+function merge(mine, theirs, base) {
+  mine = loadPatch(mine, base);
+  theirs = loadPatch(theirs, base);
+  var ret = {}; // For index we just let it pass through as it doesn't have any necessary meaning.
+  // Leaving sanity checks on this to the API consumer that may know more about the
+  // meaning in their own context.
+
+  if (mine.index || theirs.index) {
+    ret.index = mine.index || theirs.index;
+  }
+
+  if (mine.newFileName || theirs.newFileName) {
+    if (!fileNameChanged(mine)) {
+      // No header or no change in ours, use theirs (and ours if theirs does not exist)
+      ret.oldFileName = theirs.oldFileName || mine.oldFileName;
+      ret.newFileName = theirs.newFileName || mine.newFileName;
+      ret.oldHeader = theirs.oldHeader || mine.oldHeader;
+      ret.newHeader = theirs.newHeader || mine.newHeader;
+    } else if (!fileNameChanged(theirs)) {
+      // No header or no change in theirs, use ours
+      ret.oldFileName = mine.oldFileName;
+      ret.newFileName = mine.newFileName;
+      ret.oldHeader = mine.oldHeader;
+      ret.newHeader = mine.newHeader;
+    } else {
+      // Both changed... figure it out
+      ret.oldFileName = selectField(ret, mine.oldFileName, theirs.oldFileName);
+      ret.newFileName = selectField(ret, mine.newFileName, theirs.newFileName);
+      ret.oldHeader = selectField(ret, mine.oldHeader, theirs.oldHeader);
+      ret.newHeader = selectField(ret, mine.newHeader, theirs.newHeader);
+    }
+  }
+
+  ret.hunks = [];
+  var mineIndex = 0,
+      theirsIndex = 0,
+      mineOffset = 0,
+      theirsOffset = 0;
+
+  while (mineIndex < mine.hunks.length || theirsIndex < theirs.hunks.length) {
+    var mineCurrent = mine.hunks[mineIndex] || {
+      oldStart: Infinity
+    },
+        theirsCurrent = theirs.hunks[theirsIndex] || {
+      oldStart: Infinity
+    };
+
+    if (hunkBefore(mineCurrent, theirsCurrent)) {
+      // This patch does not overlap with any of the others, yay.
+      ret.hunks.push(cloneHunk(mineCurrent, mineOffset));
+      mineIndex++;
+      theirsOffset += mineCurrent.newLines - mineCurrent.oldLines;
+    } else if (hunkBefore(theirsCurrent, mineCurrent)) {
+      // This patch does not overlap with any of the others, yay.
+      ret.hunks.push(cloneHunk(theirsCurrent, theirsOffset));
+      theirsIndex++;
+      mineOffset += theirsCurrent.newLines - theirsCurrent.oldLines;
+    } else {
+      // Overlap, merge as best we can
+      var mergedHunk = {
+        oldStart: Math.min(mineCurrent.oldStart, theirsCurrent.oldStart),
+        oldLines: 0,
+        newStart: Math.min(mineCurrent.newStart + mineOffset, theirsCurrent.oldStart + theirsOffset),
+        newLines: 0,
+        lines: []
+      };
+      mergeLines(mergedHunk, mineCurrent.oldStart, mineCurrent.lines, theirsCurrent.oldStart, theirsCurrent.lines);
+      theirsIndex++;
+      mineIndex++;
+      ret.hunks.push(mergedHunk);
+    }
+  }
+
+  return ret;
+}
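+
+// Illustrative usage sketch (kept as a comment; `minePatch` and `theirsPatch`
+// are hypothetical inputs): merge combines two patches made against the same
+// base, or two edited strings plus the shared base text. Non-overlapping hunks
+// pass through; overlapping hunks are merged where possible and otherwise
+// flagged with a truthy `conflict` property:
+//   var merged = merge(minePatch, theirsPatch);
+//   merged.hunks.some(function (h) { return h.conflict; }); // any conflicts?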
+
+function loadPatch(param, base) {
+  if (typeof param === 'string') {
+    if (/^@@/m.test(param) || /^Index:/m.test(param)) {
+      return parsePatch(param)[0];
+    }
+
+    if (!base) {
+      throw new Error('Must provide a base reference or pass in a patch');
+    }
+
+    return structuredPatch(undefined, undefined, base, param);
+  }
+
+  return param;
+}
+
+function fileNameChanged(patch) {
+  return patch.newFileName && patch.newFileName !== patch.oldFileName;
+}
+
+function selectField(index, mine, theirs) {
+  if (mine === theirs) {
+    return mine;
+  } else {
+    index.conflict = true;
+    return {
+      mine: mine,
+      theirs: theirs
+    };
+  }
+}
+
+function hunkBefore(test, check) {
+  return test.oldStart < check.oldStart && test.oldStart + test.oldLines < check.oldStart;
+}
+
+function cloneHunk(hunk, offset) {
+  return {
+    oldStart: hunk.oldStart,
+    oldLines: hunk.oldLines,
+    newStart: hunk.newStart + offset,
+    newLines: hunk.newLines,
+    lines: hunk.lines
+  };
+}
+
+function mergeLines(hunk, mineOffset, mineLines, theirOffset, theirLines) {
+  // This will generally result in a conflicted hunk, but there are cases where the context
+  // is the only overlap where we can successfully merge the content here.
+  var mine = {
+    offset: mineOffset,
+    lines: mineLines,
+    index: 0
+  },
+      their = {
+    offset: theirOffset,
+    lines: theirLines,
+    index: 0
+  }; // Handle any leading content
+
+  insertLeading(hunk, mine, their);
+  insertLeading(hunk, their, mine); // Now in the overlap content. Scan through and select the best changes from each.
+
+  while (mine.index < mine.lines.length && their.index < their.lines.length) {
+    var mineCurrent = mine.lines[mine.index],
+        theirCurrent = their.lines[their.index];
+
+    if ((mineCurrent[0] === '-' || mineCurrent[0] === '+') && (theirCurrent[0] === '-' || theirCurrent[0] === '+')) {
+      // Both modified ...
+      mutualChange(hunk, mine, their);
+    } else if (mineCurrent[0] === '+' && theirCurrent[0] === ' ') {
+      var _hunk$lines;
+
+      // Mine inserted
+      (_hunk$lines = hunk.lines).push.apply(_hunk$lines, _toConsumableArray(collectChange(mine)));
+    } else if (theirCurrent[0] === '+' && mineCurrent[0] === ' ') {
+      var _hunk$lines2;
+
+      // Theirs inserted
+      (_hunk$lines2 = hunk.lines).push.apply(_hunk$lines2, _toConsumableArray(collectChange(their)));
+    } else if (mineCurrent[0] === '-' && theirCurrent[0] === ' ') {
+      // Mine removed or edited
+      removal(hunk, mine, their);
+    } else if (theirCurrent[0] === '-' && mineCurrent[0] === ' ') {
+      // Their removed or edited
+      removal(hunk, their, mine, true);
+    } else if (mineCurrent === theirCurrent) {
+      // Context identity
+      hunk.lines.push(mineCurrent);
+      mine.index++;
+      their.index++;
+    } else {
+      // Context mismatch
+      conflict(hunk, collectChange(mine), collectChange(their));
+    }
+  } // Now push anything that may be remaining
+
+
+  insertTrailing(hunk, mine);
+  insertTrailing(hunk, their);
+  calcLineCount(hunk);
+}
+
+function mutualChange(hunk, mine, their) {
+  var myChanges = collectChange(mine),
+      theirChanges = collectChange(their);
+
+  if (allRemoves(myChanges) && allRemoves(theirChanges)) {
+    // Special case for remove changes that are supersets of one another
+    if (arrayStartsWith(myChanges, theirChanges) && skipRemoveSuperset(their, myChanges, myChanges.length - theirChanges.length)) {
+      var _hunk$lines3;
+
+      (_hunk$lines3 = hunk.lines).push.apply(_hunk$lines3, _toConsumableArray(myChanges));
+
+      return;
+    } else if (arrayStartsWith(theirChanges, myChanges) && skipRemoveSuperset(mine, theirChanges, theirChanges.length - myChanges.length)) {
+      var _hunk$lines4;
+
+      (_hunk$lines4 = hunk.lines).push.apply(_hunk$lines4, _toConsumableArray(theirChanges));
+
+      return;
+    }
+  } else if (arrayEqual(myChanges, theirChanges)) {
+    var _hunk$lines5;
+
+    (_hunk$lines5 = hunk.lines).push.apply(_hunk$lines5, _toConsumableArray(myChanges));
+
+    return;
+  }
+
+  conflict(hunk, myChanges, theirChanges);
+}
+
+function removal(hunk, mine, their, swap) {
+  var myChanges = collectChange(mine),
+      theirChanges = collectContext(their, myChanges);
+
+  if (theirChanges.merged) {
+    var _hunk$lines6;
+
+    (_hunk$lines6 = hunk.lines).push.apply(_hunk$lines6, _toConsumableArray(theirChanges.merged));
+  } else {
+    conflict(hunk, swap ? theirChanges : myChanges, swap ? myChanges : theirChanges);
+  }
+}
+
+function conflict(hunk, mine, their) {
+  hunk.conflict = true;
+  hunk.lines.push({
+    conflict: true,
+    mine: mine,
+    theirs: their
+  });
+}
+
+function insertLeading(hunk, insert, their) {
+  while (insert.offset < their.offset && insert.index < insert.lines.length) {
+    var line = insert.lines[insert.index++];
+    hunk.lines.push(line);
+    insert.offset++;
+  }
+}
+
+function insertTrailing(hunk, insert) {
+  while (insert.index < insert.lines.length) {
+    var line = insert.lines[insert.index++];
+    hunk.lines.push(line);
+  }
+}
+
+function collectChange(state) {
+  var ret = [],
+      operation = state.lines[state.index][0];
+
+  while (state.index < state.lines.length) {
+    var line = state.lines[state.index]; // Group additions that are immediately after subtractions and treat them as one "atomic" modify change.
+
+    if (operation === '-' && line[0] === '+') {
+      operation = '+';
+    }
+
+    if (operation === line[0]) {
+      ret.push(line);
+      state.index++;
+    } else {
+      break;
+    }
+  }
+
+  return ret;
+}
+
+function collectContext(state, matchChanges) {
+  var changes = [],
+      merged = [],
+      matchIndex = 0,
+      contextChanges = false,
+      conflicted = false;
+
+  while (matchIndex < matchChanges.length && state.index < state.lines.length) {
+    var change = state.lines[state.index],
+        match = matchChanges[matchIndex]; // Once we've hit our add, then we are done
+
+    if (match[0] === '+') {
+      break;
+    }
+
+    contextChanges = contextChanges || change[0] !== ' ';
+    merged.push(match);
+    matchIndex++; // Consume any additions in the other block as a conflict to attempt
+    // to pull in the remaining context after this
+
+    if (change[0] === '+') {
+      conflicted = true;
+
+      while (change[0] === '+') {
+        changes.push(change);
+        change = state.lines[++state.index];
+      }
+    }
+
+    if (match.substr(1) === change.substr(1)) {
+      changes.push(change);
+      state.index++;
+    } else {
+      conflicted = true;
+    }
+  }
+
+  if ((matchChanges[matchIndex] || '')[0] === '+' && contextChanges) {
+    conflicted = true;
+  }
+
+  if (conflicted) {
+    return changes;
+  }
+
+  while (matchIndex < matchChanges.length) {
+    merged.push(matchChanges[matchIndex++]);
+  }
+
+  return {
+    merged: merged,
+    changes: changes
+  };
+}
+
+function allRemoves(changes) {
+  return changes.reduce(function (prev, change) {
+    return prev && change[0] === '-';
+  }, true);
+}
+
+function skipRemoveSuperset(state, removeChanges, delta) {
+  for (var i = 0; i < delta; i++) {
+    var changeContent = removeChanges[removeChanges.length - delta + i].substr(1);
+
+    if (state.lines[state.index + i] !== ' ' + changeContent) {
+      return false;
+    }
+  }
+
+  state.index += delta;
+  return true;
+}
+
+function calcOldNewLineCount(lines) {
+  var oldLines = 0;
+  var newLines = 0;
+  lines.forEach(function (line) {
+    if (typeof line !== 'string') {
+      var myCount = calcOldNewLineCount(line.mine);
+      var theirCount = calcOldNewLineCount(line.theirs);
+
+      if (oldLines !== undefined) {
+        if (myCount.oldLines === theirCount.oldLines) {
+          oldLines += myCount.oldLines;
+        } else {
+          oldLines = undefined;
+        }
+      }
+
+      if (newLines !== undefined) {
+        if (myCount.newLines === theirCount.newLines) {
+          newLines += myCount.newLines;
+        } else {
+          newLines = undefined;
+        }
+      }
+    } else {
+      if (newLines !== undefined && (line[0] === '+' || line[0] === ' ')) {
+        newLines++;
+      }
+
+      if (oldLines !== undefined && (line[0] === '-' || line[0] === ' ')) {
+        oldLines++;
+      }
+    }
+  });
+  return {
+    oldLines: oldLines,
+    newLines: newLines
+  };
+}
+
+// See: http://code.google.com/p/google-diff-match-patch/wiki/API
+function convertChangesToDMP(changes) {
+  var ret = [],
+      change,
+      operation;
+
+  for (var i = 0; i < changes.length; i++) {
+    change = changes[i];
+
+    if (change.added) {
+      operation = 1;
+    } else if (change.removed) {
+      operation = -1;
+    } else {
+      operation = 0;
+    }
+
+    ret.push([operation, change.value]);
+  }
+
+  return ret;
+}
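+
+// Illustrative usage sketch (kept as a comment): converts a change list into
+// diff-match-patch style [operation, text] pairs:
+//   convertChangesToDMP(diffChars('ab', 'ax'));
+//   // => [[0, 'a'], [-1, 'b'], [1, 'x']]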
+
+function convertChangesToXML(changes) {
+  var ret = [];
+
+  for (var i = 0; i < changes.length; i++) {
+    var change = changes[i];
+
+    if (change.added) {
+      ret.push('<ins>');
+    } else if (change.removed) {
+      ret.push('<del>');
+    }
+
+    ret.push(escapeHTML(change.value));
+
+    if (change.added) {
+      ret.push('</ins>');
+    } else if (change.removed) {
+      ret.push('</del>');
+    }
+  }
+
+  return ret.join('');
+}
+
+function escapeHTML(s) {
+  var n = s;
+  n = n.replace(/&/g, '&amp;');
+  n = n.replace(/</g, '&lt;');
+  n = n.replace(/>/g, '&gt;');
+  n = n.replace(/"/g, '&quot;');
+  return n;
+}
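+
+// Illustrative usage sketch (kept as a comment): wraps additions in <ins> and
+// removals in <del>, HTML-escaping the content via escapeHTML above:
+//   convertChangesToXML(diffChars('ab', 'ax'));
+//   // => 'a<del>b</del><ins>x</ins>'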
+
+export { Diff, applyPatch, applyPatches, canonicalize, convertChangesToDMP, convertChangesToXML, createPatch, createTwoFilesPatch, diffArrays, diffChars, diffCss, diffJson, diffLines, diffSentences, diffTrimmedLines, diffWords, diffWordsWithSpace, merge, parsePatch, structuredPatch };
diff --git a/_extensions/d2/node_modules/diff/lib/index.js b/_extensions/d2/node_modules/diff/lib/index.js
new file mode 100644
index 00000000..920f0fee
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/index.js
@@ -0,0 +1,216 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+Object.defineProperty(exports, "Diff", {
+  enumerable: true,
+  get: function get() {
+    return _base["default"];
+  }
+});
+Object.defineProperty(exports, "diffChars", {
+  enumerable: true,
+  get: function get() {
+    return _character.diffChars;
+  }
+});
+Object.defineProperty(exports, "diffWords", {
+  enumerable: true,
+  get: function get() {
+    return _word.diffWords;
+  }
+});
+Object.defineProperty(exports, "diffWordsWithSpace", {
+  enumerable: true,
+  get: function get() {
+    return _word.diffWordsWithSpace;
+  }
+});
+Object.defineProperty(exports, "diffLines", {
+  enumerable: true,
+  get: function get() {
+    return _line.diffLines;
+  }
+});
+Object.defineProperty(exports, "diffTrimmedLines", {
+  enumerable: true,
+  get: function get() {
+    return _line.diffTrimmedLines;
+  }
+});
+Object.defineProperty(exports, "diffSentences", {
+  enumerable: true,
+  get: function get() {
+    return _sentence.diffSentences;
+  }
+});
+Object.defineProperty(exports, "diffCss", {
+  enumerable: true,
+  get: function get() {
+    return _css.diffCss;
+  }
+});
+Object.defineProperty(exports, "diffJson", {
+  enumerable: true,
+  get: function get() {
+    return _json.diffJson;
+  }
+});
+Object.defineProperty(exports, "canonicalize", {
+  enumerable: true,
+  get: function get() {
+    return _json.canonicalize;
+  }
+});
+Object.defineProperty(exports, "diffArrays", {
+  enumerable: true,
+  get: function get() {
+    return _array.diffArrays;
+  }
+});
+Object.defineProperty(exports, "applyPatch", {
+  enumerable: true,
+  get: function get() {
+    return _apply.applyPatch;
+  }
+});
+Object.defineProperty(exports, "applyPatches", {
+  enumerable: true,
+  get: function get() {
+    return _apply.applyPatches;
+  }
+});
+Object.defineProperty(exports, "parsePatch", {
+  enumerable: true,
+  get: function get() {
+    return _parse.parsePatch;
+  }
+});
+Object.defineProperty(exports, "merge", {
+  enumerable: true,
+  get: function get() {
+    return _merge.merge;
+  }
+});
+Object.defineProperty(exports, "structuredPatch", {
+  enumerable: true,
+  get: function get() {
+    return _create.structuredPatch;
+  }
+});
+Object.defineProperty(exports, "createTwoFilesPatch", {
+  enumerable: true,
+  get: function get() {
+    return _create.createTwoFilesPatch;
+  }
+});
+Object.defineProperty(exports, "createPatch", {
+  enumerable: true,
+  get: function get() {
+    return _create.createPatch;
+  }
+});
+Object.defineProperty(exports, "convertChangesToDMP", {
+  enumerable: true,
+  get: function get() {
+    return _dmp.convertChangesToDMP;
+  }
+});
+Object.defineProperty(exports, "convertChangesToXML", {
+  enumerable: true,
+  get: function get() {
+    return _xml.convertChangesToXML;
+  }
+});
+
+/*istanbul ignore end*/
+var
+/*istanbul ignore start*/
+_base = _interopRequireDefault(require("./diff/base"))
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_character = require("./diff/character")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_word = require("./diff/word")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_line = require("./diff/line")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_sentence = require("./diff/sentence")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_css = require("./diff/css")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_json = require("./diff/json")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_array = require("./diff/array")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_apply = require("./patch/apply")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_parse = require("./patch/parse")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_merge = require("./patch/merge")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_create = require("./patch/create")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_dmp = require("./convert/dmp")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_xml = require("./convert/xml")
+/*istanbul ignore end*/
+;
+
+/*istanbul ignore start*/ function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
+
+/*istanbul ignore end*/
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uL3NyYy9pbmRleC5qcyJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiOzs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7OztBQWdCQTtBQUFBO0FBQUE7QUFBQTtBQUFBOztBQUNBO0FBQUE7QUFBQTtBQUFBO0FBQUE7O0FBQ0E7QUFBQTtBQUFBO0FBQUE7QUFBQTs7QUFDQTtBQUFBO0FBQUE7QUFBQTtBQUFBOztBQUNBO0FBQUE7QUFBQTtBQUFBO0FBQUE7O0FBRUE7QUFBQTtBQUFBO0FBQUE7QUFBQTs7QUFDQTtBQUFBO0FBQUE7QUFBQTtBQUFBOztBQUVBO0FBQUE7QUFBQTtBQUFBO0FBQUE7O0FBRUE7QUFBQTtBQUFBO0FBQUE7QUFBQTs7QUFDQTtBQUFBO0FBQUE7QUFBQTtBQUFBOztBQUNBO0FBQUE7QUFBQTtBQUFBO0FBQUE7O0FBQ0E7QUFBQTtBQUFBO0FBQUE7QUFBQTs7QUFFQTtBQUFBO0FBQUE7QUFBQTtBQUFBOztBQUNBO0FBQUE7QUFBQTtBQUFBO0FBQUEiLCJzb3VyY2VzQ29udGVudCI6WyIvKiBTZWUgTElDRU5TRSBmaWxlIGZvciB0ZXJtcyBvZiB1c2UgKi9cblxuLypcbiAqIFRleHQgZGlmZiBpbXBsZW1lbnRhdGlvbi5cbiAqXG4gKiBUaGlzIGxpYnJhcnkgc3VwcG9ydHMgdGhlIGZvbGxvd2luZyBBUElTOlxuICogSnNEaWZmLmRpZmZDaGFyczogQ2hhcmFjdGVyIGJ5IGNoYXJhY3RlciBkaWZmXG4gKiBKc0RpZmYuZGlmZldvcmRzOiBXb3JkIChhcyBkZWZpbmVkIGJ5IFxcYiByZWdleCkgZGlmZiB3aGljaCBpZ25vcmVzIHdoaXRlc3BhY2VcbiAqIEpzRGlmZi5kaWZmTGluZXM6IExpbmUgYmFzZWQgZGlmZlxuICpcbiAqIEpzRGlmZi5kaWZmQ3NzOiBEaWZmIHRhcmdldGVkIGF0IENTUyBjb250ZW50XG4gKlxuICogVGhlc2UgbWV0aG9kcyBhcmUgYmFzZWQgb24gdGhlIGltcGxlbWVudGF0aW9uIHByb3Bvc2VkIGluXG4gKiBcIkFuIE8oTkQpIERpZmZlcmVuY2UgQWxnb3JpdGhtIGFuZCBpdHMgVmFyaWF0aW9uc1wiIChNeWVycywgMTk4NikuXG4gKiBodHRwOi8vY2l0ZXNlZXJ4LmlzdC5wc3UuZWR1L3ZpZXdkb2Mvc3VtbWFyeT9kb2k9MTAuMS4xLjQuNjkyN1xuICovXG5pbXBvcnQgRGlmZiBmcm9tICcuL2RpZmYvYmFzZSc7XG5pbXBvcnQge2RpZmZDaGFyc30gZnJvbSAnLi9kaWZmL2NoYXJhY3Rlcic7XG5pbXBvcnQge2RpZmZXb3JkcywgZGlmZldvcmRzV2l0aFNwYWNlfSBmcm9tICcuL2RpZmYvd29yZCc7XG5pbXBvcnQge2RpZmZMaW5lcywgZGlmZlRyaW1tZWRMaW5lc30gZnJvbSAnLi9kaWZmL2xpbmUnO1xuaW1wb3J0IHtkaWZmU2VudGVuY2VzfSBmcm9tICcuL2RpZmYvc2VudGVuY2UnO1xuXG5pbXBvcnQge2RpZmZDc3N9IGZyb20gJy4vZGlmZi9jc3MnO1xuaW1wb3J0IHtkaWZmSnNvbiwgY2Fub25pY2FsaXplfSBmcm9tICcuL2RpZmYvanNvbic7XG5cbmltcG9ydCB7ZGlmZkFycmF5c30gZnJvbSAnLi9kaWZmL2FycmF5JztcblxuaW1wb3J0IHthcHBseVBhdGNoLCBhcHBseVBhdGNoZXN9IGZyb20gJy4vcGF0Y2gvYXBwbHknO1xuaW1wb3J0IHtwYXJzZVBhdGNofSBmcm9tICcuL3BhdGNoL3BhcnNlJztcbmltcG9ydCB7bWVyZ2V9IGZyb20gJy4vcGF0Y2gvbWVyZ2UnO1xuaW1wb3J0IHtzdHJ1Y3R1cmVkUGF0Y2gsIGNyZWF0ZVR3b0ZpbGVzUGF0Y2gsIGNyZWF0ZVBhdGNofSBmcm9tICcuL3BhdGNoL2NyZWF0ZSc7XG5cbmltcG9ydCB7Y29udmVydENoYW5nZXNUb0RNUH0gZnJvbSAnLi9jb252ZXJ0L2RtcCc7XG5pbXBvcnQge2NvbnZlcnRDaGFuZ2VzVG9YTUx9IGZyb20gJy4vY29udmVydC94bWwnO1xuXG5leHBvcnQge1xuICBEaWZmLFxuXG4gIGRpZmZDaGFycyxcbiAgZGlmZldvcmRzLFxuICBkaWZmV29yZHNXaXRoU3BhY2UsXG4gIGRpZmZMaW5lcyxcbiAgZGlmZlRyaW1tZWRMaW5lcyxcbiAgZGlmZlNlbnRlbmNlcyxcblxuICBkaWZmQ3NzLFxuICBkaWZmSnNvbixcblxuICBkaWZmQXJyYXlzLFxuXG4gIHN0cnVjdHVyZWRQYXRjaCxcbiAgY3JlYXRlVHdvRmlsZXNQYXRjaCxcbiAgY3JlYXRlUGF0Y2gsXG4gIGFwcGx5UGF0Y2gsXG4gIGFwcGx5UGF0Y2hlcyxcbiAgcGFyc2VQYXRjaCxcbiAgbWVyZ2UsXG4gIGNvbnZlcnRDaGFuZ2VzVG9ETVAsXG4gIGNvbnZlcnRDaGFuZ2VzVG9YTUwsXG4gIGNhbm9uaWNhbGl6ZVxufTtcbiJdfQ==
diff --git a/_extensions/d2/node_modules/diff/lib/index.mjs b/_extensions/d2/node_modules/diff/lib/index.mjs
new file mode 100644
index 00000000..c2a00135
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/index.mjs
@@ -0,0 +1,1561 @@
+function Diff() {}
+Diff.prototype = {
+  diff: function diff(oldString, newString) {
+    var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
+    var callback = options.callback;
+
+    if (typeof options === 'function') {
+      callback = options;
+      options = {};
+    }
+
+    this.options = options;
+    var self = this;
+
+    function done(value) {
+      if (callback) {
+        setTimeout(function () {
+          callback(undefined, value);
+        }, 0);
+        return true;
+      } else {
+        return value;
+      }
+    } // Allow subclasses to massage the input prior to running
+
+
+    oldString = this.castInput(oldString);
+    newString = this.castInput(newString);
+    oldString = this.removeEmpty(this.tokenize(oldString));
+    newString = this.removeEmpty(this.tokenize(newString));
+    var newLen = newString.length,
+        oldLen = oldString.length;
+    var editLength = 1;
+    var maxEditLength = newLen + oldLen;
+
+    if (options.maxEditLength) {
+      maxEditLength = Math.min(maxEditLength, options.maxEditLength);
+    }
+
+    var bestPath = [{
+      newPos: -1,
+      components: []
+    }]; // Seed editLength = 0, i.e. the content starts with the same values
+
+    var oldPos = this.extractCommon(bestPath[0], newString, oldString, 0);
+
+    if (bestPath[0].newPos + 1 >= newLen && oldPos + 1 >= oldLen) {
+      // Identity per the equality and tokenizer
+      return done([{
+        value: this.join(newString),
+        count: newString.length
+      }]);
+    } // Main worker method. checks all permutations of a given edit length for acceptance.
+
+
+    function execEditLength() {
+      for (var diagonalPath = -1 * editLength; diagonalPath <= editLength; diagonalPath += 2) {
+        var basePath = void 0;
+
+        var addPath = bestPath[diagonalPath - 1],
+            removePath = bestPath[diagonalPath + 1],
+            _oldPos = (removePath ? removePath.newPos : 0) - diagonalPath;
+
+        if (addPath) {
+          // No one else is going to attempt to use this value, clear it
+          bestPath[diagonalPath - 1] = undefined;
+        }
+
+        var canAdd = addPath && addPath.newPos + 1 < newLen,
+            canRemove = removePath && 0 <= _oldPos && _oldPos < oldLen;
+
+        if (!canAdd && !canRemove) {
+          // If this path is a terminal then prune
+          bestPath[diagonalPath] = undefined;
+          continue;
+        } // Select the diagonal that we want to branch from. We select the prior
+        // path whose position in the new string is the farthest from the origin
+        // and does not pass the bounds of the diff graph
+
+
+        if (!canAdd || canRemove && addPath.newPos < removePath.newPos) {
+          basePath = clonePath(removePath);
+          self.pushComponent(basePath.components, undefined, true);
+        } else {
+          basePath = addPath; // No need to clone, we've pulled it from the list
+
+          basePath.newPos++;
+          self.pushComponent(basePath.components, true, undefined);
+        }
+
+        _oldPos = self.extractCommon(basePath, newString, oldString, diagonalPath); // If we have hit the end of both strings, then we are done
+
+        if (basePath.newPos + 1 >= newLen && _oldPos + 1 >= oldLen) {
+          return done(buildValues(self, basePath.components, newString, oldString, self.useLongestToken));
+        } else {
+          // Otherwise track this path as a potential candidate and continue.
+          bestPath[diagonalPath] = basePath;
+        }
+      }
+
+      editLength++;
+    } // Performs the length of edit iteration. Is a bit fugly as this has to support the
+    // sync and async mode which is never fun. Loops over execEditLength until a value
+    // is produced, or until the edit length exceeds options.maxEditLength (if given),
+    // in which case it will return undefined.
+
+
+    if (callback) {
+      (function exec() {
+        setTimeout(function () {
+          if (editLength > maxEditLength) {
+            return callback();
+          }
+
+          if (!execEditLength()) {
+            exec();
+          }
+        }, 0);
+      })();
+    } else {
+      while (editLength <= maxEditLength) {
+        var ret = execEditLength();
+
+        if (ret) {
+          return ret;
+        }
+      }
+    }
+  },
+  pushComponent: function pushComponent(components, added, removed) {
+    var last = components[components.length - 1];
+
+    if (last && last.added === added && last.removed === removed) {
+      // We need to clone here as the component clone operation is just
+      // as shallow array clone
+      components[components.length - 1] = {
+        count: last.count + 1,
+        added: added,
+        removed: removed
+      };
+    } else {
+      components.push({
+        count: 1,
+        added: added,
+        removed: removed
+      });
+    }
+  },
+  extractCommon: function extractCommon(basePath, newString, oldString, diagonalPath) {
+    var newLen = newString.length,
+        oldLen = oldString.length,
+        newPos = basePath.newPos,
+        oldPos = newPos - diagonalPath,
+        commonCount = 0;
+
+    while (newPos + 1 < newLen && oldPos + 1 < oldLen && this.equals(newString[newPos + 1], oldString[oldPos + 1])) {
+      newPos++;
+      oldPos++;
+      commonCount++;
+    }
+
+    if (commonCount) {
+      basePath.components.push({
+        count: commonCount
+      });
+    }
+
+    basePath.newPos = newPos;
+    return oldPos;
+  },
+  equals: function equals(left, right) {
+    if (this.options.comparator) {
+      return this.options.comparator(left, right);
+    } else {
+      return left === right || this.options.ignoreCase && left.toLowerCase() === right.toLowerCase();
+    }
+  },
+  removeEmpty: function removeEmpty(array) {
+    var ret = [];
+
+    for (var i = 0; i < array.length; i++) {
+      if (array[i]) {
+        ret.push(array[i]);
+      }
+    }
+
+    return ret;
+  },
+  castInput: function castInput(value) {
+    return value;
+  },
+  tokenize: function tokenize(value) {
+    return value.split('');
+  },
+  join: function join(chars) {
+    return chars.join('');
+  }
+};
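+
+// Illustrative usage sketch (kept as a comment; `csvDiff` is a hypothetical
+// instance): custom diff behaviour is obtained by overriding tokenize, equals
+// or join on a Diff instance, just as wordDiff and lineDiff do below:
+//   var csvDiff = new Diff();
+//   csvDiff.tokenize = function (value) { return value.split(','); };
+//   csvDiff.join = function (tokens) { return tokens.join(','); };
+//   csvDiff.diff('a,b,c', 'a,x,c'); // 'a' kept, 'b' removed, 'x' added, 'c' kept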
+
+function buildValues(diff, components, newString, oldString, useLongestToken) {
+  var componentPos = 0,
+      componentLen = components.length,
+      newPos = 0,
+      oldPos = 0;
+
+  for (; componentPos < componentLen; componentPos++) {
+    var component = components[componentPos];
+
+    if (!component.removed) {
+      if (!component.added && useLongestToken) {
+        var value = newString.slice(newPos, newPos + component.count);
+        value = value.map(function (value, i) {
+          var oldValue = oldString[oldPos + i];
+          return oldValue.length > value.length ? oldValue : value;
+        });
+        component.value = diff.join(value);
+      } else {
+        component.value = diff.join(newString.slice(newPos, newPos + component.count));
+      }
+
+      newPos += component.count; // Common case
+
+      if (!component.added) {
+        oldPos += component.count;
+      }
+    } else {
+      component.value = diff.join(oldString.slice(oldPos, oldPos + component.count));
+      oldPos += component.count; // Reverse add and remove so removes are output first to match common convention
+      // The diffing algorithm is tied to add then remove output and this is the simplest
+      // route to get the desired output with minimal overhead.
+
+      if (componentPos && components[componentPos - 1].added) {
+        var tmp = components[componentPos - 1];
+        components[componentPos - 1] = components[componentPos];
+        components[componentPos] = tmp;
+      }
+    }
+  } // Special case handle for when one terminal is ignored (i.e. whitespace).
+  // For this case we merge the terminal into the prior string and drop the change.
+  // This is only available for string mode.
+
+
+  var lastComponent = components[componentLen - 1];
+
+  if (componentLen > 1 && typeof lastComponent.value === 'string' && (lastComponent.added || lastComponent.removed) && diff.equals('', lastComponent.value)) {
+    components[componentLen - 2].value += lastComponent.value;
+    components.pop();
+  }
+
+  return components;
+}
+
+function clonePath(path) {
+  return {
+    newPos: path.newPos,
+    components: path.components.slice(0)
+  };
+}
+
+var characterDiff = new Diff();
+function diffChars(oldStr, newStr, options) {
+  return characterDiff.diff(oldStr, newStr, options);
+}
+
+function generateOptions(options, defaults) {
+  if (typeof options === 'function') {
+    defaults.callback = options;
+  } else if (options) {
+    for (var name in options) {
+      /* istanbul ignore else */
+      if (options.hasOwnProperty(name)) {
+        defaults[name] = options[name];
+      }
+    }
+  }
+
+  return defaults;
+}
+
+//
+// Ranges and exceptions:
+// Latin-1 Supplement, 0080–00FF
+//  - U+00D7  × Multiplication sign
+//  - U+00F7  ÷ Division sign
+// Latin Extended-A, 0100–017F
+// Latin Extended-B, 0180–024F
+// IPA Extensions, 0250–02AF
+// Spacing Modifier Letters, 02B0–02FF
+//  - U+02C7  ˇ ˇ  Caron
+//  - U+02D8  ˘ ˘  Breve
+//  - U+02D9  ˙ ˙  Dot Above
+//  - U+02DA  ˚ ˚  Ring Above
+//  - U+02DB  ˛ ˛  Ogonek
+//  - U+02DC  ˜ ˜  Small Tilde
+//  - U+02DD  ˝ ˝  Double Acute Accent
+// Latin Extended Additional, 1E00–1EFF
+
+var extendedWordChars = /^[A-Za-z\xC0-\u02C6\u02C8-\u02D7\u02DE-\u02FF\u1E00-\u1EFF]+$/;
+var reWhitespace = /\S/;
+var wordDiff = new Diff();
+
+wordDiff.equals = function (left, right) {
+  if (this.options.ignoreCase) {
+    left = left.toLowerCase();
+    right = right.toLowerCase();
+  }
+
+  return left === right || this.options.ignoreWhitespace && !reWhitespace.test(left) && !reWhitespace.test(right);
+};
+
+wordDiff.tokenize = function (value) {
+  // All whitespace symbols except newline group into one token, each newline - in separate token
+  var tokens = value.split(/([^\S\r\n]+|[()[\]{}'"\r\n]|\b)/); // Join the boundary splits that we do not consider to be boundaries. This is primarily the extended Latin character set.
+
+  for (var i = 0; i < tokens.length - 1; i++) {
+    // If we have an empty string in the next field and we have only word chars before and after, merge
+    if (!tokens[i + 1] && tokens[i + 2] && extendedWordChars.test(tokens[i]) && extendedWordChars.test(tokens[i + 2])) {
+      tokens[i] += tokens[i + 2];
+      tokens.splice(i + 1, 2);
+      i--;
+    }
+  }
+
+  return tokens;
+};
+
+function diffWords(oldStr, newStr, options) {
+  options = generateOptions(options, {
+    ignoreWhitespace: true
+  });
+  return wordDiff.diff(oldStr, newStr, options);
+}
+function diffWordsWithSpace(oldStr, newStr, options) {
+  return wordDiff.diff(oldStr, newStr, options);
+}
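+
+// Illustrative usage sketch (kept as a comment): diffWords treats runs of
+// whitespace as equal (ignoreWhitespace defaults to true), while
+// diffWordsWithSpace keeps whitespace changes visible:
+//   diffWords('beep boop', 'beep  boop');          // one unchanged part
+//   diffWordsWithSpace('beep boop', 'beep  boop'); // reports ' ' -> '  '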
+
+var lineDiff = new Diff();
+
+lineDiff.tokenize = function (value) {
+  var retLines = [],
+      linesAndNewlines = value.split(/(\n|\r\n)/); // Ignore the final empty token that occurs if the string ends with a new line
+
+  if (!linesAndNewlines[linesAndNewlines.length - 1]) {
+    linesAndNewlines.pop();
+  } // Merge the content and line separators into single tokens
+
+
+  for (var i = 0; i < linesAndNewlines.length; i++) {
+    var line = linesAndNewlines[i];
+
+    if (i % 2 && !this.options.newlineIsToken) {
+      retLines[retLines.length - 1] += line;
+    } else {
+      if (this.options.ignoreWhitespace) {
+        line = line.trim();
+      }
+
+      retLines.push(line);
+    }
+  }
+
+  return retLines;
+};
+
+function diffLines(oldStr, newStr, callback) {
+  return lineDiff.diff(oldStr, newStr, callback);
+}
+function diffTrimmedLines(oldStr, newStr, callback) {
+  var options = generateOptions(callback, {
+    ignoreWhitespace: true
+  });
+  return lineDiff.diff(oldStr, newStr, options);
+}
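+
+// Illustrative usage sketch (kept as a comment): diffLines tokenizes on line
+// boundaries; diffTrimmedLines additionally trims each line before comparing:
+//   diffLines('one\ntwo\n', 'one\nTWO\n');           // 'one\n' kept, 'two\n' -> 'TWO\n'
+//   diffTrimmedLines('  same \n', 'same\n').length;  // 1 (single unchanged part)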
+
+var sentenceDiff = new Diff();
+
+sentenceDiff.tokenize = function (value) {
+  return value.split(/(\S.+?[.!?])(?=\s+|$)/);
+};
+
+function diffSentences(oldStr, newStr, callback) {
+  return sentenceDiff.diff(oldStr, newStr, callback);
+}
+
+var cssDiff = new Diff();
+
+cssDiff.tokenize = function (value) {
+  return value.split(/([{}:;,]|\s+)/);
+};
+
+function diffCss(oldStr, newStr, callback) {
+  return cssDiff.diff(oldStr, newStr, callback);
+}
+
+function _typeof(obj) {
+  "@babel/helpers - typeof";
+
+  if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") {
+    _typeof = function (obj) {
+      return typeof obj;
+    };
+  } else {
+    _typeof = function (obj) {
+      return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj;
+    };
+  }
+
+  return _typeof(obj);
+}
+
+function _toConsumableArray(arr) {
+  return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread();
+}
+
+function _arrayWithoutHoles(arr) {
+  if (Array.isArray(arr)) return _arrayLikeToArray(arr);
+}
+
+function _iterableToArray(iter) {
+  if (typeof Symbol !== "undefined" && Symbol.iterator in Object(iter)) return Array.from(iter);
+}
+
+function _unsupportedIterableToArray(o, minLen) {
+  if (!o) return;
+  if (typeof o === "string") return _arrayLikeToArray(o, minLen);
+  var n = Object.prototype.toString.call(o).slice(8, -1);
+  if (n === "Object" && o.constructor) n = o.constructor.name;
+  if (n === "Map" || n === "Set") return Array.from(o);
+  if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen);
+}
+
+function _arrayLikeToArray(arr, len) {
+  if (len == null || len > arr.length) len = arr.length;
+
+  for (var i = 0, arr2 = new Array(len); i < len; i++) arr2[i] = arr[i];
+
+  return arr2;
+}
+
+function _nonIterableSpread() {
+  throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.");
+}
+
+var objectPrototypeToString = Object.prototype.toString;
+var jsonDiff = new Diff(); // Discriminate between two lines of pretty-printed, serialized JSON where one of them has a
+// dangling comma and the other doesn't. Turns out including the dangling comma yields the nicest output:
+
+jsonDiff.useLongestToken = true;
+jsonDiff.tokenize = lineDiff.tokenize;
+
+jsonDiff.castInput = function (value) {
+  var _this$options = this.options,
+      undefinedReplacement = _this$options.undefinedReplacement,
+      _this$options$stringi = _this$options.stringifyReplacer,
+      stringifyReplacer = _this$options$stringi === void 0 ? function (k, v) {
+    return typeof v === 'undefined' ? undefinedReplacement : v;
+  } : _this$options$stringi;
+  return typeof value === 'string' ? value : JSON.stringify(canonicalize(value, null, null, stringifyReplacer), stringifyReplacer, '  ');
+};
+
+jsonDiff.equals = function (left, right) {
+  return Diff.prototype.equals.call(jsonDiff, left.replace(/,([\r\n])/g, '$1'), right.replace(/,([\r\n])/g, '$1'));
+};
+
+function diffJson(oldObj, newObj, options) {
+  return jsonDiff.diff(oldObj, newObj, options);
+} // This function handles the presence of circular references by bailing out when encountering an
+// object that is already on the "stack" of items being processed. Accepts an optional replacer
+
+function canonicalize(obj, stack, replacementStack, replacer, key) {
+  stack = stack || [];
+  replacementStack = replacementStack || [];
+
+  if (replacer) {
+    obj = replacer(key, obj);
+  }
+
+  var i;
+
+  for (i = 0; i < stack.length; i += 1) {
+    if (stack[i] === obj) {
+      return replacementStack[i];
+    }
+  }
+
+  var canonicalizedObj;
+
+  if ('[object Array]' === objectPrototypeToString.call(obj)) {
+    stack.push(obj);
+    canonicalizedObj = new Array(obj.length);
+    replacementStack.push(canonicalizedObj);
+
+    for (i = 0; i < obj.length; i += 1) {
+      canonicalizedObj[i] = canonicalize(obj[i], stack, replacementStack, replacer, key);
+    }
+
+    stack.pop();
+    replacementStack.pop();
+    return canonicalizedObj;
+  }
+
+  if (obj && obj.toJSON) {
+    obj = obj.toJSON();
+  }
+
+  if (_typeof(obj) === 'object' && obj !== null) {
+    stack.push(obj);
+    canonicalizedObj = {};
+    replacementStack.push(canonicalizedObj);
+
+    var sortedKeys = [],
+        _key;
+
+    for (_key in obj) {
+      /* istanbul ignore else */
+      if (obj.hasOwnProperty(_key)) {
+        sortedKeys.push(_key);
+      }
+    }
+
+    sortedKeys.sort();
+
+    for (i = 0; i < sortedKeys.length; i += 1) {
+      _key = sortedKeys[i];
+      canonicalizedObj[_key] = canonicalize(obj[_key], stack, replacementStack, replacer, _key);
+    }
+
+    stack.pop();
+    replacementStack.pop();
+  } else {
+    canonicalizedObj = obj;
+  }
+
+  return canonicalizedObj;
+}
+
+var arrayDiff = new Diff();
+
+arrayDiff.tokenize = function (value) {
+  return value.slice();
+};
+
+arrayDiff.join = arrayDiff.removeEmpty = function (value) {
+  return value;
+};
+
+function diffArrays(oldArr, newArr, callback) {
+  return arrayDiff.diff(oldArr, newArr, callback);
+}
+
+function parsePatch(uniDiff) {
+  var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
+  var diffstr = uniDiff.split(/\r\n|[\n\v\f\r\x85]/),
+      delimiters = uniDiff.match(/\r\n|[\n\v\f\r\x85]/g) || [],
+      list = [],
+      i = 0;
+
+  function parseIndex() {
+    var index = {};
+    list.push(index); // Parse diff metadata
+
+    while (i < diffstr.length) {
+      var line = diffstr[i]; // File header found, end parsing diff metadata
+
+      if (/^(\-\-\-|\+\+\+|@@)\s/.test(line)) {
+        break;
+      } // Diff index
+
+
+      var header = /^(?:Index:|diff(?: -r \w+)+)\s+(.+?)\s*$/.exec(line);
+
+      if (header) {
+        index.index = header[1];
+      }
+
+      i++;
+    } // Parse file headers if they are defined. Unified diff requires them, but
+    // there's no technical issues to have an isolated hunk without file header
+
+
+    parseFileHeader(index);
+    parseFileHeader(index); // Parse hunks
+
+    index.hunks = [];
+
+    while (i < diffstr.length) {
+      var _line = diffstr[i];
+
+      if (/^(Index:|diff|\-\-\-|\+\+\+)\s/.test(_line)) {
+        break;
+      } else if (/^@@/.test(_line)) {
+        index.hunks.push(parseHunk());
+      } else if (_line && options.strict) {
+        // Ignore unexpected content unless in strict mode
+        throw new Error('Unknown line ' + (i + 1) + ' ' + JSON.stringify(_line));
+      } else {
+        i++;
+      }
+    }
+  } // Parses the --- and +++ headers, if none are found, no lines
+  // are consumed.
+
+
+  function parseFileHeader(index) {
+    var fileHeader = /^(---|\+\+\+)\s+(.*)$/.exec(diffstr[i]);
+
+    if (fileHeader) {
+      var keyPrefix = fileHeader[1] === '---' ? 'old' : 'new';
+      var data = fileHeader[2].split('\t', 2);
+      var fileName = data[0].replace(/\\\\/g, '\\');
+
+      if (/^".*"$/.test(fileName)) {
+        fileName = fileName.substr(1, fileName.length - 2);
+      }
+
+      index[keyPrefix + 'FileName'] = fileName;
+      index[keyPrefix + 'Header'] = (data[1] || '').trim();
+      i++;
+    }
+  } // Parses a hunk
+  // This assumes that we are at the start of a hunk.
+
+
+  function parseHunk() {
+    var chunkHeaderIndex = i,
+        chunkHeaderLine = diffstr[i++],
+        chunkHeader = chunkHeaderLine.split(/@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/);
+    var hunk = {
+      oldStart: +chunkHeader[1],
+      oldLines: typeof chunkHeader[2] === 'undefined' ? 1 : +chunkHeader[2],
+      newStart: +chunkHeader[3],
+      newLines: typeof chunkHeader[4] === 'undefined' ? 1 : +chunkHeader[4],
+      lines: [],
+      linedelimiters: []
+    }; // Unified Diff Format quirk: If the chunk size is 0,
+    // the first number is one lower than one would expect.
+    // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
+
+    if (hunk.oldLines === 0) {
+      hunk.oldStart += 1;
+    }
+
+    if (hunk.newLines === 0) {
+      hunk.newStart += 1;
+    }
+
+    var addCount = 0,
+        removeCount = 0;
+
+    for (; i < diffstr.length; i++) {
+      // Lines starting with '---' could be mistaken for the "remove line" operation
+      // But they could be the header for the next file. Therefore prune such cases out.
+      if (diffstr[i].indexOf('--- ') === 0 && i + 2 < diffstr.length && diffstr[i + 1].indexOf('+++ ') === 0 && diffstr[i + 2].indexOf('@@') === 0) {
+        break;
+      }
+
+      var operation = diffstr[i].length == 0 && i != diffstr.length - 1 ? ' ' : diffstr[i][0];
+
+      if (operation === '+' || operation === '-' || operation === ' ' || operation === '\\') {
+        hunk.lines.push(diffstr[i]);
+        hunk.linedelimiters.push(delimiters[i] || '\n');
+
+        if (operation === '+') {
+          addCount++;
+        } else if (operation === '-') {
+          removeCount++;
+        } else if (operation === ' ') {
+          addCount++;
+          removeCount++;
+        }
+      } else {
+        break;
+      }
+    } // Handle the empty block count case
+
+
+    if (!addCount && hunk.newLines === 1) {
+      hunk.newLines = 0;
+    }
+
+    if (!removeCount && hunk.oldLines === 1) {
+      hunk.oldLines = 0;
+    } // Perform optional sanity checking
+
+
+    if (options.strict) {
+      if (addCount !== hunk.newLines) {
+        throw new Error('Added line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
+      }
+
+      if (removeCount !== hunk.oldLines) {
+        throw new Error('Removed line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
+      }
+    }
+
+    return hunk;
+  }
+
+  while (i < diffstr.length) {
+    parseIndex();
+  }
+
+  return list;
+}
+
+// Iterator that traverses in the range of [min, max], stepping
+// by distance from a given start position. I.e. for [0, 4], with
+// start of 2, this will iterate 2, 3, 1, 4, 0.
+function distanceIterator (start, minLine, maxLine) {
+  var wantForward = true,
+      backwardExhausted = false,
+      forwardExhausted = false,
+      localOffset = 1;
+  return function iterator() {
+    if (wantForward && !forwardExhausted) {
+      if (backwardExhausted) {
+        localOffset++;
+      } else {
+        wantForward = false;
+      } // Check if trying to fit beyond text length, and if not, check it fits
+      // after offset location (or desired location on first iteration)
+
+
+      if (start + localOffset <= maxLine) {
+        return localOffset;
+      }
+
+      forwardExhausted = true;
+    }
+
+    if (!backwardExhausted) {
+      if (!forwardExhausted) {
+        wantForward = true;
+      } // Check if trying to fit before text beginning, and if not, check it fits
+      // before offset location
+
+
+      if (minLine <= start - localOffset) {
+        return -localOffset++;
+      }
+
+      backwardExhausted = true;
+      return iterator();
+    } // We tried to fit hunk before text beginning and beyond text length, then
+    // hunk can't fit on the text. Return undefined
+
+  };
+}
+
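+// Applies a single parsed (or string) patch to source and returns the patched text, or false if a hunk cannot be placed within options.fuzzFactor.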
+function applyPatch(source, uniDiff) {
+  var options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
+
+  if (typeof uniDiff === 'string') {
+    uniDiff = parsePatch(uniDiff);
+  }
+
+  if (Array.isArray(uniDiff)) {
+    if (uniDiff.length > 1) {
+      throw new Error('applyPatch only works with a single input.');
+    }
+
+    uniDiff = uniDiff[0];
+  } // Apply the diff to the input
+
+
+  var lines = source.split(/\r\n|[\n\v\f\r\x85]/),
+      delimiters = source.match(/\r\n|[\n\v\f\r\x85]/g) || [],
+      hunks = uniDiff.hunks,
+      compareLine = options.compareLine || function (lineNumber, line, operation, patchContent) {
+    return line === patchContent;
+  },
+      errorCount = 0,
+      fuzzFactor = options.fuzzFactor || 0,
+      minLine = 0,
+      offset = 0,
+      removeEOFNL,
+      addEOFNL;
+  /**
+   * Checks if the hunk exactly fits on the provided location
+   */
+
+
+  function hunkFits(hunk, toPos) {
+    for (var j = 0; j < hunk.lines.length; j++) {
+      var line = hunk.lines[j],
+          operation = line.length > 0 ? line[0] : ' ',
+          content = line.length > 0 ? line.substr(1) : line;
+
+      if (operation === ' ' || operation === '-') {
+        // Context sanity check
+        if (!compareLine(toPos + 1, lines[toPos], operation, content)) {
+          errorCount++;
+
+          if (errorCount > fuzzFactor) {
+            return false;
+          }
+        }
+
+        toPos++;
+      }
+    }
+
+    return true;
+  } // Search best fit offsets for each hunk based on the previous ones
+
+
+  for (var i = 0; i < hunks.length; i++) {
+    var hunk = hunks[i],
+        maxLine = lines.length - hunk.oldLines,
+        localOffset = 0,
+        toPos = offset + hunk.oldStart - 1;
+    var iterator = distanceIterator(toPos, minLine, maxLine);
+
+    for (; localOffset !== undefined; localOffset = iterator()) {
+      if (hunkFits(hunk, toPos + localOffset)) {
+        hunk.offset = offset += localOffset;
+        break;
+      }
+    }
+
+    if (localOffset === undefined) {
+      return false;
+    } // Set lower text limit to end of the current hunk, so next ones don't try
+    // to fit over already patched text
+
+
+    minLine = hunk.offset + hunk.oldStart + hunk.oldLines;
+  } // Apply patch hunks
+
+
+  var diffOffset = 0;
+
+  for (var _i = 0; _i < hunks.length; _i++) {
+    var _hunk = hunks[_i],
+        _toPos = _hunk.oldStart + _hunk.offset + diffOffset - 1;
+
+    diffOffset += _hunk.newLines - _hunk.oldLines;
+
+    for (var j = 0; j < _hunk.lines.length; j++) {
+      var line = _hunk.lines[j],
+          operation = line.length > 0 ? line[0] : ' ',
+          content = line.length > 0 ? line.substr(1) : line,
+          delimiter = _hunk.linedelimiters[j];
+
+      if (operation === ' ') {
+        _toPos++;
+      } else if (operation === '-') {
+        lines.splice(_toPos, 1);
+        delimiters.splice(_toPos, 1);
+        /* istanbul ignore else */
+      } else if (operation === '+') {
+        lines.splice(_toPos, 0, content);
+        delimiters.splice(_toPos, 0, delimiter);
+        _toPos++;
+      } else if (operation === '\\') {
+        var previousOperation = _hunk.lines[j - 1] ? _hunk.lines[j - 1][0] : null;
+
+        if (previousOperation === '+') {
+          removeEOFNL = true;
+        } else if (previousOperation === '-') {
+          addEOFNL = true;
+        }
+      }
+    }
+  } // Handle EOFNL insertion/removal
+
+
+  if (removeEOFNL) {
+    while (!lines[lines.length - 1]) {
+      lines.pop();
+      delimiters.pop();
+    }
+  } else if (addEOFNL) {
+    lines.push('');
+    delimiters.push('\n');
+  }
+
+  for (var _k = 0; _k < lines.length - 1; _k++) {
+    lines[_k] = lines[_k] + delimiters[_k];
+  }
+
+  return lines.join('');
+} // Wrapper that supports multiple file patches via callbacks.
+
+function applyPatches(uniDiff, options) {
+  if (typeof uniDiff === 'string') {
+    uniDiff = parsePatch(uniDiff);
+  }
+
+  var currentIndex = 0;
+
+  function processIndex() {
+    var index = uniDiff[currentIndex++];
+
+    if (!index) {
+      return options.complete();
+    }
+
+    options.loadFile(index, function (err, data) {
+      if (err) {
+        return options.complete(err);
+      }
+
+      var updatedContent = applyPatch(data, index, options);
+      options.patched(index, updatedContent, function (err) {
+        if (err) {
+          return options.complete(err);
+        }
+
+        processIndex();
+      });
+    });
+  }
+
+  processIndex();
+}
+
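+// Diffs oldStr and newStr line by line and groups the changes into hunks with options.context lines of surrounding context (default 4).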
+function structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
+  if (!options) {
+    options = {};
+  }
+
+  if (typeof options.context === 'undefined') {
+    options.context = 4;
+  }
+
+  var diff = diffLines(oldStr, newStr, options);
+
+  if (!diff) {
+    return;
+  }
+
+  diff.push({
+    value: '',
+    lines: []
+  }); // Append an empty value to make cleanup easier
+
+  function contextLines(lines) {
+    return lines.map(function (entry) {
+      return ' ' + entry;
+    });
+  }
+
+  var hunks = [];
+  var oldRangeStart = 0,
+      newRangeStart = 0,
+      curRange = [],
+      oldLine = 1,
+      newLine = 1;
+
+  var _loop = function _loop(i) {
+    var current = diff[i],
+        lines = current.lines || current.value.replace(/\n$/, '').split('\n');
+    current.lines = lines;
+
+    if (current.added || current.removed) {
+      var _curRange;
+
+      // If we have previous context, start with that
+      if (!oldRangeStart) {
+        var prev = diff[i - 1];
+        oldRangeStart = oldLine;
+        newRangeStart = newLine;
+
+        if (prev) {
+          curRange = options.context > 0 ? contextLines(prev.lines.slice(-options.context)) : [];
+          oldRangeStart -= curRange.length;
+          newRangeStart -= curRange.length;
+        }
+      } // Output our changes
+
+
+      (_curRange = curRange).push.apply(_curRange, _toConsumableArray(lines.map(function (entry) {
+        return (current.added ? '+' : '-') + entry;
+      }))); // Track the updated file position
+
+
+      if (current.added) {
+        newLine += lines.length;
+      } else {
+        oldLine += lines.length;
+      }
+    } else {
+      // Identical context lines. Track line changes
+      if (oldRangeStart) {
+        // Close out any changes that have been output (or join overlapping)
+        if (lines.length <= options.context * 2 && i < diff.length - 2) {
+          var _curRange2;
+
+          // Overlapping
+          (_curRange2 = curRange).push.apply(_curRange2, _toConsumableArray(contextLines(lines)));
+        } else {
+          var _curRange3;
+
+          // end the range and output
+          var contextSize = Math.min(lines.length, options.context);
+
+          (_curRange3 = curRange).push.apply(_curRange3, _toConsumableArray(contextLines(lines.slice(0, contextSize))));
+
+          var hunk = {
+            oldStart: oldRangeStart,
+            oldLines: oldLine - oldRangeStart + contextSize,
+            newStart: newRangeStart,
+            newLines: newLine - newRangeStart + contextSize,
+            lines: curRange
+          };
+
+          if (i >= diff.length - 2 && lines.length <= options.context) {
+            // EOF is inside this hunk
+            var oldEOFNewline = /\n$/.test(oldStr);
+            var newEOFNewline = /\n$/.test(newStr);
+            var noNlBeforeAdds = lines.length == 0 && curRange.length > hunk.oldLines;
+
+            if (!oldEOFNewline && noNlBeforeAdds && oldStr.length > 0) {
+              // special case: old has no eol and no trailing context; no-nl can end up before adds
+              // however, if the old file is empty, do not output the no-nl line
+              curRange.splice(hunk.oldLines, 0, '\\ No newline at end of file');
+            }
+
+            if (!oldEOFNewline && !noNlBeforeAdds || !newEOFNewline) {
+              curRange.push('\\ No newline at end of file');
+            }
+          }
+
+          hunks.push(hunk);
+          oldRangeStart = 0;
+          newRangeStart = 0;
+          curRange = [];
+        }
+      }
+
+      oldLine += lines.length;
+      newLine += lines.length;
+    }
+  };
+
+  for (var i = 0; i < diff.length; i++) {
+    _loop(i);
+  }
+
+  return {
+    oldFileName: oldFileName,
+    newFileName: newFileName,
+    oldHeader: oldHeader,
+    newHeader: newHeader,
+    hunks: hunks
+  };
+}
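+// Serializes a structured patch object back into unified-diff text.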
+function formatPatch(diff) {
+  var ret = [];
+
+  if (diff.oldFileName == diff.newFileName) {
+    ret.push('Index: ' + diff.oldFileName);
+  }
+
+  ret.push('===================================================================');
+  ret.push('--- ' + diff.oldFileName + (typeof diff.oldHeader === 'undefined' ? '' : '\t' + diff.oldHeader));
+  ret.push('+++ ' + diff.newFileName + (typeof diff.newHeader === 'undefined' ? '' : '\t' + diff.newHeader));
+
+  for (var i = 0; i < diff.hunks.length; i++) {
+    var hunk = diff.hunks[i]; // Unified Diff Format quirk: If the chunk size is 0,
+    // the first number is one lower than one would expect.
+    // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
+
+    if (hunk.oldLines === 0) {
+      hunk.oldStart -= 1;
+    }
+
+    if (hunk.newLines === 0) {
+      hunk.newStart -= 1;
+    }
+
+    ret.push('@@ -' + hunk.oldStart + ',' + hunk.oldLines + ' +' + hunk.newStart + ',' + hunk.newLines + ' @@');
+    ret.push.apply(ret, hunk.lines);
+  }
+
+  return ret.join('\n') + '\n';
+}
+function createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
+  return formatPatch(structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options));
+}
+function createPatch(fileName, oldStr, newStr, oldHeader, newHeader, options) {
+  return createTwoFilesPatch(fileName, fileName, oldStr, newStr, oldHeader, newHeader, options);
+}
+
+function arrayEqual(a, b) {
+  if (a.length !== b.length) {
+    return false;
+  }
+
+  return arrayStartsWith(a, b);
+}
+function arrayStartsWith(array, start) {
+  if (start.length > array.length) {
+    return false;
+  }
+
+  for (var i = 0; i < start.length; i++) {
+    if (start[i] !== array[i]) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
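+// Recomputes hunk.oldLines/newLines from its lines; a count is dropped when conflicted alternatives disagree.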
+function calcLineCount(hunk) {
+  var _calcOldNewLineCount = calcOldNewLineCount(hunk.lines),
+      oldLines = _calcOldNewLineCount.oldLines,
+      newLines = _calcOldNewLineCount.newLines;
+
+  if (oldLines !== undefined) {
+    hunk.oldLines = oldLines;
+  } else {
+    delete hunk.oldLines;
+  }
+
+  if (newLines !== undefined) {
+    hunk.newLines = newLines;
+  } else {
+    delete hunk.newLines;
+  }
+}
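+// Three-way merge of two patches ("mine" and "theirs"), optionally against a base string; overlapping hunks are merged and conflicts are flagged on the result.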
+function merge(mine, theirs, base) {
+  mine = loadPatch(mine, base);
+  theirs = loadPatch(theirs, base);
+  var ret = {}; // For index we just let it pass through as it doesn't have any necessary meaning.
+  // Leaving sanity checks on this to the API consumer that may know more about the
+  // meaning in their own context.
+
+  if (mine.index || theirs.index) {
+    ret.index = mine.index || theirs.index;
+  }
+
+  if (mine.newFileName || theirs.newFileName) {
+    if (!fileNameChanged(mine)) {
+      // No header or no change in ours, use theirs (and ours if theirs does not exist)
+      ret.oldFileName = theirs.oldFileName || mine.oldFileName;
+      ret.newFileName = theirs.newFileName || mine.newFileName;
+      ret.oldHeader = theirs.oldHeader || mine.oldHeader;
+      ret.newHeader = theirs.newHeader || mine.newHeader;
+    } else if (!fileNameChanged(theirs)) {
+      // No header or no change in theirs, use ours
+      ret.oldFileName = mine.oldFileName;
+      ret.newFileName = mine.newFileName;
+      ret.oldHeader = mine.oldHeader;
+      ret.newHeader = mine.newHeader;
+    } else {
+      // Both changed... figure it out
+      ret.oldFileName = selectField(ret, mine.oldFileName, theirs.oldFileName);
+      ret.newFileName = selectField(ret, mine.newFileName, theirs.newFileName);
+      ret.oldHeader = selectField(ret, mine.oldHeader, theirs.oldHeader);
+      ret.newHeader = selectField(ret, mine.newHeader, theirs.newHeader);
+    }
+  }
+
+  ret.hunks = [];
+  var mineIndex = 0,
+      theirsIndex = 0,
+      mineOffset = 0,
+      theirsOffset = 0;
+
+  while (mineIndex < mine.hunks.length || theirsIndex < theirs.hunks.length) {
+    var mineCurrent = mine.hunks[mineIndex] || {
+      oldStart: Infinity
+    },
+        theirsCurrent = theirs.hunks[theirsIndex] || {
+      oldStart: Infinity
+    };
+
+    if (hunkBefore(mineCurrent, theirsCurrent)) {
+      // This patch does not overlap with any of the others, yay.
+      ret.hunks.push(cloneHunk(mineCurrent, mineOffset));
+      mineIndex++;
+      theirsOffset += mineCurrent.newLines - mineCurrent.oldLines;
+    } else if (hunkBefore(theirsCurrent, mineCurrent)) {
+      // This patch does not overlap with any of the others, yay.
+      ret.hunks.push(cloneHunk(theirsCurrent, theirsOffset));
+      theirsIndex++;
+      mineOffset += theirsCurrent.newLines - theirsCurrent.oldLines;
+    } else {
+      // Overlap, merge as best we can
+      var mergedHunk = {
+        oldStart: Math.min(mineCurrent.oldStart, theirsCurrent.oldStart),
+        oldLines: 0,
+        newStart: Math.min(mineCurrent.newStart + mineOffset, theirsCurrent.oldStart + theirsOffset),
+        newLines: 0,
+        lines: []
+      };
+      mergeLines(mergedHunk, mineCurrent.oldStart, mineCurrent.lines, theirsCurrent.oldStart, theirsCurrent.lines);
+      theirsIndex++;
+      mineIndex++;
+      ret.hunks.push(mergedHunk);
+    }
+  }
+
+  return ret;
+}
+
+function loadPatch(param, base) {
+  if (typeof param === 'string') {
+    if (/^@@/m.test(param) || /^Index:/m.test(param)) {
+      return parsePatch(param)[0];
+    }
+
+    if (!base) {
+      throw new Error('Must provide a base reference or pass in a patch');
+    }
+
+    return structuredPatch(undefined, undefined, base, param);
+  }
+
+  return param;
+}
+
+function fileNameChanged(patch) {
+  return patch.newFileName && patch.newFileName !== patch.oldFileName;
+}
+
+function selectField(index, mine, theirs) {
+  if (mine === theirs) {
+    return mine;
+  } else {
+    index.conflict = true;
+    return {
+      mine: mine,
+      theirs: theirs
+    };
+  }
+}
+
+function hunkBefore(test, check) {
+  return test.oldStart < check.oldStart && test.oldStart + test.oldLines < check.oldStart;
+}
+
+function cloneHunk(hunk, offset) {
+  return {
+    oldStart: hunk.oldStart,
+    oldLines: hunk.oldLines,
+    newStart: hunk.newStart + offset,
+    newLines: hunk.newLines,
+    lines: hunk.lines
+  };
+}
+
+function mergeLines(hunk, mineOffset, mineLines, theirOffset, theirLines) {
+  // This will generally result in a conflicted hunk, but there are cases where the context
+  // is the only overlap where we can successfully merge the content here.
+  var mine = {
+    offset: mineOffset,
+    lines: mineLines,
+    index: 0
+  },
+      their = {
+    offset: theirOffset,
+    lines: theirLines,
+    index: 0
+  }; // Handle any leading content
+
+  insertLeading(hunk, mine, their);
+  insertLeading(hunk, their, mine); // Now in the overlap content. Scan through and select the best changes from each.
+
+  while (mine.index < mine.lines.length && their.index < their.lines.length) {
+    var mineCurrent = mine.lines[mine.index],
+        theirCurrent = their.lines[their.index];
+
+    if ((mineCurrent[0] === '-' || mineCurrent[0] === '+') && (theirCurrent[0] === '-' || theirCurrent[0] === '+')) {
+      // Both modified ...
+      mutualChange(hunk, mine, their);
+    } else if (mineCurrent[0] === '+' && theirCurrent[0] === ' ') {
+      var _hunk$lines;
+
+      // Mine inserted
+      (_hunk$lines = hunk.lines).push.apply(_hunk$lines, _toConsumableArray(collectChange(mine)));
+    } else if (theirCurrent[0] === '+' && mineCurrent[0] === ' ') {
+      var _hunk$lines2;
+
+      // Theirs inserted
+      (_hunk$lines2 = hunk.lines).push.apply(_hunk$lines2, _toConsumableArray(collectChange(their)));
+    } else if (mineCurrent[0] === '-' && theirCurrent[0] === ' ') {
+      // Mine removed or edited
+      removal(hunk, mine, their);
+    } else if (theirCurrent[0] === '-' && mineCurrent[0] === ' ') {
+      // Their removed or edited
+      removal(hunk, their, mine, true);
+    } else if (mineCurrent === theirCurrent) {
+      // Context identity
+      hunk.lines.push(mineCurrent);
+      mine.index++;
+      their.index++;
+    } else {
+      // Context mismatch
+      conflict(hunk, collectChange(mine), collectChange(their));
+    }
+  } // Now push anything that may be remaining
+
+
+  insertTrailing(hunk, mine);
+  insertTrailing(hunk, their);
+  calcLineCount(hunk);
+}
+
+function mutualChange(hunk, mine, their) {
+  var myChanges = collectChange(mine),
+      theirChanges = collectChange(their);
+
+  if (allRemoves(myChanges) && allRemoves(theirChanges)) {
+    // Special case for remove changes that are supersets of one another
+    if (arrayStartsWith(myChanges, theirChanges) && skipRemoveSuperset(their, myChanges, myChanges.length - theirChanges.length)) {
+      var _hunk$lines3;
+
+      (_hunk$lines3 = hunk.lines).push.apply(_hunk$lines3, _toConsumableArray(myChanges));
+
+      return;
+    } else if (arrayStartsWith(theirChanges, myChanges) && skipRemoveSuperset(mine, theirChanges, theirChanges.length - myChanges.length)) {
+      var _hunk$lines4;
+
+      (_hunk$lines4 = hunk.lines).push.apply(_hunk$lines4, _toConsumableArray(theirChanges));
+
+      return;
+    }
+  } else if (arrayEqual(myChanges, theirChanges)) {
+    var _hunk$lines5;
+
+    (_hunk$lines5 = hunk.lines).push.apply(_hunk$lines5, _toConsumableArray(myChanges));
+
+    return;
+  }
+
+  conflict(hunk, myChanges, theirChanges);
+}
+
+function removal(hunk, mine, their, swap) {
+  var myChanges = collectChange(mine),
+      theirChanges = collectContext(their, myChanges);
+
+  if (theirChanges.merged) {
+    var _hunk$lines6;
+
+    (_hunk$lines6 = hunk.lines).push.apply(_hunk$lines6, _toConsumableArray(theirChanges.merged));
+  } else {
+    conflict(hunk, swap ? theirChanges : myChanges, swap ? myChanges : theirChanges);
+  }
+}
+
+function conflict(hunk, mine, their) {
+  hunk.conflict = true;
+  hunk.lines.push({
+    conflict: true,
+    mine: mine,
+    theirs: their
+  });
+}
+
+function insertLeading(hunk, insert, their) {
+  while (insert.offset < their.offset && insert.index < insert.lines.length) {
+    var line = insert.lines[insert.index++];
+    hunk.lines.push(line);
+    insert.offset++;
+  }
+}
+
+function insertTrailing(hunk, insert) {
+  while (insert.index < insert.lines.length) {
+    var line = insert.lines[insert.index++];
+    hunk.lines.push(line);
+  }
+}
+
+function collectChange(state) {
+  var ret = [],
+      operation = state.lines[state.index][0];
+
+  while (state.index < state.lines.length) {
+    var line = state.lines[state.index]; // Group additions that are immediately after subtractions and treat them as one "atomic" modify change.
+
+    if (operation === '-' && line[0] === '+') {
+      operation = '+';
+    }
+
+    if (operation === line[0]) {
+      ret.push(line);
+      state.index++;
+    } else {
+      break;
+    }
+  }
+
+  return ret;
+}
+
+function collectContext(state, matchChanges) {
+  var changes = [],
+      merged = [],
+      matchIndex = 0,
+      contextChanges = false,
+      conflicted = false;
+
+  while (matchIndex < matchChanges.length && state.index < state.lines.length) {
+    var change = state.lines[state.index],
+        match = matchChanges[matchIndex]; // Once we've hit our add, then we are done
+
+    if (match[0] === '+') {
+      break;
+    }
+
+    contextChanges = contextChanges || change[0] !== ' ';
+    merged.push(match);
+    matchIndex++; // Consume any additions in the other block as a conflict to attempt
+    // to pull in the remaining context after this
+
+    if (change[0] === '+') {
+      conflicted = true;
+
+      while (change[0] === '+') {
+        changes.push(change);
+        change = state.lines[++state.index];
+      }
+    }
+
+    if (match.substr(1) === change.substr(1)) {
+      changes.push(change);
+      state.index++;
+    } else {
+      conflicted = true;
+    }
+  }
+
+  if ((matchChanges[matchIndex] || '')[0] === '+' && contextChanges) {
+    conflicted = true;
+  }
+
+  if (conflicted) {
+    return changes;
+  }
+
+  while (matchIndex < matchChanges.length) {
+    merged.push(matchChanges[matchIndex++]);
+  }
+
+  return {
+    merged: merged,
+    changes: changes
+  };
+}
+
+function allRemoves(changes) {
+  return changes.reduce(function (prev, change) {
+    return prev && change[0] === '-';
+  }, true);
+}
+
+function skipRemoveSuperset(state, removeChanges, delta) {
+  for (var i = 0; i < delta; i++) {
+    var changeContent = removeChanges[removeChanges.length - delta + i].substr(1);
+
+    if (state.lines[state.index + i] !== ' ' + changeContent) {
+      return false;
+    }
+  }
+
+  state.index += delta;
+  return true;
+}
+
+function calcOldNewLineCount(lines) {
+  var oldLines = 0;
+  var newLines = 0;
+  lines.forEach(function (line) {
+    if (typeof line !== 'string') {
+      var myCount = calcOldNewLineCount(line.mine);
+      var theirCount = calcOldNewLineCount(line.theirs);
+
+      if (oldLines !== undefined) {
+        if (myCount.oldLines === theirCount.oldLines) {
+          oldLines += myCount.oldLines;
+        } else {
+          oldLines = undefined;
+        }
+      }
+
+      if (newLines !== undefined) {
+        if (myCount.newLines === theirCount.newLines) {
+          newLines += myCount.newLines;
+        } else {
+          newLines = undefined;
+        }
+      }
+    } else {
+      if (newLines !== undefined && (line[0] === '+' || line[0] === ' ')) {
+        newLines++;
+      }
+
+      if (oldLines !== undefined && (line[0] === '-' || line[0] === ' ')) {
+        oldLines++;
+      }
+    }
+  });
+  return {
+    oldLines: oldLines,
+    newLines: newLines
+  };
+}
+
+// See: http://code.google.com/p/google-diff-match-patch/wiki/API
+function convertChangesToDMP(changes) {
+  var ret = [],
+      change,
+      operation;
+
+  for (var i = 0; i < changes.length; i++) {
+    change = changes[i];
+
+    if (change.added) {
+      operation = 1;
+    } else if (change.removed) {
+      operation = -1;
+    } else {
+      operation = 0;
+    }
+
+    ret.push([operation, change.value]);
+  }
+
+  return ret;
+}
+
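+// Renders a change list as HTML, wrapping added values in <ins> and removed values in <del>.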
+function convertChangesToXML(changes) {
+  var ret = [];
+
+  for (var i = 0; i < changes.length; i++) {
+    var change = changes[i];
+
+    if (change.added) {
+      ret.push('<ins>');
+    } else if (change.removed) {
+      ret.push('<del>');
+    }
+
+    ret.push(escapeHTML(change.value));
+
+    if (change.added) {
+      ret.push('</ins>');
+    } else if (change.removed) {
+      ret.push('</del>');
+    }
+  }
+
+  return ret.join('');
+}
+
+function escapeHTML(s) {
+  var n = s;
+  n = n.replace(/&/g, '&amp;');
+  n = n.replace(/</g, '&lt;');
+  n = n.replace(/>/g, '&gt;');
+  n = n.replace(/"/g, '&quot;');
+  return n;
+}
+
+export { Diff, applyPatch, applyPatches, canonicalize, convertChangesToDMP, convertChangesToXML, createPatch, createTwoFilesPatch, diffArrays, diffChars, diffCss, diffJson, diffLines, diffSentences, diffTrimmedLines, diffWords, diffWordsWithSpace, merge, parsePatch, structuredPatch };
diff --git a/_extensions/d2/node_modules/diff/lib/patch/apply.js b/_extensions/d2/node_modules/diff/lib/patch/apply.js
new file mode 100644
index 00000000..21c76ddb
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/patch/apply.js
@@ -0,0 +1,238 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.applyPatch = applyPatch;
+exports.applyPatches = applyPatches;
+
+/*istanbul ignore end*/
+var
+/*istanbul ignore start*/
+_parse = require("./parse")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_distanceIterator = _interopRequireDefault(require("../util/distance-iterator"))
+/*istanbul ignore end*/
+;
+
+/*istanbul ignore start*/ function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; }
+
+/*istanbul ignore end*/
+function applyPatch(source, uniDiff) {
+  /*istanbul ignore start*/
+  var
+  /*istanbul ignore end*/
+  options = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : {};
+
+  if (typeof uniDiff === 'string') {
+    uniDiff =
+    /*istanbul ignore start*/
+    (0,
+    /*istanbul ignore end*/
+
+    /*istanbul ignore start*/
+    _parse
+    /*istanbul ignore end*/
+    .
+    /*istanbul ignore start*/
+    parsePatch)
+    /*istanbul ignore end*/
+    (uniDiff);
+  }
+
+  if (Array.isArray(uniDiff)) {
+    if (uniDiff.length > 1) {
+      throw new Error('applyPatch only works with a single input.');
+    }
+
+    uniDiff = uniDiff[0];
+  } // Apply the diff to the input
+
+
+  var lines = source.split(/\r\n|[\n\v\f\r\x85]/),
+      delimiters = source.match(/\r\n|[\n\v\f\r\x85]/g) || [],
+      hunks = uniDiff.hunks,
+      compareLine = options.compareLine || function (lineNumber, line, operation, patchContent)
+  /*istanbul ignore start*/
+  {
+    return (
+      /*istanbul ignore end*/
+      line === patchContent
+    );
+  },
+      errorCount = 0,
+      fuzzFactor = options.fuzzFactor || 0,
+      minLine = 0,
+      offset = 0,
+      removeEOFNL,
+      addEOFNL;
+  /**
+   * Checks if the hunk exactly fits on the provided location
+   */
+
+
+  function hunkFits(hunk, toPos) {
+    for (var j = 0; j < hunk.lines.length; j++) {
+      var line = hunk.lines[j],
+          operation = line.length > 0 ? line[0] : ' ',
+          content = line.length > 0 ? line.substr(1) : line;
+
+      if (operation === ' ' || operation === '-') {
+        // Context sanity check
+        if (!compareLine(toPos + 1, lines[toPos], operation, content)) {
+          errorCount++;
+
+          if (errorCount > fuzzFactor) {
+            return false;
+          }
+        }
+
+        toPos++;
+      }
+    }
+
+    return true;
+  } // Search best fit offsets for each hunk based on the previous ones
+
+
+  for (var i = 0; i < hunks.length; i++) {
+    var hunk = hunks[i],
+        maxLine = lines.length - hunk.oldLines,
+        localOffset = 0,
+        toPos = offset + hunk.oldStart - 1;
+    var iterator =
+    /*istanbul ignore start*/
+    (0,
+    /*istanbul ignore end*/
+
+    /*istanbul ignore start*/
+    _distanceIterator
+    /*istanbul ignore end*/
+    [
+    /*istanbul ignore start*/
+    "default"
+    /*istanbul ignore end*/
+    ])(toPos, minLine, maxLine);
+
+    for (; localOffset !== undefined; localOffset = iterator()) {
+      if (hunkFits(hunk, toPos + localOffset)) {
+        hunk.offset = offset += localOffset;
+        break;
+      }
+    }
+
+    if (localOffset === undefined) {
+      return false;
+    } // Set lower text limit to end of the current hunk, so next ones don't try
+    // to fit over already patched text
+
+
+    minLine = hunk.offset + hunk.oldStart + hunk.oldLines;
+  } // Apply patch hunks
+
+
+  var diffOffset = 0;
+
+  for (var _i = 0; _i < hunks.length; _i++) {
+    var _hunk = hunks[_i],
+        _toPos = _hunk.oldStart + _hunk.offset + diffOffset - 1;
+
+    diffOffset += _hunk.newLines - _hunk.oldLines;
+
+    for (var j = 0; j < _hunk.lines.length; j++) {
+      var line = _hunk.lines[j],
+          operation = line.length > 0 ? line[0] : ' ',
+          content = line.length > 0 ? line.substr(1) : line,
+          delimiter = _hunk.linedelimiters[j];
+
+      if (operation === ' ') {
+        _toPos++;
+      } else if (operation === '-') {
+        lines.splice(_toPos, 1);
+        delimiters.splice(_toPos, 1);
+        /* istanbul ignore else */
+      } else if (operation === '+') {
+        lines.splice(_toPos, 0, content);
+        delimiters.splice(_toPos, 0, delimiter);
+        _toPos++;
+      } else if (operation === '\\') {
+        var previousOperation = _hunk.lines[j - 1] ? _hunk.lines[j - 1][0] : null;
+
+        if (previousOperation === '+') {
+          removeEOFNL = true;
+        } else if (previousOperation === '-') {
+          addEOFNL = true;
+        }
+      }
+    }
+  } // Handle EOFNL insertion/removal
+
+
+  if (removeEOFNL) {
+    while (!lines[lines.length - 1]) {
+      lines.pop();
+      delimiters.pop();
+    }
+  } else if (addEOFNL) {
+    lines.push('');
+    delimiters.push('\n');
+  }
+
+  for (var _k = 0; _k < lines.length - 1; _k++) {
+    lines[_k] = lines[_k] + delimiters[_k];
+  }
+
+  return lines.join('');
+} // Wrapper that supports multiple file patches via callbacks.
+
+
+function applyPatches(uniDiff, options) {
+  if (typeof uniDiff === 'string') {
+    uniDiff =
+    /*istanbul ignore start*/
+    (0,
+    /*istanbul ignore end*/
+
+    /*istanbul ignore start*/
+    _parse
+    /*istanbul ignore end*/
+    .
+    /*istanbul ignore start*/
+    parsePatch)
+    /*istanbul ignore end*/
+    (uniDiff);
+  }
+
+  var currentIndex = 0;
+
+  function processIndex() {
+    var index = uniDiff[currentIndex++];
+
+    if (!index) {
+      return options.complete();
+    }
+
+    options.loadFile(index, function (err, data) {
+      if (err) {
+        return options.complete(err);
+      }
+
+      var updatedContent = applyPatch(data, index, options);
+      options.patched(index, updatedContent, function (err) {
+        if (err) {
+          return options.complete(err);
+        }
+
+        processIndex();
+      });
+    });
+  }
+
+  processIndex();
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"sources":["../../src/patch/apply.js"],"names":["applyPatch","source","uniDiff","options","parsePatch","Array","isArray","length","Error","lines","split","delimiters","match","hunks","compareLine","lineNumber","line","operation","patchContent","errorCount","fuzzFactor","minLine","offset","removeEOFNL","addEOFNL","hunkFits","hunk","toPos","j","content","substr","i","maxLine","oldLines","localOffset","oldStart","iterator","distanceIterator","undefined","diffOffset","newLines","delimiter","linedelimiters","splice","previousOperation","pop","push","_k","join","applyPatches","currentIndex","processIndex","index","complete","loadFile","err","data","updatedContent","patched"],"mappings":";;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;;AACA;AAAA;AAAA;AAAA;AAAA;;;;;AAEO,SAASA,UAAT,CAAoBC,MAApB,EAA4BC,OAA5B,EAAmD;AAAA;AAAA;AAAA;AAAdC,EAAAA,OAAc,uEAAJ,EAAI;;AACxD,MAAI,OAAOD,OAAP,KAAmB,QAAvB,EAAiC;AAC/BA,IAAAA,OAAO;AAAG;AAAA;AAAA;;AAAAE;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAA;AAAA,KAAWF,OAAX,CAAV;AACD;;AAED,MAAIG,KAAK,CAACC,OAAN,CAAcJ,OAAd,CAAJ,EAA4B;AAC1B,QAAIA,OAAO,CAACK,MAAR,GAAiB,CAArB,EAAwB;AACtB,YAAM,IAAIC,KAAJ,CAAU,4CAAV,CAAN;AACD;;AAEDN,IAAAA,OAAO,GAAGA,OAAO,CAAC,CAAD,CAAjB;AACD,GAXuD,CAaxD;;;AACA,MAAIO,KAAK,GAAGR,MAAM,CAACS,KAAP,CAAa,qBAAb,CAAZ;AAAA,MACIC,UAAU,GAAGV,MAAM,CAACW,KAAP,CAAa,sBAAb,KAAwC,EADzD;AAAA,MAEIC,KAAK,GAAGX,OAAO,CAACW,KAFpB;AAAA,MAIIC,WAAW,GAAGX,OAAO,CAACW,WAAR,IAAwB,UAACC,UAAD,EAAaC,IAAb,EAAmBC,SAAnB,EAA8BC,YAA9B;AAAA;AAAA;AAAA;AAAA;AAA+CF,MAAAA,IAAI,KAAKE;AAAxD;AAAA,GAJ1C;AAAA,MAKIC,UAAU,GAAG,CALjB;AAAA,MAMIC,UAAU,GAAGjB,OAAO,CAACiB,UAAR,IAAsB,CANvC;AAAA,MAOIC,OAAO,GAAG,CAPd;AAAA,MAQIC,MAAM,GAAG,CARb;AAAA,MAUIC,WAVJ;AAAA,MAWIC,QAXJ;AAaA;;;;;AAGA,WAASC,QAAT,CAAkBC,IAAlB,EAAwBC,KAAxB,EAA+B;AAC7B,SAAK,IAAIC,CAAC,GAAG,CAAb,EAAgBA,CAAC,GAAGF,IAAI,CAACjB,KAAL,CAAWF,MAA/B,EAAuCqB,CAAC,EAAxC,EAA4C;AAC1C,UAAIZ,IAAI,GAAGU,IAAI,CAACjB,KAAL,CAAWmB,CAAX,CAAX;AAAA,UACIX,SAAS,GAAID,IAAI,CAACT,MAAL,GAAc,CAAd,GAAkBS,IAAI,CAAC,CAAD,CAAtB,GAA4B,GAD7C;AAAA,UAEIa,OAAO,GAAIb,IAAI,CAACT,MAAL,GAAc,CAAd,GAAkBS,IAAI,CAACc,MAAL,CAAY,CAAZ,CAAlB,GAAmCd,IAFlD;;AAIA,UAAIC,SAAS,KAAK,GAAd,IAAqBA,SAAS,KAAK,GAAvC,EAA4C;AAC1C;AACA,YAAI,CAACH,WAAW,CAACa,KAAK,GAAG,CAAT,EAAYlB,KAAK,CAACkB,KAAD,CAAjB,EAA0BV,SAA1B,EAAqCY,OAArC,CAAhB,EAA+D;AAC7DV,UAAAA,UAAU;;AAEV,cAAIA,UAAU,GAAGC,UAAjB,EAA6B;AAC3B,mBAAO,KAAP;AACD;AACF;;AACDO,QAAAA,KAAK;AACN;AACF;;AAED,WAAO,IAAP;AACD,GAlDuD,CAoDxD;;;AACA,OAAK,IAAII,CAAC,GAAG,CAAb,EAAgBA,CAAC,GAAGlB,KAAK,CAACN,MAA1B,EAAkCwB,CAAC,EAAnC,EAAuC;AACrC,QAAIL,IAAI,GAAGb,KAAK,CAACkB,CAAD,CAAhB;AAAA,QACIC,OAAO,GAAGvB,KAAK,CAACF,MAAN,GAAemB,IAAI,CAACO,QADlC;AAAA,QAEIC,WAAW,GAAG,CAFlB;AAAA,QAGIP,KAAK,GAAGL,MAAM,GAAGI,IAAI,CAACS,QAAd,GAAyB,CAHrC;AAKA,QAAIC,QAAQ;AAAG;AAAA;AAAA;;AAAAC;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA,OAAiBV,KAAjB,EAAwBN,OAAxB,EAAiCW,OAAjC,CAAf;;AAEA,WAAOE,WAAW,KAAKI,SAAvB,EAAkCJ,WAAW,GAAGE,QAAQ,EAAxD,EAA4D;AAC1D,UAAIX,QAAQ,CAACC,IAAD,EAAOC,KAAK,GAAGO,WAAf,CAAZ,EAAyC;AACvCR,QAAAA,IAAI,CAACJ,MAAL,GAAcA,MAAM,IAAIY,WAAxB;AACA;AACD;AACF;;AAED,QAAIA,WAAW,KAAKI,SAApB,EAA+B;AAC7B,aAAO,KAAP;AACD,KAjBoC,CAmBrC;AACA;;;AACAjB,IAAAA,OAAO,GAAGK,IAAI,CAACJ,MAAL,GAAcI,IAAI,CAACS,QAAnB,GAA8BT,IAAI,CAACO,QAA7C;AACD,GA3EuD,CA6ExD;;;AACA,MAAIM,UAAU,GAAG,CAAjB;;AACA,OAAK,IAAIR,EAAC,GAAG,CAAb,EAAgBA,EAAC,GAAGlB,KAAK,CAACN,MAA1B,EAAkCwB,EAAC,EAAnC,EAAuC;AACrC,QAAIL,KAAI,GAAGb,KAAK,CAACkB,EAAD,CAAhB;AAAA,QACIJ,MAAK,GAAGD,KAAI,CAACS,QAAL,GAAgBT,KAAI,CAACJ,MAArB,GAA8BiB,UAA9B,GAA2C,CADvD;;AAEAA,IAAAA,UAAU,IAAIb,KAAI,CAACc,QAAL,GAAgBd,KA
AI,CAACO,QAAnC;;AAEA,SAAK,IAAIL,CAAC,GAAG,CAAb,EAAgBA,CAAC,GAAGF,KAAI,CAACjB,KAAL,CAAWF,MAA/B,EAAuCqB,CAAC,EAAxC,EAA4C;AAC1C,UAAIZ,IAAI,GAAGU,KAAI,CAACjB,KAAL,CAAWmB,CAAX,CAAX;AAAA,UACIX,SAAS,GAAID,IAAI,CAACT,MAAL,GAAc,CAAd,GAAkBS,IAAI,CAAC,CAAD,CAAtB,GAA4B,GAD7C;AAAA,UAEIa,OAAO,GAAIb,IAAI,CAACT,MAAL,GAAc,CAAd,GAAkBS,IAAI,CAACc,MAAL,CAAY,CAAZ,CAAlB,GAAmCd,IAFlD;AAAA,UAGIyB,SAAS,GAAGf,KAAI,CAACgB,cAAL,CAAoBd,CAApB,CAHhB;;AAKA,UAAIX,SAAS,KAAK,GAAlB,EAAuB;AACrBU,QAAAA,MAAK;AACN,OAFD,MAEO,IAAIV,SAAS,KAAK,GAAlB,EAAuB;AAC5BR,QAAAA,KAAK,CAACkC,MAAN,CAAahB,MAAb,EAAoB,CAApB;AACAhB,QAAAA,UAAU,CAACgC,MAAX,CAAkBhB,MAAlB,EAAyB,CAAzB;AACF;AACC,OAJM,MAIA,IAAIV,SAAS,KAAK,GAAlB,EAAuB;AAC5BR,QAAAA,KAAK,CAACkC,MAAN,CAAahB,MAAb,EAAoB,CAApB,EAAuBE,OAAvB;AACAlB,QAAAA,UAAU,CAACgC,MAAX,CAAkBhB,MAAlB,EAAyB,CAAzB,EAA4Bc,SAA5B;AACAd,QAAAA,MAAK;AACN,OAJM,MAIA,IAAIV,SAAS,KAAK,IAAlB,EAAwB;AAC7B,YAAI2B,iBAAiB,GAAGlB,KAAI,CAACjB,KAAL,CAAWmB,CAAC,GAAG,CAAf,IAAoBF,KAAI,CAACjB,KAAL,CAAWmB,CAAC,GAAG,CAAf,EAAkB,CAAlB,CAApB,GAA2C,IAAnE;;AACA,YAAIgB,iBAAiB,KAAK,GAA1B,EAA+B;AAC7BrB,UAAAA,WAAW,GAAG,IAAd;AACD,SAFD,MAEO,IAAIqB,iBAAiB,KAAK,GAA1B,EAA+B;AACpCpB,UAAAA,QAAQ,GAAG,IAAX;AACD;AACF;AACF;AACF,GA7GuD,CA+GxD;;;AACA,MAAID,WAAJ,EAAiB;AACf,WAAO,CAACd,KAAK,CAACA,KAAK,CAACF,MAAN,GAAe,CAAhB,CAAb,EAAiC;AAC/BE,MAAAA,KAAK,CAACoC,GAAN;AACAlC,MAAAA,UAAU,CAACkC,GAAX;AACD;AACF,GALD,MAKO,IAAIrB,QAAJ,EAAc;AACnBf,IAAAA,KAAK,CAACqC,IAAN,CAAW,EAAX;AACAnC,IAAAA,UAAU,CAACmC,IAAX,CAAgB,IAAhB;AACD;;AACD,OAAK,IAAIC,EAAE,GAAG,CAAd,EAAiBA,EAAE,GAAGtC,KAAK,CAACF,MAAN,GAAe,CAArC,EAAwCwC,EAAE,EAA1C,EAA8C;AAC5CtC,IAAAA,KAAK,CAACsC,EAAD,CAAL,GAAYtC,KAAK,CAACsC,EAAD,CAAL,GAAYpC,UAAU,CAACoC,EAAD,CAAlC;AACD;;AACD,SAAOtC,KAAK,CAACuC,IAAN,CAAW,EAAX,CAAP;AACD,C,CAED;;;AACO,SAASC,YAAT,CAAsB/C,OAAtB,EAA+BC,OAA/B,EAAwC;AAC7C,MAAI,OAAOD,OAAP,KAAmB,QAAvB,EAAiC;AAC/BA,IAAAA,OAAO;AAAG;AAAA;AAAA;;AAAAE;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAA;AAAA,KAAWF,OAAX,CAAV;AACD;;AAED,MAAIgD,YAAY,GAAG,CAAnB;;AACA,WAASC,YAAT,GAAwB;AACtB,QAAIC,KAAK,GAAGlD,OAAO,CAACgD,YAAY,EAAb,CAAnB;;AACA,QAAI,CAACE,KAAL,EAAY;AACV,aAAOjD,OAAO,CAACkD,QAAR,EAAP;AACD;;AAEDlD,IAAAA,OAAO,CAACmD,QAAR,CAAiBF,KAAjB,EAAwB,UAASG,GAAT,EAAcC,IAAd,EAAoB;AAC1C,UAAID,GAAJ,EAAS;AACP,eAAOpD,OAAO,CAACkD,QAAR,CAAiBE,GAAjB,CAAP;AACD;;AAED,UAAIE,cAAc,GAAGzD,UAAU,CAACwD,IAAD,EAAOJ,KAAP,EAAcjD,OAAd,CAA/B;AACAA,MAAAA,OAAO,CAACuD,OAAR,CAAgBN,KAAhB,EAAuBK,cAAvB,EAAuC,UAASF,GAAT,EAAc;AACnD,YAAIA,GAAJ,EAAS;AACP,iBAAOpD,OAAO,CAACkD,QAAR,CAAiBE,GAAjB,CAAP;AACD;;AAEDJ,QAAAA,YAAY;AACb,OAND;AAOD,KAbD;AAcD;;AACDA,EAAAA,YAAY;AACb","sourcesContent":["import {parsePatch} from './parse';\nimport distanceIterator from '../util/distance-iterator';\n\nexport function applyPatch(source, uniDiff, options = {}) {\n  if (typeof uniDiff === 'string') {\n    uniDiff = parsePatch(uniDiff);\n  }\n\n  if (Array.isArray(uniDiff)) {\n    if (uniDiff.length > 1) {\n      throw new Error('applyPatch only works with a single input.');\n    }\n\n    uniDiff = uniDiff[0];\n  }\n\n  // Apply the diff to the input\n  let lines = source.split(/\\r\\n|[\\n\\v\\f\\r\\x85]/),\n      delimiters = source.match(/\\r\\n|[\\n\\v\\f\\r\\x85]/g) || [],\n      hunks = uniDiff.hunks,\n\n      compareLine = options.compareLine || ((lineNumber, line, operation, patchContent) => line === patchContent),\n      errorCount = 0,\n      fuzzFactor = options.fuzzFactor || 0,\n      minLine = 0,\n      offset = 0,\n\n      removeEOFNL,\n      addEOFNL;\n\n  /**\n   * Checks if the hunk exactly fits on the provided location\n   */\n  function hunkFits(hunk, toPos) {\n    for (let j = 0; 
j < hunk.lines.length; j++) {\n      let line = hunk.lines[j],\n          operation = (line.length > 0 ? line[0] : ' '),\n          content = (line.length > 0 ? line.substr(1) : line);\n\n      if (operation === ' ' || operation === '-') {\n        // Context sanity check\n        if (!compareLine(toPos + 1, lines[toPos], operation, content)) {\n          errorCount++;\n\n          if (errorCount > fuzzFactor) {\n            return false;\n          }\n        }\n        toPos++;\n      }\n    }\n\n    return true;\n  }\n\n  // Search best fit offsets for each hunk based on the previous ones\n  for (let i = 0; i < hunks.length; i++) {\n    let hunk = hunks[i],\n        maxLine = lines.length - hunk.oldLines,\n        localOffset = 0,\n        toPos = offset + hunk.oldStart - 1;\n\n    let iterator = distanceIterator(toPos, minLine, maxLine);\n\n    for (; localOffset !== undefined; localOffset = iterator()) {\n      if (hunkFits(hunk, toPos + localOffset)) {\n        hunk.offset = offset += localOffset;\n        break;\n      }\n    }\n\n    if (localOffset === undefined) {\n      return false;\n    }\n\n    // Set lower text limit to end of the current hunk, so next ones don't try\n    // to fit over already patched text\n    minLine = hunk.offset + hunk.oldStart + hunk.oldLines;\n  }\n\n  // Apply patch hunks\n  let diffOffset = 0;\n  for (let i = 0; i < hunks.length; i++) {\n    let hunk = hunks[i],\n        toPos = hunk.oldStart + hunk.offset + diffOffset - 1;\n    diffOffset += hunk.newLines - hunk.oldLines;\n\n    for (let j = 0; j < hunk.lines.length; j++) {\n      let line = hunk.lines[j],\n          operation = (line.length > 0 ? line[0] : ' '),\n          content = (line.length > 0 ? line.substr(1) : line),\n          delimiter = hunk.linedelimiters[j];\n\n      if (operation === ' ') {\n        toPos++;\n      } else if (operation === '-') {\n        lines.splice(toPos, 1);\n        delimiters.splice(toPos, 1);\n      /* istanbul ignore else */\n      } else if (operation === '+') {\n        lines.splice(toPos, 0, content);\n        delimiters.splice(toPos, 0, delimiter);\n        toPos++;\n      } else if (operation === '\\\\') {\n        let previousOperation = hunk.lines[j - 1] ? hunk.lines[j - 1][0] : null;\n        if (previousOperation === '+') {\n          removeEOFNL = true;\n        } else if (previousOperation === '-') {\n          addEOFNL = true;\n        }\n      }\n    }\n  }\n\n  // Handle EOFNL insertion/removal\n  if (removeEOFNL) {\n    while (!lines[lines.length - 1]) {\n      lines.pop();\n      delimiters.pop();\n    }\n  } else if (addEOFNL) {\n    lines.push('');\n    delimiters.push('\\n');\n  }\n  for (let _k = 0; _k < lines.length - 1; _k++) {\n    lines[_k] = lines[_k] + delimiters[_k];\n  }\n  return lines.join('');\n}\n\n// Wrapper that supports multiple file patches via callbacks.\nexport function applyPatches(uniDiff, options) {\n  if (typeof uniDiff === 'string') {\n    uniDiff = parsePatch(uniDiff);\n  }\n\n  let currentIndex = 0;\n  function processIndex() {\n    let index = uniDiff[currentIndex++];\n    if (!index) {\n      return options.complete();\n    }\n\n    options.loadFile(index, function(err, data) {\n      if (err) {\n        return options.complete(err);\n      }\n\n      let updatedContent = applyPatch(data, index, options);\n      options.patched(index, updatedContent, function(err) {\n        if (err) {\n          return options.complete(err);\n        }\n\n        processIndex();\n      });\n    });\n  }\n  processIndex();\n}\n"]}
diff --git a/_extensions/d2/node_modules/diff/lib/patch/create.js b/_extensions/d2/node_modules/diff/lib/patch/create.js
new file mode 100644
index 00000000..1d3b4c30
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/patch/create.js
@@ -0,0 +1,272 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.structuredPatch = structuredPatch;
+exports.formatPatch = formatPatch;
+exports.createTwoFilesPatch = createTwoFilesPatch;
+exports.createPatch = createPatch;
+
+/*istanbul ignore end*/
+var
+/*istanbul ignore start*/
+_line = require("../diff/line")
+/*istanbul ignore end*/
+;
+
+/*istanbul ignore start*/ function _toConsumableArray(arr) { return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread(); }
+
+function _nonIterableSpread() { throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method."); }
+
+function _unsupportedIterableToArray(o, minLen) { if (!o) return; if (typeof o === "string") return _arrayLikeToArray(o, minLen); var n = Object.prototype.toString.call(o).slice(8, -1); if (n === "Object" && o.constructor) n = o.constructor.name; if (n === "Map" || n === "Set") return Array.from(o); if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen); }
+
+function _iterableToArray(iter) { if (typeof Symbol !== "undefined" && Symbol.iterator in Object(iter)) return Array.from(iter); }
+
+function _arrayWithoutHoles(arr) { if (Array.isArray(arr)) return _arrayLikeToArray(arr); }
+
+function _arrayLikeToArray(arr, len) { if (len == null || len > arr.length) len = arr.length; for (var i = 0, arr2 = new Array(len); i < len; i++) { arr2[i] = arr[i]; } return arr2; }
+
+/*istanbul ignore end*/
+function structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
+  if (!options) {
+    options = {};
+  }
+
+  if (typeof options.context === 'undefined') {
+    options.context = 4;
+  }
+
+  var diff =
+  /*istanbul ignore start*/
+  (0,
+  /*istanbul ignore end*/
+
+  /*istanbul ignore start*/
+  _line
+  /*istanbul ignore end*/
+  .
+  /*istanbul ignore start*/
+  diffLines)
+  /*istanbul ignore end*/
+  (oldStr, newStr, options);
+
+  if (!diff) {
+    return;
+  }
+
+  diff.push({
+    value: '',
+    lines: []
+  }); // Append an empty value to make cleanup easier
+
+  function contextLines(lines) {
+    return lines.map(function (entry) {
+      return ' ' + entry;
+    });
+  }
+
+  var hunks = [];
+  var oldRangeStart = 0,
+      newRangeStart = 0,
+      curRange = [],
+      oldLine = 1,
+      newLine = 1;
+
+  /*istanbul ignore start*/
+  var _loop = function _loop(
+  /*istanbul ignore end*/
+  i) {
+    var current = diff[i],
+        lines = current.lines || current.value.replace(/\n$/, '').split('\n');
+    current.lines = lines;
+
+    if (current.added || current.removed) {
+      /*istanbul ignore start*/
+      var _curRange;
+
+      /*istanbul ignore end*/
+      // If we have previous context, start with that
+      if (!oldRangeStart) {
+        var prev = diff[i - 1];
+        oldRangeStart = oldLine;
+        newRangeStart = newLine;
+
+        if (prev) {
+          curRange = options.context > 0 ? contextLines(prev.lines.slice(-options.context)) : [];
+          oldRangeStart -= curRange.length;
+          newRangeStart -= curRange.length;
+        }
+      } // Output our changes
+
+
+      /*istanbul ignore start*/
+
+      /*istanbul ignore end*/
+
+      /*istanbul ignore start*/
+      (_curRange =
+      /*istanbul ignore end*/
+      curRange).push.apply(
+      /*istanbul ignore start*/
+      _curRange
+      /*istanbul ignore end*/
+      ,
+      /*istanbul ignore start*/
+      _toConsumableArray(
+      /*istanbul ignore end*/
+      lines.map(function (entry) {
+        return (current.added ? '+' : '-') + entry;
+      }))); // Track the updated file position
+
+
+      if (current.added) {
+        newLine += lines.length;
+      } else {
+        oldLine += lines.length;
+      }
+    } else {
+      // Identical context lines. Track line changes
+      if (oldRangeStart) {
+        // Close out any changes that have been output (or join overlapping)
+        if (lines.length <= options.context * 2 && i < diff.length - 2) {
+          /*istanbul ignore start*/
+          var _curRange2;
+
+          /*istanbul ignore end*/
+          // Overlapping
+
+          /*istanbul ignore start*/
+
+          /*istanbul ignore end*/
+
+          /*istanbul ignore start*/
+          (_curRange2 =
+          /*istanbul ignore end*/
+          curRange).push.apply(
+          /*istanbul ignore start*/
+          _curRange2
+          /*istanbul ignore end*/
+          ,
+          /*istanbul ignore start*/
+          _toConsumableArray(
+          /*istanbul ignore end*/
+          contextLines(lines)));
+        } else {
+          /*istanbul ignore start*/
+          var _curRange3;
+
+          /*istanbul ignore end*/
+          // end the range and output
+          var contextSize = Math.min(lines.length, options.context);
+
+          /*istanbul ignore start*/
+
+          /*istanbul ignore end*/
+
+          /*istanbul ignore start*/
+          (_curRange3 =
+          /*istanbul ignore end*/
+          curRange).push.apply(
+          /*istanbul ignore start*/
+          _curRange3
+          /*istanbul ignore end*/
+          ,
+          /*istanbul ignore start*/
+          _toConsumableArray(
+          /*istanbul ignore end*/
+          contextLines(lines.slice(0, contextSize))));
+
+          var hunk = {
+            oldStart: oldRangeStart,
+            oldLines: oldLine - oldRangeStart + contextSize,
+            newStart: newRangeStart,
+            newLines: newLine - newRangeStart + contextSize,
+            lines: curRange
+          };
+
+          if (i >= diff.length - 2 && lines.length <= options.context) {
+            // EOF is inside this hunk
+            var oldEOFNewline = /\n$/.test(oldStr);
+            var newEOFNewline = /\n$/.test(newStr);
+            var noNlBeforeAdds = lines.length == 0 && curRange.length > hunk.oldLines;
+
+            if (!oldEOFNewline && noNlBeforeAdds && oldStr.length > 0) {
+              // special case: old has no eol and no trailing context; no-nl can end up before adds
+              // however, if the old file is empty, do not output the no-nl line
+              curRange.splice(hunk.oldLines, 0, '\\ No newline at end of file');
+            }
+
+            if (!oldEOFNewline && !noNlBeforeAdds || !newEOFNewline) {
+              curRange.push('\\ No newline at end of file');
+            }
+          }
+
+          hunks.push(hunk);
+          oldRangeStart = 0;
+          newRangeStart = 0;
+          curRange = [];
+        }
+      }
+
+      oldLine += lines.length;
+      newLine += lines.length;
+    }
+  };
+
+  for (var i = 0; i < diff.length; i++) {
+    /*istanbul ignore start*/
+    _loop(
+    /*istanbul ignore end*/
+    i);
+  }
+
+  return {
+    oldFileName: oldFileName,
+    newFileName: newFileName,
+    oldHeader: oldHeader,
+    newHeader: newHeader,
+    hunks: hunks
+  };
+}
+
+function formatPatch(diff) {
+  var ret = [];
+
+  if (diff.oldFileName == diff.newFileName) {
+    ret.push('Index: ' + diff.oldFileName);
+  }
+
+  ret.push('===================================================================');
+  ret.push('--- ' + diff.oldFileName + (typeof diff.oldHeader === 'undefined' ? '' : '\t' + diff.oldHeader));
+  ret.push('+++ ' + diff.newFileName + (typeof diff.newHeader === 'undefined' ? '' : '\t' + diff.newHeader));
+
+  for (var i = 0; i < diff.hunks.length; i++) {
+    var hunk = diff.hunks[i]; // Unified Diff Format quirk: If the chunk size is 0,
+    // the first number is one lower than one would expect.
+    // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
+
+    if (hunk.oldLines === 0) {
+      hunk.oldStart -= 1;
+    }
+
+    if (hunk.newLines === 0) {
+      hunk.newStart -= 1;
+    }
+
+    ret.push('@@ -' + hunk.oldStart + ',' + hunk.oldLines + ' +' + hunk.newStart + ',' + hunk.newLines + ' @@');
+    ret.push.apply(ret, hunk.lines);
+  }
+
+  return ret.join('\n') + '\n';
+}
+
+function createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {
+  return formatPatch(structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options));
+}
+
+function createPatch(fileName, oldStr, newStr, oldHeader, newHeader, options) {
+  return createTwoFilesPatch(fileName, fileName, oldStr, newStr, oldHeader, newHeader, options);
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"sources":["../../src/patch/create.js"],"names":["structuredPatch","oldFileName","newFileName","oldStr","newStr","oldHeader","newHeader","options","context","diff","diffLines","push","value","lines","contextLines","map","entry","hunks","oldRangeStart","newRangeStart","curRange","oldLine","newLine","i","current","replace","split","added","removed","prev","slice","length","contextSize","Math","min","hunk","oldStart","oldLines","newStart","newLines","oldEOFNewline","test","newEOFNewline","noNlBeforeAdds","splice","formatPatch","ret","apply","join","createTwoFilesPatch","createPatch","fileName"],"mappings":";;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;;;;;;;;;;;;;;;AAEO,SAASA,eAAT,CAAyBC,WAAzB,EAAsCC,WAAtC,EAAmDC,MAAnD,EAA2DC,MAA3D,EAAmEC,SAAnE,EAA8EC,SAA9E,EAAyFC,OAAzF,EAAkG;AACvG,MAAI,CAACA,OAAL,EAAc;AACZA,IAAAA,OAAO,GAAG,EAAV;AACD;;AACD,MAAI,OAAOA,OAAO,CAACC,OAAf,KAA2B,WAA/B,EAA4C;AAC1CD,IAAAA,OAAO,CAACC,OAAR,GAAkB,CAAlB;AACD;;AAED,MAAMC,IAAI;AAAG;AAAA;AAAA;;AAAAC;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAA;AAAA,GAAUP,MAAV,EAAkBC,MAAlB,EAA0BG,OAA1B,CAAb;;AACA,MAAG,CAACE,IAAJ,EAAU;AACR;AACD;;AAEDA,EAAAA,IAAI,CAACE,IAAL,CAAU;AAACC,IAAAA,KAAK,EAAE,EAAR;AAAYC,IAAAA,KAAK,EAAE;AAAnB,GAAV,EAbuG,CAapE;;AAEnC,WAASC,YAAT,CAAsBD,KAAtB,EAA6B;AAC3B,WAAOA,KAAK,CAACE,GAAN,CAAU,UAASC,KAAT,EAAgB;AAAE,aAAO,MAAMA,KAAb;AAAqB,KAAjD,CAAP;AACD;;AAED,MAAIC,KAAK,GAAG,EAAZ;AACA,MAAIC,aAAa,GAAG,CAApB;AAAA,MAAuBC,aAAa,GAAG,CAAvC;AAAA,MAA0CC,QAAQ,GAAG,EAArD;AAAA,MACIC,OAAO,GAAG,CADd;AAAA,MACiBC,OAAO,GAAG,CAD3B;;AApBuG;AAAA;AAAA;AAsB9FC,EAAAA,CAtB8F;AAuBrG,QAAMC,OAAO,GAAGf,IAAI,CAACc,CAAD,CAApB;AAAA,QACMV,KAAK,GAAGW,OAAO,CAACX,KAAR,IAAiBW,OAAO,CAACZ,KAAR,CAAca,OAAd,CAAsB,KAAtB,EAA6B,EAA7B,EAAiCC,KAAjC,CAAuC,IAAvC,CAD/B;AAEAF,IAAAA,OAAO,CAACX,KAAR,GAAgBA,KAAhB;;AAEA,QAAIW,OAAO,CAACG,KAAR,IAAiBH,OAAO,CAACI,OAA7B,EAAsC;AAAA;AAAA;;AAAA;AACpC;AACA,UAAI,CAACV,aAAL,EAAoB;AAClB,YAAMW,IAAI,GAAGpB,IAAI,CAACc,CAAC,GAAG,CAAL,CAAjB;AACAL,QAAAA,aAAa,GAAGG,OAAhB;AACAF,QAAAA,aAAa,GAAGG,OAAhB;;AAEA,YAAIO,IAAJ,EAAU;AACRT,UAAAA,QAAQ,GAAGb,OAAO,CAACC,OAAR,GAAkB,CAAlB,GAAsBM,YAAY,CAACe,IAAI,CAAChB,KAAL,CAAWiB,KAAX,CAAiB,CAACvB,OAAO,CAACC,OAA1B,CAAD,CAAlC,GAAyE,EAApF;AACAU,UAAAA,aAAa,IAAIE,QAAQ,CAACW,MAA1B;AACAZ,UAAAA,aAAa,IAAIC,QAAQ,CAACW,MAA1B;AACD;AACF,OAZmC,CAcpC;;;AACA;;AAAA;;AAAA;AAAA;AAAA;AAAAX,MAAAA,QAAQ,EAACT,IAAT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAkBE,MAAAA,KAAK,CAACE,GAAN,CAAU,UAASC,KAAT,EAAgB;AAC1C,eAAO,CAACQ,OAAO,CAACG,KAAR,GAAgB,GAAhB,GAAsB,GAAvB,IAA8BX,KAArC;AACD,OAFiB,CAAlB,GAfoC,CAmBpC;;;AACA,UAAIQ,OAAO,CAACG,KAAZ,EAAmB;AACjBL,QAAAA,OAAO,IAAIT,KAAK,CAACkB,MAAjB;AACD,OAFD,MAEO;AACLV,QAAAA,OAAO,IAAIR,KAAK,CAACkB,MAAjB;AACD;AACF,KAzBD,MAyBO;AACL;AACA,UAAIb,aAAJ,EAAmB;AACjB;AACA,YAAIL,KAAK,CAACkB,MAAN,IAAgBxB,OAAO,CAACC,OAAR,GAAkB,CAAlC,IAAuCe,CAAC,GAAGd,IAAI,CAACsB,MAAL,GAAc,CAA7D,EAAgE;AAAA;AAAA;;AAAA;AAC9D;;AACA;;AAAA;;AAAA;AAAA;AAAA;AAAAX,UAAAA,QAAQ,EAACT,IAAT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAkBG,UAAAA,YAAY,CAACD,KAAD,CAA9B;AACD,SAHD,MAGO;AAAA;AAAA;;AAAA;AACL;AACA,cAAImB,WAAW,GAAGC,IAAI,CAACC,GAAL,CAASrB,KAAK,CAACkB,MAAf,EAAuBxB,OAAO,CAACC,OAA/B,CAAlB;;AACA;;AAAA;;AAAA;AAAA;AAAA;AAAAY,UAAAA,QAAQ,EAACT,IAAT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAkBG,UAAAA,YAAY,CAACD,KAAK,CAACiB,KAAN,CAAY,CAAZ,EAAeE,WAAf,CAAD,CAA9B;;AAEA,cAAIG,IAAI,GAAG;AACTC,YAAAA,QAAQ,EAAElB,aADD;AAETmB,YAAAA,QAAQ,EAAGhB,OAAO,GAAGH,aAAV,GAA0Bc,WAF5B;AAGTM,YAAAA,QAAQ,EAAEnB,aAHD;AAIToB,YAAAA,QAAQ,EAAGjB,OAAO,GAAGH,aAAV,GAA0Ba,WAJ5B;AAKTnB,YAAAA,KAAK,EAAEO;AALE,WAAX;;AAOA,cAAIG,CAAC,IAAId,IAAI,
CAACsB,MAAL,GAAc,CAAnB,IAAwBlB,KAAK,CAACkB,MAAN,IAAgBxB,OAAO,CAACC,OAApD,EAA6D;AAC3D;AACA,gBAAIgC,aAAa,GAAK,KAAD,CAAQC,IAAR,CAAatC,MAAb,CAArB;AACA,gBAAIuC,aAAa,GAAK,KAAD,CAAQD,IAAR,CAAarC,MAAb,CAArB;AACA,gBAAIuC,cAAc,GAAG9B,KAAK,CAACkB,MAAN,IAAgB,CAAhB,IAAqBX,QAAQ,CAACW,MAAT,GAAkBI,IAAI,CAACE,QAAjE;;AACA,gBAAI,CAACG,aAAD,IAAkBG,cAAlB,IAAoCxC,MAAM,CAAC4B,MAAP,GAAgB,CAAxD,EAA2D;AACzD;AACA;AACAX,cAAAA,QAAQ,CAACwB,MAAT,CAAgBT,IAAI,CAACE,QAArB,EAA+B,CAA/B,EAAkC,8BAAlC;AACD;;AACD,gBAAK,CAACG,aAAD,IAAkB,CAACG,cAApB,IAAuC,CAACD,aAA5C,EAA2D;AACzDtB,cAAAA,QAAQ,CAACT,IAAT,CAAc,8BAAd;AACD;AACF;;AACDM,UAAAA,KAAK,CAACN,IAAN,CAAWwB,IAAX;AAEAjB,UAAAA,aAAa,GAAG,CAAhB;AACAC,UAAAA,aAAa,GAAG,CAAhB;AACAC,UAAAA,QAAQ,GAAG,EAAX;AACD;AACF;;AACDC,MAAAA,OAAO,IAAIR,KAAK,CAACkB,MAAjB;AACAT,MAAAA,OAAO,IAAIT,KAAK,CAACkB,MAAjB;AACD;AA9FoG;;AAsBvG,OAAK,IAAIR,CAAC,GAAG,CAAb,EAAgBA,CAAC,GAAGd,IAAI,CAACsB,MAAzB,EAAiCR,CAAC,EAAlC,EAAsC;AAAA;AAAA;AAAA;AAA7BA,IAAAA,CAA6B;AAyErC;;AAED,SAAO;AACLtB,IAAAA,WAAW,EAAEA,WADR;AACqBC,IAAAA,WAAW,EAAEA,WADlC;AAELG,IAAAA,SAAS,EAAEA,SAFN;AAEiBC,IAAAA,SAAS,EAAEA,SAF5B;AAGLW,IAAAA,KAAK,EAAEA;AAHF,GAAP;AAKD;;AAEM,SAAS4B,WAAT,CAAqBpC,IAArB,EAA2B;AAChC,MAAMqC,GAAG,GAAG,EAAZ;;AACA,MAAIrC,IAAI,CAACR,WAAL,IAAoBQ,IAAI,CAACP,WAA7B,EAA0C;AACxC4C,IAAAA,GAAG,CAACnC,IAAJ,CAAS,YAAYF,IAAI,CAACR,WAA1B;AACD;;AACD6C,EAAAA,GAAG,CAACnC,IAAJ,CAAS,qEAAT;AACAmC,EAAAA,GAAG,CAACnC,IAAJ,CAAS,SAASF,IAAI,CAACR,WAAd,IAA6B,OAAOQ,IAAI,CAACJ,SAAZ,KAA0B,WAA1B,GAAwC,EAAxC,GAA6C,OAAOI,IAAI,CAACJ,SAAtF,CAAT;AACAyC,EAAAA,GAAG,CAACnC,IAAJ,CAAS,SAASF,IAAI,CAACP,WAAd,IAA6B,OAAOO,IAAI,CAACH,SAAZ,KAA0B,WAA1B,GAAwC,EAAxC,GAA6C,OAAOG,IAAI,CAACH,SAAtF,CAAT;;AAEA,OAAK,IAAIiB,CAAC,GAAG,CAAb,EAAgBA,CAAC,GAAGd,IAAI,CAACQ,KAAL,CAAWc,MAA/B,EAAuCR,CAAC,EAAxC,EAA4C;AAC1C,QAAMY,IAAI,GAAG1B,IAAI,CAACQ,KAAL,CAAWM,CAAX,CAAb,CAD0C,CAE1C;AACA;AACA;;AACA,QAAIY,IAAI,CAACE,QAAL,KAAkB,CAAtB,EAAyB;AACvBF,MAAAA,IAAI,CAACC,QAAL,IAAiB,CAAjB;AACD;;AACD,QAAID,IAAI,CAACI,QAAL,KAAkB,CAAtB,EAAyB;AACvBJ,MAAAA,IAAI,CAACG,QAAL,IAAiB,CAAjB;AACD;;AACDQ,IAAAA,GAAG,CAACnC,IAAJ,CACE,SAASwB,IAAI,CAACC,QAAd,GAAyB,GAAzB,GAA+BD,IAAI,CAACE,QAApC,GACE,IADF,GACSF,IAAI,CAACG,QADd,GACyB,GADzB,GAC+BH,IAAI,CAACI,QADpC,GAEE,KAHJ;AAKAO,IAAAA,GAAG,CAACnC,IAAJ,CAASoC,KAAT,CAAeD,GAAf,EAAoBX,IAAI,CAACtB,KAAzB;AACD;;AAED,SAAOiC,GAAG,CAACE,IAAJ,CAAS,IAAT,IAAiB,IAAxB;AACD;;AAEM,SAASC,mBAAT,CAA6BhD,WAA7B,EAA0CC,WAA1C,EAAuDC,MAAvD,EAA+DC,MAA/D,EAAuEC,SAAvE,EAAkFC,SAAlF,EAA6FC,OAA7F,EAAsG;AAC3G,SAAOsC,WAAW,CAAC7C,eAAe,CAACC,WAAD,EAAcC,WAAd,EAA2BC,MAA3B,EAAmCC,MAAnC,EAA2CC,SAA3C,EAAsDC,SAAtD,EAAiEC,OAAjE,CAAhB,CAAlB;AACD;;AAEM,SAAS2C,WAAT,CAAqBC,QAArB,EAA+BhD,MAA/B,EAAuCC,MAAvC,EAA+CC,SAA/C,EAA0DC,SAA1D,EAAqEC,OAArE,EAA8E;AACnF,SAAO0C,mBAAmB,CAACE,QAAD,EAAWA,QAAX,EAAqBhD,MAArB,EAA6BC,MAA7B,EAAqCC,SAArC,EAAgDC,SAAhD,EAA2DC,OAA3D,CAA1B;AACD","sourcesContent":["import {diffLines} from '../diff/line';\n\nexport function structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {\n  if (!options) {\n    options = {};\n  }\n  if (typeof options.context === 'undefined') {\n    options.context = 4;\n  }\n\n  const diff = diffLines(oldStr, newStr, options);\n  if(!diff) {\n    return;\n  }\n\n  diff.push({value: '', lines: []}); // Append an empty value to make cleanup easier\n\n  function contextLines(lines) {\n    return lines.map(function(entry) { return ' ' + entry; });\n  }\n\n  let hunks = [];\n  let oldRangeStart = 0, newRangeStart = 0, curRange = [],\n      oldLine = 1, newLine = 1;\n  for (let i = 0; i < diff.length; i++) {\n    const current = diff[i],\n          
lines = current.lines || current.value.replace(/\\n$/, '').split('\\n');\n    current.lines = lines;\n\n    if (current.added || current.removed) {\n      // If we have previous context, start with that\n      if (!oldRangeStart) {\n        const prev = diff[i - 1];\n        oldRangeStart = oldLine;\n        newRangeStart = newLine;\n\n        if (prev) {\n          curRange = options.context > 0 ? contextLines(prev.lines.slice(-options.context)) : [];\n          oldRangeStart -= curRange.length;\n          newRangeStart -= curRange.length;\n        }\n      }\n\n      // Output our changes\n      curRange.push(... lines.map(function(entry) {\n        return (current.added ? '+' : '-') + entry;\n      }));\n\n      // Track the updated file position\n      if (current.added) {\n        newLine += lines.length;\n      } else {\n        oldLine += lines.length;\n      }\n    } else {\n      // Identical context lines. Track line changes\n      if (oldRangeStart) {\n        // Close out any changes that have been output (or join overlapping)\n        if (lines.length <= options.context * 2 && i < diff.length - 2) {\n          // Overlapping\n          curRange.push(... contextLines(lines));\n        } else {\n          // end the range and output\n          let contextSize = Math.min(lines.length, options.context);\n          curRange.push(... contextLines(lines.slice(0, contextSize)));\n\n          let hunk = {\n            oldStart: oldRangeStart,\n            oldLines: (oldLine - oldRangeStart + contextSize),\n            newStart: newRangeStart,\n            newLines: (newLine - newRangeStart + contextSize),\n            lines: curRange\n          };\n          if (i >= diff.length - 2 && lines.length <= options.context) {\n            // EOF is inside this hunk\n            let oldEOFNewline = ((/\\n$/).test(oldStr));\n            let newEOFNewline = ((/\\n$/).test(newStr));\n            let noNlBeforeAdds = lines.length == 0 && curRange.length > hunk.oldLines;\n            if (!oldEOFNewline && noNlBeforeAdds && oldStr.length > 0) {\n              // special case: old has no eol and no trailing context; no-nl can end up before adds\n              // however, if the old file is empty, do not output the no-nl line\n              curRange.splice(hunk.oldLines, 0, '\\\\ No newline at end of file');\n            }\n            if ((!oldEOFNewline && !noNlBeforeAdds) || !newEOFNewline) {\n              curRange.push('\\\\ No newline at end of file');\n            }\n          }\n          hunks.push(hunk);\n\n          oldRangeStart = 0;\n          newRangeStart = 0;\n          curRange = [];\n        }\n      }\n      oldLine += lines.length;\n      newLine += lines.length;\n    }\n  }\n\n  return {\n    oldFileName: oldFileName, newFileName: newFileName,\n    oldHeader: oldHeader, newHeader: newHeader,\n    hunks: hunks\n  };\n}\n\nexport function formatPatch(diff) {\n  const ret = [];\n  if (diff.oldFileName == diff.newFileName) {\n    ret.push('Index: ' + diff.oldFileName);\n  }\n  ret.push('===================================================================');\n  ret.push('--- ' + diff.oldFileName + (typeof diff.oldHeader === 'undefined' ? '' : '\\t' + diff.oldHeader));\n  ret.push('+++ ' + diff.newFileName + (typeof diff.newHeader === 'undefined' ? 
'' : '\\t' + diff.newHeader));\n\n  for (let i = 0; i < diff.hunks.length; i++) {\n    const hunk = diff.hunks[i];\n    // Unified Diff Format quirk: If the chunk size is 0,\n    // the first number is one lower than one would expect.\n    // https://www.artima.com/weblogs/viewpost.jsp?thread=164293\n    if (hunk.oldLines === 0) {\n      hunk.oldStart -= 1;\n    }\n    if (hunk.newLines === 0) {\n      hunk.newStart -= 1;\n    }\n    ret.push(\n      '@@ -' + hunk.oldStart + ',' + hunk.oldLines\n      + ' +' + hunk.newStart + ',' + hunk.newLines\n      + ' @@'\n    );\n    ret.push.apply(ret, hunk.lines);\n  }\n\n  return ret.join('\\n') + '\\n';\n}\n\nexport function createTwoFilesPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options) {\n  return formatPatch(structuredPatch(oldFileName, newFileName, oldStr, newStr, oldHeader, newHeader, options));\n}\n\nexport function createPatch(fileName, oldStr, newStr, oldHeader, newHeader, options) {\n  return createTwoFilesPatch(fileName, fileName, oldStr, newStr, oldHeader, newHeader, options);\n}\n"]}
diff --git a/_extensions/d2/node_modules/diff/lib/patch/merge.js b/_extensions/d2/node_modules/diff/lib/patch/merge.js
new file mode 100644
index 00000000..b46faaab
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/patch/merge.js
@@ -0,0 +1,613 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.calcLineCount = calcLineCount;
+exports.merge = merge;
+
+/*istanbul ignore end*/
+var
+/*istanbul ignore start*/
+_create = require("./create")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_parse = require("./parse")
+/*istanbul ignore end*/
+;
+
+var
+/*istanbul ignore start*/
+_array = require("../util/array")
+/*istanbul ignore end*/
+;
+
+/*istanbul ignore start*/ function _toConsumableArray(arr) { return _arrayWithoutHoles(arr) || _iterableToArray(arr) || _unsupportedIterableToArray(arr) || _nonIterableSpread(); }
+
+function _nonIterableSpread() { throw new TypeError("Invalid attempt to spread non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method."); }
+
+function _unsupportedIterableToArray(o, minLen) { if (!o) return; if (typeof o === "string") return _arrayLikeToArray(o, minLen); var n = Object.prototype.toString.call(o).slice(8, -1); if (n === "Object" && o.constructor) n = o.constructor.name; if (n === "Map" || n === "Set") return Array.from(o); if (n === "Arguments" || /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)) return _arrayLikeToArray(o, minLen); }
+
+function _iterableToArray(iter) { if (typeof Symbol !== "undefined" && Symbol.iterator in Object(iter)) return Array.from(iter); }
+
+function _arrayWithoutHoles(arr) { if (Array.isArray(arr)) return _arrayLikeToArray(arr); }
+
+function _arrayLikeToArray(arr, len) { if (len == null || len > arr.length) len = arr.length; for (var i = 0, arr2 = new Array(len); i < len; i++) { arr2[i] = arr[i]; } return arr2; }
+
+/*istanbul ignore end*/
+function calcLineCount(hunk) {
+  /*istanbul ignore start*/
+  var _calcOldNewLineCount =
+  /*istanbul ignore end*/
+  calcOldNewLineCount(hunk.lines),
+      oldLines = _calcOldNewLineCount.oldLines,
+      newLines = _calcOldNewLineCount.newLines;
+
+  if (oldLines !== undefined) {
+    hunk.oldLines = oldLines;
+  } else {
+    delete hunk.oldLines;
+  }
+
+  if (newLines !== undefined) {
+    hunk.newLines = newLines;
+  } else {
+    delete hunk.newLines;
+  }
+}
+
+function merge(mine, theirs, base) {
+  mine = loadPatch(mine, base);
+  theirs = loadPatch(theirs, base);
+  var ret = {}; // For index we just let it pass through as it doesn't have any necessary meaning.
+  // Leaving sanity checks on this to the API consumer that may know more about the
+  // meaning in their own context.
+
+  if (mine.index || theirs.index) {
+    ret.index = mine.index || theirs.index;
+  }
+
+  if (mine.newFileName || theirs.newFileName) {
+    if (!fileNameChanged(mine)) {
+      // No header or no change in ours, use theirs (and ours if theirs does not exist)
+      ret.oldFileName = theirs.oldFileName || mine.oldFileName;
+      ret.newFileName = theirs.newFileName || mine.newFileName;
+      ret.oldHeader = theirs.oldHeader || mine.oldHeader;
+      ret.newHeader = theirs.newHeader || mine.newHeader;
+    } else if (!fileNameChanged(theirs)) {
+      // No header or no change in theirs, use ours
+      ret.oldFileName = mine.oldFileName;
+      ret.newFileName = mine.newFileName;
+      ret.oldHeader = mine.oldHeader;
+      ret.newHeader = mine.newHeader;
+    } else {
+      // Both changed... figure it out
+      ret.oldFileName = selectField(ret, mine.oldFileName, theirs.oldFileName);
+      ret.newFileName = selectField(ret, mine.newFileName, theirs.newFileName);
+      ret.oldHeader = selectField(ret, mine.oldHeader, theirs.oldHeader);
+      ret.newHeader = selectField(ret, mine.newHeader, theirs.newHeader);
+    }
+  }
+
+  ret.hunks = [];
+  var mineIndex = 0,
+      theirsIndex = 0,
+      mineOffset = 0,
+      theirsOffset = 0;
+
+  while (mineIndex < mine.hunks.length || theirsIndex < theirs.hunks.length) {
+    var mineCurrent = mine.hunks[mineIndex] || {
+      oldStart: Infinity
+    },
+        theirsCurrent = theirs.hunks[theirsIndex] || {
+      oldStart: Infinity
+    };
+
+    if (hunkBefore(mineCurrent, theirsCurrent)) {
+      // This patch does not overlap with any of the others, yay.
+      ret.hunks.push(cloneHunk(mineCurrent, mineOffset));
+      mineIndex++;
+      theirsOffset += mineCurrent.newLines - mineCurrent.oldLines;
+    } else if (hunkBefore(theirsCurrent, mineCurrent)) {
+      // This patch does not overlap with any of the others, yay.
+      ret.hunks.push(cloneHunk(theirsCurrent, theirsOffset));
+      theirsIndex++;
+      mineOffset += theirsCurrent.newLines - theirsCurrent.oldLines;
+    } else {
+      // Overlap, merge as best we can
+      var mergedHunk = {
+        oldStart: Math.min(mineCurrent.oldStart, theirsCurrent.oldStart),
+        oldLines: 0,
+        newStart: Math.min(mineCurrent.newStart + mineOffset, theirsCurrent.oldStart + theirsOffset),
+        newLines: 0,
+        lines: []
+      };
+      mergeLines(mergedHunk, mineCurrent.oldStart, mineCurrent.lines, theirsCurrent.oldStart, theirsCurrent.lines);
+      theirsIndex++;
+      mineIndex++;
+      ret.hunks.push(mergedHunk);
+    }
+  }
+
+  return ret;
+}
+
+function loadPatch(param, base) {
+  if (typeof param === 'string') {
+    if (/^@@/m.test(param) || /^Index:/m.test(param)) {
+      return (
+        /*istanbul ignore start*/
+        (0,
+        /*istanbul ignore end*/
+
+        /*istanbul ignore start*/
+        _parse
+        /*istanbul ignore end*/
+        .
+        /*istanbul ignore start*/
+        parsePatch)
+        /*istanbul ignore end*/
+        (param)[0]
+      );
+    }
+
+    if (!base) {
+      throw new Error('Must provide a base reference or pass in a patch');
+    }
+
+    return (
+      /*istanbul ignore start*/
+      (0,
+      /*istanbul ignore end*/
+
+      /*istanbul ignore start*/
+      _create
+      /*istanbul ignore end*/
+      .
+      /*istanbul ignore start*/
+      structuredPatch)
+      /*istanbul ignore end*/
+      (undefined, undefined, base, param)
+    );
+  }
+
+  return param;
+}
+
+function fileNameChanged(patch) {
+  return patch.newFileName && patch.newFileName !== patch.oldFileName;
+}
+
+function selectField(index, mine, theirs) {
+  if (mine === theirs) {
+    return mine;
+  } else {
+    index.conflict = true;
+    return {
+      mine: mine,
+      theirs: theirs
+    };
+  }
+}
+
+function hunkBefore(test, check) {
+  return test.oldStart < check.oldStart && test.oldStart + test.oldLines < check.oldStart;
+}
+
+function cloneHunk(hunk, offset) {
+  return {
+    oldStart: hunk.oldStart,
+    oldLines: hunk.oldLines,
+    newStart: hunk.newStart + offset,
+    newLines: hunk.newLines,
+    lines: hunk.lines
+  };
+}
+
+function mergeLines(hunk, mineOffset, mineLines, theirOffset, theirLines) {
+  // This will generally result in a conflicted hunk, but there are cases where the context
+  // is the only overlap where we can successfully merge the content here.
+  var mine = {
+    offset: mineOffset,
+    lines: mineLines,
+    index: 0
+  },
+      their = {
+    offset: theirOffset,
+    lines: theirLines,
+    index: 0
+  }; // Handle any leading content
+
+  insertLeading(hunk, mine, their);
+  insertLeading(hunk, their, mine); // Now in the overlap content. Scan through and select the best changes from each.
+
+  while (mine.index < mine.lines.length && their.index < their.lines.length) {
+    var mineCurrent = mine.lines[mine.index],
+        theirCurrent = their.lines[their.index];
+
+    if ((mineCurrent[0] === '-' || mineCurrent[0] === '+') && (theirCurrent[0] === '-' || theirCurrent[0] === '+')) {
+      // Both modified ...
+      mutualChange(hunk, mine, their);
+    } else if (mineCurrent[0] === '+' && theirCurrent[0] === ' ') {
+      /*istanbul ignore start*/
+      var _hunk$lines;
+
+      /*istanbul ignore end*/
+      // Mine inserted
+
+      /*istanbul ignore start*/
+
+      /*istanbul ignore end*/
+
+      /*istanbul ignore start*/
+      (_hunk$lines =
+      /*istanbul ignore end*/
+      hunk.lines).push.apply(
+      /*istanbul ignore start*/
+      _hunk$lines
+      /*istanbul ignore end*/
+      ,
+      /*istanbul ignore start*/
+      _toConsumableArray(
+      /*istanbul ignore end*/
+      collectChange(mine)));
+    } else if (theirCurrent[0] === '+' && mineCurrent[0] === ' ') {
+      /*istanbul ignore start*/
+      var _hunk$lines2;
+
+      /*istanbul ignore end*/
+      // Theirs inserted
+
+      /*istanbul ignore start*/
+
+      /*istanbul ignore end*/
+
+      /*istanbul ignore start*/
+      (_hunk$lines2 =
+      /*istanbul ignore end*/
+      hunk.lines).push.apply(
+      /*istanbul ignore start*/
+      _hunk$lines2
+      /*istanbul ignore end*/
+      ,
+      /*istanbul ignore start*/
+      _toConsumableArray(
+      /*istanbul ignore end*/
+      collectChange(their)));
+    } else if (mineCurrent[0] === '-' && theirCurrent[0] === ' ') {
+      // Mine removed or edited
+      removal(hunk, mine, their);
+    } else if (theirCurrent[0] === '-' && mineCurrent[0] === ' ') {
+      // Their removed or edited
+      removal(hunk, their, mine, true);
+    } else if (mineCurrent === theirCurrent) {
+      // Context identity
+      hunk.lines.push(mineCurrent);
+      mine.index++;
+      their.index++;
+    } else {
+      // Context mismatch
+      conflict(hunk, collectChange(mine), collectChange(their));
+    }
+  } // Now push anything that may be remaining
+
+
+  insertTrailing(hunk, mine);
+  insertTrailing(hunk, their);
+  calcLineCount(hunk);
+}
+
+function mutualChange(hunk, mine, their) {
+  var myChanges = collectChange(mine),
+      theirChanges = collectChange(their);
+
+  if (allRemoves(myChanges) && allRemoves(theirChanges)) {
+    // Special case for remove changes that are supersets of one another
+    if (
+    /*istanbul ignore start*/
+    (0,
+    /*istanbul ignore end*/
+
+    /*istanbul ignore start*/
+    _array
+    /*istanbul ignore end*/
+    .
+    /*istanbul ignore start*/
+    arrayStartsWith)
+    /*istanbul ignore end*/
+    (myChanges, theirChanges) && skipRemoveSuperset(their, myChanges, myChanges.length - theirChanges.length)) {
+      /*istanbul ignore start*/
+      var _hunk$lines3;
+
+      /*istanbul ignore end*/
+
+      /*istanbul ignore start*/
+
+      /*istanbul ignore end*/
+
+      /*istanbul ignore start*/
+      (_hunk$lines3 =
+      /*istanbul ignore end*/
+      hunk.lines).push.apply(
+      /*istanbul ignore start*/
+      _hunk$lines3
+      /*istanbul ignore end*/
+      ,
+      /*istanbul ignore start*/
+      _toConsumableArray(
+      /*istanbul ignore end*/
+      myChanges));
+
+      return;
+    } else if (
+    /*istanbul ignore start*/
+    (0,
+    /*istanbul ignore end*/
+
+    /*istanbul ignore start*/
+    _array
+    /*istanbul ignore end*/
+    .
+    /*istanbul ignore start*/
+    arrayStartsWith)
+    /*istanbul ignore end*/
+    (theirChanges, myChanges) && skipRemoveSuperset(mine, theirChanges, theirChanges.length - myChanges.length)) {
+      /*istanbul ignore start*/
+      var _hunk$lines4;
+
+      /*istanbul ignore end*/
+
+      /*istanbul ignore start*/
+
+      /*istanbul ignore end*/
+
+      /*istanbul ignore start*/
+      (_hunk$lines4 =
+      /*istanbul ignore end*/
+      hunk.lines).push.apply(
+      /*istanbul ignore start*/
+      _hunk$lines4
+      /*istanbul ignore end*/
+      ,
+      /*istanbul ignore start*/
+      _toConsumableArray(
+      /*istanbul ignore end*/
+      theirChanges));
+
+      return;
+    }
+  } else if (
+  /*istanbul ignore start*/
+  (0,
+  /*istanbul ignore end*/
+
+  /*istanbul ignore start*/
+  _array
+  /*istanbul ignore end*/
+  .
+  /*istanbul ignore start*/
+  arrayEqual)
+  /*istanbul ignore end*/
+  (myChanges, theirChanges)) {
+    /*istanbul ignore start*/
+    var _hunk$lines5;
+
+    /*istanbul ignore end*/
+
+    /*istanbul ignore start*/
+
+    /*istanbul ignore end*/
+
+    /*istanbul ignore start*/
+    (_hunk$lines5 =
+    /*istanbul ignore end*/
+    hunk.lines).push.apply(
+    /*istanbul ignore start*/
+    _hunk$lines5
+    /*istanbul ignore end*/
+    ,
+    /*istanbul ignore start*/
+    _toConsumableArray(
+    /*istanbul ignore end*/
+    myChanges));
+
+    return;
+  }
+
+  conflict(hunk, myChanges, theirChanges);
+}
+
+function removal(hunk, mine, their, swap) {
+  var myChanges = collectChange(mine),
+      theirChanges = collectContext(their, myChanges);
+
+  if (theirChanges.merged) {
+    /*istanbul ignore start*/
+    var _hunk$lines6;
+
+    /*istanbul ignore end*/
+
+    /*istanbul ignore start*/
+
+    /*istanbul ignore end*/
+
+    /*istanbul ignore start*/
+    (_hunk$lines6 =
+    /*istanbul ignore end*/
+    hunk.lines).push.apply(
+    /*istanbul ignore start*/
+    _hunk$lines6
+    /*istanbul ignore end*/
+    ,
+    /*istanbul ignore start*/
+    _toConsumableArray(
+    /*istanbul ignore end*/
+    theirChanges.merged));
+  } else {
+    conflict(hunk, swap ? theirChanges : myChanges, swap ? myChanges : theirChanges);
+  }
+}
+
+function conflict(hunk, mine, their) {
+  hunk.conflict = true;
+  hunk.lines.push({
+    conflict: true,
+    mine: mine,
+    theirs: their
+  });
+}
+
+function insertLeading(hunk, insert, their) {
+  while (insert.offset < their.offset && insert.index < insert.lines.length) {
+    var line = insert.lines[insert.index++];
+    hunk.lines.push(line);
+    insert.offset++;
+  }
+}
+
+function insertTrailing(hunk, insert) {
+  while (insert.index < insert.lines.length) {
+    var line = insert.lines[insert.index++];
+    hunk.lines.push(line);
+  }
+}
+
+function collectChange(state) {
+  var ret = [],
+      operation = state.lines[state.index][0];
+
+  while (state.index < state.lines.length) {
+    var line = state.lines[state.index]; // Group additions that are immediately after subtractions and treat them as one "atomic" modify change.
+
+    if (operation === '-' && line[0] === '+') {
+      operation = '+';
+    }
+
+    if (operation === line[0]) {
+      ret.push(line);
+      state.index++;
+    } else {
+      break;
+    }
+  }
+
+  return ret;
+}
+
+function collectContext(state, matchChanges) {
+  var changes = [],
+      merged = [],
+      matchIndex = 0,
+      contextChanges = false,
+      conflicted = false;
+
+  while (matchIndex < matchChanges.length && state.index < state.lines.length) {
+    var change = state.lines[state.index],
+        match = matchChanges[matchIndex]; // Once we've hit our add, then we are done
+
+    if (match[0] === '+') {
+      break;
+    }
+
+    contextChanges = contextChanges || change[0] !== ' ';
+    merged.push(match);
+    matchIndex++; // Consume any additions in the other block as a conflict to attempt
+    // to pull in the remaining context after this
+
+    if (change[0] === '+') {
+      conflicted = true;
+
+      while (change[0] === '+') {
+        changes.push(change);
+        change = state.lines[++state.index];
+      }
+    }
+
+    if (match.substr(1) === change.substr(1)) {
+      changes.push(change);
+      state.index++;
+    } else {
+      conflicted = true;
+    }
+  }
+
+  if ((matchChanges[matchIndex] || '')[0] === '+' && contextChanges) {
+    conflicted = true;
+  }
+
+  if (conflicted) {
+    return changes;
+  }
+
+  while (matchIndex < matchChanges.length) {
+    merged.push(matchChanges[matchIndex++]);
+  }
+
+  return {
+    merged: merged,
+    changes: changes
+  };
+}
+
+function allRemoves(changes) {
+  return changes.reduce(function (prev, change) {
+    return prev && change[0] === '-';
+  }, true);
+}
+
+function skipRemoveSuperset(state, removeChanges, delta) {
+  for (var i = 0; i < delta; i++) {
+    var changeContent = removeChanges[removeChanges.length - delta + i].substr(1);
+
+    if (state.lines[state.index + i] !== ' ' + changeContent) {
+      return false;
+    }
+  }
+
+  state.index += delta;
+  return true;
+}
+
+function calcOldNewLineCount(lines) {
+  var oldLines = 0;
+  var newLines = 0;
+  lines.forEach(function (line) {
+    if (typeof line !== 'string') {
+      var myCount = calcOldNewLineCount(line.mine);
+      var theirCount = calcOldNewLineCount(line.theirs);
+
+      if (oldLines !== undefined) {
+        if (myCount.oldLines === theirCount.oldLines) {
+          oldLines += myCount.oldLines;
+        } else {
+          oldLines = undefined;
+        }
+      }
+
+      if (newLines !== undefined) {
+        if (myCount.newLines === theirCount.newLines) {
+          newLines += myCount.newLines;
+        } else {
+          newLines = undefined;
+        }
+      }
+    } else {
+      if (newLines !== undefined && (line[0] === '+' || line[0] === ' ')) {
+        newLines++;
+      }
+
+      if (oldLines !== undefined && (line[0] === '-' || line[0] === ' ')) {
+        oldLines++;
+      }
+    }
+  });
+  return {
+    oldLines: oldLines,
+    newLines: newLines
+  };
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"sources":["../../src/patch/merge.js"],"names":["calcLineCount","hunk","calcOldNewLineCount","lines","oldLines","newLines","undefined","merge","mine","theirs","base","loadPatch","ret","index","newFileName","fileNameChanged","oldFileName","oldHeader","newHeader","selectField","hunks","mineIndex","theirsIndex","mineOffset","theirsOffset","length","mineCurrent","oldStart","Infinity","theirsCurrent","hunkBefore","push","cloneHunk","mergedHunk","Math","min","newStart","mergeLines","param","test","parsePatch","Error","structuredPatch","patch","conflict","check","offset","mineLines","theirOffset","theirLines","their","insertLeading","theirCurrent","mutualChange","collectChange","removal","insertTrailing","myChanges","theirChanges","allRemoves","arrayStartsWith","skipRemoveSuperset","arrayEqual","swap","collectContext","merged","insert","line","state","operation","matchChanges","changes","matchIndex","contextChanges","conflicted","change","match","substr","reduce","prev","removeChanges","delta","i","changeContent","forEach","myCount","theirCount"],"mappings":";;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;;AACA;AAAA;AAAA;AAAA;AAAA;;AAEA;AAAA;AAAA;AAAA;AAAA;;;;;;;;;;;;;;;AAEO,SAASA,aAAT,CAAuBC,IAAvB,EAA6B;AAAA;AAAA;AAAA;AACLC,EAAAA,mBAAmB,CAACD,IAAI,CAACE,KAAN,CADd;AAAA,MAC3BC,QAD2B,wBAC3BA,QAD2B;AAAA,MACjBC,QADiB,wBACjBA,QADiB;;AAGlC,MAAID,QAAQ,KAAKE,SAAjB,EAA4B;AAC1BL,IAAAA,IAAI,CAACG,QAAL,GAAgBA,QAAhB;AACD,GAFD,MAEO;AACL,WAAOH,IAAI,CAACG,QAAZ;AACD;;AAED,MAAIC,QAAQ,KAAKC,SAAjB,EAA4B;AAC1BL,IAAAA,IAAI,CAACI,QAAL,GAAgBA,QAAhB;AACD,GAFD,MAEO;AACL,WAAOJ,IAAI,CAACI,QAAZ;AACD;AACF;;AAEM,SAASE,KAAT,CAAeC,IAAf,EAAqBC,MAArB,EAA6BC,IAA7B,EAAmC;AACxCF,EAAAA,IAAI,GAAGG,SAAS,CAACH,IAAD,EAAOE,IAAP,CAAhB;AACAD,EAAAA,MAAM,GAAGE,SAAS,CAACF,MAAD,EAASC,IAAT,CAAlB;AAEA,MAAIE,GAAG,GAAG,EAAV,CAJwC,CAMxC;AACA;AACA;;AACA,MAAIJ,IAAI,CAACK,KAAL,IAAcJ,MAAM,CAACI,KAAzB,EAAgC;AAC9BD,IAAAA,GAAG,CAACC,KAAJ,GAAYL,IAAI,CAACK,KAAL,IAAcJ,MAAM,CAACI,KAAjC;AACD;;AAED,MAAIL,IAAI,CAACM,WAAL,IAAoBL,MAAM,CAACK,WAA/B,EAA4C;AAC1C,QAAI,CAACC,eAAe,CAACP,IAAD,CAApB,EAA4B;AAC1B;AACAI,MAAAA,GAAG,CAACI,WAAJ,GAAkBP,MAAM,CAACO,WAAP,IAAsBR,IAAI,CAACQ,WAA7C;AACAJ,MAAAA,GAAG,CAACE,WAAJ,GAAkBL,MAAM,CAACK,WAAP,IAAsBN,IAAI,CAACM,WAA7C;AACAF,MAAAA,GAAG,CAACK,SAAJ,GAAgBR,MAAM,CAACQ,SAAP,IAAoBT,IAAI,CAACS,SAAzC;AACAL,MAAAA,GAAG,CAACM,SAAJ,GAAgBT,MAAM,CAACS,SAAP,IAAoBV,IAAI,CAACU,SAAzC;AACD,KAND,MAMO,IAAI,CAACH,eAAe,CAACN,MAAD,CAApB,EAA8B;AACnC;AACAG,MAAAA,GAAG,CAACI,WAAJ,GAAkBR,IAAI,CAACQ,WAAvB;AACAJ,MAAAA,GAAG,CAACE,WAAJ,GAAkBN,IAAI,CAACM,WAAvB;AACAF,MAAAA,GAAG,CAACK,SAAJ,GAAgBT,IAAI,CAACS,SAArB;AACAL,MAAAA,GAAG,CAACM,SAAJ,GAAgBV,IAAI,CAACU,SAArB;AACD,KANM,MAMA;AACL;AACAN,MAAAA,GAAG,CAACI,WAAJ,GAAkBG,WAAW,CAACP,GAAD,EAAMJ,IAAI,CAACQ,WAAX,EAAwBP,MAAM,CAACO,WAA/B,CAA7B;AACAJ,MAAAA,GAAG,CAACE,WAAJ,GAAkBK,WAAW,CAACP,GAAD,EAAMJ,IAAI,CAACM,WAAX,EAAwBL,MAAM,CAACK,WAA/B,CAA7B;AACAF,MAAAA,GAAG,CAACK,SAAJ,GAAgBE,WAAW,CAACP,GAAD,EAAMJ,IAAI,CAACS,SAAX,EAAsBR,MAAM,CAACQ,SAA7B,CAA3B;AACAL,MAAAA,GAAG,CAACM,SAAJ,GAAgBC,WAAW,CAACP,GAAD,EAAMJ,IAAI,CAACU,SAAX,EAAsBT,MAAM,CAACS,SAA7B,CAA3B;AACD;AACF;;AAEDN,EAAAA,GAAG,CAACQ,KAAJ,GAAY,EAAZ;AAEA,MAAIC,SAAS,GAAG,CAAhB;AAAA,MACIC,WAAW,GAAG,CADlB;AAAA,MAEIC,UAAU,GAAG,CAFjB;AAAA,MAGIC,YAAY,GAAG,CAHnB;;AAKA,SAAOH,SAAS,GAAGb,IAAI,CAACY,KAAL,CAAWK,MAAvB,IAAiCH,WAAW,GAAGb,MAAM,CAACW,KAAP,CAAaK,MAAnE,EAA2E;AACzE,QAAIC,WAAW,GAAGlB,IAAI,CAACY,KAAL,CAAWC,SAAX,KAAyB;AAACM,MAAAA,QAAQ,EAAEC;AAAX,KAA3C;AAAA,QACIC,aAAa,GAAGpB,MAAM,CAACW,KAAP,CAAaE,WAAb,KAA6B;AAACK,MAAAA,QAAQ,EAAE
C;AAAX,KADjD;;AAGA,QAAIE,UAAU,CAACJ,WAAD,EAAcG,aAAd,CAAd,EAA4C;AAC1C;AACAjB,MAAAA,GAAG,CAACQ,KAAJ,CAAUW,IAAV,CAAeC,SAAS,CAACN,WAAD,EAAcH,UAAd,CAAxB;AACAF,MAAAA,SAAS;AACTG,MAAAA,YAAY,IAAIE,WAAW,CAACrB,QAAZ,GAAuBqB,WAAW,CAACtB,QAAnD;AACD,KALD,MAKO,IAAI0B,UAAU,CAACD,aAAD,EAAgBH,WAAhB,CAAd,EAA4C;AACjD;AACAd,MAAAA,GAAG,CAACQ,KAAJ,CAAUW,IAAV,CAAeC,SAAS,CAACH,aAAD,EAAgBL,YAAhB,CAAxB;AACAF,MAAAA,WAAW;AACXC,MAAAA,UAAU,IAAIM,aAAa,CAACxB,QAAd,GAAyBwB,aAAa,CAACzB,QAArD;AACD,KALM,MAKA;AACL;AACA,UAAI6B,UAAU,GAAG;AACfN,QAAAA,QAAQ,EAAEO,IAAI,CAACC,GAAL,CAAST,WAAW,CAACC,QAArB,EAA+BE,aAAa,CAACF,QAA7C,CADK;AAEfvB,QAAAA,QAAQ,EAAE,CAFK;AAGfgC,QAAAA,QAAQ,EAAEF,IAAI,CAACC,GAAL,CAAST,WAAW,CAACU,QAAZ,GAAuBb,UAAhC,EAA4CM,aAAa,CAACF,QAAd,GAAyBH,YAArE,CAHK;AAIfnB,QAAAA,QAAQ,EAAE,CAJK;AAKfF,QAAAA,KAAK,EAAE;AALQ,OAAjB;AAOAkC,MAAAA,UAAU,CAACJ,UAAD,EAAaP,WAAW,CAACC,QAAzB,EAAmCD,WAAW,CAACvB,KAA/C,EAAsD0B,aAAa,CAACF,QAApE,EAA8EE,aAAa,CAAC1B,KAA5F,CAAV;AACAmB,MAAAA,WAAW;AACXD,MAAAA,SAAS;AAETT,MAAAA,GAAG,CAACQ,KAAJ,CAAUW,IAAV,CAAeE,UAAf;AACD;AACF;;AAED,SAAOrB,GAAP;AACD;;AAED,SAASD,SAAT,CAAmB2B,KAAnB,EAA0B5B,IAA1B,EAAgC;AAC9B,MAAI,OAAO4B,KAAP,KAAiB,QAArB,EAA+B;AAC7B,QAAK,MAAD,CAASC,IAAT,CAAcD,KAAd,KAA0B,UAAD,CAAaC,IAAb,CAAkBD,KAAlB,CAA7B,EAAwD;AACtD,aAAO;AAAA;AAAA;AAAA;;AAAAE;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAA;AAAA,SAAWF,KAAX,EAAkB,CAAlB;AAAP;AACD;;AAED,QAAI,CAAC5B,IAAL,EAAW;AACT,YAAM,IAAI+B,KAAJ,CAAU,kDAAV,CAAN;AACD;;AACD,WAAO;AAAA;AAAA;AAAA;;AAAAC;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAA;AAAA,OAAgBpC,SAAhB,EAA2BA,SAA3B,EAAsCI,IAAtC,EAA4C4B,KAA5C;AAAP;AACD;;AAED,SAAOA,KAAP;AACD;;AAED,SAASvB,eAAT,CAAyB4B,KAAzB,EAAgC;AAC9B,SAAOA,KAAK,CAAC7B,WAAN,IAAqB6B,KAAK,CAAC7B,WAAN,KAAsB6B,KAAK,CAAC3B,WAAxD;AACD;;AAED,SAASG,WAAT,CAAqBN,KAArB,EAA4BL,IAA5B,EAAkCC,MAAlC,EAA0C;AACxC,MAAID,IAAI,KAAKC,MAAb,EAAqB;AACnB,WAAOD,IAAP;AACD,GAFD,MAEO;AACLK,IAAAA,KAAK,CAAC+B,QAAN,GAAiB,IAAjB;AACA,WAAO;AAACpC,MAAAA,IAAI,EAAJA,IAAD;AAAOC,MAAAA,MAAM,EAANA;AAAP,KAAP;AACD;AACF;;AAED,SAASqB,UAAT,CAAoBS,IAApB,EAA0BM,KAA1B,EAAiC;AAC/B,SAAON,IAAI,CAACZ,QAAL,GAAgBkB,KAAK,CAAClB,QAAtB,IACDY,IAAI,CAACZ,QAAL,GAAgBY,IAAI,CAACnC,QAAtB,GAAkCyC,KAAK,CAAClB,QAD7C;AAED;;AAED,SAASK,SAAT,CAAmB/B,IAAnB,EAAyB6C,MAAzB,EAAiC;AAC/B,SAAO;AACLnB,IAAAA,QAAQ,EAAE1B,IAAI,CAAC0B,QADV;AACoBvB,IAAAA,QAAQ,EAAEH,IAAI,CAACG,QADnC;AAELgC,IAAAA,QAAQ,EAAEnC,IAAI,CAACmC,QAAL,GAAgBU,MAFrB;AAE6BzC,IAAAA,QAAQ,EAAEJ,IAAI,CAACI,QAF5C;AAGLF,IAAAA,KAAK,EAAEF,IAAI,CAACE;AAHP,GAAP;AAKD;;AAED,SAASkC,UAAT,CAAoBpC,IAApB,EAA0BsB,UAA1B,EAAsCwB,SAAtC,EAAiDC,WAAjD,EAA8DC,UAA9D,EAA0E;AACxE;AACA;AACA,MAAIzC,IAAI,GAAG;AAACsC,IAAAA,MAAM,EAAEvB,UAAT;AAAqBpB,IAAAA,KAAK,EAAE4C,SAA5B;AAAuClC,IAAAA,KAAK,EAAE;AAA9C,GAAX;AAAA,MACIqC,KAAK,GAAG;AAACJ,IAAAA,MAAM,EAAEE,WAAT;AAAsB7C,IAAAA,KAAK,EAAE8C,UAA7B;AAAyCpC,IAAAA,KAAK,EAAE;AAAhD,GADZ,CAHwE,CAMxE;;AACAsC,EAAAA,aAAa,CAAClD,IAAD,EAAOO,IAAP,EAAa0C,KAAb,CAAb;AACAC,EAAAA,aAAa,CAAClD,IAAD,EAAOiD,KAAP,EAAc1C,IAAd,CAAb,CARwE,CAUxE;;AACA,SAAOA,IAAI,CAACK,KAAL,GAAaL,IAAI,CAACL,KAAL,CAAWsB,MAAxB,IAAkCyB,KAAK,CAACrC,KAAN,GAAcqC,KAAK,CAAC/C,KAAN,CAAYsB,MAAnE,EAA2E;AACzE,QAAIC,WAAW,GAAGlB,IAAI,CAACL,KAAL,CAAWK,IAAI,CAACK,KAAhB,CAAlB;AAAA,QACIuC,YAAY,GAAGF,KAAK,CAAC/C,KAAN,CAAY+C,KAAK,CAACrC,KAAlB,CADnB;;AAGA,QAAI,CAACa,WAAW,CAAC,CAAD,CAAX,KAAmB,GAAnB,IAA0BA,WAAW,CAAC,CAAD,CAAX,KAAmB,GAA9C,MACI0B,YAAY,CAAC,CAAD,CAAZ,KAAoB,GAApB,IAA2BA,YAAY,CAAC,CAAD,CAAZ,KAAoB,GADnD,CAAJ,EAC6D;AAC3D;AACAC,MAAAA,YAAY,CAACpD,IAAD,EAAOO,IAAP,EAAa0C,KAAb,CAAZ;AACD,KAJD,MAIO,IAAIxB,WAAW,CAAC,CAAD,CAAX,KAAmB,GAAnB,IAA0B0B,YAAY,CAAC,CAAD,CAAZ,KAAoB,GAAlD,EAAuD;AAAA;AAAA;;AAAA;AAC5D;;AACA;;AAAA;;AAAA;AAAA;AAAA;AAA
AnD,MAAAA,IAAI,CAACE,KAAL,EAAW4B,IAAX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAoBuB,MAAAA,aAAa,CAAC9C,IAAD,CAAjC;AACD,KAHM,MAGA,IAAI4C,YAAY,CAAC,CAAD,CAAZ,KAAoB,GAApB,IAA2B1B,WAAW,CAAC,CAAD,CAAX,KAAmB,GAAlD,EAAuD;AAAA;AAAA;;AAAA;AAC5D;;AACA;;AAAA;;AAAA;AAAA;AAAA;AAAAzB,MAAAA,IAAI,CAACE,KAAL,EAAW4B,IAAX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAoBuB,MAAAA,aAAa,CAACJ,KAAD,CAAjC;AACD,KAHM,MAGA,IAAIxB,WAAW,CAAC,CAAD,CAAX,KAAmB,GAAnB,IAA0B0B,YAAY,CAAC,CAAD,CAAZ,KAAoB,GAAlD,EAAuD;AAC5D;AACAG,MAAAA,OAAO,CAACtD,IAAD,EAAOO,IAAP,EAAa0C,KAAb,CAAP;AACD,KAHM,MAGA,IAAIE,YAAY,CAAC,CAAD,CAAZ,KAAoB,GAApB,IAA2B1B,WAAW,CAAC,CAAD,CAAX,KAAmB,GAAlD,EAAuD;AAC5D;AACA6B,MAAAA,OAAO,CAACtD,IAAD,EAAOiD,KAAP,EAAc1C,IAAd,EAAoB,IAApB,CAAP;AACD,KAHM,MAGA,IAAIkB,WAAW,KAAK0B,YAApB,EAAkC;AACvC;AACAnD,MAAAA,IAAI,CAACE,KAAL,CAAW4B,IAAX,CAAgBL,WAAhB;AACAlB,MAAAA,IAAI,CAACK,KAAL;AACAqC,MAAAA,KAAK,CAACrC,KAAN;AACD,KALM,MAKA;AACL;AACA+B,MAAAA,QAAQ,CAAC3C,IAAD,EAAOqD,aAAa,CAAC9C,IAAD,CAApB,EAA4B8C,aAAa,CAACJ,KAAD,CAAzC,CAAR;AACD;AACF,GAxCuE,CA0CxE;;;AACAM,EAAAA,cAAc,CAACvD,IAAD,EAAOO,IAAP,CAAd;AACAgD,EAAAA,cAAc,CAACvD,IAAD,EAAOiD,KAAP,CAAd;AAEAlD,EAAAA,aAAa,CAACC,IAAD,CAAb;AACD;;AAED,SAASoD,YAAT,CAAsBpD,IAAtB,EAA4BO,IAA5B,EAAkC0C,KAAlC,EAAyC;AACvC,MAAIO,SAAS,GAAGH,aAAa,CAAC9C,IAAD,CAA7B;AAAA,MACIkD,YAAY,GAAGJ,aAAa,CAACJ,KAAD,CADhC;;AAGA,MAAIS,UAAU,CAACF,SAAD,CAAV,IAAyBE,UAAU,CAACD,YAAD,CAAvC,EAAuD;AACrD;AACA;AAAI;AAAA;AAAA;;AAAAE;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAA;AAAA,KAAgBH,SAAhB,EAA2BC,YAA3B,KACGG,kBAAkB,CAACX,KAAD,EAAQO,SAAR,EAAmBA,SAAS,CAAChC,MAAV,GAAmBiC,YAAY,CAACjC,MAAnD,CADzB,EACqF;AAAA;AAAA;;AAAA;;AACnF;;AAAA;;AAAA;AAAA;AAAA;AAAAxB,MAAAA,IAAI,CAACE,KAAL,EAAW4B,IAAX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAoB0B,MAAAA,SAApB;;AACA;AACD,KAJD,MAIO;AAAI;AAAA;AAAA;;AAAAG;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAA;AAAA,KAAgBF,YAAhB,EAA8BD,SAA9B,KACJI,kBAAkB,CAACrD,IAAD,EAAOkD,YAAP,EAAqBA,YAAY,CAACjC,MAAb,GAAsBgC,SAAS,CAAChC,MAArD,CADlB,EACgF;AAAA;AAAA;;AAAA;;AACrF;;AAAA;;AAAA;AAAA;AAAA;AAAAxB,MAAAA,IAAI,CAACE,KAAL,EAAW4B,IAAX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAoB2B,MAAAA,YAApB;;AACA;AACD;AACF,GAXD,MAWO;AAAI;AAAA;AAAA;;AAAAI;AAAAA;AAAAA;AAAAA;AAAAA;AAAAA;AAAA;AAAA,GAAWL,SAAX,EAAsBC,YAAtB,CAAJ,EAAyC;AAAA;AAAA;;AAAA;;AAC9C;;AAAA;;AAAA;AAAA;AAAA;AAAAzD,IAAAA,IAAI,CAACE,KAAL,EAAW4B,IAAX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAoB0B,IAAAA,SAApB;;AACA;AACD;;AAEDb,EAAAA,QAAQ,CAAC3C,IAAD,EAAOwD,SAAP,EAAkBC,YAAlB,CAAR;AACD;;AAED,SAASH,OAAT,CAAiBtD,IAAjB,EAAuBO,IAAvB,EAA6B0C,KAA7B,EAAoCa,IAApC,EAA0C;AACxC,MAAIN,SAAS,GAAGH,aAAa,CAAC9C,IAAD,CAA7B;AAAA,MACIkD,YAAY,GAAGM,cAAc,CAACd,KAAD,EAAQO,SAAR,CADjC;;AAEA,MAAIC,YAAY,CAACO,MAAjB,EAAyB;AAAA;AAAA;;AAAA;;AACvB;;AAAA;;AAAA;AAAA;AAAA;AAAAhE,IAAAA,IAAI,CAACE,KAAL,EAAW4B,IAAX;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAoB2B,IAAAA,YAAY,CAACO,MAAjC;AACD,GAFD,MAEO;AACLrB,IAAAA,QAAQ,CAAC3C,IAAD,EAAO8D,IAAI,GAAGL,YAAH,GAAkBD,SAA7B,EAAwCM,IAAI,GAAGN,SAAH,GAAeC,YAA3D,CAAR;AACD;AACF;;AAED,SAASd,QAAT,CAAkB3C,IAAlB,EAAwBO,IAAxB,EAA8B0C,KAA9B,EAAqC;AACnCjD,EAAAA,IAAI,CAAC2C,QAAL,GAAgB,IAAhB;AACA3C,EAAAA,IAAI,CAACE,KAAL,CAAW4B,IAAX,CAAgB;AACda,IAAAA,QAAQ,EAAE,IADI;AAEdpC,IAAAA,IAAI,EAAEA,IAFQ;AAGdC,IAAAA,MAAM,EAAEyC;AAHM,GAAhB;AAKD;;AAED,SAASC,aAAT,CAAuBlD,IAAvB,EAA6BiE,MAA7B,EAAqChB,KAArC,EAA4C;AAC1C,SAAOgB,MAAM,CAACpB,MAAP,GAAgBI,KAAK,CAACJ,MAAtB,IAAgCoB,MAAM,CAACrD,KAAP,GAAeqD,MAAM,CAAC/D,KAAP,CAAasB,MAAnE,EAA2E;AACzE,QAAI0C,IAAI,GAAGD,MAAM,CAAC/D,KAAP,CAAa+D,MAAM,CAACrD,KAAP,EAAb,CAAX;AACAZ,IAAAA,IAAI,CAACE,KAAL,CAAW4B,IAAX,CAAgBoC,IAAhB;AACAD,IAAAA,MAAM,CAACpB,MAAP;AACD;AACF;;AACD,SAASU,cAAT,CAAwBvD,IAAxB,EAA8BiE,MAA9B,EAAsC;AACp
C,SAAOA,MAAM,CAACrD,KAAP,GAAeqD,MAAM,CAAC/D,KAAP,CAAasB,MAAnC,EAA2C;AACzC,QAAI0C,IAAI,GAAGD,MAAM,CAAC/D,KAAP,CAAa+D,MAAM,CAACrD,KAAP,EAAb,CAAX;AACAZ,IAAAA,IAAI,CAACE,KAAL,CAAW4B,IAAX,CAAgBoC,IAAhB;AACD;AACF;;AAED,SAASb,aAAT,CAAuBc,KAAvB,EAA8B;AAC5B,MAAIxD,GAAG,GAAG,EAAV;AAAA,MACIyD,SAAS,GAAGD,KAAK,CAACjE,KAAN,CAAYiE,KAAK,CAACvD,KAAlB,EAAyB,CAAzB,CADhB;;AAEA,SAAOuD,KAAK,CAACvD,KAAN,GAAcuD,KAAK,CAACjE,KAAN,CAAYsB,MAAjC,EAAyC;AACvC,QAAI0C,IAAI,GAAGC,KAAK,CAACjE,KAAN,CAAYiE,KAAK,CAACvD,KAAlB,CAAX,CADuC,CAGvC;;AACA,QAAIwD,SAAS,KAAK,GAAd,IAAqBF,IAAI,CAAC,CAAD,CAAJ,KAAY,GAArC,EAA0C;AACxCE,MAAAA,SAAS,GAAG,GAAZ;AACD;;AAED,QAAIA,SAAS,KAAKF,IAAI,CAAC,CAAD,CAAtB,EAA2B;AACzBvD,MAAAA,GAAG,CAACmB,IAAJ,CAASoC,IAAT;AACAC,MAAAA,KAAK,CAACvD,KAAN;AACD,KAHD,MAGO;AACL;AACD;AACF;;AAED,SAAOD,GAAP;AACD;;AACD,SAASoD,cAAT,CAAwBI,KAAxB,EAA+BE,YAA/B,EAA6C;AAC3C,MAAIC,OAAO,GAAG,EAAd;AAAA,MACIN,MAAM,GAAG,EADb;AAAA,MAEIO,UAAU,GAAG,CAFjB;AAAA,MAGIC,cAAc,GAAG,KAHrB;AAAA,MAIIC,UAAU,GAAG,KAJjB;;AAKA,SAAOF,UAAU,GAAGF,YAAY,CAAC7C,MAA1B,IACE2C,KAAK,CAACvD,KAAN,GAAcuD,KAAK,CAACjE,KAAN,CAAYsB,MADnC,EAC2C;AACzC,QAAIkD,MAAM,GAAGP,KAAK,CAACjE,KAAN,CAAYiE,KAAK,CAACvD,KAAlB,CAAb;AAAA,QACI+D,KAAK,GAAGN,YAAY,CAACE,UAAD,CADxB,CADyC,CAIzC;;AACA,QAAII,KAAK,CAAC,CAAD,CAAL,KAAa,GAAjB,EAAsB;AACpB;AACD;;AAEDH,IAAAA,cAAc,GAAGA,cAAc,IAAIE,MAAM,CAAC,CAAD,CAAN,KAAc,GAAjD;AAEAV,IAAAA,MAAM,CAAClC,IAAP,CAAY6C,KAAZ;AACAJ,IAAAA,UAAU,GAZ+B,CAczC;AACA;;AACA,QAAIG,MAAM,CAAC,CAAD,CAAN,KAAc,GAAlB,EAAuB;AACrBD,MAAAA,UAAU,GAAG,IAAb;;AAEA,aAAOC,MAAM,CAAC,CAAD,CAAN,KAAc,GAArB,EAA0B;AACxBJ,QAAAA,OAAO,CAACxC,IAAR,CAAa4C,MAAb;AACAA,QAAAA,MAAM,GAAGP,KAAK,CAACjE,KAAN,CAAY,EAAEiE,KAAK,CAACvD,KAApB,CAAT;AACD;AACF;;AAED,QAAI+D,KAAK,CAACC,MAAN,CAAa,CAAb,MAAoBF,MAAM,CAACE,MAAP,CAAc,CAAd,CAAxB,EAA0C;AACxCN,MAAAA,OAAO,CAACxC,IAAR,CAAa4C,MAAb;AACAP,MAAAA,KAAK,CAACvD,KAAN;AACD,KAHD,MAGO;AACL6D,MAAAA,UAAU,GAAG,IAAb;AACD;AACF;;AAED,MAAI,CAACJ,YAAY,CAACE,UAAD,CAAZ,IAA4B,EAA7B,EAAiC,CAAjC,MAAwC,GAAxC,IACGC,cADP,EACuB;AACrBC,IAAAA,UAAU,GAAG,IAAb;AACD;;AAED,MAAIA,UAAJ,EAAgB;AACd,WAAOH,OAAP;AACD;;AAED,SAAOC,UAAU,GAAGF,YAAY,CAAC7C,MAAjC,EAAyC;AACvCwC,IAAAA,MAAM,CAAClC,IAAP,CAAYuC,YAAY,CAACE,UAAU,EAAX,CAAxB;AACD;;AAED,SAAO;AACLP,IAAAA,MAAM,EAANA,MADK;AAELM,IAAAA,OAAO,EAAPA;AAFK,GAAP;AAID;;AAED,SAASZ,UAAT,CAAoBY,OAApB,EAA6B;AAC3B,SAAOA,OAAO,CAACO,MAAR,CAAe,UAASC,IAAT,EAAeJ,MAAf,EAAuB;AAC3C,WAAOI,IAAI,IAAIJ,MAAM,CAAC,CAAD,CAAN,KAAc,GAA7B;AACD,GAFM,EAEJ,IAFI,CAAP;AAGD;;AACD,SAASd,kBAAT,CAA4BO,KAA5B,EAAmCY,aAAnC,EAAkDC,KAAlD,EAAyD;AACvD,OAAK,IAAIC,CAAC,GAAG,CAAb,EAAgBA,CAAC,GAAGD,KAApB,EAA2BC,CAAC,EAA5B,EAAgC;AAC9B,QAAIC,aAAa,GAAGH,aAAa,CAACA,aAAa,CAACvD,MAAd,GAAuBwD,KAAvB,GAA+BC,CAAhC,CAAb,CAAgDL,MAAhD,CAAuD,CAAvD,CAApB;;AACA,QAAIT,KAAK,CAACjE,KAAN,CAAYiE,KAAK,CAACvD,KAAN,GAAcqE,CAA1B,MAAiC,MAAMC,aAA3C,EAA0D;AACxD,aAAO,KAAP;AACD;AACF;;AAEDf,EAAAA,KAAK,CAACvD,KAAN,IAAeoE,KAAf;AACA,SAAO,IAAP;AACD;;AAED,SAAS/E,mBAAT,CAA6BC,KAA7B,EAAoC;AAClC,MAAIC,QAAQ,GAAG,CAAf;AACA,MAAIC,QAAQ,GAAG,CAAf;AAEAF,EAAAA,KAAK,CAACiF,OAAN,CAAc,UAASjB,IAAT,EAAe;AAC3B,QAAI,OAAOA,IAAP,KAAgB,QAApB,EAA8B;AAC5B,UAAIkB,OAAO,GAAGnF,mBAAmB,CAACiE,IAAI,CAAC3D,IAAN,CAAjC;AACA,UAAI8E,UAAU,GAAGpF,mBAAmB,CAACiE,IAAI,CAAC1D,MAAN,CAApC;;AAEA,UAAIL,QAAQ,KAAKE,SAAjB,EAA4B;AAC1B,YAAI+E,OAAO,CAACjF,QAAR,KAAqBkF,UAAU,CAAClF,QAApC,EAA8C;AAC5CA,UAAAA,QAAQ,IAAIiF,OAAO,CAACjF,QAApB;AACD,SAFD,MAEO;AACLA,UAAAA,QAAQ,GAAGE,SAAX;AACD;AACF;;AAED,UAAID,QAAQ,KAAKC,SAAjB,EAA4B;AAC1B,YAAI+E,OAAO,CAAChF,QAAR,KAAqBiF,UAAU,CAACjF,QAApC,EAA8C;AAC5CA,UAAAA,QAAQ,IAAIgF,OAAO,CAAChF,QAApB;AACD,SAFD,MAEO;AACLA,UAAAA,QAAQ,GAAGC,SAAX;AACD;AACF;AACF,KAnBD,MAmB
O;AACL,UAAID,QAAQ,KAAKC,SAAb,KAA2B6D,IAAI,CAAC,CAAD,CAAJ,KAAY,GAAZ,IAAmBA,IAAI,CAAC,CAAD,CAAJ,KAAY,GAA1D,CAAJ,EAAoE;AAClE9D,QAAAA,QAAQ;AACT;;AACD,UAAID,QAAQ,KAAKE,SAAb,KAA2B6D,IAAI,CAAC,CAAD,CAAJ,KAAY,GAAZ,IAAmBA,IAAI,CAAC,CAAD,CAAJ,KAAY,GAA1D,CAAJ,EAAoE;AAClE/D,QAAAA,QAAQ;AACT;AACF;AACF,GA5BD;AA8BA,SAAO;AAACA,IAAAA,QAAQ,EAARA,QAAD;AAAWC,IAAAA,QAAQ,EAARA;AAAX,GAAP;AACD","sourcesContent":["import {structuredPatch} from './create';\nimport {parsePatch} from './parse';\n\nimport {arrayEqual, arrayStartsWith} from '../util/array';\n\nexport function calcLineCount(hunk) {\n  const {oldLines, newLines} = calcOldNewLineCount(hunk.lines);\n\n  if (oldLines !== undefined) {\n    hunk.oldLines = oldLines;\n  } else {\n    delete hunk.oldLines;\n  }\n\n  if (newLines !== undefined) {\n    hunk.newLines = newLines;\n  } else {\n    delete hunk.newLines;\n  }\n}\n\nexport function merge(mine, theirs, base) {\n  mine = loadPatch(mine, base);\n  theirs = loadPatch(theirs, base);\n\n  let ret = {};\n\n  // For index we just let it pass through as it doesn't have any necessary meaning.\n  // Leaving sanity checks on this to the API consumer that may know more about the\n  // meaning in their own context.\n  if (mine.index || theirs.index) {\n    ret.index = mine.index || theirs.index;\n  }\n\n  if (mine.newFileName || theirs.newFileName) {\n    if (!fileNameChanged(mine)) {\n      // No header or no change in ours, use theirs (and ours if theirs does not exist)\n      ret.oldFileName = theirs.oldFileName || mine.oldFileName;\n      ret.newFileName = theirs.newFileName || mine.newFileName;\n      ret.oldHeader = theirs.oldHeader || mine.oldHeader;\n      ret.newHeader = theirs.newHeader || mine.newHeader;\n    } else if (!fileNameChanged(theirs)) {\n      // No header or no change in theirs, use ours\n      ret.oldFileName = mine.oldFileName;\n      ret.newFileName = mine.newFileName;\n      ret.oldHeader = mine.oldHeader;\n      ret.newHeader = mine.newHeader;\n    } else {\n      // Both changed... 
figure it out\n      ret.oldFileName = selectField(ret, mine.oldFileName, theirs.oldFileName);\n      ret.newFileName = selectField(ret, mine.newFileName, theirs.newFileName);\n      ret.oldHeader = selectField(ret, mine.oldHeader, theirs.oldHeader);\n      ret.newHeader = selectField(ret, mine.newHeader, theirs.newHeader);\n    }\n  }\n\n  ret.hunks = [];\n\n  let mineIndex = 0,\n      theirsIndex = 0,\n      mineOffset = 0,\n      theirsOffset = 0;\n\n  while (mineIndex < mine.hunks.length || theirsIndex < theirs.hunks.length) {\n    let mineCurrent = mine.hunks[mineIndex] || {oldStart: Infinity},\n        theirsCurrent = theirs.hunks[theirsIndex] || {oldStart: Infinity};\n\n    if (hunkBefore(mineCurrent, theirsCurrent)) {\n      // This patch does not overlap with any of the others, yay.\n      ret.hunks.push(cloneHunk(mineCurrent, mineOffset));\n      mineIndex++;\n      theirsOffset += mineCurrent.newLines - mineCurrent.oldLines;\n    } else if (hunkBefore(theirsCurrent, mineCurrent)) {\n      // This patch does not overlap with any of the others, yay.\n      ret.hunks.push(cloneHunk(theirsCurrent, theirsOffset));\n      theirsIndex++;\n      mineOffset += theirsCurrent.newLines - theirsCurrent.oldLines;\n    } else {\n      // Overlap, merge as best we can\n      let mergedHunk = {\n        oldStart: Math.min(mineCurrent.oldStart, theirsCurrent.oldStart),\n        oldLines: 0,\n        newStart: Math.min(mineCurrent.newStart + mineOffset, theirsCurrent.oldStart + theirsOffset),\n        newLines: 0,\n        lines: []\n      };\n      mergeLines(mergedHunk, mineCurrent.oldStart, mineCurrent.lines, theirsCurrent.oldStart, theirsCurrent.lines);\n      theirsIndex++;\n      mineIndex++;\n\n      ret.hunks.push(mergedHunk);\n    }\n  }\n\n  return ret;\n}\n\nfunction loadPatch(param, base) {\n  if (typeof param === 'string') {\n    if ((/^@@/m).test(param) || ((/^Index:/m).test(param))) {\n      return parsePatch(param)[0];\n    }\n\n    if (!base) {\n      throw new Error('Must provide a base reference or pass in a patch');\n    }\n    return structuredPatch(undefined, undefined, base, param);\n  }\n\n  return param;\n}\n\nfunction fileNameChanged(patch) {\n  return patch.newFileName && patch.newFileName !== patch.oldFileName;\n}\n\nfunction selectField(index, mine, theirs) {\n  if (mine === theirs) {\n    return mine;\n  } else {\n    index.conflict = true;\n    return {mine, theirs};\n  }\n}\n\nfunction hunkBefore(test, check) {\n  return test.oldStart < check.oldStart\n    && (test.oldStart + test.oldLines) < check.oldStart;\n}\n\nfunction cloneHunk(hunk, offset) {\n  return {\n    oldStart: hunk.oldStart, oldLines: hunk.oldLines,\n    newStart: hunk.newStart + offset, newLines: hunk.newLines,\n    lines: hunk.lines\n  };\n}\n\nfunction mergeLines(hunk, mineOffset, mineLines, theirOffset, theirLines) {\n  // This will generally result in a conflicted hunk, but there are cases where the context\n  // is the only overlap where we can successfully merge the content here.\n  let mine = {offset: mineOffset, lines: mineLines, index: 0},\n      their = {offset: theirOffset, lines: theirLines, index: 0};\n\n  // Handle any leading content\n  insertLeading(hunk, mine, their);\n  insertLeading(hunk, their, mine);\n\n  // Now in the overlap content. 
Scan through and select the best changes from each.\n  while (mine.index < mine.lines.length && their.index < their.lines.length) {\n    let mineCurrent = mine.lines[mine.index],\n        theirCurrent = their.lines[their.index];\n\n    if ((mineCurrent[0] === '-' || mineCurrent[0] === '+')\n        && (theirCurrent[0] === '-' || theirCurrent[0] === '+')) {\n      // Both modified ...\n      mutualChange(hunk, mine, their);\n    } else if (mineCurrent[0] === '+' && theirCurrent[0] === ' ') {\n      // Mine inserted\n      hunk.lines.push(... collectChange(mine));\n    } else if (theirCurrent[0] === '+' && mineCurrent[0] === ' ') {\n      // Theirs inserted\n      hunk.lines.push(... collectChange(their));\n    } else if (mineCurrent[0] === '-' && theirCurrent[0] === ' ') {\n      // Mine removed or edited\n      removal(hunk, mine, their);\n    } else if (theirCurrent[0] === '-' && mineCurrent[0] === ' ') {\n      // Their removed or edited\n      removal(hunk, their, mine, true);\n    } else if (mineCurrent === theirCurrent) {\n      // Context identity\n      hunk.lines.push(mineCurrent);\n      mine.index++;\n      their.index++;\n    } else {\n      // Context mismatch\n      conflict(hunk, collectChange(mine), collectChange(their));\n    }\n  }\n\n  // Now push anything that may be remaining\n  insertTrailing(hunk, mine);\n  insertTrailing(hunk, their);\n\n  calcLineCount(hunk);\n}\n\nfunction mutualChange(hunk, mine, their) {\n  let myChanges = collectChange(mine),\n      theirChanges = collectChange(their);\n\n  if (allRemoves(myChanges) && allRemoves(theirChanges)) {\n    // Special case for remove changes that are supersets of one another\n    if (arrayStartsWith(myChanges, theirChanges)\n        && skipRemoveSuperset(their, myChanges, myChanges.length - theirChanges.length)) {\n      hunk.lines.push(... myChanges);\n      return;\n    } else if (arrayStartsWith(theirChanges, myChanges)\n        && skipRemoveSuperset(mine, theirChanges, theirChanges.length - myChanges.length)) {\n      hunk.lines.push(... theirChanges);\n      return;\n    }\n  } else if (arrayEqual(myChanges, theirChanges)) {\n    hunk.lines.push(... myChanges);\n    return;\n  }\n\n  conflict(hunk, myChanges, theirChanges);\n}\n\nfunction removal(hunk, mine, their, swap) {\n  let myChanges = collectChange(mine),\n      theirChanges = collectContext(their, myChanges);\n  if (theirChanges.merged) {\n    hunk.lines.push(... theirChanges.merged);\n  } else {\n    conflict(hunk, swap ? theirChanges : myChanges, swap ? 
myChanges : theirChanges);\n  }\n}\n\nfunction conflict(hunk, mine, their) {\n  hunk.conflict = true;\n  hunk.lines.push({\n    conflict: true,\n    mine: mine,\n    theirs: their\n  });\n}\n\nfunction insertLeading(hunk, insert, their) {\n  while (insert.offset < their.offset && insert.index < insert.lines.length) {\n    let line = insert.lines[insert.index++];\n    hunk.lines.push(line);\n    insert.offset++;\n  }\n}\nfunction insertTrailing(hunk, insert) {\n  while (insert.index < insert.lines.length) {\n    let line = insert.lines[insert.index++];\n    hunk.lines.push(line);\n  }\n}\n\nfunction collectChange(state) {\n  let ret = [],\n      operation = state.lines[state.index][0];\n  while (state.index < state.lines.length) {\n    let line = state.lines[state.index];\n\n    // Group additions that are immediately after subtractions and treat them as one \"atomic\" modify change.\n    if (operation === '-' && line[0] === '+') {\n      operation = '+';\n    }\n\n    if (operation === line[0]) {\n      ret.push(line);\n      state.index++;\n    } else {\n      break;\n    }\n  }\n\n  return ret;\n}\nfunction collectContext(state, matchChanges) {\n  let changes = [],\n      merged = [],\n      matchIndex = 0,\n      contextChanges = false,\n      conflicted = false;\n  while (matchIndex < matchChanges.length\n        && state.index < state.lines.length) {\n    let change = state.lines[state.index],\n        match = matchChanges[matchIndex];\n\n    // Once we've hit our add, then we are done\n    if (match[0] === '+') {\n      break;\n    }\n\n    contextChanges = contextChanges || change[0] !== ' ';\n\n    merged.push(match);\n    matchIndex++;\n\n    // Consume any additions in the other block as a conflict to attempt\n    // to pull in the remaining context after this\n    if (change[0] === '+') {\n      conflicted = true;\n\n      while (change[0] === '+') {\n        changes.push(change);\n        change = state.lines[++state.index];\n      }\n    }\n\n    if (match.substr(1) === change.substr(1)) {\n      changes.push(change);\n      state.index++;\n    } else {\n      conflicted = true;\n    }\n  }\n\n  if ((matchChanges[matchIndex] || '')[0] === '+'\n      && contextChanges) {\n    conflicted = true;\n  }\n\n  if (conflicted) {\n    return changes;\n  }\n\n  while (matchIndex < matchChanges.length) {\n    merged.push(matchChanges[matchIndex++]);\n  }\n\n  return {\n    merged,\n    changes\n  };\n}\n\nfunction allRemoves(changes) {\n  return changes.reduce(function(prev, change) {\n    return prev && change[0] === '-';\n  }, true);\n}\nfunction skipRemoveSuperset(state, removeChanges, delta) {\n  for (let i = 0; i < delta; i++) {\n    let changeContent = removeChanges[removeChanges.length - delta + i].substr(1);\n    if (state.lines[state.index + i] !== ' ' + changeContent) {\n      return false;\n    }\n  }\n\n  state.index += delta;\n  return true;\n}\n\nfunction calcOldNewLineCount(lines) {\n  let oldLines = 0;\n  let newLines = 0;\n\n  lines.forEach(function(line) {\n    if (typeof line !== 'string') {\n      let myCount = calcOldNewLineCount(line.mine);\n      let theirCount = calcOldNewLineCount(line.theirs);\n\n      if (oldLines !== undefined) {\n        if (myCount.oldLines === theirCount.oldLines) {\n          oldLines += myCount.oldLines;\n        } else {\n          oldLines = undefined;\n        }\n      }\n\n      if (newLines !== undefined) {\n        if (myCount.newLines === theirCount.newLines) {\n          newLines += myCount.newLines;\n        } else {\n          
newLines = undefined;\n        }\n      }\n    } else {\n      if (newLines !== undefined && (line[0] === '+' || line[0] === ' ')) {\n        newLines++;\n      }\n      if (oldLines !== undefined && (line[0] === '-' || line[0] === ' ')) {\n        oldLines++;\n      }\n    }\n  });\n\n  return {oldLines, newLines};\n}\n"]}
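The vendored merge.js above is the transpiled build of jsdiff's three-way patch merge: merge(mine, theirs, base) loads each argument through loadPatch (a unified-diff string, a structured patch object, or raw content diffed against base), walks both hunk lists, copies non-overlapping hunks with adjusted offsets, and hands overlapping hunks to mergeLines, which either interleaves the changes or records {conflict: true, mine, theirs} entries. The sketch below is illustrative only and not part of the patch; it assumes the vendored copy resolves as require('diff'), and the file name and contents are hypothetical.

// Illustrative sketch only (not part of this patch): exercising the vendored
// jsdiff merge API. The file name and strings below are hypothetical.
const {createTwoFilesPatch, merge} = require('diff');

const base   = 'line one\nline two\n';
const ours   = createTwoFilesPatch('a.txt', 'a.txt', base, 'line ONE\nline two\n');
const theirs = createTwoFilesPatch('a.txt', 'a.txt', base, 'line one\nline TWO\n');

// merge() accepts unified-diff strings (routed through parsePatch) or
// structured patches; here the two hunks overlap, so mergeLines combines the
// non-conflicting edits into one hunk instead of flagging a conflict.
const merged = merge(ours, theirs, base);
const hunk = merged.hunks[0];
console.log(hunk.conflict ? 'conflict' : hunk.lines.join('\n'));
// → -line one
//   +line ONE
//   -line two
//   +line TWO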
diff --git a/_extensions/d2/node_modules/diff/lib/patch/parse.js b/_extensions/d2/node_modules/diff/lib/patch/parse.js
new file mode 100644
index 00000000..f1501048
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/patch/parse.js
@@ -0,0 +1,167 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.parsePatch = parsePatch;
+
+/*istanbul ignore end*/
+function parsePatch(uniDiff) {
+  /*istanbul ignore start*/
+  var
+  /*istanbul ignore end*/
+  options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {};
+  var diffstr = uniDiff.split(/\r\n|[\n\v\f\r\x85]/),
+      delimiters = uniDiff.match(/\r\n|[\n\v\f\r\x85]/g) || [],
+      list = [],
+      i = 0;
+
+  function parseIndex() {
+    var index = {};
+    list.push(index); // Parse diff metadata
+
+    while (i < diffstr.length) {
+      var line = diffstr[i]; // File header found, end parsing diff metadata
+
+      if (/^(\-\-\-|\+\+\+|@@)\s/.test(line)) {
+        break;
+      } // Diff index
+
+
+      var header = /^(?:Index:|diff(?: -r \w+)+)\s+(.+?)\s*$/.exec(line);
+
+      if (header) {
+        index.index = header[1];
+      }
+
+      i++;
+    } // Parse file headers if they are defined. Unified diff requires them, but
+    // there's no technical issues to have an isolated hunk without file header
+
+
+    parseFileHeader(index);
+    parseFileHeader(index); // Parse hunks
+
+    index.hunks = [];
+
+    while (i < diffstr.length) {
+      var _line = diffstr[i];
+
+      if (/^(Index:|diff|\-\-\-|\+\+\+)\s/.test(_line)) {
+        break;
+      } else if (/^@@/.test(_line)) {
+        index.hunks.push(parseHunk());
+      } else if (_line && options.strict) {
+        // Ignore unexpected content unless in strict mode
+        throw new Error('Unknown line ' + (i + 1) + ' ' + JSON.stringify(_line));
+      } else {
+        i++;
+      }
+    }
+  } // Parses the --- and +++ headers, if none are found, no lines
+  // are consumed.
+
+
+  function parseFileHeader(index) {
+    var fileHeader = /^(---|\+\+\+)\s+(.*)$/.exec(diffstr[i]);
+
+    if (fileHeader) {
+      var keyPrefix = fileHeader[1] === '---' ? 'old' : 'new';
+      var data = fileHeader[2].split('\t', 2);
+      var fileName = data[0].replace(/\\\\/g, '\\');
+
+      if (/^".*"$/.test(fileName)) {
+        fileName = fileName.substr(1, fileName.length - 2);
+      }
+
+      index[keyPrefix + 'FileName'] = fileName;
+      index[keyPrefix + 'Header'] = (data[1] || '').trim();
+      i++;
+    }
+  } // Parses a hunk
+  // This assumes that we are at the start of a hunk.
+
+
+  function parseHunk() {
+    var chunkHeaderIndex = i,
+        chunkHeaderLine = diffstr[i++],
+        chunkHeader = chunkHeaderLine.split(/@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@/);
+    var hunk = {
+      oldStart: +chunkHeader[1],
+      oldLines: typeof chunkHeader[2] === 'undefined' ? 1 : +chunkHeader[2],
+      newStart: +chunkHeader[3],
+      newLines: typeof chunkHeader[4] === 'undefined' ? 1 : +chunkHeader[4],
+      lines: [],
+      linedelimiters: []
+    }; // Unified Diff Format quirk: If the chunk size is 0,
+    // the first number is one lower than one would expect.
+    // https://www.artima.com/weblogs/viewpost.jsp?thread=164293
+
+    if (hunk.oldLines === 0) {
+      hunk.oldStart += 1;
+    }
+
+    if (hunk.newLines === 0) {
+      hunk.newStart += 1;
+    }
+
+    var addCount = 0,
+        removeCount = 0;
+
+    for (; i < diffstr.length; i++) {
+      // Lines starting with '---' could be mistaken for the "remove line" operation
+      // But they could be the header for the next file. Therefore prune such cases out.
+      if (diffstr[i].indexOf('--- ') === 0 && i + 2 < diffstr.length && diffstr[i + 1].indexOf('+++ ') === 0 && diffstr[i + 2].indexOf('@@') === 0) {
+        break;
+      }
+
+      var operation = diffstr[i].length == 0 && i != diffstr.length - 1 ? ' ' : diffstr[i][0];
+
+      if (operation === '+' || operation === '-' || operation === ' ' || operation === '\\') {
+        hunk.lines.push(diffstr[i]);
+        hunk.linedelimiters.push(delimiters[i] || '\n');
+
+        if (operation === '+') {
+          addCount++;
+        } else if (operation === '-') {
+          removeCount++;
+        } else if (operation === ' ') {
+          addCount++;
+          removeCount++;
+        }
+      } else {
+        break;
+      }
+    } // Handle the empty block count case
+
+
+    if (!addCount && hunk.newLines === 1) {
+      hunk.newLines = 0;
+    }
+
+    if (!removeCount && hunk.oldLines === 1) {
+      hunk.oldLines = 0;
+    } // Perform optional sanity checking
+
+
+    if (options.strict) {
+      if (addCount !== hunk.newLines) {
+        throw new Error('Added line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
+      }
+
+      if (removeCount !== hunk.oldLines) {
+        throw new Error('Removed line count did not match for hunk at line ' + (chunkHeaderIndex + 1));
+      }
+    }
+
+    return hunk;
+  }
+
+  while (i < diffstr.length) {
+    parseIndex();
+  }
+
+  return list;
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,{"version":3,"sources":["../../src/patch/parse.js"],"names":["parsePatch","uniDiff","options","diffstr","split","delimiters","match","list","i","parseIndex","index","push","length","line","test","header","exec","parseFileHeader","hunks","parseHunk","strict","Error","JSON","stringify","fileHeader","keyPrefix","data","fileName","replace","substr","trim","chunkHeaderIndex","chunkHeaderLine","chunkHeader","hunk","oldStart","oldLines","newStart","newLines","lines","linedelimiters","addCount","removeCount","indexOf","operation"],"mappings":";;;;;;;;;AAAO,SAASA,UAAT,CAAoBC,OAApB,EAA2C;AAAA;AAAA;AAAA;AAAdC,EAAAA,OAAc,uEAAJ,EAAI;AAChD,MAAIC,OAAO,GAAGF,OAAO,CAACG,KAAR,CAAc,qBAAd,CAAd;AAAA,MACIC,UAAU,GAAGJ,OAAO,CAACK,KAAR,CAAc,sBAAd,KAAyC,EAD1D;AAAA,MAEIC,IAAI,GAAG,EAFX;AAAA,MAGIC,CAAC,GAAG,CAHR;;AAKA,WAASC,UAAT,GAAsB;AACpB,QAAIC,KAAK,GAAG,EAAZ;AACAH,IAAAA,IAAI,CAACI,IAAL,CAAUD,KAAV,EAFoB,CAIpB;;AACA,WAAOF,CAAC,GAAGL,OAAO,CAACS,MAAnB,EAA2B;AACzB,UAAIC,IAAI,GAAGV,OAAO,CAACK,CAAD,CAAlB,CADyB,CAGzB;;AACA,UAAK,uBAAD,CAA0BM,IAA1B,CAA+BD,IAA/B,CAAJ,EAA0C;AACxC;AACD,OANwB,CAQzB;;;AACA,UAAIE,MAAM,GAAI,0CAAD,CAA6CC,IAA7C,CAAkDH,IAAlD,CAAb;;AACA,UAAIE,MAAJ,EAAY;AACVL,QAAAA,KAAK,CAACA,KAAN,GAAcK,MAAM,CAAC,CAAD,CAApB;AACD;;AAEDP,MAAAA,CAAC;AACF,KApBmB,CAsBpB;AACA;;;AACAS,IAAAA,eAAe,CAACP,KAAD,CAAf;AACAO,IAAAA,eAAe,CAACP,KAAD,CAAf,CAzBoB,CA2BpB;;AACAA,IAAAA,KAAK,CAACQ,KAAN,GAAc,EAAd;;AAEA,WAAOV,CAAC,GAAGL,OAAO,CAACS,MAAnB,EAA2B;AACzB,UAAIC,KAAI,GAAGV,OAAO,CAACK,CAAD,CAAlB;;AAEA,UAAK,gCAAD,CAAmCM,IAAnC,CAAwCD,KAAxC,CAAJ,EAAmD;AACjD;AACD,OAFD,MAEO,IAAK,KAAD,CAAQC,IAAR,CAAaD,KAAb,CAAJ,EAAwB;AAC7BH,QAAAA,KAAK,CAACQ,KAAN,CAAYP,IAAZ,CAAiBQ,SAAS,EAA1B;AACD,OAFM,MAEA,IAAIN,KAAI,IAAIX,OAAO,CAACkB,MAApB,EAA4B;AACjC;AACA,cAAM,IAAIC,KAAJ,CAAU,mBAAmBb,CAAC,GAAG,CAAvB,IAA4B,GAA5B,GAAkCc,IAAI,CAACC,SAAL,CAAeV,KAAf,CAA5C,CAAN;AACD,OAHM,MAGA;AACLL,QAAAA,CAAC;AACF;AACF;AACF,GAlD+C,CAoDhD;AACA;;;AACA,WAASS,eAAT,CAAyBP,KAAzB,EAAgC;AAC9B,QAAMc,UAAU,GAAI,uBAAD,CAA0BR,IAA1B,CAA+Bb,OAAO,CAACK,CAAD,CAAtC,CAAnB;;AACA,QAAIgB,UAAJ,EAAgB;AACd,UAAIC,SAAS,GAAGD,UAAU,CAAC,CAAD,CAAV,KAAkB,KAAlB,GAA0B,KAA1B,GAAkC,KAAlD;AACA,UAAME,IAAI,GAAGF,UAAU,CAAC,CAAD,CAAV,CAAcpB,KAAd,CAAoB,IAApB,EAA0B,CAA1B,CAAb;AACA,UAAIuB,QAAQ,GAAGD,IAAI,CAAC,CAAD,CAAJ,CAAQE,OAAR,CAAgB,OAAhB,EAAyB,IAAzB,CAAf;;AACA,UAAK,QAAD,CAAWd,IAAX,CAAgBa,QAAhB,CAAJ,EAA+B;AAC7BA,QAAAA,QAAQ,GAAGA,QAAQ,CAACE,MAAT,CAAgB,CAAhB,EAAmBF,QAAQ,CAACf,MAAT,GAAkB,CAArC,CAAX;AACD;;AACDF,MAAAA,KAAK,CAACe,SAAS,GAAG,UAAb,CAAL,GAAgCE,QAAhC;AACAjB,MAAAA,KAAK,CAACe,SAAS,GAAG,QAAb,CAAL,GAA8B,CAACC,IAAI,CAAC,CAAD,CAAJ,IAAW,EAAZ,EAAgBI,IAAhB,EAA9B;AAEAtB,MAAAA,CAAC;AACF;AACF,GApE+C,CAsEhD;AACA;;;AACA,WAASW,SAAT,GAAqB;AACnB,QAAIY,gBAAgB,GAAGvB,CAAvB;AAAA,QACIwB,eAAe,GAAG7B,OAAO,CAACK,CAAC,EAAF,CAD7B;AAAA,QAEIyB,WAAW,GAAGD,eAAe,CAAC5B,KAAhB,CAAsB,4CAAtB,CAFlB;AAIA,QAAI8B,IAAI,GAAG;AACTC,MAAAA,QAAQ,EAAE,CAACF,WAAW,CAAC,CAAD,CADb;AAETG,MAAAA,QAAQ,EAAE,OAAOH,WAAW,CAAC,CAAD,CAAlB,KAA0B,WAA1B,GAAwC,CAAxC,GAA4C,CAACA,WAAW,CAAC,CAAD,CAFzD;AAGTI,MAAAA,QAAQ,EAAE,CAACJ,WAAW,CAAC,CAAD,CAHb;AAITK,MAAAA,QAAQ,EAAE,OAAOL,WAAW,CAAC,CAAD,CAAlB,KAA0B,WAA1B,GAAwC,CAAxC,GAA4C,CAACA,WAAW,CAAC,CAAD,CAJzD;AAKTM,MAAAA,KAAK,EAAE,EALE;AAMTC,MAAAA,cAAc,EAAE;AANP,KAAX,CALmB,CAcnB;AACA;AACA;;AACA,QAAIN,IAAI,CAACE,QAAL,KAAkB,CAAtB,EAAyB;AACvBF,MAAAA,IAAI,CAACC,QAAL,IAAiB,CAAjB;AACD;;AACD,QAAID,IAAI,CAACI,QAAL,KAAkB,CAAtB,EAAyB;AACvBJ,MAAAA,IAAI,CAACG,QAAL,IAAiB,CAAjB;AACD;;AAED,QAAII,QAAQ,GAAG,CAAf;AAAA,QACIC,WAAW,GAAG,CADlB;;AAEA,WAAOlC,CAAC,GAAGL,OAAO,CAACS,MAAnB,EAA2BJ,CAAC
,EAA5B,EAAgC;AAC9B;AACA;AACA,UAAIL,OAAO,CAACK,CAAD,CAAP,CAAWmC,OAAX,CAAmB,MAAnB,MAA+B,CAA/B,IACMnC,CAAC,GAAG,CAAJ,GAAQL,OAAO,CAACS,MADtB,IAEKT,OAAO,CAACK,CAAC,GAAG,CAAL,CAAP,CAAemC,OAAf,CAAuB,MAAvB,MAAmC,CAFxC,IAGKxC,OAAO,CAACK,CAAC,GAAG,CAAL,CAAP,CAAemC,OAAf,CAAuB,IAAvB,MAAiC,CAH1C,EAG6C;AACzC;AACH;;AACD,UAAIC,SAAS,GAAIzC,OAAO,CAACK,CAAD,CAAP,CAAWI,MAAX,IAAqB,CAArB,IAA0BJ,CAAC,IAAKL,OAAO,CAACS,MAAR,GAAiB,CAAlD,GAAwD,GAAxD,GAA8DT,OAAO,CAACK,CAAD,CAAP,CAAW,CAAX,CAA9E;;AAEA,UAAIoC,SAAS,KAAK,GAAd,IAAqBA,SAAS,KAAK,GAAnC,IAA0CA,SAAS,KAAK,GAAxD,IAA+DA,SAAS,KAAK,IAAjF,EAAuF;AACrFV,QAAAA,IAAI,CAACK,KAAL,CAAW5B,IAAX,CAAgBR,OAAO,CAACK,CAAD,CAAvB;AACA0B,QAAAA,IAAI,CAACM,cAAL,CAAoB7B,IAApB,CAAyBN,UAAU,CAACG,CAAD,CAAV,IAAiB,IAA1C;;AAEA,YAAIoC,SAAS,KAAK,GAAlB,EAAuB;AACrBH,UAAAA,QAAQ;AACT,SAFD,MAEO,IAAIG,SAAS,KAAK,GAAlB,EAAuB;AAC5BF,UAAAA,WAAW;AACZ,SAFM,MAEA,IAAIE,SAAS,KAAK,GAAlB,EAAuB;AAC5BH,UAAAA,QAAQ;AACRC,UAAAA,WAAW;AACZ;AACF,OAZD,MAYO;AACL;AACD;AACF,KApDkB,CAsDnB;;;AACA,QAAI,CAACD,QAAD,IAAaP,IAAI,CAACI,QAAL,KAAkB,CAAnC,EAAsC;AACpCJ,MAAAA,IAAI,CAACI,QAAL,GAAgB,CAAhB;AACD;;AACD,QAAI,CAACI,WAAD,IAAgBR,IAAI,CAACE,QAAL,KAAkB,CAAtC,EAAyC;AACvCF,MAAAA,IAAI,CAACE,QAAL,GAAgB,CAAhB;AACD,KA5DkB,CA8DnB;;;AACA,QAAIlC,OAAO,CAACkB,MAAZ,EAAoB;AAClB,UAAIqB,QAAQ,KAAKP,IAAI,CAACI,QAAtB,EAAgC;AAC9B,cAAM,IAAIjB,KAAJ,CAAU,sDAAsDU,gBAAgB,GAAG,CAAzE,CAAV,CAAN;AACD;;AACD,UAAIW,WAAW,KAAKR,IAAI,CAACE,QAAzB,EAAmC;AACjC,cAAM,IAAIf,KAAJ,CAAU,wDAAwDU,gBAAgB,GAAG,CAA3E,CAAV,CAAN;AACD;AACF;;AAED,WAAOG,IAAP;AACD;;AAED,SAAO1B,CAAC,GAAGL,OAAO,CAACS,MAAnB,EAA2B;AACzBH,IAAAA,UAAU;AACX;;AAED,SAAOF,IAAP;AACD","sourcesContent":["export function parsePatch(uniDiff, options = {}) {\n  let diffstr = uniDiff.split(/\\r\\n|[\\n\\v\\f\\r\\x85]/),\n      delimiters = uniDiff.match(/\\r\\n|[\\n\\v\\f\\r\\x85]/g) || [],\n      list = [],\n      i = 0;\n\n  function parseIndex() {\n    let index = {};\n    list.push(index);\n\n    // Parse diff metadata\n    while (i < diffstr.length) {\n      let line = diffstr[i];\n\n      // File header found, end parsing diff metadata\n      if ((/^(\\-\\-\\-|\\+\\+\\+|@@)\\s/).test(line)) {\n        break;\n      }\n\n      // Diff index\n      let header = (/^(?:Index:|diff(?: -r \\w+)+)\\s+(.+?)\\s*$/).exec(line);\n      if (header) {\n        index.index = header[1];\n      }\n\n      i++;\n    }\n\n    // Parse file headers if they are defined. Unified diff requires them, but\n    // there's no technical issues to have an isolated hunk without file header\n    parseFileHeader(index);\n    parseFileHeader(index);\n\n    // Parse hunks\n    index.hunks = [];\n\n    while (i < diffstr.length) {\n      let line = diffstr[i];\n\n      if ((/^(Index:|diff|\\-\\-\\-|\\+\\+\\+)\\s/).test(line)) {\n        break;\n      } else if ((/^@@/).test(line)) {\n        index.hunks.push(parseHunk());\n      } else if (line && options.strict) {\n        // Ignore unexpected content unless in strict mode\n        throw new Error('Unknown line ' + (i + 1) + ' ' + JSON.stringify(line));\n      } else {\n        i++;\n      }\n    }\n  }\n\n  // Parses the --- and +++ headers, if none are found, no lines\n  // are consumed.\n  function parseFileHeader(index) {\n    const fileHeader = (/^(---|\\+\\+\\+)\\s+(.*)$/).exec(diffstr[i]);\n    if (fileHeader) {\n      let keyPrefix = fileHeader[1] === '---' ? 
'old' : 'new';\n      const data = fileHeader[2].split('\\t', 2);\n      let fileName = data[0].replace(/\\\\\\\\/g, '\\\\');\n      if ((/^\".*\"$/).test(fileName)) {\n        fileName = fileName.substr(1, fileName.length - 2);\n      }\n      index[keyPrefix + 'FileName'] = fileName;\n      index[keyPrefix + 'Header'] = (data[1] || '').trim();\n\n      i++;\n    }\n  }\n\n  // Parses a hunk\n  // This assumes that we are at the start of a hunk.\n  function parseHunk() {\n    let chunkHeaderIndex = i,\n        chunkHeaderLine = diffstr[i++],\n        chunkHeader = chunkHeaderLine.split(/@@ -(\\d+)(?:,(\\d+))? \\+(\\d+)(?:,(\\d+))? @@/);\n\n    let hunk = {\n      oldStart: +chunkHeader[1],\n      oldLines: typeof chunkHeader[2] === 'undefined' ? 1 : +chunkHeader[2],\n      newStart: +chunkHeader[3],\n      newLines: typeof chunkHeader[4] === 'undefined' ? 1 : +chunkHeader[4],\n      lines: [],\n      linedelimiters: []\n    };\n\n    // Unified Diff Format quirk: If the chunk size is 0,\n    // the first number is one lower than one would expect.\n    // https://www.artima.com/weblogs/viewpost.jsp?thread=164293\n    if (hunk.oldLines === 0) {\n      hunk.oldStart += 1;\n    }\n    if (hunk.newLines === 0) {\n      hunk.newStart += 1;\n    }\n\n    let addCount = 0,\n        removeCount = 0;\n    for (; i < diffstr.length; i++) {\n      // Lines starting with '---' could be mistaken for the \"remove line\" operation\n      // But they could be the header for the next file. Therefore prune such cases out.\n      if (diffstr[i].indexOf('--- ') === 0\n            && (i + 2 < diffstr.length)\n            && diffstr[i + 1].indexOf('+++ ') === 0\n            && diffstr[i + 2].indexOf('@@') === 0) {\n          break;\n      }\n      let operation = (diffstr[i].length == 0 && i != (diffstr.length - 1)) ? ' ' : diffstr[i][0];\n\n      if (operation === '+' || operation === '-' || operation === ' ' || operation === '\\\\') {\n        hunk.lines.push(diffstr[i]);\n        hunk.linedelimiters.push(delimiters[i] || '\\n');\n\n        if (operation === '+') {\n          addCount++;\n        } else if (operation === '-') {\n          removeCount++;\n        } else if (operation === ' ') {\n          addCount++;\n          removeCount++;\n        }\n      } else {\n        break;\n      }\n    }\n\n    // Handle the empty block count case\n    if (!addCount && hunk.newLines === 1) {\n      hunk.newLines = 0;\n    }\n    if (!removeCount && hunk.oldLines === 1) {\n      hunk.oldLines = 0;\n    }\n\n    // Perform optional sanity checking\n    if (options.strict) {\n      if (addCount !== hunk.newLines) {\n        throw new Error('Added line count did not match for hunk at line ' + (chunkHeaderIndex + 1));\n      }\n      if (removeCount !== hunk.oldLines) {\n        throw new Error('Removed line count did not match for hunk at line ' + (chunkHeaderIndex + 1));\n      }\n    }\n\n    return hunk;\n  }\n\n  while (i < diffstr.length) {\n    parseIndex();\n  }\n\n  return list;\n}\n"]}
diff --git a/_extensions/d2/node_modules/diff/lib/util/array.js b/_extensions/d2/node_modules/diff/lib/util/array.js
new file mode 100644
index 00000000..aecf67ac
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/util/array.js
@@ -0,0 +1,32 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.arrayEqual = arrayEqual;
+exports.arrayStartsWith = arrayStartsWith;
+
+/*istanbul ignore end*/
+function arrayEqual(a, b) {
+  if (a.length !== b.length) {
+    return false;
+  }
+
+  return arrayStartsWith(a, b);
+}
+
+function arrayStartsWith(array, start) {
+  if (start.length > array.length) {
+    return false;
+  }
+
+  for (var i = 0; i < start.length; i++) {
+    if (start[i] !== array[i]) {
+      return false;
+    }
+  }
+
+  return true;
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy91dGlsL2FycmF5LmpzIl0sIm5hbWVzIjpbImFycmF5RXF1YWwiLCJhIiwiYiIsImxlbmd0aCIsImFycmF5U3RhcnRzV2l0aCIsImFycmF5Iiwic3RhcnQiLCJpIl0sIm1hcHBpbmdzIjoiOzs7Ozs7Ozs7O0FBQU8sU0FBU0EsVUFBVCxDQUFvQkMsQ0FBcEIsRUFBdUJDLENBQXZCLEVBQTBCO0FBQy9CLE1BQUlELENBQUMsQ0FBQ0UsTUFBRixLQUFhRCxDQUFDLENBQUNDLE1BQW5CLEVBQTJCO0FBQ3pCLFdBQU8sS0FBUDtBQUNEOztBQUVELFNBQU9DLGVBQWUsQ0FBQ0gsQ0FBRCxFQUFJQyxDQUFKLENBQXRCO0FBQ0Q7O0FBRU0sU0FBU0UsZUFBVCxDQUF5QkMsS0FBekIsRUFBZ0NDLEtBQWhDLEVBQXVDO0FBQzVDLE1BQUlBLEtBQUssQ0FBQ0gsTUFBTixHQUFlRSxLQUFLLENBQUNGLE1BQXpCLEVBQWlDO0FBQy9CLFdBQU8sS0FBUDtBQUNEOztBQUVELE9BQUssSUFBSUksQ0FBQyxHQUFHLENBQWIsRUFBZ0JBLENBQUMsR0FBR0QsS0FBSyxDQUFDSCxNQUExQixFQUFrQ0ksQ0FBQyxFQUFuQyxFQUF1QztBQUNyQyxRQUFJRCxLQUFLLENBQUNDLENBQUQsQ0FBTCxLQUFhRixLQUFLLENBQUNFLENBQUQsQ0FBdEIsRUFBMkI7QUFDekIsYUFBTyxLQUFQO0FBQ0Q7QUFDRjs7QUFFRCxTQUFPLElBQVA7QUFDRCIsInNvdXJjZXNDb250ZW50IjpbImV4cG9ydCBmdW5jdGlvbiBhcnJheUVxdWFsKGEsIGIpIHtcbiAgaWYgKGEubGVuZ3RoICE9PSBiLmxlbmd0aCkge1xuICAgIHJldHVybiBmYWxzZTtcbiAgfVxuXG4gIHJldHVybiBhcnJheVN0YXJ0c1dpdGgoYSwgYik7XG59XG5cbmV4cG9ydCBmdW5jdGlvbiBhcnJheVN0YXJ0c1dpdGgoYXJyYXksIHN0YXJ0KSB7XG4gIGlmIChzdGFydC5sZW5ndGggPiBhcnJheS5sZW5ndGgpIHtcbiAgICByZXR1cm4gZmFsc2U7XG4gIH1cblxuICBmb3IgKGxldCBpID0gMDsgaSA8IHN0YXJ0Lmxlbmd0aDsgaSsrKSB7XG4gICAgaWYgKHN0YXJ0W2ldICE9PSBhcnJheVtpXSkge1xuICAgICAgcmV0dXJuIGZhbHNlO1xuICAgIH1cbiAgfVxuXG4gIHJldHVybiB0cnVlO1xufVxuIl19
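
The two helpers above are internal to the `diff` package rather than part of its public API. A minimal sketch of their semantics, assuming a script saved alongside `array.js` so the relative require resolves:

```js
// Hypothetical usage sketch of the internal array helpers (not part of the patch).
const { arrayEqual, arrayStartsWith } = require('./array');

console.log(arrayEqual([1, 2, 3], [1, 2, 3]));   // true: same length, same elements
console.log(arrayEqual([1, 2], [1, 2, 3]));      // false: lengths differ
console.log(arrayStartsWith([1, 2, 3], [1, 2])); // true: second argument is a prefix
console.log(arrayStartsWith([1, 2], [1, 2, 3])); // false: prefix is longer than the array
```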
diff --git a/_extensions/d2/node_modules/diff/lib/util/distance-iterator.js b/_extensions/d2/node_modules/diff/lib/util/distance-iterator.js
new file mode 100644
index 00000000..57c06a3f
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/util/distance-iterator.js
@@ -0,0 +1,57 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports["default"] = _default;
+
+/*istanbul ignore end*/
+// Iterator that traverses in the range of [min, max], stepping
+// by distance from a given start position. I.e. for [0, 4], with
+// start of 2, this will iterate 2, 3, 1, 4, 0.
+function
+/*istanbul ignore start*/
+_default
+/*istanbul ignore end*/
+(start, minLine, maxLine) {
+  var wantForward = true,
+      backwardExhausted = false,
+      forwardExhausted = false,
+      localOffset = 1;
+  return function iterator() {
+    if (wantForward && !forwardExhausted) {
+      if (backwardExhausted) {
+        localOffset++;
+      } else {
+        wantForward = false;
+      } // Check if trying to fit beyond text length, and if not, check it fits
+      // after offset location (or desired location on first iteration)
+
+
+      if (start + localOffset <= maxLine) {
+        return localOffset;
+      }
+
+      forwardExhausted = true;
+    }
+
+    if (!backwardExhausted) {
+      if (!forwardExhausted) {
+        wantForward = true;
+      } // Check if trying to fit before text beginning, and if not, check it fits
+      // before offset location
+
+
+      if (minLine <= start - localOffset) {
+        return -localOffset++;
+      }
+
+      backwardExhausted = true;
+      return iterator();
+    } // We tried to fit hunk before text beginning and beyond text length, then
+    // hunk can't fit on the text. Return undefined
+
+  };
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy91dGlsL2Rpc3RhbmNlLWl0ZXJhdG9yLmpzIl0sIm5hbWVzIjpbInN0YXJ0IiwibWluTGluZSIsIm1heExpbmUiLCJ3YW50Rm9yd2FyZCIsImJhY2t3YXJkRXhoYXVzdGVkIiwiZm9yd2FyZEV4aGF1c3RlZCIsImxvY2FsT2Zmc2V0IiwiaXRlcmF0b3IiXSwibWFwcGluZ3MiOiI7Ozs7Ozs7OztBQUFBO0FBQ0E7QUFDQTtBQUNlO0FBQUE7QUFBQTtBQUFBO0FBQUEsQ0FBU0EsS0FBVCxFQUFnQkMsT0FBaEIsRUFBeUJDLE9BQXpCLEVBQWtDO0FBQy9DLE1BQUlDLFdBQVcsR0FBRyxJQUFsQjtBQUFBLE1BQ0lDLGlCQUFpQixHQUFHLEtBRHhCO0FBQUEsTUFFSUMsZ0JBQWdCLEdBQUcsS0FGdkI7QUFBQSxNQUdJQyxXQUFXLEdBQUcsQ0FIbEI7QUFLQSxTQUFPLFNBQVNDLFFBQVQsR0FBb0I7QUFDekIsUUFBSUosV0FBVyxJQUFJLENBQUNFLGdCQUFwQixFQUFzQztBQUNwQyxVQUFJRCxpQkFBSixFQUF1QjtBQUNyQkUsUUFBQUEsV0FBVztBQUNaLE9BRkQsTUFFTztBQUNMSCxRQUFBQSxXQUFXLEdBQUcsS0FBZDtBQUNELE9BTG1DLENBT3BDO0FBQ0E7OztBQUNBLFVBQUlILEtBQUssR0FBR00sV0FBUixJQUF1QkosT0FBM0IsRUFBb0M7QUFDbEMsZUFBT0ksV0FBUDtBQUNEOztBQUVERCxNQUFBQSxnQkFBZ0IsR0FBRyxJQUFuQjtBQUNEOztBQUVELFFBQUksQ0FBQ0QsaUJBQUwsRUFBd0I7QUFDdEIsVUFBSSxDQUFDQyxnQkFBTCxFQUF1QjtBQUNyQkYsUUFBQUEsV0FBVyxHQUFHLElBQWQ7QUFDRCxPQUhxQixDQUt0QjtBQUNBOzs7QUFDQSxVQUFJRixPQUFPLElBQUlELEtBQUssR0FBR00sV0FBdkIsRUFBb0M7QUFDbEMsZUFBTyxDQUFDQSxXQUFXLEVBQW5CO0FBQ0Q7O0FBRURGLE1BQUFBLGlCQUFpQixHQUFHLElBQXBCO0FBQ0EsYUFBT0csUUFBUSxFQUFmO0FBQ0QsS0E5QndCLENBZ0N6QjtBQUNBOztBQUNELEdBbENEO0FBbUNEIiwic291cmNlc0NvbnRlbnQiOlsiLy8gSXRlcmF0b3IgdGhhdCB0cmF2ZXJzZXMgaW4gdGhlIHJhbmdlIG9mIFttaW4sIG1heF0sIHN0ZXBwaW5nXG4vLyBieSBkaXN0YW5jZSBmcm9tIGEgZ2l2ZW4gc3RhcnQgcG9zaXRpb24uIEkuZS4gZm9yIFswLCA0XSwgd2l0aFxuLy8gc3RhcnQgb2YgMiwgdGhpcyB3aWxsIGl0ZXJhdGUgMiwgMywgMSwgNCwgMC5cbmV4cG9ydCBkZWZhdWx0IGZ1bmN0aW9uKHN0YXJ0LCBtaW5MaW5lLCBtYXhMaW5lKSB7XG4gIGxldCB3YW50Rm9yd2FyZCA9IHRydWUsXG4gICAgICBiYWNrd2FyZEV4aGF1c3RlZCA9IGZhbHNlLFxuICAgICAgZm9yd2FyZEV4aGF1c3RlZCA9IGZhbHNlLFxuICAgICAgbG9jYWxPZmZzZXQgPSAxO1xuXG4gIHJldHVybiBmdW5jdGlvbiBpdGVyYXRvcigpIHtcbiAgICBpZiAod2FudEZvcndhcmQgJiYgIWZvcndhcmRFeGhhdXN0ZWQpIHtcbiAgICAgIGlmIChiYWNrd2FyZEV4aGF1c3RlZCkge1xuICAgICAgICBsb2NhbE9mZnNldCsrO1xuICAgICAgfSBlbHNlIHtcbiAgICAgICAgd2FudEZvcndhcmQgPSBmYWxzZTtcbiAgICAgIH1cblxuICAgICAgLy8gQ2hlY2sgaWYgdHJ5aW5nIHRvIGZpdCBiZXlvbmQgdGV4dCBsZW5ndGgsIGFuZCBpZiBub3QsIGNoZWNrIGl0IGZpdHNcbiAgICAgIC8vIGFmdGVyIG9mZnNldCBsb2NhdGlvbiAob3IgZGVzaXJlZCBsb2NhdGlvbiBvbiBmaXJzdCBpdGVyYXRpb24pXG4gICAgICBpZiAoc3RhcnQgKyBsb2NhbE9mZnNldCA8PSBtYXhMaW5lKSB7XG4gICAgICAgIHJldHVybiBsb2NhbE9mZnNldDtcbiAgICAgIH1cblxuICAgICAgZm9yd2FyZEV4aGF1c3RlZCA9IHRydWU7XG4gICAgfVxuXG4gICAgaWYgKCFiYWNrd2FyZEV4aGF1c3RlZCkge1xuICAgICAgaWYgKCFmb3J3YXJkRXhoYXVzdGVkKSB7XG4gICAgICAgIHdhbnRGb3J3YXJkID0gdHJ1ZTtcbiAgICAgIH1cblxuICAgICAgLy8gQ2hlY2sgaWYgdHJ5aW5nIHRvIGZpdCBiZWZvcmUgdGV4dCBiZWdpbm5pbmcsIGFuZCBpZiBub3QsIGNoZWNrIGl0IGZpdHNcbiAgICAgIC8vIGJlZm9yZSBvZmZzZXQgbG9jYXRpb25cbiAgICAgIGlmIChtaW5MaW5lIDw9IHN0YXJ0IC0gbG9jYWxPZmZzZXQpIHtcbiAgICAgICAgcmV0dXJuIC1sb2NhbE9mZnNldCsrO1xuICAgICAgfVxuXG4gICAgICBiYWNrd2FyZEV4aGF1c3RlZCA9IHRydWU7XG4gICAgICByZXR1cm4gaXRlcmF0b3IoKTtcbiAgICB9XG5cbiAgICAvLyBXZSB0cmllZCB0byBmaXQgaHVuayBiZWZvcmUgdGV4dCBiZWdpbm5pbmcgYW5kIGJleW9uZCB0ZXh0IGxlbmd0aCwgdGhlblxuICAgIC8vIGh1bmsgY2FuJ3QgZml0IG9uIHRoZSB0ZXh0LiBSZXR1cm4gdW5kZWZpbmVkXG4gIH07XG59XG4iXX0=
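
The comment at the top of `distance-iterator.js` describes the traversal order; a small sketch of what the returned closure actually yields, under the same save-next-to-the-file assumption as above (offsets are relative to `start`, and `undefined` signals that both directions are exhausted):

```js
// Hypothetical usage sketch (not part of the patch): offsets for start = 2 over [0, 4].
const distanceIterator = require('./distance-iterator')['default'];

const next = distanceIterator(2, 0, 4);
console.log(next()); //  1 -> position 3
console.log(next()); // -1 -> position 1
console.log(next()); //  2 -> position 4
console.log(next()); // -2 -> position 0
console.log(next()); // undefined -> the hunk cannot be placed anywhere in range
```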
diff --git a/_extensions/d2/node_modules/diff/lib/util/params.js b/_extensions/d2/node_modules/diff/lib/util/params.js
new file mode 100644
index 00000000..e838eb2f
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/lib/util/params.js
@@ -0,0 +1,24 @@
+/*istanbul ignore start*/
+"use strict";
+
+Object.defineProperty(exports, "__esModule", {
+  value: true
+});
+exports.generateOptions = generateOptions;
+
+/*istanbul ignore end*/
+function generateOptions(options, defaults) {
+  if (typeof options === 'function') {
+    defaults.callback = options;
+  } else if (options) {
+    for (var name in options) {
+      /* istanbul ignore else */
+      if (options.hasOwnProperty(name)) {
+        defaults[name] = options[name];
+      }
+    }
+  }
+
+  return defaults;
+}
+//# sourceMappingURL=data:application/json;charset=utf-8;base64,eyJ2ZXJzaW9uIjozLCJzb3VyY2VzIjpbIi4uLy4uL3NyYy91dGlsL3BhcmFtcy5qcyJdLCJuYW1lcyI6WyJnZW5lcmF0ZU9wdGlvbnMiLCJvcHRpb25zIiwiZGVmYXVsdHMiLCJjYWxsYmFjayIsIm5hbWUiLCJoYXNPd25Qcm9wZXJ0eSJdLCJtYXBwaW5ncyI6Ijs7Ozs7Ozs7O0FBQU8sU0FBU0EsZUFBVCxDQUF5QkMsT0FBekIsRUFBa0NDLFFBQWxDLEVBQTRDO0FBQ2pELE1BQUksT0FBT0QsT0FBUCxLQUFtQixVQUF2QixFQUFtQztBQUNqQ0MsSUFBQUEsUUFBUSxDQUFDQyxRQUFULEdBQW9CRixPQUFwQjtBQUNELEdBRkQsTUFFTyxJQUFJQSxPQUFKLEVBQWE7QUFDbEIsU0FBSyxJQUFJRyxJQUFULElBQWlCSCxPQUFqQixFQUEwQjtBQUN4QjtBQUNBLFVBQUlBLE9BQU8sQ0FBQ0ksY0FBUixDQUF1QkQsSUFBdkIsQ0FBSixFQUFrQztBQUNoQ0YsUUFBQUEsUUFBUSxDQUFDRSxJQUFELENBQVIsR0FBaUJILE9BQU8sQ0FBQ0csSUFBRCxDQUF4QjtBQUNEO0FBQ0Y7QUFDRjs7QUFDRCxTQUFPRixRQUFQO0FBQ0QiLCJzb3VyY2VzQ29udGVudCI6WyJleHBvcnQgZnVuY3Rpb24gZ2VuZXJhdGVPcHRpb25zKG9wdGlvbnMsIGRlZmF1bHRzKSB7XG4gIGlmICh0eXBlb2Ygb3B0aW9ucyA9PT0gJ2Z1bmN0aW9uJykge1xuICAgIGRlZmF1bHRzLmNhbGxiYWNrID0gb3B0aW9ucztcbiAgfSBlbHNlIGlmIChvcHRpb25zKSB7XG4gICAgZm9yIChsZXQgbmFtZSBpbiBvcHRpb25zKSB7XG4gICAgICAvKiBpc3RhbmJ1bCBpZ25vcmUgZWxzZSAqL1xuICAgICAgaWYgKG9wdGlvbnMuaGFzT3duUHJvcGVydHkobmFtZSkpIHtcbiAgICAgICAgZGVmYXVsdHNbbmFtZV0gPSBvcHRpb25zW25hbWVdO1xuICAgICAgfVxuICAgIH1cbiAgfVxuICByZXR1cm4gZGVmYXVsdHM7XG59XG4iXX0=
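
`generateOptions` is the small internal shim that lets the `diff` entry points accept either an options object or a bare callback. A sketch with illustrative option names, under the same relative-path assumption as above:

```js
// Hypothetical usage sketch (not part of the patch).
const { generateOptions } = require('./params');

// An options object: its own properties are copied onto the defaults.
console.log(generateOptions({ ignoreCase: true }, { context: 4 }));
// -> { context: 4, ignoreCase: true }

// A bare function: it is stored on defaults.callback instead.
const merged = generateOptions(function done() {}, { context: 4 });
console.log(typeof merged.callback); // 'function'
```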
diff --git a/_extensions/d2/node_modules/diff/package.json b/_extensions/d2/node_modules/diff/package.json
new file mode 100644
index 00000000..a2fc30c5
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/package.json
@@ -0,0 +1,87 @@
+{
+  "name": "diff",
+  "version": "5.1.0",
+  "description": "A javascript text diff implementation.",
+  "keywords": [
+    "diff",
+    "jsdiff",
+    "compare",
+    "patch",
+    "text",
+    "json",
+    "css",
+    "javascript"
+  ],
+  "maintainers": [
+    "Kevin Decker  (http://incaseofstairs.com)"
+  ],
+  "bugs": {
+    "email": "kpdecker@gmail.com",
+    "url": "http://github.com/kpdecker/jsdiff/issues"
+  },
+  "license": "BSD-3-Clause",
+  "repository": {
+    "type": "git",
+    "url": "git://github.com/kpdecker/jsdiff.git"
+  },
+  "engines": {
+    "node": ">=0.3.1"
+  },
+  "main": "./lib/index.js",
+  "module": "./lib/index.es6.js",
+  "browser": "./dist/diff.js",
+  "unpkg": "./dist/diff.js",
+  "exports": {
+    ".": {
+      "import": "./lib/index.mjs",
+      "require": "./lib/index.js"
+    },
+    "./package.json": "./package.json",
+    "./": "./"
+  },
+  "scripts": {
+    "clean": "rm -rf lib/ dist/",
+    "build:node": "yarn babel --out-dir lib  --source-maps=inline src",
+    "test": "grunt"
+  },
+  "devDependencies": {
+    "@babel/cli": "^7.2.3",
+    "@babel/core": "^7.2.2",
+    "@babel/plugin-transform-modules-commonjs": "^7.2.0",
+    "@babel/preset-env": "^7.2.3",
+    "@babel/register": "^7.0.0",
+    "babel-eslint": "^10.0.1",
+    "babel-loader": "^8.0.5",
+    "chai": "^4.2.0",
+    "colors": "^1.3.3",
+    "eslint": "^5.12.0",
+    "grunt": "^1.0.3",
+    "grunt-babel": "^8.0.0",
+    "grunt-cli": "^1.3.2",
+    "grunt-contrib-clean": "^2.0.0",
+    "grunt-contrib-copy": "^1.0.0",
+    "grunt-contrib-uglify": "^5.0.0",
+    "grunt-contrib-watch": "^1.1.0",
+    "grunt-eslint": "^23.0.0",
+    "grunt-exec": "^3.0.0",
+    "grunt-karma": "^4.0.0",
+    "grunt-mocha-istanbul": "^5.0.2",
+    "grunt-mocha-test": "^0.13.3",
+    "grunt-webpack": "^3.1.3",
+    "istanbul": "github:kpdecker/istanbul",
+    "karma": "^5.1.1",
+    "karma-chrome-launcher": "^3.1.0",
+    "karma-mocha": "^2.0.1",
+    "karma-mocha-reporter": "^2.0.0",
+    "karma-sauce-launcher": "^4.1.5",
+    "karma-sourcemap-loader": "^0.3.6",
+    "karma-webpack": "^4.0.2",
+    "mocha": "^6.0.0",
+    "rollup": "^1.0.2",
+    "rollup-plugin-babel": "^4.2.0",
+    "semver": "^7.3.2",
+    "webpack": "^4.28.3",
+    "webpack-dev-server": "^3.1.14"
+  },
+  "optionalDependencies": {}
+}
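
The `exports` map above gives the package dual entry points: CommonJS consumers resolve `./lib/index.js`, ES module consumers resolve `./lib/index.mjs`, and the `"./": "./"` entry keeps deep requires such as the `lib/util` files working. A minimal consumer sketch:

```js
// Hypothetical consumer sketch (not part of the patch).
const Diff = require('diff');                     // resolves ./lib/index.js
const parts = Diff.diffChars('kitten', 'sitting');
console.log(parts.map(p => p.value).join('|'));

// In an ES module the equivalent import resolves ./lib/index.mjs:
//   import { diffChars } from 'diff';
```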
diff --git a/_extensions/d2/node_modules/diff/runtime.js b/_extensions/d2/node_modules/diff/runtime.js
new file mode 100644
index 00000000..82ea7e69
--- /dev/null
+++ b/_extensions/d2/node_modules/diff/runtime.js
@@ -0,0 +1,3 @@
+require('@babel/register')({
+  ignore: ['lib', 'node_modules']
+});
diff --git a/_extensions/d2/node_modules/extend/.editorconfig b/_extensions/d2/node_modules/extend/.editorconfig
new file mode 100644
index 00000000..bc228f82
--- /dev/null
+++ b/_extensions/d2/node_modules/extend/.editorconfig
@@ -0,0 +1,20 @@
+root = true
+
+[*]
+indent_style = tab
+indent_size = 4
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
+max_line_length = 150
+
+[CHANGELOG.md]
+indent_style = space
+indent_size = 2
+
+[*.json]
+max_line_length = off
+
+[Makefile]
+max_line_length = off
diff --git a/_extensions/d2/node_modules/extend/.eslintrc b/_extensions/d2/node_modules/extend/.eslintrc
new file mode 100644
index 00000000..a34cf283
--- /dev/null
+++ b/_extensions/d2/node_modules/extend/.eslintrc
@@ -0,0 +1,17 @@
+{
+	"root": true,
+
+	"extends": "@ljharb",
+
+	"rules": {
+		"complexity": [2, 20],
+		"eqeqeq": [2, "allow-null"],
+		"func-name-matching": [1],
+		"max-depth": [1, 4],
+		"max-statements": [2, 26],
+		"no-extra-parens": [1],
+		"no-magic-numbers": [0],
+		"no-restricted-syntax": [2, "BreakStatement", "ContinueStatement", "DebuggerStatement", "LabeledStatement", "WithStatement"],
+		"sort-keys": [0],
+	}
+}
diff --git a/_extensions/d2/node_modules/extend/.jscs.json b/_extensions/d2/node_modules/extend/.jscs.json
new file mode 100644
index 00000000..3cce01d7
--- /dev/null
+++ b/_extensions/d2/node_modules/extend/.jscs.json
@@ -0,0 +1,175 @@
+{
+	"es3": true,
+
+	"additionalRules": [],
+
+	"requireSemicolons": true,
+
+	"disallowMultipleSpaces": true,
+
+	"disallowIdentifierNames": [],
+
+	"requireCurlyBraces": {
+		"allExcept": [],
+		"keywords": ["if", "else", "for", "while", "do", "try", "catch"]
+	},
+
+	"requireSpaceAfterKeywords": ["if", "else", "for", "while", "do", "switch", "return", "try", "catch", "function"],
+
+	"disallowSpaceAfterKeywords": [],
+
+	"disallowSpaceBeforeComma": true,
+	"disallowSpaceAfterComma": false,
+	"disallowSpaceBeforeSemicolon": true,
+
+	"disallowNodeTypes": [
+		"DebuggerStatement",
+		"LabeledStatement",
+		"SwitchCase",
+		"SwitchStatement",
+		"WithStatement"
+	],
+
+	"requireObjectKeysOnNewLine": { "allExcept": ["sameLine"] },
+
+	"requireSpacesInAnonymousFunctionExpression": { "beforeOpeningRoundBrace": true, "beforeOpeningCurlyBrace": true },
+	"requireSpacesInNamedFunctionExpression": { "beforeOpeningCurlyBrace": true },
+	"disallowSpacesInNamedFunctionExpression": { "beforeOpeningRoundBrace": true },
+	"requireSpacesInFunctionDeclaration": { "beforeOpeningCurlyBrace": true },
+	"disallowSpacesInFunctionDeclaration": { "beforeOpeningRoundBrace": true },
+
+	"requireSpaceBetweenArguments": true,
+
+	"disallowSpacesInsideParentheses": true,
+
+	"disallowSpacesInsideArrayBrackets": true,
+
+	"disallowQuotedKeysInObjects": { "allExcept": ["reserved"] },
+
+	"disallowSpaceAfterObjectKeys": true,
+
+	"requireCommaBeforeLineBreak": true,
+
+	"disallowSpaceAfterPrefixUnaryOperators": ["++", "--", "+", "-", "~", "!"],
+	"requireSpaceAfterPrefixUnaryOperators": [],
+
+	"disallowSpaceBeforePostfixUnaryOperators": ["++", "--"],
+	"requireSpaceBeforePostfixUnaryOperators": [],
+
+	"disallowSpaceBeforeBinaryOperators": [],
+	"requireSpaceBeforeBinaryOperators": ["+", "-", "/", "*", "=", "==", "===", "!=", "!=="],
+
+	"requireSpaceAfterBinaryOperators": ["+", "-", "/", "*", "=", "==", "===", "!=", "!=="],
+	"disallowSpaceAfterBinaryOperators": [],
+
+	"disallowImplicitTypeConversion": ["binary", "string"],
+
+	"disallowKeywords": ["with", "eval"],
+
+	"requireKeywordsOnNewLine": [],
+	"disallowKeywordsOnNewLine": ["else"],
+
+	"requireLineFeedAtFileEnd": true,
+
+	"disallowTrailingWhitespace": true,
+
+	"disallowTrailingComma": true,
+
+	"excludeFiles": ["node_modules/**", "vendor/**"],
+
+	"disallowMultipleLineStrings": true,
+
+	"requireDotNotation": { "allExcept": ["keywords"] },
+
+	"requireParenthesesAroundIIFE": true,
+
+	"validateLineBreaks": "LF",
+
+	"validateQuoteMarks": {
+		"escape": true,
+		"mark": "'"
+	},
+
+	"disallowOperatorBeforeLineBreak": [],
+
+	"requireSpaceBeforeKeywords": [
+		"do",
+		"for",
+		"if",
+		"else",
+		"switch",
+		"case",
+		"try",
+		"catch",
+		"finally",
+		"while",
+		"with",
+		"return"
+	],
+
+	"validateAlignedFunctionParameters": {
+		"lineBreakAfterOpeningBraces": true,
+		"lineBreakBeforeClosingBraces": true
+	},
+
+	"requirePaddingNewLinesBeforeExport": true,
+
+	"validateNewlineAfterArrayElements": {
+		"maximum": 6
+	},
+
+	"requirePaddingNewLinesAfterUseStrict": true,
+
+	"disallowArrowFunctions": true,
+
+	"disallowMultiLineTernary": true,
+
+	"validateOrderInObjectKeys": false,
+
+	"disallowIdenticalDestructuringNames": true,
+
+	"disallowNestedTernaries": { "maxLevel": 1 },
+
+	"requireSpaceAfterComma": { "allExcept": ["trailing"] },
+	"requireAlignedMultilineParams": false,
+
+	"requireSpacesInGenerator": {
+		"afterStar": true
+	},
+
+	"disallowSpacesInGenerator": {
+		"beforeStar": true
+	},
+
+	"disallowVar": false,
+
+	"requireArrayDestructuring": false,
+
+	"requireEnhancedObjectLiterals": false,
+
+	"requireObjectDestructuring": false,
+
+	"requireEarlyReturn": false,
+
+	"requireCapitalizedConstructorsNew": {
+		"allExcept": ["Function", "String", "Object", "Symbol", "Number", "Date", "RegExp", "Error", "Boolean", "Array"]
+	},
+
+	"requireImportAlphabetized": false,
+
+	"requireSpaceBeforeObjectValues": true,
+	"requireSpaceBeforeDestructuredValues": true,
+
+	"disallowSpacesInsideTemplateStringPlaceholders": true,
+
+	"disallowArrayDestructuringReturn": false,
+
+	"requireNewlineBeforeSingleStatementsInIf": false,
+
+	"disallowUnusedVariables": true,
+
+	"requireSpacesInsideImportedObjectBraces": true,
+
+	"requireUseStrict": true
+}
+
diff --git a/_extensions/d2/node_modules/extend/.travis.yml b/_extensions/d2/node_modules/extend/.travis.yml
new file mode 100644
index 00000000..5ccdfc49
--- /dev/null
+++ b/_extensions/d2/node_modules/extend/.travis.yml
@@ -0,0 +1,230 @@
+language: node_js
+os:
+ - linux
+node_js:
+  - "10.7"
+  - "9.11"
+  - "8.11"
+  - "7.10"
+  - "6.14"
+  - "5.12"
+  - "4.9"
+  - "iojs-v3.3"
+  - "iojs-v2.5"
+  - "iojs-v1.8"
+  - "0.12"
+  - "0.10"
+  - "0.8"
+before_install:
+  - 'case "${TRAVIS_NODE_VERSION}" in 0.*) export NPM_CONFIG_STRICT_SSL=false ;; esac'
+  - 'nvm install-latest-npm'
+install:
+  - 'if [ "${TRAVIS_NODE_VERSION}" = "0.6" ] || [ "${TRAVIS_NODE_VERSION}" = "0.9" ]; then nvm install --latest-npm 0.8 && npm install && nvm use "${TRAVIS_NODE_VERSION}"; else npm install; fi;'
+script:
+  - 'if [ -n "${PRETEST-}" ]; then npm run pretest ; fi'
+  - 'if [ -n "${POSTTEST-}" ]; then npm run posttest ; fi'
+  - 'if [ -n "${COVERAGE-}" ]; then npm run coverage ; fi'
+  - 'if [ -n "${TEST-}" ]; then npm run tests-only ; fi'
+sudo: false
+env:
+  - TEST=true
+matrix:
+  fast_finish: true
+  include:
+    - node_js: "lts/*"
+      env: PRETEST=true
+    - node_js: "lts/*"
+      env: POSTTEST=true
+    - node_js: "4"
+      env: COVERAGE=true
+    - node_js: "10.6"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "10.5"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "10.4"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "10.3"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "10.2"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "10.1"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "10.0"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "9.10"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "9.9"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "9.8"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "9.7"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "9.6"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "9.5"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "9.4"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "9.3"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "9.2"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "9.1"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "9.0"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "8.10"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "8.9"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "8.8"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "8.7"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "8.6"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "8.5"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "8.4"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "8.3"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "8.2"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "8.1"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "8.0"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "7.9"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "7.8"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "7.7"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "7.6"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "7.5"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "7.4"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "7.3"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "7.2"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "7.1"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "7.0"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.13"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.12"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.11"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.10"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.9"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.8"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.7"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.6"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.5"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.4"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.3"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.2"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.1"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "6.0"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.11"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.10"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.9"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.8"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.7"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.6"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.5"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.4"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.3"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.2"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.1"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "5.0"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "4.8"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "4.7"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "4.6"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "4.5"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "4.4"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "4.3"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "4.2"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "4.1"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "4.0"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v3.2"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v3.1"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v3.0"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v2.4"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v2.3"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v2.2"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v2.1"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v2.0"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v1.7"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v1.6"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v1.5"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v1.4"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v1.3"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v1.2"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v1.1"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "iojs-v1.0"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "0.11"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "0.9"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "0.6"
+      env: TEST=true ALLOW_FAILURE=true
+    - node_js: "0.4"
+      env: TEST=true ALLOW_FAILURE=true
+  allow_failures:
+    - os: osx
+    - env: TEST=true ALLOW_FAILURE=true
diff --git a/_extensions/d2/node_modules/extend/CHANGELOG.md b/_extensions/d2/node_modules/extend/CHANGELOG.md
new file mode 100644
index 00000000..2cf7de6f
--- /dev/null
+++ b/_extensions/d2/node_modules/extend/CHANGELOG.md
@@ -0,0 +1,83 @@
+3.0.2 / 2018-07-19
+==================
+  * [Fix] Prevent merging `__proto__` property (#48)
+  * [Dev Deps] update `eslint`, `@ljharb/eslint-config`, `tape`
+  * [Tests] up to `node` `v10.7`, `v9.11`, `v8.11`, `v7.10`, `v6.14`, `v4.9`; use `nvm install-latest-npm`
+
+3.0.1 / 2017-04-27
+==================
+  * [Fix] deep extending should work with a non-object (#46)
+  * [Dev Deps] update `tape`, `eslint`, `@ljharb/eslint-config`
+  * [Tests] up to `node` `v7.9`, `v6.10`, `v4.8`; improve matrix
+  * [Docs] Switch from vb.teelaun.ch to versionbadg.es for the npm version badge SVG.
+  * [Docs] Add example to readme (#34)
+
+3.0.0 / 2015-07-01
+==================
+  * [Possible breaking change] Use global "strict" directive (#32)
+  * [Tests] `int` is an ES3 reserved word
+  * [Tests] Test up to `io.js` `v2.3`
+  * [Tests] Add `npm run eslint`
+  * [Dev Deps] Update `covert`, `jscs`
+
+2.0.1 / 2015-04-25
+==================
+  * Use an inline `isArray` check, for ES3 browsers. (#27)
+  * Some old browsers fail when an identifier is `toString`
+  * Test latest `node` and `io.js` versions on `travis-ci`; speed up builds
+  * Add license info to package.json (#25)
+  * Update `tape`, `jscs`
+  * Adding a CHANGELOG
+
+2.0.0 / 2014-10-01
+==================
+  * Increase code coverage to 100%; run code coverage as part of tests
+  * Add `npm run lint`; Run linter as part of tests
+  * Remove nodeType and setInterval checks in isPlainObject
+  * Updating `tape`, `jscs`, `covert`
+  * General style and README cleanup
+
+1.3.0 / 2014-06-20
+==================
+  * Add component.json for browser support (#18)
+  * Use SVG for badges in README (#16)
+  * Updating `tape`, `covert`
+  * Updating travis-ci to work with multiple node versions
+  * Fix `deep === false` bug (returning target as {}) (#14)
+  * Fixing constructor checks in isPlainObject
+  * Adding additional test coverage
+  * Adding `npm run coverage`
+  * Add LICENSE (#13)
+  * Adding a warning about `false`, per #11
+  * General style and whitespace cleanup
+
+1.2.1 / 2013-09-14
+==================
+  * Fixing hasOwnProperty bugs that would only have shown up in specific browsers. Fixes #8
+  * Updating `tape`
+
+1.2.0 / 2013-09-02
+==================
+  * Updating the README: add badges
+  * Adding a missing variable reference.
+  * Using `tape` instead of `buster` for tests; add more tests (#7)
+  * Adding node 0.10 to Travis CI (#6)
+  * Enabling "npm test" and cleaning up package.json (#5)
+  * Add Travis CI.
+
+1.1.3 / 2012-12-06
+==================
+  * Added unit tests.
+  * Ensure extend function is named. (Looks nicer in a stack trace.)
+  * README cleanup.
+
+1.1.1 / 2012-11-07
+==================
+  * README cleanup.
+  * Added installation instructions.
+  * Added a missing semicolon
+
+1.0.0 / 2012-04-08
+==================
+  * Initial commit
+
diff --git a/_extensions/d2/node_modules/extend/LICENSE b/_extensions/d2/node_modules/extend/LICENSE
new file mode 100644
index 00000000..e16d6a56
--- /dev/null
+++ b/_extensions/d2/node_modules/extend/LICENSE
@@ -0,0 +1,23 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Stefan Thomas
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/_extensions/d2/node_modules/extend/README.md b/_extensions/d2/node_modules/extend/README.md
new file mode 100644
index 00000000..5b8249aa
--- /dev/null
+++ b/_extensions/d2/node_modules/extend/README.md
@@ -0,0 +1,81 @@
+[![Build Status][travis-svg]][travis-url]
+[![dependency status][deps-svg]][deps-url]
+[![dev dependency status][dev-deps-svg]][dev-deps-url]
+
+# extend() for Node.js [![Version Badge][npm-version-png]][npm-url]
+
+`node-extend` is a port of the classic extend() method from jQuery. It behaves as you expect. It is simple, tried and true.
+
+Notes:
+
+* Since Node.js >= 4,
+  [`Object.assign`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/assign)
+  now offers the same functionality natively (but without the "deep copy" option).
+  See [ECMAScript 2015 (ES6) in Node.js](https://nodejs.org/en/docs/es6).
+* Some native implementations of `Object.assign` in both Node.js and many
+  browsers (since NPM modules are for the browser too) may not be fully
+  spec-compliant.
+  Check [`object.assign`](https://www.npmjs.com/package/object.assign) module for
+  a compliant candidate.
+
+## Installation
+
+This package is available on [npm][npm-url] as: `extend`
+
+``` sh
+npm install extend
+```
+
+## Usage
+
+**Syntax:** extend **(** [`deep`], `target`, `object1`, [`objectN`] **)**
+
+*Extend one object with one or more others, returning the modified object.*
+
+**Example:**
+
+``` js
+var extend = require('extend');
+extend(targetObject, object1, object2);
+```
+
+Keep in mind that the target object will be modified, and will be returned from extend().
+
+If a boolean true is specified as the first argument, extend performs a deep copy, recursively copying any objects it finds. Otherwise, the copy will share structure with the original object(s).
+Undefined properties are not copied. However, properties inherited from the object's prototype will be copied over.
+Warning: passing `false` as the first argument is not supported.
+
+### Arguments
+
+* `deep` *Boolean* (optional)
+If set, the merge becomes recursive (i.e. deep copy).
+* `target`	*Object*
+The object to extend.
+* `object1`	*Object*
+The object that will be merged into the first.
+* `objectN` *Object* (Optional)
+More objects to merge into the first.
+
+## License
+
+`node-extend` is licensed under the [MIT License][mit-license-url].
+
+## Acknowledgements
+
+All credit to the jQuery authors for perfecting this amazing utility.
+
+Ported to Node.js by [Stefan Thomas][github-justmoon] with contributions by [Jonathan Buchanan][github-insin] and [Jordan Harband][github-ljharb].
+
+[travis-svg]: https://travis-ci.org/justmoon/node-extend.svg
+[travis-url]: https://travis-ci.org/justmoon/node-extend
+[npm-url]: https://npmjs.org/package/extend
+[mit-license-url]: http://opensource.org/licenses/MIT
+[github-justmoon]: https://github.com/justmoon
+[github-insin]: https://github.com/insin
+[github-ljharb]: https://github.com/ljharb
+[npm-version-png]: http://versionbadg.es/justmoon/node-extend.svg
+[deps-svg]: https://david-dm.org/justmoon/node-extend.svg
+[deps-url]: https://david-dm.org/justmoon/node-extend
+[dev-deps-svg]: https://david-dm.org/justmoon/node-extend/dev-status.svg
+[dev-deps-url]: https://david-dm.org/justmoon/node-extend#info=devDependencies
+
diff --git a/_extensions/d2/node_modules/extend/component.json b/_extensions/d2/node_modules/extend/component.json
new file mode 100644
index 00000000..1500a2f3
--- /dev/null
+++ b/_extensions/d2/node_modules/extend/component.json
@@ -0,0 +1,32 @@
+{
+	"name": "extend",
+	"author": "Stefan Thomas  (http://www.justmoon.net)",
+	"version": "3.0.0",
+	"description": "Port of jQuery.extend for node.js and the browser.",
+	"scripts": [
+		"index.js"
+	],
+	"contributors": [
+		{
+			"name": "Jordan Harband",
+			"url": "https://github.com/ljharb"
+		}
+	],
+	"keywords": [
+		"extend",
+		"clone",
+		"merge"
+	],
+	"repository" : {
+		"type": "git",
+		"url": "https://github.com/justmoon/node-extend.git"
+	},
+	"dependencies": {
+	},
+	"devDependencies": {
+		"tape" : "~3.0.0",
+		"covert": "~0.4.0",
+		"jscs": "~1.6.2"
+	}
+}
+
diff --git a/_extensions/d2/node_modules/extend/index.js b/_extensions/d2/node_modules/extend/index.js
new file mode 100644
index 00000000..2aa3faae
--- /dev/null
+++ b/_extensions/d2/node_modules/extend/index.js
@@ -0,0 +1,117 @@
+'use strict';
+
+var hasOwn = Object.prototype.hasOwnProperty;
+var toStr = Object.prototype.toString;
+var defineProperty = Object.defineProperty;
+var gOPD = Object.getOwnPropertyDescriptor;
+
+var isArray = function isArray(arr) {
+	if (typeof Array.isArray === 'function') {
+		return Array.isArray(arr);
+	}
+
+	return toStr.call(arr) === '[object Array]';
+};
+
+var isPlainObject = function isPlainObject(obj) {
+	if (!obj || toStr.call(obj) !== '[object Object]') {
+		return false;
+	}
+
+	var hasOwnConstructor = hasOwn.call(obj, 'constructor');
+	var hasIsPrototypeOf = obj.constructor && obj.constructor.prototype && hasOwn.call(obj.constructor.prototype, 'isPrototypeOf');
+	// Not own constructor property must be Object
+	if (obj.constructor && !hasOwnConstructor && !hasIsPrototypeOf) {
+		return false;
+	}
+
+	// Own properties are enumerated firstly, so to speed up,
+	// if last one is own, then all properties are own.
+	var key;
+	for (key in obj) { /**/ }
+
+	return typeof key === 'undefined' || hasOwn.call(obj, key);
+};
+
+// If name is '__proto__', and Object.defineProperty is available, define __proto__ as an own property on target
+var setProperty = function setProperty(target, options) {
+	if (defineProperty && options.name === '__proto__') {
+		defineProperty(target, options.name, {
+			enumerable: true,
+			configurable: true,
+			value: options.newValue,
+			writable: true
+		});
+	} else {
+		target[options.name] = options.newValue;
+	}
+};
+
+// Return undefined instead of __proto__ if '__proto__' is not an own property
+var getProperty = function getProperty(obj, name) {
+	if (name === '__proto__') {
+		if (!hasOwn.call(obj, name)) {
+			return void 0;
+		} else if (gOPD) {
+			// In early versions of node, obj['__proto__'] is buggy when obj has
+			// __proto__ as an own property. Object.getOwnPropertyDescriptor() works.
+			return gOPD(obj, name).value;
+		}
+	}
+
+	return obj[name];
+};
+
+module.exports = function extend() {
+	var options, name, src, copy, copyIsArray, clone;
+	var target = arguments[0];
+	var i = 1;
+	var length = arguments.length;
+	var deep = false;
+
+	// Handle a deep copy situation
+	if (typeof target === 'boolean') {
+		deep = target;
+		target = arguments[1] || {};
+		// skip the boolean and the target
+		i = 2;
+	}
+	if (target == null || (typeof target !== 'object' && typeof target !== 'function')) {
+		target = {};
+	}
+
+	for (; i < length; ++i) {
+		options = arguments[i];
+		// Only deal with non-null/undefined values
+		if (options != null) {
+			// Extend the base object
+			for (name in options) {
+				src = getProperty(target, name);
+				copy = getProperty(options, name);
+
+				// Prevent never-ending loop
+				if (target !== copy) {
+					// Recurse if we're merging plain objects or arrays
+					if (deep && copy && (isPlainObject(copy) || (copyIsArray = isArray(copy)))) {
+						if (copyIsArray) {
+							copyIsArray = false;
+							clone = src && isArray(src) ? src : [];
+						} else {
+							clone = src && isPlainObject(src) ? src : {};
+						}
+
+						// Never move original objects, clone them
+						setProperty(target, { name: name, newValue: extend(deep, clone, copy) });
+
+					// Don't bring in undefined values
+					} else if (typeof copy !== 'undefined') {
+						setProperty(target, { name: name, newValue: copy });
+					}
+				}
+			}
+		}
+	}
+
+	// Return the modified object
+	return target;
+};
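
The `setProperty`/`getProperty` pair above exists to stop a merged `__proto__` key from polluting `Object.prototype` (the fix noted in the 3.0.2 changelog entry earlier in this patch). A hypothetical sketch of the behaviour:

```js
// Hypothetical sketch (not part of the patch): prototype pollution is contained.
var extend = require('extend');

var payload = JSON.parse('{"__proto__": {"polluted": true}}');
var result = extend(true, {}, payload);

console.log(({}).polluted); // undefined: Object.prototype was not touched
console.log(Object.getOwnPropertyDescriptor(result, '__proto__').value);
// { polluted: true }: stored as an own property of the result instead
```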
diff --git a/_extensions/d2/node_modules/extend/package.json b/_extensions/d2/node_modules/extend/package.json
new file mode 100644
index 00000000..85279f78
--- /dev/null
+++ b/_extensions/d2/node_modules/extend/package.json
@@ -0,0 +1,42 @@
+{
+	"name": "extend",
+	"author": "Stefan Thomas  (http://www.justmoon.net)",
+	"version": "3.0.2",
+	"description": "Port of jQuery.extend for node.js and the browser",
+	"main": "index",
+	"scripts": {
+		"pretest": "npm run lint",
+		"test": "npm run tests-only",
+		"posttest": "npm run coverage-quiet",
+		"tests-only": "node test",
+		"coverage": "covert test/index.js",
+		"coverage-quiet": "covert test/index.js --quiet",
+		"lint": "npm run jscs && npm run eslint",
+		"jscs": "jscs *.js */*.js",
+		"eslint": "eslint *.js */*.js"
+	},
+	"contributors": [
+		{
+			"name": "Jordan Harband",
+			"url": "https://github.com/ljharb"
+		}
+	],
+	"keywords": [
+		"extend",
+		"clone",
+		"merge"
+	],
+	"repository": {
+		"type": "git",
+		"url": "https://github.com/justmoon/node-extend.git"
+	},
+	"dependencies": {},
+	"devDependencies": {
+		"@ljharb/eslint-config": "^12.2.1",
+		"covert": "^1.1.0",
+		"eslint": "^4.19.1",
+		"jscs": "^3.0.7",
+		"tape": "^4.9.1"
+	},
+	"license": "MIT"
+}
diff --git a/_extensions/d2/node_modules/fs.realpath/LICENSE b/_extensions/d2/node_modules/fs.realpath/LICENSE
new file mode 100644
index 00000000..5bd884c2
--- /dev/null
+++ b/_extensions/d2/node_modules/fs.realpath/LICENSE
@@ -0,0 +1,43 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+----
+
+This library bundles a version of the `fs.realpath` and `fs.realpathSync`
+methods from Node.js v0.10 under the terms of the Node.js MIT license.
+
+Node's license follows, also included at the header of `old.js` which contains
+the licensed code:
+
+  Copyright Joyent, Inc. and other Node contributors.
+
+  Permission is hereby granted, free of charge, to any person obtaining a
+  copy of this software and associated documentation files (the "Software"),
+  to deal in the Software without restriction, including without limitation
+  the rights to use, copy, modify, merge, publish, distribute, sublicense,
+  and/or sell copies of the Software, and to permit persons to whom the
+  Software is furnished to do so, subject to the following conditions:
+
+  The above copyright notice and this permission notice shall be included in
+  all copies or substantial portions of the Software.
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+  DEALINGS IN THE SOFTWARE.
diff --git a/_extensions/d2/node_modules/fs.realpath/README.md b/_extensions/d2/node_modules/fs.realpath/README.md
new file mode 100644
index 00000000..a42ceac6
--- /dev/null
+++ b/_extensions/d2/node_modules/fs.realpath/README.md
@@ -0,0 +1,33 @@
+# fs.realpath
+
+A backwards-compatible fs.realpath for Node v6 and above
+
+In Node v6, the JavaScript implementation of fs.realpath was replaced
+with a faster (but less resilient) native implementation.  That raises
+new and platform-specific errors and cannot handle long or excessively
+symlink-looping paths.
+
+This module handles those cases by detecting the new errors and
+falling back to the JavaScript implementation.  On versions of Node
+prior to v6, it has no effect.
+
+## USAGE
+
+```js
+var rp = require('fs.realpath')
+
+// async version
+rp.realpath(someLongAndLoopingPath, function (er, real) {
+  // the ELOOP was handled, but it was a bit slower
+})
+
+// sync version
+var real = rp.realpathSync(someLongAndLoopingPath)
+
+// monkeypatch at your own risk!
+// This replaces the fs.realpath/fs.realpathSync builtins
+rp.monkeypatch()
+
+// un-do the monkeypatching
+rp.unmonkeypatch()
+```
diff --git a/_extensions/d2/node_modules/fs.realpath/index.js b/_extensions/d2/node_modules/fs.realpath/index.js
new file mode 100644
index 00000000..b09c7c7e
--- /dev/null
+++ b/_extensions/d2/node_modules/fs.realpath/index.js
@@ -0,0 +1,66 @@
+module.exports = realpath
+realpath.realpath = realpath
+realpath.sync = realpathSync
+realpath.realpathSync = realpathSync
+realpath.monkeypatch = monkeypatch
+realpath.unmonkeypatch = unmonkeypatch
+
+var fs = require('fs')
+var origRealpath = fs.realpath
+var origRealpathSync = fs.realpathSync
+
+var version = process.version
+var ok = /^v[0-5]\./.test(version)
+var old = require('./old.js')
+
+function newError (er) {
+  return er && er.syscall === 'realpath' && (
+    er.code === 'ELOOP' ||
+    er.code === 'ENOMEM' ||
+    er.code === 'ENAMETOOLONG'
+  )
+}
+
+function realpath (p, cache, cb) {
+  if (ok) {
+    return origRealpath(p, cache, cb)
+  }
+
+  if (typeof cache === 'function') {
+    cb = cache
+    cache = null
+  }
+  origRealpath(p, cache, function (er, result) {
+    if (newError(er)) {
+      old.realpath(p, cache, cb)
+    } else {
+      cb(er, result)
+    }
+  })
+}
+
+function realpathSync (p, cache) {
+  if (ok) {
+    return origRealpathSync(p, cache)
+  }
+
+  try {
+    return origRealpathSync(p, cache)
+  } catch (er) {
+    if (newError(er)) {
+      return old.realpathSync(p, cache)
+    } else {
+      throw er
+    }
+  }
+}
+
+function monkeypatch () {
+  fs.realpath = realpath
+  fs.realpathSync = realpathSync
+}
+
+function unmonkeypatch () {
+  fs.realpath = origRealpath
+  fs.realpathSync = origRealpathSync
+}
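
In addition to the wrapped calls above, `monkeypatch()` and `unmonkeypatch()` swap the fallback-aware implementations in and out of the `fs` module itself, so code that calls the builtin directly is covered too. A short sketch:

```js
// Hypothetical sketch (not part of the patch).
var fs = require('fs');
var rp = require('fs.realpath');

var builtin = fs.realpath;
rp.monkeypatch();
console.log(fs.realpath === builtin); // false: fs now routes through the wrapper
rp.unmonkeypatch();
console.log(fs.realpath === builtin); // true: the builtin is restored
```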
diff --git a/_extensions/d2/node_modules/fs.realpath/old.js b/_extensions/d2/node_modules/fs.realpath/old.js
new file mode 100644
index 00000000..b40305e7
--- /dev/null
+++ b/_extensions/d2/node_modules/fs.realpath/old.js
@@ -0,0 +1,303 @@
+// Copyright Joyent, Inc. and other Node contributors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to permit
+// persons to whom the Software is furnished to do so, subject to the
+// following conditions:
+//
+// The above copyright notice and this permission notice shall be included
+// in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
+// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+// USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+var pathModule = require('path');
+var isWindows = process.platform === 'win32';
+var fs = require('fs');
+
+// JavaScript implementation of realpath, ported from node pre-v6
+
+var DEBUG = process.env.NODE_DEBUG && /fs/.test(process.env.NODE_DEBUG);
+
+function rethrow() {
+  // Only enable in debug mode. A backtrace uses ~1000 bytes of heap space and
+  // is fairly slow to generate.
+  var callback;
+  if (DEBUG) {
+    var backtrace = new Error;
+    callback = debugCallback;
+  } else
+    callback = missingCallback;
+
+  return callback;
+
+  function debugCallback(err) {
+    if (err) {
+      backtrace.message = err.message;
+      err = backtrace;
+      missingCallback(err);
+    }
+  }
+
+  function missingCallback(err) {
+    if (err) {
+      if (process.throwDeprecation)
+        throw err;  // Forgot a callback but don't know where? Use NODE_DEBUG=fs
+      else if (!process.noDeprecation) {
+        var msg = 'fs: missing callback ' + (err.stack || err.message);
+        if (process.traceDeprecation)
+          console.trace(msg);
+        else
+          console.error(msg);
+      }
+    }
+  }
+}
+
+function maybeCallback(cb) {
+  return typeof cb === 'function' ? cb : rethrow();
+}
+
+var normalize = pathModule.normalize;
+
+// Regexp that finds the next partion of a (partial) path
+// result is [base_with_slash, base], e.g. ['somedir/', 'somedir']
+if (isWindows) {
+  var nextPartRe = /(.*?)(?:[\/\\]+|$)/g;
+} else {
+  var nextPartRe = /(.*?)(?:[\/]+|$)/g;
+}
+
+// Regex to find the device root, including trailing slash. E.g. 'c:\\'.
+if (isWindows) {
+  var splitRootRe = /^(?:[a-zA-Z]:|[\\\/]{2}[^\\\/]+[\\\/][^\\\/]+)?[\\\/]*/;
+} else {
+  var splitRootRe = /^[\/]*/;
+}
+
+exports.realpathSync = function realpathSync(p, cache) {
+  // make p is absolute
+  p = pathModule.resolve(p);
+
+  if (cache && Object.prototype.hasOwnProperty.call(cache, p)) {
+    return cache[p];
+  }
+
+  var original = p,
+      seenLinks = {},
+      knownHard = {};
+
+  // current character position in p
+  var pos;
+  // the partial path so far, including a trailing slash if any
+  var current;
+  // the partial path without a trailing slash (except when pointing at a root)
+  var base;
+  // the partial path scanned in the previous round, with slash
+  var previous;
+
+  start();
+
+  function start() {
+    // Skip over roots
+    var m = splitRootRe.exec(p);
+    pos = m[0].length;
+    current = m[0];
+    base = m[0];
+    previous = '';
+
+    // On windows, check that the root exists. On unix there is no need.
+    if (isWindows && !knownHard[base]) {
+      fs.lstatSync(base);
+      knownHard[base] = true;
+    }
+  }
+
+  // walk down the path, swapping out linked pathparts for their real
+  // values
+  // NB: p.length changes.
+  while (pos < p.length) {
+    // find the next part
+    nextPartRe.lastIndex = pos;
+    var result = nextPartRe.exec(p);
+    previous = current;
+    current += result[0];
+    base = previous + result[1];
+    pos = nextPartRe.lastIndex;
+
+    // continue if not a symlink
+    if (knownHard[base] || (cache && cache[base] === base)) {
+      continue;
+    }
+
+    var resolvedLink;
+    if (cache && Object.prototype.hasOwnProperty.call(cache, base)) {
+      // some known symbolic link.  no need to stat again.
+      resolvedLink = cache[base];
+    } else {
+      var stat = fs.lstatSync(base);
+      if (!stat.isSymbolicLink()) {
+        knownHard[base] = true;
+        if (cache) cache[base] = base;
+        continue;
+      }
+
+      // read the link if it wasn't read before
+      // dev/ino always return 0 on windows, so skip the check.
+      var linkTarget = null;
+      if (!isWindows) {
+        var id = stat.dev.toString(32) + ':' + stat.ino.toString(32);
+        if (seenLinks.hasOwnProperty(id)) {
+          linkTarget = seenLinks[id];
+        }
+      }
+      if (linkTarget === null) {
+        fs.statSync(base);
+        linkTarget = fs.readlinkSync(base);
+      }
+      resolvedLink = pathModule.resolve(previous, linkTarget);
+      // track this, if given a cache.
+      if (cache) cache[base] = resolvedLink;
+      if (!isWindows) seenLinks[id] = linkTarget;
+    }
+
+    // resolve the link, then start over
+    p = pathModule.resolve(resolvedLink, p.slice(pos));
+    start();
+  }
+
+  if (cache) cache[original] = p;
+
+  return p;
+};
+
+
+exports.realpath = function realpath(p, cache, cb) {
+  if (typeof cb !== 'function') {
+    cb = maybeCallback(cache);
+    cache = null;
+  }
+
+  // make p is absolute
+  p = pathModule.resolve(p);
+
+  if (cache && Object.prototype.hasOwnProperty.call(cache, p)) {
+    return process.nextTick(cb.bind(null, null, cache[p]));
+  }
+
+  var original = p,
+      seenLinks = {},
+      knownHard = {};
+
+  // current character position in p
+  var pos;
+  // the partial path so far, including a trailing slash if any
+  var current;
+  // the partial path without a trailing slash (except when pointing at a root)
+  var base;
+  // the partial path scanned in the previous round, with slash
+  var previous;
+
+  start();
+
+  function start() {
+    // Skip over roots
+    var m = splitRootRe.exec(p);
+    pos = m[0].length;
+    current = m[0];
+    base = m[0];
+    previous = '';
+
+    // On windows, check that the root exists. On unix there is no need.
+    if (isWindows && !knownHard[base]) {
+      fs.lstat(base, function(err) {
+        if (err) return cb(err);
+        knownHard[base] = true;
+        LOOP();
+      });
+    } else {
+      process.nextTick(LOOP);
+    }
+  }
+
+  // walk down the path, swapping out linked pathparts for their real
+  // values
+  function LOOP() {
+    // stop if scanned past end of path
+    if (pos >= p.length) {
+      if (cache) cache[original] = p;
+      return cb(null, p);
+    }
+
+    // find the next part
+    nextPartRe.lastIndex = pos;
+    var result = nextPartRe.exec(p);
+    previous = current;
+    current += result[0];
+    base = previous + result[1];
+    pos = nextPartRe.lastIndex;
+
+    // continue if not a symlink
+    if (knownHard[base] || (cache && cache[base] === base)) {
+      return process.nextTick(LOOP);
+    }
+
+    if (cache && Object.prototype.hasOwnProperty.call(cache, base)) {
+      // known symbolic link.  no need to stat again.
+      return gotResolvedLink(cache[base]);
+    }
+
+    return fs.lstat(base, gotStat);
+  }
+
+  function gotStat(err, stat) {
+    if (err) return cb(err);
+
+    // if not a symlink, skip to the next path part
+    if (!stat.isSymbolicLink()) {
+      knownHard[base] = true;
+      if (cache) cache[base] = base;
+      return process.nextTick(LOOP);
+    }
+
+    // stat & read the link if not read before
+    // call gotTarget as soon as the link target is known
+    // dev/ino always return 0 on windows, so skip the check.
+    if (!isWindows) {
+      var id = stat.dev.toString(32) + ':' + stat.ino.toString(32);
+      if (seenLinks.hasOwnProperty(id)) {
+        return gotTarget(null, seenLinks[id], base);
+      }
+    }
+    fs.stat(base, function(err) {
+      if (err) return cb(err);
+
+      fs.readlink(base, function(err, target) {
+        if (!isWindows) seenLinks[id] = target;
+        gotTarget(err, target);
+      });
+    });
+  }
+
+  function gotTarget(err, target, base) {
+    if (err) return cb(err);
+
+    var resolvedLink = pathModule.resolve(previous, target);
+    if (cache) cache[base] = resolvedLink;
+    gotResolvedLink(resolvedLink);
+  }
+
+  function gotResolvedLink(resolvedLink) {
+    // resolve the link, then start over
+    p = pathModule.resolve(resolvedLink, p.slice(pos));
+    start();
+  }
+};
diff --git a/_extensions/d2/node_modules/fs.realpath/package.json b/_extensions/d2/node_modules/fs.realpath/package.json
new file mode 100644
index 00000000..3edc57d2
--- /dev/null
+++ b/_extensions/d2/node_modules/fs.realpath/package.json
@@ -0,0 +1,26 @@
+{
+  "name": "fs.realpath",
+  "version": "1.0.0",
+  "description": "Use node's fs.realpath, but fall back to the JS implementation if the native one fails",
+  "main": "index.js",
+  "dependencies": {},
+  "devDependencies": {},
+  "scripts": {
+    "test": "tap test/*.js --cov"
+  },
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/isaacs/fs.realpath.git"
+  },
+  "keywords": [
+    "realpath",
+    "fs",
+    "polyfill"
+  ],
+  "author": "Isaac Z. Schlueter  (http://blog.izs.me/)",
+  "license": "ISC",
+  "files": [
+    "old.js",
+    "index.js"
+  ]
+}
diff --git a/_extensions/d2/node_modules/get-stdin/index.d.ts b/_extensions/d2/node_modules/get-stdin/index.d.ts
new file mode 100644
index 00000000..9bda3c14
--- /dev/null
+++ b/_extensions/d2/node_modules/get-stdin/index.d.ts
@@ -0,0 +1,33 @@
+/// <reference types="node"/>
+
+declare const getStdin: {
+	/**
+	Get [`stdin`](https://nodejs.org/api/process.html#process_process_stdin) as a `string`.
+
+	@returns A promise that is resolved when the `end` event fires on the `stdin` stream, indicating that there is no more data to be read. In a TTY context, an empty `string` is returned.
+
+	@example
+	```
+	// example.ts
+	import getStdin = require('get-stdin');
+
+	(async () => {
+		console.log(await getStdin());
+		//=> 'unicorns'
+	})();
+
+	// $ echo unicorns | ts-node example.ts
+	// unicorns
+	```
+	*/
+	(): Promise<string>;
+
+	/**
+	Get [`stdin`](https://nodejs.org/api/process.html#process_process_stdin) as a `Buffer`.
+
+	@returns A promise that is resolved when the `end` event fires on the `stdin` stream, indicating that there is no more data to be read. In a TTY context, an empty `Buffer` is returned.
+	*/
+	buffer(): Promise<Buffer>;
+};
+
+export = getStdin;
diff --git a/_extensions/d2/node_modules/get-stdin/index.js b/_extensions/d2/node_modules/get-stdin/index.js
new file mode 100644
index 00000000..b091359c
--- /dev/null
+++ b/_extensions/d2/node_modules/get-stdin/index.js
@@ -0,0 +1,52 @@
+'use strict';
+const {stdin} = process;
+
+module.exports = () => {
+	let result = '';
+
+	return new Promise(resolve => {
+		if (stdin.isTTY) {
+			resolve(result);
+			return;
+		}
+
+		stdin.setEncoding('utf8');
+
+		stdin.on('readable', () => {
+			let chunk;
+
+			while ((chunk = stdin.read())) {
+				result += chunk;
+			}
+		});
+
+		stdin.on('end', () => {
+			resolve(result);
+		});
+	});
+};
+
+module.exports.buffer = () => {
+	const result = [];
+	let length = 0;
+
+	return new Promise(resolve => {
+		if (stdin.isTTY) {
+			resolve(Buffer.concat([]));
+			return;
+		}
+
+		stdin.on('readable', () => {
+			let chunk;
+
+			while ((chunk = stdin.read())) {
+				result.push(chunk);
+				length += chunk.length;
+			}
+		});
+
+		stdin.on('end', () => {
+			resolve(Buffer.concat(result, length));
+		});
+	});
+};
diff --git a/_extensions/d2/node_modules/get-stdin/license b/_extensions/d2/node_modules/get-stdin/license
new file mode 100644
index 00000000..e7af2f77
--- /dev/null
+++ b/_extensions/d2/node_modules/get-stdin/license
@@ -0,0 +1,9 @@
+MIT License
+
+Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (sindresorhus.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/_extensions/d2/node_modules/get-stdin/package.json b/_extensions/d2/node_modules/get-stdin/package.json
new file mode 100644
index 00000000..7cbc1c4d
--- /dev/null
+++ b/_extensions/d2/node_modules/get-stdin/package.json
@@ -0,0 +1,39 @@
+{
+	"name": "get-stdin",
+	"version": "7.0.0",
+	"description": "Get stdin as a string or buffer",
+	"license": "MIT",
+	"repository": "sindresorhus/get-stdin",
+	"author": {
+		"name": "Sindre Sorhus",
+		"email": "sindresorhus@gmail.com",
+		"url": "sindresorhus.com"
+	},
+	"engines": {
+		"node": ">=8"
+	},
+	"scripts": {
+		"test": "xo && ava test.js test-buffer.js && echo unicorns | node test-real.js && tsd"
+	},
+	"files": [
+		"index.js",
+		"index.d.ts"
+	],
+	"keywords": [
+		"std",
+		"stdin",
+		"stdio",
+		"concat",
+		"buffer",
+		"stream",
+		"process",
+		"read"
+	],
+	"devDependencies": {
+		"@types/node": "^11.13.4",
+		"ava": "^1.4.1",
+		"delay": "^4.2.0",
+		"tsd": "^0.7.2",
+		"xo": "^0.24.0"
+	}
+}
diff --git a/_extensions/d2/node_modules/get-stdin/readme.md b/_extensions/d2/node_modules/get-stdin/readme.md
new file mode 100644
index 00000000..7c70eee3
--- /dev/null
+++ b/_extensions/d2/node_modules/get-stdin/readme.md
@@ -0,0 +1,55 @@
+# get-stdin [![Build Status](https://travis-ci.org/sindresorhus/get-stdin.svg?branch=master)](https://travis-ci.org/sindresorhus/get-stdin)
+
+> Get [stdin](https://nodejs.org/api/process.html#process_process_stdin) as a string or buffer
+
+
+## Install
+
+```
+$ npm install get-stdin
+```
+
+
+## Usage
+
+```js
+// example.js
+const getStdin = require('get-stdin');
+
+(async () => {
+	console.log(await getStdin());
+	//=> 'unicorns'
+})();
+```
+
+```
+$ echo unicorns | node example.js
+unicorns
+```
+
+
+## API
+
+Both methods return a promise that is resolved when the `end` event fires on the `stdin` stream, indicating that there is no more data to be read.
+
+### getStdin()
+
+Get `stdin` as a `string`.
+
+In a TTY context, a promise that resolves to an empty `string` is returned.
+
+### getStdin.buffer()
+
+Get `stdin` as a `Buffer`.
+
+In a TTY context, a promise that resolves to an empty `Buffer` is returned.
+
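+A minimal sketch of the `Buffer` variant (the file name `example-buffer.js` is just for illustration):
+
+```js
+// example-buffer.js
+const getStdin = require('get-stdin');
+
+(async () => {
+	const data = await getStdin.buffer();
+	// `data` is a Buffer; in a TTY context it is empty
+	console.log(`${data.length} bytes`);
+})();
+```
+
+```
+$ head -c 16 /dev/urandom | node example-buffer.js
+16 bytes
+```
+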
+
+## Related
+
+- [get-stream](https://github.com/sindresorhus/get-stream) - Get a stream as a string or buffer
+
+
+## License
+
+MIT © [Sindre Sorhus](https://sindresorhus.com)
diff --git a/_extensions/d2/node_modules/glob/LICENSE b/_extensions/d2/node_modules/glob/LICENSE
new file mode 100644
index 00000000..42ca266d
--- /dev/null
+++ b/_extensions/d2/node_modules/glob/LICENSE
@@ -0,0 +1,21 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+## Glob Logo
+
+Glob's logo created by Tanya Brassie <http://tanyabrassie.com/>, licensed
+under a Creative Commons Attribution-ShareAlike 4.0 International License
+https://creativecommons.org/licenses/by-sa/4.0/
diff --git a/_extensions/d2/node_modules/glob/README.md b/_extensions/d2/node_modules/glob/README.md
new file mode 100644
index 00000000..83f0c83a
--- /dev/null
+++ b/_extensions/d2/node_modules/glob/README.md
@@ -0,0 +1,378 @@
+# Glob
+
+Match files using the patterns the shell uses, like stars and stuff.
+
+[![Build Status](https://travis-ci.org/isaacs/node-glob.svg?branch=master)](https://travis-ci.org/isaacs/node-glob/) [![Build Status](https://ci.appveyor.com/api/projects/status/kd7f3yftf7unxlsx?svg=true)](https://ci.appveyor.com/project/isaacs/node-glob) [![Coverage Status](https://coveralls.io/repos/isaacs/node-glob/badge.svg?branch=master&service=github)](https://coveralls.io/github/isaacs/node-glob?branch=master)
+
+This is a glob implementation in JavaScript.  It uses the `minimatch`
+library to do its matching.
+
+![a fun cartoon logo made of glob characters](logo/glob.png)
+
+## Usage
+
+Install with npm
+
+```
+npm i glob
+```
+
+```javascript
+var glob = require("glob")
+
+// options is optional
+glob("**/*.js", options, function (er, files) {
+  // files is an array of filenames.
+  // If the `nonull` option is set, and nothing
+  // was found, then files is ["**/*.js"]
+  // er is an error object or null.
+})
+```
+
+## Glob Primer
+
+"Globs" are the patterns you type when you do stuff like `ls *.js` on
+the command line, or put `build/*` in a `.gitignore` file.
+
+Before parsing the path part patterns, braced sections are expanded
+into a set.  Braced sections start with `{` and end with `}`, with any
+number of comma-delimited sections within.  Braced sections may contain
+slash characters, so `a{/b/c,bcd}` would expand into `a/b/c` and `abcd`.
+
+The following characters have special magic meaning when used in a
+path portion:
+
+* `*` Matches 0 or more characters in a single path portion
+* `?` Matches 1 character
+* `[...]` Matches a range of characters, similar to a RegExp range.
+  If the first character of the range is `!` or `^` then it matches
+  any character not in the range.
+* `!(pattern|pattern|pattern)` Matches anything that does not match
+  any of the patterns provided.
+* `?(pattern|pattern|pattern)` Matches zero or one occurrence of the
+  patterns provided.
+* `+(pattern|pattern|pattern)` Matches one or more occurrences of the
+  patterns provided.
+* `*(a|b|c)` Matches zero or more occurrences of the patterns provided
+* `@(pattern|pat*|pat?erN)` Matches exactly one of the patterns
+  provided
+* `**` If a "globstar" is alone in a path portion, then it matches
+  zero or more directories and subdirectories searching for matches.
+  It does not crawl symlinked directories.
+
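+For instance, assuming a tree that contains `a/b.js`, `a/c/d.js` and
+`a/.e.js`, the patterns behave roughly like this (a sketch, not exhaustive):
+
+```javascript
+var glob = require("glob")
+
+glob.sync("a/*.js")              // matches a/b.js only (no recursion, no dot files)
+glob.sync("a/?.js")              // matches a/b.js ("?" matches exactly one character)
+glob.sync("a/**/*.js")           // matches a/b.js and a/c/d.js (globstar recurses)
+glob.sync("a/*.js", {dot: true}) // also matches a/.e.js (see "Dots" below)
+```
+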
+### Dots
+
+If a file or directory path portion has a `.` as the first character,
+then it will not match any glob pattern unless that pattern's
+corresponding path part also has a `.` as its first character.
+
+For example, the pattern `a/.*/c` would match the file at `a/.b/c`.
+However the pattern `a/*/c` would not, because `*` does not start with
+a dot character.
+
+You can make glob treat dots as normal characters by setting
+`dot:true` in the options.
+
+### Basename Matching
+
+If you set `matchBase:true` in the options, and the pattern has no
+slashes in it, then it will seek for any file anywhere in the tree
+with a matching basename.  For example, `*.js` would match
+`test/simple/basic.js`.
+
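+A quick sketch, assuming the `test/simple/basic.js` file from that example exists:
+
+```javascript
+var glob = require("glob")
+
+// a slash-less pattern plus matchBase matches basenames anywhere in the tree
+glob("*.js", {matchBase: true}, function (er, files) {
+  // files includes test/simple/basic.js (and every other .js file below cwd)
+})
+```
+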
+### Empty Sets
+
+If no matching files are found, then an empty array is returned.  This
+differs from the shell, where the pattern itself is returned.  For
+example:
+
+    $ echo a*s*d*f
+    a*s*d*f
+
+To get the bash-style behavior, set the `nonull:true` in the options.
+
+### See Also:
+
+* `man sh`
+* `man bash` (Search for "Pattern Matching")
+* `man 3 fnmatch`
+* `man 5 gitignore`
+* [minimatch documentation](https://github.com/isaacs/minimatch)
+
+## glob.hasMagic(pattern, [options])
+
+Returns `true` if there are any special characters in the pattern, and
+`false` otherwise.
+
+Note that the options affect the results.  If `noext:true` is set in
+the options object, then `+(a|b)` will not be considered a magic
+pattern.  If the pattern has a brace expansion, like `a/{b/c,x/y}`
+then that is considered magical, unless `nobrace:true` is set in the
+options.
+
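+For example (a sketch):
+
+```javascript
+var glob = require("glob")
+
+glob.hasMagic("a/b/c.js")               // false: a plain path, nothing magic
+glob.hasMagic("a/*.js")                 // true:  "*" is magic
+glob.hasMagic("+(a|b)")                 // true by default...
+glob.hasMagic("+(a|b)", {noext: true})  // ...but false when extglobs are disabled
+glob.hasMagic("a/{b/c,x/y}")            // true, unless nobrace:true is set
+```
+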
+## glob(pattern, [options], cb)
+
+* `pattern` `{String}` Pattern to be matched
+* `options` `{Object}`
+* `cb` `{Function}`
+  * `err` `{Error | null}`
+  * `matches` `{Array}` filenames found matching the pattern
+
+Perform an asynchronous glob search.
+
+## glob.sync(pattern, [options])
+
+* `pattern` `{String}` Pattern to be matched
+* `options` `{Object}`
+* return: `{Array}` filenames found matching the pattern
+
+Perform a synchronous glob search.
+
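+The two entry points side by side (a minimal sketch):
+
+```javascript
+var glob = require("glob")
+
+// asynchronous: matches arrive in the callback
+glob("**/*.md", {nodir: true}, function (er, files) {
+  if (er) throw er
+  console.log(files)
+})
+
+// synchronous: matches are returned directly
+var files = glob.sync("**/*.md", {nodir: true})
+console.log(files)
+```
+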
+## Class: glob.Glob
+
+Create a Glob object by instantiating the `glob.Glob` class.
+
+```javascript
+var Glob = require("glob").Glob
+var mg = new Glob(pattern, options, cb)
+```
+
+It's an EventEmitter, and starts walking the filesystem to find matches
+immediately.
+
+### new glob.Glob(pattern, [options], [cb])
+
+* `pattern` `{String}` pattern to search for
+* `options` `{Object}`
+* `cb` `{Function}` Called when an error occurs, or matches are found
+  * `err` `{Error | null}`
+  * `matches` `{Array}` filenames found matching the pattern
+
+Note that if the `sync` flag is set in the options, then matches will
+be immediately available on the `g.found` member.
+
+### Properties
+
+* `minimatch` The minimatch object that the glob uses.
+* `options` The options object passed in.
+* `aborted` Boolean which is set to true when calling `abort()`.  There
+  is no way at this time to continue a glob search after aborting, but
+  you can re-use the statCache to avoid having to duplicate syscalls.
+* `cache` Convenience object.  Each field has the following possible
+  values:
+  * `false` - Path does not exist
+  * `true` - Path exists
+  * `'FILE'` - Path exists, and is not a directory
+  * `'DIR'` - Path exists, and is a directory
+  * `[file, entries, ...]` - Path exists, is a directory, and the
+    array value is the results of `fs.readdir`
+* `statCache` Cache of `fs.stat` results, to prevent statting the same
+  path multiple times.
+* `symlinks` A record of which paths are symbolic links, which is
+  relevant in resolving `**` patterns.
+* `realpathCache` An optional object which is passed to `fs.realpath`
+  to minimize unnecessary syscalls.  It is stored on the instantiated
+  Glob object, and may be re-used.
+
+### Events
+
+* `end` When the matching is finished, this is emitted with all the
+  matches found.  If the `nonull` option is set, and no match was found,
+  then the `matches` list contains the original pattern.  The matches
+  are sorted, unless the `nosort` flag is set.
+* `match` Every time a match is found, this is emitted with the specific
+  thing that matched. It is not deduplicated or resolved to a realpath.
+* `error` Emitted when an unexpected error is encountered, or whenever
+  any fs error occurs if `options.strict` is set.
+* `abort` When `abort()` is called, this event is raised.
+
+### Methods
+
+* `pause` Temporarily stop the search
+* `resume` Resume the search
+* `abort` Stop the search forever
+
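+A sketch of the event and control surface (the early-abort condition is purely illustrative):
+
+```javascript
+var Glob = require("glob").Glob
+
+var mg = new Glob("**/*.js", {nodir: true})
+
+mg.on("match", function (file) {
+  console.log("found", file)
+  if (file === "stop-here.js") mg.abort()  // hypothetical condition
+})
+
+mg.on("end", function (matches) {
+  console.log("done:", matches.length, "matches")
+})
+
+mg.on("abort", function () {
+  console.log("search aborted")
+})
+```
+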
+### Options
+
+All the options that can be passed to Minimatch can also be passed to
+Glob to change pattern matching behavior.  Also, some have been added,
+or have glob-specific ramifications.
+
+All options are false by default, unless otherwise noted.
+
+All options are added to the Glob object, as well.
+
+If you are running many `glob` operations, you can pass a Glob object
+as the `options` argument to a subsequent operation to shortcut some
+`stat` and `readdir` calls.  At the very least, you may pass in shared
+`symlinks`, `statCache`, `realpathCache`, and `cache` options, so that
+parallel glob operations will be sped up by sharing information about
+the filesystem.
+
+* `cwd` The current working directory in which to search.  Defaults
+  to `process.cwd()`.
+* `root` The place where patterns starting with `/` will be mounted
+  onto.  Defaults to `path.resolve(options.cwd, "/")` (`/` on Unix
+  systems, and `C:\` or some such on Windows.)
+* `dot` Include `.dot` files in normal matches and `globstar` matches.
+  Note that an explicit dot in a portion of the pattern will always
+  match dot files.
+* `nomount` By default, a pattern starting with a forward-slash will be
+  "mounted" onto the root setting, so that a valid filesystem path is
+  returned.  Set this flag to disable that behavior.
+* `mark` Add a `/` character to directory matches.  Note that this
+  requires additional stat calls.
+* `nosort` Don't sort the results.
+* `stat` Set to true to stat *all* results.  This reduces performance
+  somewhat, and is completely unnecessary, unless `readdir` is presumed
+  to be an untrustworthy indicator of file existence.
+* `silent` When an unusual error is encountered when attempting to
+  read a directory, a warning will be printed to stderr.  Set the
+  `silent` option to true to suppress these warnings.
+* `strict` When an unusual error is encountered when attempting to
+  read a directory, the process will just continue on in search of
+  other matches.  Set the `strict` option to raise an error in these
+  cases.
+* `cache` See `cache` property above.  Pass in a previously generated
+  cache object to save some fs calls.
+* `statCache` A cache of results of filesystem information, to prevent
+  unnecessary stat calls.  While it should not normally be necessary
+  to set this, you may pass the statCache from one glob() call to the
+  options object of another, if you know that the filesystem will not
+  change between calls.  (See "Race Conditions" below.)
+* `symlinks` A cache of known symbolic links.  You may pass in a
+  previously generated `symlinks` object to save `lstat` calls when
+  resolving `**` matches.
+* `sync` DEPRECATED: use `glob.sync(pattern, opts)` instead.
+* `nounique` In some cases, brace-expanded patterns can result in the
+  same file showing up multiple times in the result set.  By default,
+  this implementation prevents duplicates in the result set.  Set this
+  flag to disable that behavior.
+* `nonull` Set to never return an empty set, instead returning a set
+  containing the pattern itself.  This is the default in glob(3).
+* `debug` Set to enable debug logging in minimatch and glob.
+* `nobrace` Do not expand `{a,b}` and `{1..3}` brace sets.
+* `noglobstar` Do not match `**` against multiple filenames.  (Ie,
+  treat it as a normal `*` instead.)
+* `noext` Do not match `+(a|b)` "extglob" patterns.
+* `nocase` Perform a case-insensitive match.  Note: on
+  case-insensitive filesystems, non-magic patterns will match by
+  default, since `stat` and `readdir` will not raise errors.
+* `matchBase` Perform a basename-only match if the pattern does not
+  contain any slash characters.  That is, `*.js` would be treated as
+  equivalent to `**/*.js`, matching all js files in all directories.
+* `nodir` Do not match directories, only files.  (Note: to match
+  *only* directories, simply put a `/` at the end of the pattern.)
+* `ignore` Add a pattern or an array of glob patterns to exclude matches.
+  Note: `ignore` patterns are *always* in `dot:true` mode, regardless
+  of any other settings.
+* `follow` Follow symlinked directories when expanding `**` patterns.
+  Note that this can result in a lot of duplicate references in the
+  presence of cyclic links.
+* `realpath` Set to true to call `fs.realpath` on all of the results.
+  In the case of a symlink that cannot be resolved, the full absolute
+  path to the matched entry is returned (though it will usually be a
+  broken symlink)
+* `absolute` Set to true to always receive absolute paths for matched
+  files.  Unlike `realpath`, this also affects the values returned in
+  the `match` event.
+* `fs` File-system object with Node's `fs` API. By default, the built-in
+  `fs` module will be used. Set to a volume provided by a library like
+  `memfs` to avoid using the "real" file-system.
+
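+A sketch of sharing the caches between two related searches, as described above (the patterns are illustrative):
+
+```javascript
+var glob = require("glob")
+
+var first = new glob.Glob("src/**/*.js", {nodir: true}, function (er, jsFiles) {
+  if (er) throw er
+
+  // re-use the filesystem knowledge gathered by the first search
+  glob("src/**/*.json", {
+    nodir: true,
+    cache: first.cache,
+    statCache: first.statCache,
+    symlinks: first.symlinks,
+    realpathCache: first.realpathCache
+  }, function (er, jsonFiles) {
+    if (er) throw er
+    console.log(jsFiles.length + jsonFiles.length, "files in total")
+  })
+})
+```
+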
+## Comparisons to other fnmatch/glob implementations
+
+While strict compliance with the existing standards is a worthwhile
+goal, some discrepancies exist between node-glob and other
+implementations, and are intentional.
+
+The double-star character `**` is supported by default, unless the
+`noglobstar` flag is set.  This is supported in the manner of bsdglob
+and bash 4.3, where `**` only has special significance if it is the only
+thing in a path part.  That is, `a/**/b` will match `a/x/y/b`, but
+`a/**b` will not.
+
+Note that symlinked directories are not crawled as part of a `**`,
+though their contents may match against subsequent portions of the
+pattern.  This prevents infinite loops and duplicates and the like.
+
+If an escaped pattern has no matches, and the `nonull` flag is set,
+then glob returns the pattern as-provided, rather than
+interpreting the character escapes.  For example,
+`glob.match([], "\\*a\\?")` will return `"\\*a\\?"` rather than
+`"*a?"`.  This is akin to setting the `nullglob` option in bash, except
+that it does not resolve escaped pattern characters.
+
+If brace expansion is not disabled, then it is performed before any
+other interpretation of the glob pattern.  Thus, a pattern like
+`+(a|{b),c)}`, which would not be valid in bash or zsh, is expanded
+**first** into the set of `+(a|b)` and `+(a|c)`, and those patterns are
+checked for validity.  Since those two are valid, matching proceeds.
+
+### Comments and Negation
+
+Previously, this module let you mark a pattern as a "comment" if it
+started with a `#` character, or a "negated" pattern if it started
+with a `!` character.
+
+These options were deprecated in version 5, and removed in version 6.
+
+To specify things that should not match, use the `ignore` option.
+
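+For example, a minimal sketch:
+
+```javascript
+var glob = require("glob")
+
+// instead of negated patterns, exclude matches with ignore:
+glob("**/*.js", {ignore: ["node_modules/**", "**/*.min.js"]}, function (er, files) {
+  // files contains no minified bundles and nothing under node_modules
+})
+```
+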
+## Windows
+
+**Please only use forward-slashes in glob expressions.**
+
+Though windows uses either `/` or `\` as its path separator, only `/`
+characters are used by this glob implementation.  You must use
+forward-slashes **only** in glob expressions.  Back-slashes will always
+be interpreted as escape characters, not path separators.
+
+Results from absolute patterns such as `/foo/*` are mounted onto the
+root setting using `path.join`.  On windows, this will by default result
+in `/foo/*` matching `C:\foo\bar.txt`.
+
+## Race Conditions
+
+Glob searching, by its very nature, is susceptible to race conditions,
+since it relies on directory walking and such.
+
+As a result, it is possible that a file that exists when glob looks for
+it may have been deleted or modified by the time it returns the result.
+
+As part of its internal implementation, this program caches all stat
+and readdir calls that it makes, in order to cut down on system
+overhead.  However, this also makes it even more susceptible to races,
+especially if the cache or statCache objects are reused between glob
+calls.
+
+Users are thus advised not to use a glob result as a guarantee of
+filesystem state in the face of rapid changes.  For the vast majority
+of operations, this is never a problem.
+
+## Glob Logo
+Glob's logo was created by [Tanya Brassie](http://tanyabrassie.com/). Logo files can be found [here](https://github.com/isaacs/node-glob/tree/master/logo).
+
+The logo is licensed under a [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/).
+
+## Contributing
+
+Any change to behavior (including bugfixes) must come with a test.
+
+Patches that fail tests or reduce performance will be rejected.
+
+```
+# to run tests
+npm test
+
+# to re-generate test fixtures
+npm run test-regen
+
+# to benchmark against bash/zsh
+npm run bench
+
+# to profile javascript
+npm run prof
+```
+
+![](oh-my-glob.gif)
diff --git a/_extensions/d2/node_modules/glob/common.js b/_extensions/d2/node_modules/glob/common.js
new file mode 100644
index 00000000..424c46e1
--- /dev/null
+++ b/_extensions/d2/node_modules/glob/common.js
@@ -0,0 +1,238 @@
+exports.setopts = setopts
+exports.ownProp = ownProp
+exports.makeAbs = makeAbs
+exports.finish = finish
+exports.mark = mark
+exports.isIgnored = isIgnored
+exports.childrenIgnored = childrenIgnored
+
+function ownProp (obj, field) {
+  return Object.prototype.hasOwnProperty.call(obj, field)
+}
+
+var fs = require("fs")
+var path = require("path")
+var minimatch = require("minimatch")
+var isAbsolute = require("path-is-absolute")
+var Minimatch = minimatch.Minimatch
+
+function alphasort (a, b) {
+  return a.localeCompare(b, 'en')
+}
+
+function setupIgnores (self, options) {
+  self.ignore = options.ignore || []
+
+  if (!Array.isArray(self.ignore))
+    self.ignore = [self.ignore]
+
+  if (self.ignore.length) {
+    self.ignore = self.ignore.map(ignoreMap)
+  }
+}
+
+// ignore patterns are always in dot:true mode.
+function ignoreMap (pattern) {
+  var gmatcher = null
+  if (pattern.slice(-3) === '/**') {
+    var gpattern = pattern.replace(/(\/\*\*)+$/, '')
+    gmatcher = new Minimatch(gpattern, { dot: true })
+  }
+
+  return {
+    matcher: new Minimatch(pattern, { dot: true }),
+    gmatcher: gmatcher
+  }
+}
+
+function setopts (self, pattern, options) {
+  if (!options)
+    options = {}
+
+  // base-matching: just use globstar for that.
+  if (options.matchBase && -1 === pattern.indexOf("/")) {
+    if (options.noglobstar) {
+      throw new Error("base matching requires globstar")
+    }
+    pattern = "**/" + pattern
+  }
+
+  self.silent = !!options.silent
+  self.pattern = pattern
+  self.strict = options.strict !== false
+  self.realpath = !!options.realpath
+  self.realpathCache = options.realpathCache || Object.create(null)
+  self.follow = !!options.follow
+  self.dot = !!options.dot
+  self.mark = !!options.mark
+  self.nodir = !!options.nodir
+  if (self.nodir)
+    self.mark = true
+  self.sync = !!options.sync
+  self.nounique = !!options.nounique
+  self.nonull = !!options.nonull
+  self.nosort = !!options.nosort
+  self.nocase = !!options.nocase
+  self.stat = !!options.stat
+  self.noprocess = !!options.noprocess
+  self.absolute = !!options.absolute
+  self.fs = options.fs || fs
+
+  self.maxLength = options.maxLength || Infinity
+  self.cache = options.cache || Object.create(null)
+  self.statCache = options.statCache || Object.create(null)
+  self.symlinks = options.symlinks || Object.create(null)
+
+  setupIgnores(self, options)
+
+  self.changedCwd = false
+  var cwd = process.cwd()
+  if (!ownProp(options, "cwd"))
+    self.cwd = cwd
+  else {
+    self.cwd = path.resolve(options.cwd)
+    self.changedCwd = self.cwd !== cwd
+  }
+
+  self.root = options.root || path.resolve(self.cwd, "/")
+  self.root = path.resolve(self.root)
+  if (process.platform === "win32")
+    self.root = self.root.replace(/\\/g, "/")
+
+  // TODO: is an absolute `cwd` supposed to be resolved against `root`?
+  // e.g. { cwd: '/test', root: __dirname } === path.join(__dirname, '/test')
+  self.cwdAbs = isAbsolute(self.cwd) ? self.cwd : makeAbs(self, self.cwd)
+  if (process.platform === "win32")
+    self.cwdAbs = self.cwdAbs.replace(/\\/g, "/")
+  self.nomount = !!options.nomount
+
+  // disable comments and negation in Minimatch.
+  // Note that they are not supported in Glob itself anyway.
+  options.nonegate = true
+  options.nocomment = true
+  // always treat \ in patterns as escapes, not path separators
+  options.allowWindowsEscape = false
+
+  self.minimatch = new Minimatch(pattern, options)
+  self.options = self.minimatch.options
+}
+
+function finish (self) {
+  var nou = self.nounique
+  var all = nou ? [] : Object.create(null)
+
+  for (var i = 0, l = self.matches.length; i < l; i ++) {
+    var matches = self.matches[i]
+    if (!matches || Object.keys(matches).length === 0) {
+      if (self.nonull) {
+        // do like the shell, and spit out the literal glob
+        var literal = self.minimatch.globSet[i]
+        if (nou)
+          all.push(literal)
+        else
+          all[literal] = true
+      }
+    } else {
+      // had matches
+      var m = Object.keys(matches)
+      if (nou)
+        all.push.apply(all, m)
+      else
+        m.forEach(function (m) {
+          all[m] = true
+        })
+    }
+  }
+
+  if (!nou)
+    all = Object.keys(all)
+
+  if (!self.nosort)
+    all = all.sort(alphasort)
+
+  // at *some* point we statted all of these
+  if (self.mark) {
+    for (var i = 0; i < all.length; i++) {
+      all[i] = self._mark(all[i])
+    }
+    if (self.nodir) {
+      all = all.filter(function (e) {
+        var notDir = !(/\/$/.test(e))
+        var c = self.cache[e] || self.cache[makeAbs(self, e)]
+        if (notDir && c)
+          notDir = c !== 'DIR' && !Array.isArray(c)
+        return notDir
+      })
+    }
+  }
+
+  if (self.ignore.length)
+    all = all.filter(function(m) {
+      return !isIgnored(self, m)
+    })
+
+  self.found = all
+}
+
+function mark (self, p) {
+  var abs = makeAbs(self, p)
+  var c = self.cache[abs]
+  var m = p
+  if (c) {
+    var isDir = c === 'DIR' || Array.isArray(c)
+    var slash = p.slice(-1) === '/'
+
+    if (isDir && !slash)
+      m += '/'
+    else if (!isDir && slash)
+      m = m.slice(0, -1)
+
+    if (m !== p) {
+      var mabs = makeAbs(self, m)
+      self.statCache[mabs] = self.statCache[abs]
+      self.cache[mabs] = self.cache[abs]
+    }
+  }
+
+  return m
+}
+
+// lotta situps...
+function makeAbs (self, f) {
+  var abs = f
+  if (f.charAt(0) === '/') {
+    abs = path.join(self.root, f)
+  } else if (isAbsolute(f) || f === '') {
+    abs = f
+  } else if (self.changedCwd) {
+    abs = path.resolve(self.cwd, f)
+  } else {
+    abs = path.resolve(f)
+  }
+
+  if (process.platform === 'win32')
+    abs = abs.replace(/\\/g, '/')
+
+  return abs
+}
+
+
+// Return true if the pattern ends with globstar '**', so the accompanying parent directory is ignored too.
+// E.g. if node_modules/** is the pattern, 'node_modules' itself is ignored along with its contents.
+function isIgnored (self, path) {
+  if (!self.ignore.length)
+    return false
+
+  return self.ignore.some(function(item) {
+    return item.matcher.match(path) || !!(item.gmatcher && item.gmatcher.match(path))
+  })
+}
+
+function childrenIgnored (self, path) {
+  if (!self.ignore.length)
+    return false
+
+  return self.ignore.some(function(item) {
+    return !!(item.gmatcher && item.gmatcher.match(path))
+  })
+}
diff --git a/_extensions/d2/node_modules/glob/glob.js b/_extensions/d2/node_modules/glob/glob.js
new file mode 100644
index 00000000..37a4d7e6
--- /dev/null
+++ b/_extensions/d2/node_modules/glob/glob.js
@@ -0,0 +1,790 @@
+// Approach:
+//
+// 1. Get the minimatch set
+// 2. For each pattern in the set, PROCESS(pattern, false)
+// 3. Store matches per-set, then uniq them
+//
+// PROCESS(pattern, inGlobStar)
+// Get the first [n] items from pattern that are all strings
+// Join these together.  This is PREFIX.
+//   If there is no more remaining, then stat(PREFIX) and
+//   add to matches if it succeeds.  END.
+//
+// If inGlobStar and PREFIX is symlink and points to dir
+//   set ENTRIES = []
+// else readdir(PREFIX) as ENTRIES
+//   If fail, END
+//
+// with ENTRIES
+//   If pattern[n] is GLOBSTAR
+//     // handle the case where the globstar match is empty
+//     // by pruning it out, and testing the resulting pattern
+//     PROCESS(pattern[0..n] + pattern[n+1 .. $], false)
+//     // handle other cases.
+//     for ENTRY in ENTRIES (not dotfiles)
+//       // attach globstar + tail onto the entry
+//       // Mark that this entry is a globstar match
+//       PROCESS(pattern[0..n] + ENTRY + pattern[n .. $], true)
+//
+//   else // not globstar
+//     for ENTRY in ENTRIES (not dotfiles, unless pattern[n] is dot)
+//       Test ENTRY against pattern[n]
+//       If fails, continue
+//       If passes, PROCESS(pattern[0..n] + item + pattern[n+1 .. $])
+//
+// Caveat:
+//   Cache all stats and readdirs results to minimize syscall.  Since all
+//   we ever care about is existence and directory-ness, we can just keep
+//   `true` for files, and [children,...] for directories, or `false` for
+//   things that don't exist.
+
+module.exports = glob
+
+var rp = require('fs.realpath')
+var minimatch = require('minimatch')
+var Minimatch = minimatch.Minimatch
+var inherits = require('inherits')
+var EE = require('events').EventEmitter
+var path = require('path')
+var assert = require('assert')
+var isAbsolute = require('path-is-absolute')
+var globSync = require('./sync.js')
+var common = require('./common.js')
+var setopts = common.setopts
+var ownProp = common.ownProp
+var inflight = require('inflight')
+var util = require('util')
+var childrenIgnored = common.childrenIgnored
+var isIgnored = common.isIgnored
+
+var once = require('once')
+
+function glob (pattern, options, cb) {
+  if (typeof options === 'function') cb = options, options = {}
+  if (!options) options = {}
+
+  if (options.sync) {
+    if (cb)
+      throw new TypeError('callback provided to sync glob')
+    return globSync(pattern, options)
+  }
+
+  return new Glob(pattern, options, cb)
+}
+
+glob.sync = globSync
+var GlobSync = glob.GlobSync = globSync.GlobSync
+
+// old api surface
+glob.glob = glob
+
+function extend (origin, add) {
+  if (add === null || typeof add !== 'object') {
+    return origin
+  }
+
+  var keys = Object.keys(add)
+  var i = keys.length
+  while (i--) {
+    origin[keys[i]] = add[keys[i]]
+  }
+  return origin
+}
+
+glob.hasMagic = function (pattern, options_) {
+  var options = extend({}, options_)
+  options.noprocess = true
+
+  var g = new Glob(pattern, options)
+  var set = g.minimatch.set
+
+  if (!pattern)
+    return false
+
+  if (set.length > 1)
+    return true
+
+  for (var j = 0; j < set[0].length; j++) {
+    if (typeof set[0][j] !== 'string')
+      return true
+  }
+
+  return false
+}
+
+glob.Glob = Glob
+inherits(Glob, EE)
+function Glob (pattern, options, cb) {
+  if (typeof options === 'function') {
+    cb = options
+    options = null
+  }
+
+  if (options && options.sync) {
+    if (cb)
+      throw new TypeError('callback provided to sync glob')
+    return new GlobSync(pattern, options)
+  }
+
+  if (!(this instanceof Glob))
+    return new Glob(pattern, options, cb)
+
+  setopts(this, pattern, options)
+  this._didRealPath = false
+
+  // process each pattern in the minimatch set
+  var n = this.minimatch.set.length
+
+  // The matches are stored as {<filename>: true,...} so that
+  // duplicates are automagically pruned.
+  // Later, we do an Object.keys() on these.
+  // Keep them as a list so we can fill in when nonull is set.
+  this.matches = new Array(n)
+
+  if (typeof cb === 'function') {
+    cb = once(cb)
+    this.on('error', cb)
+    this.on('end', function (matches) {
+      cb(null, matches)
+    })
+  }
+
+  var self = this
+  this._processing = 0
+
+  this._emitQueue = []
+  this._processQueue = []
+  this.paused = false
+
+  if (this.noprocess)
+    return this
+
+  if (n === 0)
+    return done()
+
+  var sync = true
+  for (var i = 0; i < n; i ++) {
+    this._process(this.minimatch.set[i], i, false, done)
+  }
+  sync = false
+
+  function done () {
+    --self._processing
+    if (self._processing <= 0) {
+      if (sync) {
+        process.nextTick(function () {
+          self._finish()
+        })
+      } else {
+        self._finish()
+      }
+    }
+  }
+}
+
+Glob.prototype._finish = function () {
+  assert(this instanceof Glob)
+  if (this.aborted)
+    return
+
+  if (this.realpath && !this._didRealpath)
+    return this._realpath()
+
+  common.finish(this)
+  this.emit('end', this.found)
+}
+
+Glob.prototype._realpath = function () {
+  if (this._didRealpath)
+    return
+
+  this._didRealpath = true
+
+  var n = this.matches.length
+  if (n === 0)
+    return this._finish()
+
+  var self = this
+  for (var i = 0; i < this.matches.length; i++)
+    this._realpathSet(i, next)
+
+  function next () {
+    if (--n === 0)
+      self._finish()
+  }
+}
+
+Glob.prototype._realpathSet = function (index, cb) {
+  var matchset = this.matches[index]
+  if (!matchset)
+    return cb()
+
+  var found = Object.keys(matchset)
+  var self = this
+  var n = found.length
+
+  if (n === 0)
+    return cb()
+
+  var set = this.matches[index] = Object.create(null)
+  found.forEach(function (p, i) {
+    // If there's a problem with the stat, then it means that
+    // one or more of the links in the realpath couldn't be
+    // resolved.  just return the abs value in that case.
+    p = self._makeAbs(p)
+    rp.realpath(p, self.realpathCache, function (er, real) {
+      if (!er)
+        set[real] = true
+      else if (er.syscall === 'stat')
+        set[p] = true
+      else
+        self.emit('error', er) // srsly wtf right here
+
+      if (--n === 0) {
+        self.matches[index] = set
+        cb()
+      }
+    })
+  })
+}
+
+Glob.prototype._mark = function (p) {
+  return common.mark(this, p)
+}
+
+Glob.prototype._makeAbs = function (f) {
+  return common.makeAbs(this, f)
+}
+
+Glob.prototype.abort = function () {
+  this.aborted = true
+  this.emit('abort')
+}
+
+Glob.prototype.pause = function () {
+  if (!this.paused) {
+    this.paused = true
+    this.emit('pause')
+  }
+}
+
+Glob.prototype.resume = function () {
+  if (this.paused) {
+    this.emit('resume')
+    this.paused = false
+    if (this._emitQueue.length) {
+      var eq = this._emitQueue.slice(0)
+      this._emitQueue.length = 0
+      for (var i = 0; i < eq.length; i ++) {
+        var e = eq[i]
+        this._emitMatch(e[0], e[1])
+      }
+    }
+    if (this._processQueue.length) {
+      var pq = this._processQueue.slice(0)
+      this._processQueue.length = 0
+      for (var i = 0; i < pq.length; i ++) {
+        var p = pq[i]
+        this._processing--
+        this._process(p[0], p[1], p[2], p[3])
+      }
+    }
+  }
+}
+
+Glob.prototype._process = function (pattern, index, inGlobStar, cb) {
+  assert(this instanceof Glob)
+  assert(typeof cb === 'function')
+
+  if (this.aborted)
+    return
+
+  this._processing++
+  if (this.paused) {
+    this._processQueue.push([pattern, index, inGlobStar, cb])
+    return
+  }
+
+  //console.error('PROCESS %d', this._processing, pattern)
+
+  // Get the first [n] parts of pattern that are all strings.
+  var n = 0
+  while (typeof pattern[n] === 'string') {
+    n ++
+  }
+  // now n is the index of the first one that is *not* a string.
+
+  // see if there's anything else
+  var prefix
+  switch (n) {
+    // if not, then this is rather simple
+    case pattern.length:
+      this._processSimple(pattern.join('/'), index, cb)
+      return
+
+    case 0:
+      // pattern *starts* with some non-trivial item.
+      // going to readdir(cwd), but not include the prefix in matches.
+      prefix = null
+      break
+
+    default:
+      // pattern has some string bits in the front.
+      // whatever it starts with, whether that's 'absolute' like /foo/bar,
+      // or 'relative' like '../baz'
+      prefix = pattern.slice(0, n).join('/')
+      break
+  }
+
+  var remain = pattern.slice(n)
+
+  // get the list of entries.
+  var read
+  if (prefix === null)
+    read = '.'
+  else if (isAbsolute(prefix) ||
+      isAbsolute(pattern.map(function (p) {
+        return typeof p === 'string' ? p : '[*]'
+      }).join('/'))) {
+    if (!prefix || !isAbsolute(prefix))
+      prefix = '/' + prefix
+    read = prefix
+  } else
+    read = prefix
+
+  var abs = this._makeAbs(read)
+
+  //if ignored, skip _processing
+  if (childrenIgnored(this, read))
+    return cb()
+
+  var isGlobStar = remain[0] === minimatch.GLOBSTAR
+  if (isGlobStar)
+    this._processGlobStar(prefix, read, abs, remain, index, inGlobStar, cb)
+  else
+    this._processReaddir(prefix, read, abs, remain, index, inGlobStar, cb)
+}
+
+Glob.prototype._processReaddir = function (prefix, read, abs, remain, index, inGlobStar, cb) {
+  var self = this
+  this._readdir(abs, inGlobStar, function (er, entries) {
+    return self._processReaddir2(prefix, read, abs, remain, index, inGlobStar, entries, cb)
+  })
+}
+
+Glob.prototype._processReaddir2 = function (prefix, read, abs, remain, index, inGlobStar, entries, cb) {
+
+  // if the abs isn't a dir, then nothing can match!
+  if (!entries)
+    return cb()
+
+  // It will only match dot entries if it starts with a dot, or if
+  // dot is set.  Stuff like @(.foo|.bar) isn't allowed.
+  var pn = remain[0]
+  var negate = !!this.minimatch.negate
+  var rawGlob = pn._glob
+  var dotOk = this.dot || rawGlob.charAt(0) === '.'
+
+  var matchedEntries = []
+  for (var i = 0; i < entries.length; i++) {
+    var e = entries[i]
+    if (e.charAt(0) !== '.' || dotOk) {
+      var m
+      if (negate && !prefix) {
+        m = !e.match(pn)
+      } else {
+        m = e.match(pn)
+      }
+      if (m)
+        matchedEntries.push(e)
+    }
+  }
+
+  //console.error('prd2', prefix, entries, remain[0]._glob, matchedEntries)
+
+  var len = matchedEntries.length
+  // If there are no matched entries, then nothing matches.
+  if (len === 0)
+    return cb()
+
+  // if this is the last remaining pattern bit, then no need for
+  // an additional stat *unless* the user has specified mark or
+  // stat explicitly.  We know they exist, since readdir returned
+  // them.
+
+  if (remain.length === 1 && !this.mark && !this.stat) {
+    if (!this.matches[index])
+      this.matches[index] = Object.create(null)
+
+    for (var i = 0; i < len; i ++) {
+      var e = matchedEntries[i]
+      if (prefix) {
+        if (prefix !== '/')
+          e = prefix + '/' + e
+        else
+          e = prefix + e
+      }
+
+      if (e.charAt(0) === '/' && !this.nomount) {
+        e = path.join(this.root, e)
+      }
+      this._emitMatch(index, e)
+    }
+    // This was the last one, and no stats were needed
+    return cb()
+  }
+
+  // now test all matched entries as stand-ins for that part
+  // of the pattern.
+  remain.shift()
+  for (var i = 0; i < len; i ++) {
+    var e = matchedEntries[i]
+    var newPattern
+    if (prefix) {
+      if (prefix !== '/')
+        e = prefix + '/' + e
+      else
+        e = prefix + e
+    }
+    this._process([e].concat(remain), index, inGlobStar, cb)
+  }
+  cb()
+}
+
+Glob.prototype._emitMatch = function (index, e) {
+  if (this.aborted)
+    return
+
+  if (isIgnored(this, e))
+    return
+
+  if (this.paused) {
+    this._emitQueue.push([index, e])
+    return
+  }
+
+  var abs = isAbsolute(e) ? e : this._makeAbs(e)
+
+  if (this.mark)
+    e = this._mark(e)
+
+  if (this.absolute)
+    e = abs
+
+  if (this.matches[index][e])
+    return
+
+  if (this.nodir) {
+    var c = this.cache[abs]
+    if (c === 'DIR' || Array.isArray(c))
+      return
+  }
+
+  this.matches[index][e] = true
+
+  var st = this.statCache[abs]
+  if (st)
+    this.emit('stat', e, st)
+
+  this.emit('match', e)
+}
+
+Glob.prototype._readdirInGlobStar = function (abs, cb) {
+  if (this.aborted)
+    return
+
+  // follow all symlinked directories forever
+  // just proceed as if this is a non-globstar situation
+  if (this.follow)
+    return this._readdir(abs, false, cb)
+
+  var lstatkey = 'lstat\0' + abs
+  var self = this
+  var lstatcb = inflight(lstatkey, lstatcb_)
+
+  if (lstatcb)
+    self.fs.lstat(abs, lstatcb)
+
+  function lstatcb_ (er, lstat) {
+    if (er && er.code === 'ENOENT')
+      return cb()
+
+    var isSym = lstat && lstat.isSymbolicLink()
+    self.symlinks[abs] = isSym
+
+    // If it's not a symlink or a dir, then it's definitely a regular file.
+    // don't bother doing a readdir in that case.
+    if (!isSym && lstat && !lstat.isDirectory()) {
+      self.cache[abs] = 'FILE'
+      cb()
+    } else
+      self._readdir(abs, false, cb)
+  }
+}
+
+Glob.prototype._readdir = function (abs, inGlobStar, cb) {
+  if (this.aborted)
+    return
+
+  cb = inflight('readdir\0'+abs+'\0'+inGlobStar, cb)
+  if (!cb)
+    return
+
+  //console.error('RD %j %j', +inGlobStar, abs)
+  if (inGlobStar && !ownProp(this.symlinks, abs))
+    return this._readdirInGlobStar(abs, cb)
+
+  if (ownProp(this.cache, abs)) {
+    var c = this.cache[abs]
+    if (!c || c === 'FILE')
+      return cb()
+
+    if (Array.isArray(c))
+      return cb(null, c)
+  }
+
+  var self = this
+  self.fs.readdir(abs, readdirCb(this, abs, cb))
+}
+
+function readdirCb (self, abs, cb) {
+  return function (er, entries) {
+    if (er)
+      self._readdirError(abs, er, cb)
+    else
+      self._readdirEntries(abs, entries, cb)
+  }
+}
+
+Glob.prototype._readdirEntries = function (abs, entries, cb) {
+  if (this.aborted)
+    return
+
+  // if we haven't asked to stat everything, then just
+  // assume that everything in there exists, so we can avoid
+  // having to stat it a second time.
+  if (!this.mark && !this.stat) {
+    for (var i = 0; i < entries.length; i ++) {
+      var e = entries[i]
+      if (abs === '/')
+        e = abs + e
+      else
+        e = abs + '/' + e
+      this.cache[e] = true
+    }
+  }
+
+  this.cache[abs] = entries
+  return cb(null, entries)
+}
+
+Glob.prototype._readdirError = function (f, er, cb) {
+  if (this.aborted)
+    return
+
+  // handle errors, and cache the information
+  switch (er.code) {
+    case 'ENOTSUP': // https://github.com/isaacs/node-glob/issues/205
+    case 'ENOTDIR': // totally normal. means it *does* exist.
+      var abs = this._makeAbs(f)
+      this.cache[abs] = 'FILE'
+      if (abs === this.cwdAbs) {
+        var error = new Error(er.code + ' invalid cwd ' + this.cwd)
+        error.path = this.cwd
+        error.code = er.code
+        this.emit('error', error)
+        this.abort()
+      }
+      break
+
+    case 'ENOENT': // not terribly unusual
+    case 'ELOOP':
+    case 'ENAMETOOLONG':
+    case 'UNKNOWN':
+      this.cache[this._makeAbs(f)] = false
+      break
+
+    default: // some unusual error.  Treat as failure.
+      this.cache[this._makeAbs(f)] = false
+      if (this.strict) {
+        this.emit('error', er)
+        // If the error is handled, then we abort
+        // if not, we threw out of here
+        this.abort()
+      }
+      if (!this.silent)
+        console.error('glob error', er)
+      break
+  }
+
+  return cb()
+}
+
+Glob.prototype._processGlobStar = function (prefix, read, abs, remain, index, inGlobStar, cb) {
+  var self = this
+  this._readdir(abs, inGlobStar, function (er, entries) {
+    self._processGlobStar2(prefix, read, abs, remain, index, inGlobStar, entries, cb)
+  })
+}
+
+
+Glob.prototype._processGlobStar2 = function (prefix, read, abs, remain, index, inGlobStar, entries, cb) {
+  //console.error('pgs2', prefix, remain[0], entries)
+
+  // no entries means not a dir, so it can never have matches
+  // foo.txt/** doesn't match foo.txt
+  if (!entries)
+    return cb()
+
+  // test without the globstar, and with every child both below
+  // and replacing the globstar.
+  var remainWithoutGlobStar = remain.slice(1)
+  var gspref = prefix ? [ prefix ] : []
+  var noGlobStar = gspref.concat(remainWithoutGlobStar)
+
+  // the noGlobStar pattern exits the inGlobStar state
+  this._process(noGlobStar, index, false, cb)
+
+  var isSym = this.symlinks[abs]
+  var len = entries.length
+
+  // If it's a symlink, and we're in a globstar, then stop
+  if (isSym && inGlobStar)
+    return cb()
+
+  for (var i = 0; i < len; i++) {
+    var e = entries[i]
+    if (e.charAt(0) === '.' && !this.dot)
+      continue
+
+    // these two cases enter the inGlobStar state
+    var instead = gspref.concat(entries[i], remainWithoutGlobStar)
+    this._process(instead, index, true, cb)
+
+    var below = gspref.concat(entries[i], remain)
+    this._process(below, index, true, cb)
+  }
+
+  cb()
+}
+
+Glob.prototype._processSimple = function (prefix, index, cb) {
+  // XXX review this.  Shouldn't it be doing the mounting etc
+  // before doing stat?  kinda weird?
+  var self = this
+  this._stat(prefix, function (er, exists) {
+    self._processSimple2(prefix, index, er, exists, cb)
+  })
+}
+Glob.prototype._processSimple2 = function (prefix, index, er, exists, cb) {
+
+  //console.error('ps2', prefix, exists)
+
+  if (!this.matches[index])
+    this.matches[index] = Object.create(null)
+
+  // If it doesn't exist, then just mark the lack of results
+  if (!exists)
+    return cb()
+
+  if (prefix && isAbsolute(prefix) && !this.nomount) {
+    var trail = /[\/\\]$/.test(prefix)
+    if (prefix.charAt(0) === '/') {
+      prefix = path.join(this.root, prefix)
+    } else {
+      prefix = path.resolve(this.root, prefix)
+      if (trail)
+        prefix += '/'
+    }
+  }
+
+  if (process.platform === 'win32')
+    prefix = prefix.replace(/\\/g, '/')
+
+  // Mark this as a match
+  this._emitMatch(index, prefix)
+  cb()
+}
+
+// Returns either 'DIR', 'FILE', or false
+Glob.prototype._stat = function (f, cb) {
+  var abs = this._makeAbs(f)
+  var needDir = f.slice(-1) === '/'
+
+  if (f.length > this.maxLength)
+    return cb()
+
+  if (!this.stat && ownProp(this.cache, abs)) {
+    var c = this.cache[abs]
+
+    if (Array.isArray(c))
+      c = 'DIR'
+
+    // It exists, but maybe not how we need it
+    if (!needDir || c === 'DIR')
+      return cb(null, c)
+
+    if (needDir && c === 'FILE')
+      return cb()
+
+    // otherwise we have to stat, because maybe c=true
+    // if we know it exists, but not what it is.
+  }
+
+  var exists
+  var stat = this.statCache[abs]
+  if (stat !== undefined) {
+    if (stat === false)
+      return cb(null, stat)
+    else {
+      var type = stat.isDirectory() ? 'DIR' : 'FILE'
+      if (needDir && type === 'FILE')
+        return cb()
+      else
+        return cb(null, type, stat)
+    }
+  }
+
+  var self = this
+  var statcb = inflight('stat\0' + abs, lstatcb_)
+  if (statcb)
+    self.fs.lstat(abs, statcb)
+
+  function lstatcb_ (er, lstat) {
+    if (lstat && lstat.isSymbolicLink()) {
+      // If it's a symlink, then treat it as the target, unless
+      // the target does not exist, then treat it as a file.
+      return self.fs.stat(abs, function (er, stat) {
+        if (er)
+          self._stat2(f, abs, null, lstat, cb)
+        else
+          self._stat2(f, abs, er, stat, cb)
+      })
+    } else {
+      self._stat2(f, abs, er, lstat, cb)
+    }
+  }
+}
+
+Glob.prototype._stat2 = function (f, abs, er, stat, cb) {
+  if (er && (er.code === 'ENOENT' || er.code === 'ENOTDIR')) {
+    this.statCache[abs] = false
+    return cb()
+  }
+
+  var needDir = f.slice(-1) === '/'
+  this.statCache[abs] = stat
+
+  if (abs.slice(-1) === '/' && stat && !stat.isDirectory())
+    return cb(null, false, stat)
+
+  var c = true
+  if (stat)
+    c = stat.isDirectory() ? 'DIR' : 'FILE'
+  this.cache[abs] = this.cache[abs] || c
+
+  if (needDir && c === 'FILE')
+    return cb()
+
+  return cb(null, c, stat)
+}
diff --git a/_extensions/d2/node_modules/glob/package.json b/_extensions/d2/node_modules/glob/package.json
new file mode 100644
index 00000000..5940b649
--- /dev/null
+++ b/_extensions/d2/node_modules/glob/package.json
@@ -0,0 +1,55 @@
+{
+  "author": "Isaac Z. Schlueter  (http://blog.izs.me/)",
+  "name": "glob",
+  "description": "a little globber",
+  "version": "7.2.3",
+  "publishConfig": {
+    "tag": "v7-legacy"
+  },
+  "repository": {
+    "type": "git",
+    "url": "git://github.com/isaacs/node-glob.git"
+  },
+  "main": "glob.js",
+  "files": [
+    "glob.js",
+    "sync.js",
+    "common.js"
+  ],
+  "engines": {
+    "node": "*"
+  },
+  "dependencies": {
+    "fs.realpath": "^1.0.0",
+    "inflight": "^1.0.4",
+    "inherits": "2",
+    "minimatch": "^3.1.1",
+    "once": "^1.3.0",
+    "path-is-absolute": "^1.0.0"
+  },
+  "devDependencies": {
+    "memfs": "^3.2.0",
+    "mkdirp": "0",
+    "rimraf": "^2.2.8",
+    "tap": "^15.0.6",
+    "tick": "0.0.6"
+  },
+  "tap": {
+    "before": "test/00-setup.js",
+    "after": "test/zz-cleanup.js",
+    "jobs": 1
+  },
+  "scripts": {
+    "prepublish": "npm run benchclean",
+    "profclean": "rm -f v8.log profile.txt",
+    "test": "tap",
+    "test-regen": "npm run profclean && TEST_REGEN=1 node test/00-setup.js",
+    "bench": "bash benchmark.sh",
+    "prof": "bash prof.sh && cat profile.txt",
+    "benchclean": "node benchclean.js"
+  },
+  "license": "ISC",
+  "funding": {
+    "url": "https://github.com/sponsors/isaacs"
+  }
+}
diff --git a/_extensions/d2/node_modules/glob/sync.js b/_extensions/d2/node_modules/glob/sync.js
new file mode 100644
index 00000000..2c4f4801
--- /dev/null
+++ b/_extensions/d2/node_modules/glob/sync.js
@@ -0,0 +1,486 @@
+module.exports = globSync
+globSync.GlobSync = GlobSync
+
+var rp = require('fs.realpath')
+var minimatch = require('minimatch')
+var Minimatch = minimatch.Minimatch
+var Glob = require('./glob.js').Glob
+var util = require('util')
+var path = require('path')
+var assert = require('assert')
+var isAbsolute = require('path-is-absolute')
+var common = require('./common.js')
+var setopts = common.setopts
+var ownProp = common.ownProp
+var childrenIgnored = common.childrenIgnored
+var isIgnored = common.isIgnored
+
+function globSync (pattern, options) {
+  if (typeof options === 'function' || arguments.length === 3)
+    throw new TypeError('callback provided to sync glob\n'+
+                        'See: https://github.com/isaacs/node-glob/issues/167')
+
+  return new GlobSync(pattern, options).found
+}
+
+function GlobSync (pattern, options) {
+  if (!pattern)
+    throw new Error('must provide pattern')
+
+  if (typeof options === 'function' || arguments.length === 3)
+    throw new TypeError('callback provided to sync glob\n'+
+                        'See: https://github.com/isaacs/node-glob/issues/167')
+
+  if (!(this instanceof GlobSync))
+    return new GlobSync(pattern, options)
+
+  setopts(this, pattern, options)
+
+  if (this.noprocess)
+    return this
+
+  var n = this.minimatch.set.length
+  this.matches = new Array(n)
+  for (var i = 0; i < n; i ++) {
+    this._process(this.minimatch.set[i], i, false)
+  }
+  this._finish()
+}
+
+GlobSync.prototype._finish = function () {
+  assert.ok(this instanceof GlobSync)
+  if (this.realpath) {
+    var self = this
+    this.matches.forEach(function (matchset, index) {
+      var set = self.matches[index] = Object.create(null)
+      for (var p in matchset) {
+        try {
+          p = self._makeAbs(p)
+          var real = rp.realpathSync(p, self.realpathCache)
+          set[real] = true
+        } catch (er) {
+          if (er.syscall === 'stat')
+            set[self._makeAbs(p)] = true
+          else
+            throw er
+        }
+      }
+    })
+  }
+  common.finish(this)
+}
+
+
+GlobSync.prototype._process = function (pattern, index, inGlobStar) {
+  assert.ok(this instanceof GlobSync)
+
+  // Get the first [n] parts of pattern that are all strings.
+  var n = 0
+  while (typeof pattern[n] === 'string') {
+    n ++
+  }
+  // now n is the index of the first one that is *not* a string.
+
+  // See if there's anything else
+  var prefix
+  switch (n) {
+    // if not, then this is rather simple
+    case pattern.length:
+      this._processSimple(pattern.join('/'), index)
+      return
+
+    case 0:
+      // pattern *starts* with some non-trivial item.
+      // going to readdir(cwd), but not include the prefix in matches.
+      prefix = null
+      break
+
+    default:
+      // pattern has some string bits in the front.
+      // whatever it starts with, whether that's 'absolute' like /foo/bar,
+      // or 'relative' like '../baz'
+      prefix = pattern.slice(0, n).join('/')
+      break
+  }
+
+  var remain = pattern.slice(n)
+
+  // get the list of entries.
+  var read
+  if (prefix === null)
+    read = '.'
+  else if (isAbsolute(prefix) ||
+      isAbsolute(pattern.map(function (p) {
+        return typeof p === 'string' ? p : '[*]'
+      }).join('/'))) {
+    if (!prefix || !isAbsolute(prefix))
+      prefix = '/' + prefix
+    read = prefix
+  } else
+    read = prefix
+
+  var abs = this._makeAbs(read)
+
+  //if ignored, skip processing
+  if (childrenIgnored(this, read))
+    return
+
+  var isGlobStar = remain[0] === minimatch.GLOBSTAR
+  if (isGlobStar)
+    this._processGlobStar(prefix, read, abs, remain, index, inGlobStar)
+  else
+    this._processReaddir(prefix, read, abs, remain, index, inGlobStar)
+}
+
+
+GlobSync.prototype._processReaddir = function (prefix, read, abs, remain, index, inGlobStar) {
+  var entries = this._readdir(abs, inGlobStar)
+
+  // if the abs isn't a dir, then nothing can match!
+  if (!entries)
+    return
+
+  // It will only match dot entries if it starts with a dot, or if
+  // dot is set.  Stuff like @(.foo|.bar) isn't allowed.
+  var pn = remain[0]
+  var negate = !!this.minimatch.negate
+  var rawGlob = pn._glob
+  var dotOk = this.dot || rawGlob.charAt(0) === '.'
+
+  var matchedEntries = []
+  for (var i = 0; i < entries.length; i++) {
+    var e = entries[i]
+    if (e.charAt(0) !== '.' || dotOk) {
+      var m
+      if (negate && !prefix) {
+        m = !e.match(pn)
+      } else {
+        m = e.match(pn)
+      }
+      if (m)
+        matchedEntries.push(e)
+    }
+  }
+
+  var len = matchedEntries.length
+  // If there are no matched entries, then nothing matches.
+  if (len === 0)
+    return
+
+  // if this is the last remaining pattern bit, then no need for
+  // an additional stat *unless* the user has specified mark or
+  // stat explicitly.  We know they exist, since readdir returned
+  // them.
+
+  if (remain.length === 1 && !this.mark && !this.stat) {
+    if (!this.matches[index])
+      this.matches[index] = Object.create(null)
+
+    for (var i = 0; i < len; i ++) {
+      var e = matchedEntries[i]
+      if (prefix) {
+        if (prefix.slice(-1) !== '/')
+          e = prefix + '/' + e
+        else
+          e = prefix + e
+      }
+
+      if (e.charAt(0) === '/' && !this.nomount) {
+        e = path.join(this.root, e)
+      }
+      this._emitMatch(index, e)
+    }
+    // This was the last one, and no stats were needed
+    return
+  }
+
+  // now test all matched entries as stand-ins for that part
+  // of the pattern.
+  remain.shift()
+  for (var i = 0; i < len; i ++) {
+    var e = matchedEntries[i]
+    var newPattern
+    if (prefix)
+      newPattern = [prefix, e]
+    else
+      newPattern = [e]
+    this._process(newPattern.concat(remain), index, inGlobStar)
+  }
+}
+
+
+GlobSync.prototype._emitMatch = function (index, e) {
+  if (isIgnored(this, e))
+    return
+
+  var abs = this._makeAbs(e)
+
+  if (this.mark)
+    e = this._mark(e)
+
+  if (this.absolute) {
+    e = abs
+  }
+
+  if (this.matches[index][e])
+    return
+
+  if (this.nodir) {
+    var c = this.cache[abs]
+    if (c === 'DIR' || Array.isArray(c))
+      return
+  }
+
+  this.matches[index][e] = true
+
+  if (this.stat)
+    this._stat(e)
+}
+
+
+GlobSync.prototype._readdirInGlobStar = function (abs) {
+  // follow all symlinked directories forever
+  // just proceed as if this is a non-globstar situation
+  if (this.follow)
+    return this._readdir(abs, false)
+
+  var entries
+  var lstat
+  var stat
+  try {
+    lstat = this.fs.lstatSync(abs)
+  } catch (er) {
+    if (er.code === 'ENOENT') {
+      // lstat failed, doesn't exist
+      return null
+    }
+  }
+
+  var isSym = lstat && lstat.isSymbolicLink()
+  this.symlinks[abs] = isSym
+
+  // If it's not a symlink or a dir, then it's definitely a regular file.
+  // don't bother doing a readdir in that case.
+  if (!isSym && lstat && !lstat.isDirectory())
+    this.cache[abs] = 'FILE'
+  else
+    entries = this._readdir(abs, false)
+
+  return entries
+}
+
+GlobSync.prototype._readdir = function (abs, inGlobStar) {
+  var entries
+
+  if (inGlobStar && !ownProp(this.symlinks, abs))
+    return this._readdirInGlobStar(abs)
+
+  if (ownProp(this.cache, abs)) {
+    var c = this.cache[abs]
+    if (!c || c === 'FILE')
+      return null
+
+    if (Array.isArray(c))
+      return c
+  }
+
+  try {
+    return this._readdirEntries(abs, this.fs.readdirSync(abs))
+  } catch (er) {
+    this._readdirError(abs, er)
+    return null
+  }
+}
+
+GlobSync.prototype._readdirEntries = function (abs, entries) {
+  // if we haven't asked to stat everything, then just
+  // assume that everything in there exists, so we can avoid
+  // having to stat it a second time.
+  if (!this.mark && !this.stat) {
+    for (var i = 0; i < entries.length; i ++) {
+      var e = entries[i]
+      if (abs === '/')
+        e = abs + e
+      else
+        e = abs + '/' + e
+      this.cache[e] = true
+    }
+  }
+
+  this.cache[abs] = entries
+
+  // mark and cache dir-ness
+  return entries
+}
+
+GlobSync.prototype._readdirError = function (f, er) {
+  // handle errors, and cache the information
+  switch (er.code) {
+    case 'ENOTSUP': // https://github.com/isaacs/node-glob/issues/205
+    case 'ENOTDIR': // totally normal. means it *does* exist.
+      var abs = this._makeAbs(f)
+      this.cache[abs] = 'FILE'
+      if (abs === this.cwdAbs) {
+        var error = new Error(er.code + ' invalid cwd ' + this.cwd)
+        error.path = this.cwd
+        error.code = er.code
+        throw error
+      }
+      break
+
+    case 'ENOENT': // not terribly unusual
+    case 'ELOOP':
+    case 'ENAMETOOLONG':
+    case 'UNKNOWN':
+      this.cache[this._makeAbs(f)] = false
+      break
+
+    default: // some unusual error.  Treat as failure.
+      this.cache[this._makeAbs(f)] = false
+      if (this.strict)
+        throw er
+      if (!this.silent)
+        console.error('glob error', er)
+      break
+  }
+}
+
+GlobSync.prototype._processGlobStar = function (prefix, read, abs, remain, index, inGlobStar) {
+
+  var entries = this._readdir(abs, inGlobStar)
+
+  // no entries means not a dir, so it can never have matches
+  // foo.txt/** doesn't match foo.txt
+  if (!entries)
+    return
+
+  // test without the globstar, and with every child both below
+  // and replacing the globstar.
+  var remainWithoutGlobStar = remain.slice(1)
+  var gspref = prefix ? [ prefix ] : []
+  var noGlobStar = gspref.concat(remainWithoutGlobStar)
+
+  // the noGlobStar pattern exits the inGlobStar state
+  this._process(noGlobStar, index, false)
+
+  var len = entries.length
+  var isSym = this.symlinks[abs]
+
+  // If it's a symlink, and we're in a globstar, then stop
+  if (isSym && inGlobStar)
+    return
+
+  for (var i = 0; i < len; i++) {
+    var e = entries[i]
+    if (e.charAt(0) === '.' && !this.dot)
+      continue
+
+    // these two cases enter the inGlobStar state
+    var instead = gspref.concat(entries[i], remainWithoutGlobStar)
+    this._process(instead, index, true)
+
+    var below = gspref.concat(entries[i], remain)
+    this._process(below, index, true)
+  }
+}
+
+GlobSync.prototype._processSimple = function (prefix, index) {
+  // XXX review this.  Shouldn't it be doing the mounting etc
+  // before doing stat?  kinda weird?
+  var exists = this._stat(prefix)
+
+  if (!this.matches[index])
+    this.matches[index] = Object.create(null)
+
+  // If it doesn't exist, then just mark the lack of results
+  if (!exists)
+    return
+
+  if (prefix && isAbsolute(prefix) && !this.nomount) {
+    var trail = /[\/\\]$/.test(prefix)
+    if (prefix.charAt(0) === '/') {
+      prefix = path.join(this.root, prefix)
+    } else {
+      prefix = path.resolve(this.root, prefix)
+      if (trail)
+        prefix += '/'
+    }
+  }
+
+  if (process.platform === 'win32')
+    prefix = prefix.replace(/\\/g, '/')
+
+  // Mark this as a match
+  this._emitMatch(index, prefix)
+}
+
+// Returns either 'DIR', 'FILE', or false
+GlobSync.prototype._stat = function (f) {
+  var abs = this._makeAbs(f)
+  var needDir = f.slice(-1) === '/'
+
+  if (f.length > this.maxLength)
+    return false
+
+  if (!this.stat && ownProp(this.cache, abs)) {
+    var c = this.cache[abs]
+
+    if (Array.isArray(c))
+      c = 'DIR'
+
+    // It exists, but maybe not how we need it
+    if (!needDir || c === 'DIR')
+      return c
+
+    if (needDir && c === 'FILE')
+      return false
+
+    // otherwise we have to stat, because maybe c=true
+    // if we know it exists, but not what it is.
+  }
+
+  var exists
+  var stat = this.statCache[abs]
+  if (!stat) {
+    var lstat
+    try {
+      lstat = this.fs.lstatSync(abs)
+    } catch (er) {
+      if (er && (er.code === 'ENOENT' || er.code === 'ENOTDIR')) {
+        this.statCache[abs] = false
+        return false
+      }
+    }
+
+    if (lstat && lstat.isSymbolicLink()) {
+      try {
+        stat = this.fs.statSync(abs)
+      } catch (er) {
+        stat = lstat
+      }
+    } else {
+      stat = lstat
+    }
+  }
+
+  this.statCache[abs] = stat
+
+  var c = true
+  if (stat)
+    c = stat.isDirectory() ? 'DIR' : 'FILE'
+
+  this.cache[abs] = this.cache[abs] || c
+
+  if (needDir && c === 'FILE')
+    return false
+
+  return c
+}
+
+GlobSync.prototype._mark = function (p) {
+  return common.mark(this, p)
+}
+
+GlobSync.prototype._makeAbs = function (f) {
+  return common.makeAbs(this, f)
+}
diff --git a/_extensions/d2/node_modules/inflight/LICENSE b/_extensions/d2/node_modules/inflight/LICENSE
new file mode 100644
index 00000000..05eeeb88
--- /dev/null
+++ b/_extensions/d2/node_modules/inflight/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/_extensions/d2/node_modules/inflight/README.md b/_extensions/d2/node_modules/inflight/README.md
new file mode 100644
index 00000000..6dc89291
--- /dev/null
+++ b/_extensions/d2/node_modules/inflight/README.md
@@ -0,0 +1,37 @@
+# inflight
+
+Add callbacks to requests in flight to avoid async duplication
+
+## USAGE
+
+```javascript
+var inflight = require('inflight')
+
+// some request that does some stuff
+function req(key, callback) {
+  // key is any random string.  like a url or filename or whatever.
+  //
+  // will return either a falsey value, indicating that the
+  // request for this key is already in flight, or a new callback
+  // which when called will call all callbacks passed to inflight
+  // with the same key
+  callback = inflight(key, callback)
+
+  // If we got a falsey value back, then there's already a req going
+  if (!callback) return
+
+  // this is where you'd fetch the url or whatever
+  // callback is also once()-ified, so it can safely be assigned
+  // to multiple events etc.  First call wins.
+  setTimeout(function() {
+    callback(null, key)
+  }, 100)
+}
+
+// only assigns a single setTimeout
+// when it dings, all cbs get called
+req('foo', cb1)
+req('foo', cb2)
+req('foo', cb3)
+req('foo', cb4)
+```
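+
+For instance, a minimal sketch (illustrative only, not part of this package's API) of the same pattern applied to file reads; `readOnce` and the `'read:'` key prefix are arbitrary names chosen for the example:
+
+```javascript
+var inflight = require('inflight')
+var fs = require('fs')
+
+function readOnce (file, cb) {
+  // Callers that arrive while a read for `file` is in flight are queued
+  // onto the first request instead of issuing their own read.
+  cb = inflight('read:' + file, cb)
+
+  // A falsey return means an identical request is already in flight; our
+  // callback was queued and fires when that request finishes.
+  if (!cb) return
+
+  fs.readFile(file, 'utf8', cb)
+}
+
+// Only one fs.readFile is issued; both callbacks fire with its result.
+readOnce('data.txt', function (er, text) { /* first caller */ })
+readOnce('data.txt', function (er, text) { /* shares the same read */ })
+```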
diff --git a/_extensions/d2/node_modules/inflight/inflight.js b/_extensions/d2/node_modules/inflight/inflight.js
new file mode 100644
index 00000000..48202b3c
--- /dev/null
+++ b/_extensions/d2/node_modules/inflight/inflight.js
@@ -0,0 +1,54 @@
+var wrappy = require('wrappy')
+var reqs = Object.create(null)
+var once = require('once')
+
+module.exports = wrappy(inflight)
+
+function inflight (key, cb) {
+  if (reqs[key]) {
+    reqs[key].push(cb)
+    return null
+  } else {
+    reqs[key] = [cb]
+    return makeres(key)
+  }
+}
+
+function makeres (key) {
+  return once(function RES () {
+    var cbs = reqs[key]
+    var len = cbs.length
+    var args = slice(arguments)
+
+    // XXX It's somewhat ambiguous whether a new callback added in this
+    // pass should be queued for later execution if something in the
+    // list of callbacks throws, or if it should just be discarded.
+    // However, it's such an edge case that it hardly matters, and either
+    // choice is likely as surprising as the other.
+    // As it happens, we do go ahead and schedule it for later execution.
+    try {
+      for (var i = 0; i < len; i++) {
+        cbs[i].apply(null, args)
+      }
+    } finally {
+      if (cbs.length > len) {
+        // added more in the interim.
+        // de-zalgo, just in case, but don't call again.
+        cbs.splice(0, len)
+        process.nextTick(function () {
+          RES.apply(null, args)
+        })
+      } else {
+        delete reqs[key]
+      }
+    }
+  })
+}
+
+function slice (args) {
+  var length = args.length
+  var array = []
+
+  for (var i = 0; i < length; i++) array[i] = args[i]
+  return array
+}
diff --git a/_extensions/d2/node_modules/inflight/package.json b/_extensions/d2/node_modules/inflight/package.json
new file mode 100644
index 00000000..6084d350
--- /dev/null
+++ b/_extensions/d2/node_modules/inflight/package.json
@@ -0,0 +1,29 @@
+{
+  "name": "inflight",
+  "version": "1.0.6",
+  "description": "Add callbacks to requests in flight to avoid async duplication",
+  "main": "inflight.js",
+  "files": [
+    "inflight.js"
+  ],
+  "dependencies": {
+    "once": "^1.3.0",
+    "wrappy": "1"
+  },
+  "devDependencies": {
+    "tap": "^7.1.2"
+  },
+  "scripts": {
+    "test": "tap test.js --100"
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/npm/inflight.git"
+  },
+  "author": "Isaac Z. Schlueter  (http://blog.izs.me/)",
+  "bugs": {
+    "url": "https://github.com/isaacs/inflight/issues"
+  },
+  "homepage": "https://github.com/isaacs/inflight",
+  "license": "ISC"
+}
diff --git a/_extensions/d2/node_modules/inherits/LICENSE b/_extensions/d2/node_modules/inherits/LICENSE
new file mode 100644
index 00000000..dea3013d
--- /dev/null
+++ b/_extensions/d2/node_modules/inherits/LICENSE
@@ -0,0 +1,16 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
diff --git a/_extensions/d2/node_modules/inherits/README.md b/_extensions/d2/node_modules/inherits/README.md
new file mode 100644
index 00000000..b1c56658
--- /dev/null
+++ b/_extensions/d2/node_modules/inherits/README.md
@@ -0,0 +1,42 @@
+Browser-friendly inheritance fully compatible with standard node.js
+[inherits](http://nodejs.org/api/util.html#util_util_inherits_constructor_superconstructor).
+
+This package exports the standard `inherits` from the node.js `util`
+module in a node environment, but also provides an alternative
+browser-friendly implementation through the [browser
+field](https://gist.github.com/shtylman/4339901). The alternative
+implementation is a literal copy of the standard one, kept in a
+standalone module so that `util` does not need to be required. It also
+has a shim for old browsers with no `Object.create` support.
+
+While guaranteeing that the standard `inherits` implementation is used
+in a node.js environment, it allows bundlers such as
+[browserify](https://github.com/substack/node-browserify) to avoid
+including the full `util` package in your client code when all you
+need is the `inherits` function. This matters because the browser shim
+for the `util` package is large, and `inherits` is often the only
+function you need from it.
+
+It's recommended to use this package instead of
+`require('util').inherits` for any code that might run not only in
+node.js but also in the browser.
+
+## usage
+
+```js
+var inherits = require('inherits');
+// then use exactly as the standard one
+```
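+
+As a slightly fuller sketch (illustrative only; the constructor names are made up), inheriting from `EventEmitter` works the same way as with `util.inherits`:
+
+```js
+var inherits = require('inherits');
+var EventEmitter = require('events').EventEmitter;
+
+function Dog (name) {
+  EventEmitter.call(this); // initialize superclass state
+  this.name = name;
+}
+
+// sets Dog.super_ and replaces Dog.prototype, so define methods afterwards
+inherits(Dog, EventEmitter);
+
+Dog.prototype.bark = function () {
+  this.emit('bark', this.name);
+};
+
+var rex = new Dog('Rex');
+rex.on('bark', function (name) { console.log(name + ' says woof'); });
+rex.bark();
+```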
+
+## note on version ~1.0
+
+Version ~1.0 had a completely different motivation and is compatible
+neither with 2.0 nor with the standard node.js `inherits`.
+
+If you are using version ~1.0 and planning to switch to ~2.0, be
+careful:
+
+* new version uses `super_` instead of `super` for referencing
+  superclass
+* new version overwrites current prototype while old one preserves any
+  existing fields on it
diff --git a/_extensions/d2/node_modules/inherits/inherits.js b/_extensions/d2/node_modules/inherits/inherits.js
new file mode 100644
index 00000000..f71f2d93
--- /dev/null
+++ b/_extensions/d2/node_modules/inherits/inherits.js
@@ -0,0 +1,9 @@
+try {
+  var util = require('util');
+  /* istanbul ignore next */
+  if (typeof util.inherits !== 'function') throw '';
+  module.exports = util.inherits;
+} catch (e) {
+  /* istanbul ignore next */
+  module.exports = require('./inherits_browser.js');
+}
diff --git a/_extensions/d2/node_modules/inherits/inherits_browser.js b/_extensions/d2/node_modules/inherits/inherits_browser.js
new file mode 100644
index 00000000..86bbb3dc
--- /dev/null
+++ b/_extensions/d2/node_modules/inherits/inherits_browser.js
@@ -0,0 +1,27 @@
+if (typeof Object.create === 'function') {
+  // implementation from standard node.js 'util' module
+  module.exports = function inherits(ctor, superCtor) {
+    if (superCtor) {
+      ctor.super_ = superCtor
+      ctor.prototype = Object.create(superCtor.prototype, {
+        constructor: {
+          value: ctor,
+          enumerable: false,
+          writable: true,
+          configurable: true
+        }
+      })
+    }
+  };
+} else {
+  // old school shim for old browsers
+  module.exports = function inherits(ctor, superCtor) {
+    if (superCtor) {
+      ctor.super_ = superCtor
+      var TempCtor = function () {}
+      TempCtor.prototype = superCtor.prototype
+      ctor.prototype = new TempCtor()
+      ctor.prototype.constructor = ctor
+    }
+  }
+}
diff --git a/_extensions/d2/node_modules/inherits/package.json b/_extensions/d2/node_modules/inherits/package.json
new file mode 100644
index 00000000..37b4366b
--- /dev/null
+++ b/_extensions/d2/node_modules/inherits/package.json
@@ -0,0 +1,29 @@
+{
+  "name": "inherits",
+  "description": "Browser-friendly inheritance fully compatible with standard node.js inherits()",
+  "version": "2.0.4",
+  "keywords": [
+    "inheritance",
+    "class",
+    "klass",
+    "oop",
+    "object-oriented",
+    "inherits",
+    "browser",
+    "browserify"
+  ],
+  "main": "./inherits.js",
+  "browser": "./inherits_browser.js",
+  "repository": "git://github.com/isaacs/inherits",
+  "license": "ISC",
+  "scripts": {
+    "test": "tap"
+  },
+  "devDependencies": {
+    "tap": "^14.2.4"
+  },
+  "files": [
+    "inherits.js",
+    "inherits_browser.js"
+  ]
+}
diff --git a/_extensions/d2/node_modules/is-buffer/LICENSE b/_extensions/d2/node_modules/is-buffer/LICENSE
new file mode 100644
index 00000000..0c068cee
--- /dev/null
+++ b/_extensions/d2/node_modules/is-buffer/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) Feross Aboukhadijeh
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/_extensions/d2/node_modules/is-buffer/README.md b/_extensions/d2/node_modules/is-buffer/README.md
new file mode 100644
index 00000000..8c9785f2
--- /dev/null
+++ b/_extensions/d2/node_modules/is-buffer/README.md
@@ -0,0 +1,54 @@
+# is-buffer [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![downloads][downloads-image]][downloads-url] [![javascript style guide][standard-image]][standard-url]
+
+[travis-image]: https://img.shields.io/travis/feross/is-buffer/master.svg
+[travis-url]: https://travis-ci.org/feross/is-buffer
+[npm-image]: https://img.shields.io/npm/v/is-buffer.svg
+[npm-url]: https://npmjs.org/package/is-buffer
+[downloads-image]: https://img.shields.io/npm/dm/is-buffer.svg
+[downloads-url]: https://npmjs.org/package/is-buffer
+[standard-image]: https://img.shields.io/badge/code_style-standard-brightgreen.svg
+[standard-url]: https://standardjs.com
+
+#### Determine if an object is a [`Buffer`](http://nodejs.org/api/buffer.html) (including the [browserify Buffer](https://github.com/feross/buffer))
+
+[![saucelabs][saucelabs-image]][saucelabs-url]
+
+[saucelabs-image]: https://saucelabs.com/browser-matrix/is-buffer.svg
+[saucelabs-url]: https://saucelabs.com/u/is-buffer
+
+## Why not use `Buffer.isBuffer`?
+
+This module lets you check if an object is a `Buffer` without using `Buffer.isBuffer` (which includes the whole [buffer](https://github.com/feross/buffer) module in [browserify](http://browserify.org/)).
+
+It's future-proof and works in node too!
+
+## install
+
+```bash
+npm install is-buffer
+```
+
+## usage
+
+```js
+var isBuffer = require('is-buffer')
+
+isBuffer(new Buffer(4)) // true
+isBuffer(Buffer.alloc(4)) //true
+
+isBuffer(undefined) // false
+isBuffer(null) // false
+isBuffer('') // false
+isBuffer(true) // false
+isBuffer(false) // false
+isBuffer(0) // false
+isBuffer(1) // false
+isBuffer(1.0) // false
+isBuffer('string') // false
+isBuffer({}) // false
+isBuffer(function foo () {}) // false
+```
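+
+Note that the check is specific to `Buffer`; other binary containers are rejected. A couple of extra illustrative cases (not from the original list above):
+
+```js
+var isBuffer = require('is-buffer')
+
+isBuffer(Buffer.from('hi'))         // true
+isBuffer(new Uint8Array(4))         // false, typed arrays are not Buffers
+isBuffer(Buffer.from('hi').buffer)  // false, the underlying ArrayBuffer is not a Buffer
+```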
+
+## license
+
+MIT. Copyright (C) [Feross Aboukhadijeh](http://feross.org).
diff --git a/_extensions/d2/node_modules/is-buffer/index.d.ts b/_extensions/d2/node_modules/is-buffer/index.d.ts
new file mode 100644
index 00000000..7065c69f
--- /dev/null
+++ b/_extensions/d2/node_modules/is-buffer/index.d.ts
@@ -0,0 +1,2 @@
+declare function isBuffer(obj: any): boolean
+export = isBuffer
diff --git a/_extensions/d2/node_modules/is-buffer/index.js b/_extensions/d2/node_modules/is-buffer/index.js
new file mode 100644
index 00000000..da9bfdd7
--- /dev/null
+++ b/_extensions/d2/node_modules/is-buffer/index.js
@@ -0,0 +1,11 @@
+/*!
+ * Determine if an object is a Buffer
+ *
+ * @author   Feross Aboukhadijeh 
+ * @license  MIT
+ */
+
+module.exports = function isBuffer (obj) {
+  return obj != null && obj.constructor != null &&
+    typeof obj.constructor.isBuffer === 'function' && obj.constructor.isBuffer(obj)
+}
diff --git a/_extensions/d2/node_modules/is-buffer/package.json b/_extensions/d2/node_modules/is-buffer/package.json
new file mode 100644
index 00000000..7cd70d42
--- /dev/null
+++ b/_extensions/d2/node_modules/is-buffer/package.json
@@ -0,0 +1,65 @@
+{
+  "name": "is-buffer",
+  "description": "Determine if an object is a Buffer",
+  "version": "2.0.5",
+  "author": {
+    "name": "Feross Aboukhadijeh",
+    "email": "feross@feross.org",
+    "url": "https://feross.org"
+  },
+  "bugs": {
+    "url": "https://github.com/feross/is-buffer/issues"
+  },
+  "dependencies": {},
+  "devDependencies": {
+    "airtap": "^3.0.0",
+    "standard": "*",
+    "tape": "^5.0.1"
+  },
+  "engines": {
+    "node": ">=4"
+  },
+  "keywords": [
+    "arraybuffer",
+    "browser",
+    "browser buffer",
+    "browserify",
+    "buffer",
+    "buffers",
+    "core buffer",
+    "dataview",
+    "float32array",
+    "float64array",
+    "int16array",
+    "int32array",
+    "type",
+    "typed array",
+    "uint32array"
+  ],
+  "license": "MIT",
+  "main": "index.js",
+  "repository": {
+    "type": "git",
+    "url": "git://github.com/feross/is-buffer.git"
+  },
+  "scripts": {
+    "test": "standard && npm run test-node && npm run test-browser",
+    "test-browser": "airtap -- test/*.js",
+    "test-browser-local": "airtap --local -- test/*.js",
+    "test-node": "tape test/*.js"
+  },
+  "funding": [
+    {
+      "type": "github",
+      "url": "https://github.com/sponsors/feross"
+    },
+    {
+      "type": "patreon",
+      "url": "https://www.patreon.com/feross"
+    },
+    {
+      "type": "consulting",
+      "url": "https://feross.org/support"
+    }
+  ]
+}
diff --git a/_extensions/d2/node_modules/is-plain-obj/index.d.ts b/_extensions/d2/node_modules/is-plain-obj/index.d.ts
new file mode 100644
index 00000000..3794c42e
--- /dev/null
+++ b/_extensions/d2/node_modules/is-plain-obj/index.d.ts
@@ -0,0 +1,35 @@
+/**
+Check if a value is a plain object.
+
+An object is plain if it's created by either `{}`, `new Object()`, or `Object.create(null)`.
+
+@example
+```
+import isPlainObject from 'is-plain-obj';
+import {runInNewContext} from 'node:vm';
+
+isPlainObject({foo: 'bar'});
+//=> true
+
+isPlainObject(new Object());
+//=> true
+
+isPlainObject(Object.create(null));
+//=> true
+
+// This works across realms
+isPlainObject(runInNewContext('({})'));
+//=> true
+
+isPlainObject([1, 2, 3]);
+//=> false
+
+class Unicorn {}
+isPlainObject(new Unicorn());
+//=> false
+
+isPlainObject(Math);
+//=> false
+```
+*/
+export default function isPlainObject(value: unknown): value is Record;
diff --git a/_extensions/d2/node_modules/is-plain-obj/index.js b/_extensions/d2/node_modules/is-plain-obj/index.js
new file mode 100644
index 00000000..92555c3d
--- /dev/null
+++ b/_extensions/d2/node_modules/is-plain-obj/index.js
@@ -0,0 +1,8 @@
+export default function isPlainObject(value) {
+	if (typeof value !== 'object' || value === null) {
+		return false;
+	}
+
+	const prototype = Object.getPrototypeOf(value);
+	return (prototype === null || prototype === Object.prototype || Object.getPrototypeOf(prototype) === null) && !(Symbol.toStringTag in value) && !(Symbol.iterator in value);
+}
diff --git a/_extensions/d2/node_modules/is-plain-obj/license b/_extensions/d2/node_modules/is-plain-obj/license
new file mode 100644
index 00000000..fa7ceba3
--- /dev/null
+++ b/_extensions/d2/node_modules/is-plain-obj/license
@@ -0,0 +1,9 @@
+MIT License
+
+Copyright (c) Sindre Sorhus  (https://sindresorhus.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/_extensions/d2/node_modules/is-plain-obj/package.json b/_extensions/d2/node_modules/is-plain-obj/package.json
new file mode 100644
index 00000000..2ee5a142
--- /dev/null
+++ b/_extensions/d2/node_modules/is-plain-obj/package.json
@@ -0,0 +1,41 @@
+{
+	"name": "is-plain-obj",
+	"version": "4.1.0",
+	"description": "Check if a value is a plain object",
+	"license": "MIT",
+	"repository": "sindresorhus/is-plain-obj",
+	"funding": "https://github.com/sponsors/sindresorhus",
+	"author": {
+		"name": "Sindre Sorhus",
+		"email": "sindresorhus@gmail.com",
+		"url": "https://sindresorhus.com"
+	},
+	"type": "module",
+	"exports": "./index.js",
+	"engines": {
+		"node": ">=12"
+	},
+	"scripts": {
+		"test": "xo && ava && tsd"
+	},
+	"files": [
+		"index.js",
+		"index.d.ts"
+	],
+	"keywords": [
+		"object",
+		"is",
+		"check",
+		"test",
+		"type",
+		"plain",
+		"vanilla",
+		"pure",
+		"simple"
+	],
+	"devDependencies": {
+		"ava": "^3.15.0",
+		"tsd": "^0.14.0",
+		"xo": "^0.38.2"
+	}
+}
diff --git a/_extensions/d2/node_modules/is-plain-obj/readme.md b/_extensions/d2/node_modules/is-plain-obj/readme.md
new file mode 100644
index 00000000..28de6fb5
--- /dev/null
+++ b/_extensions/d2/node_modules/is-plain-obj/readme.md
@@ -0,0 +1,58 @@
+# is-plain-obj
+
+> Check if a value is a plain object
+
+An object is plain if it's created by either `{}`, `new Object()`, or `Object.create(null)`.
+
+## Install
+
+```
+$ npm install is-plain-obj
+```
+
+## Usage
+
+```js
+import isPlainObject from 'is-plain-obj';
+import {runInNewContext} from 'node:vm';
+
+isPlainObject({foo: 'bar'});
+//=> true
+
+isPlainObject(new Object());
+//=> true
+
+isPlainObject(Object.create(null));
+//=> true
+
+// This works across realms
+isPlainObject(runInNewContext('({})'));
+//=> true
+
+isPlainObject([1, 2, 3]);
+//=> false
+
+class Unicorn {}
+isPlainObject(new Unicorn());
+//=> false
+
+isPlainObject(Math);
+//=> false
+```
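+
+A common use is validating an `options` argument before spreading it into defaults; this sketch (the `mergeOptions` helper is hypothetical, not part of the package) shows the idea:
+
+```js
+import isPlainObject from 'is-plain-obj';
+
+function mergeOptions(defaults, options = {}) {
+	// Reject arrays, class instances, Maps, etc. that would merge in surprising ways.
+	if (!isPlainObject(options)) {
+		throw new TypeError('Expected `options` to be a plain object');
+	}
+
+	return {...defaults, ...options};
+}
+
+mergeOptions({retries: 2}, {retries: 5});
+//=> {retries: 5}
+```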
+
+## Related
+
+- [is-obj](https://github.com/sindresorhus/is-obj) - Check if a value is an object
+- [is](https://github.com/sindresorhus/is) - Type check values
+
+---
+
+
+Get professional support for this package with a Tidelift subscription.
+
+Tidelift helps make open source sustainable for maintainers while giving companies
+assurances about security, maintenance, and licensing for their dependencies.
diff --git a/_extensions/d2/node_modules/isexe/.npmignore b/_extensions/d2/node_modules/isexe/.npmignore
new file mode 100644
index 00000000..c1cb757a
--- /dev/null
+++ b/_extensions/d2/node_modules/isexe/.npmignore
@@ -0,0 +1,2 @@
+.nyc_output/
+coverage/
diff --git a/_extensions/d2/node_modules/isexe/LICENSE b/_extensions/d2/node_modules/isexe/LICENSE
new file mode 100644
index 00000000..19129e31
--- /dev/null
+++ b/_extensions/d2/node_modules/isexe/LICENSE
@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/_extensions/d2/node_modules/isexe/README.md b/_extensions/d2/node_modules/isexe/README.md
new file mode 100644
index 00000000..35769e84
--- /dev/null
+++ b/_extensions/d2/node_modules/isexe/README.md
@@ -0,0 +1,51 @@
+# isexe
+
+Minimal module to check if a file is executable, and a normal file.
+
+Uses `fs.stat` and tests against the `PATHEXT` environment variable on
+Windows.
+
+## USAGE
+
+```javascript
+var isexe = require('isexe')
+isexe('some-file-name', function (err, isExe) {
+  if (err) {
+    console.error('probably file does not exist or something', err)
+  } else if (isExe) {
+    console.error('this thing can be run')
+  } else {
+    console.error('cannot be run')
+  }
+})
+
+// same thing but synchronous, throws errors
+var isExe = isexe.sync('some-file-name')
+
+// treat errors as just "not executable"
+isexe('maybe-missing-file', { ignoreErrors: true }, callback)
+var isExe = isexe.sync('maybe-missing-file', { ignoreErrors: true })
+```
+
+## API
+
+### `isexe(path, [options], [callback])`
+
+Check if the path is executable. If no callback provided, and a
+global `Promise` object is available, then a Promise will be returned.
+
+Will raise whatever errors may be raised by `fs.stat`, unless
+`options.ignoreErrors` is set to true.
+
+### `isexe.sync(path, [options])`
+
+Same as `isexe` but returns the value and throws any errors raised.
+
+### Options
+
+* `ignoreErrors` Treat all errors as "no, this is not executable", but
+  don't raise them.
+* `uid` Number to use as the user id
+* `gid` Number to use as the group id
+* `pathExt` List of path extensions to use instead of `PATHEXT`
+  environment variable on Windows.
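+
+For example, a small sketch of the Promise form described above (the path is arbitrary and chosen only for illustration):
+
+```javascript
+var isexe = require('isexe')
+
+// no callback passed and a global Promise exists, so a Promise is returned
+isexe('/usr/local/bin/node', { ignoreErrors: true }).then(function (isExe) {
+  console.log(isExe ? 'can be run' : 'cannot be run')
+})
+```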
diff --git a/_extensions/d2/node_modules/isexe/index.js b/_extensions/d2/node_modules/isexe/index.js new file mode 100644 index 00000000..553fb32b --- /dev/null +++ b/_extensions/d2/node_modules/isexe/index.js @@ -0,0 +1,57 @@ +var fs = require('fs') +var core +if (process.platform === 'win32' || global.TESTING_WINDOWS) { + core = require('./windows.js') +} else { + core = require('./mode.js') +} + +module.exports = isexe +isexe.sync = sync + +function isexe (path, options, cb) { + if (typeof options === 'function') { + cb = options + options = {} + } + + if (!cb) { + if (typeof Promise !== 'function') { + throw new TypeError('callback not provided') + } + + return new Promise(function (resolve, reject) { + isexe(path, options || {}, function (er, is) { + if (er) { + reject(er) + } else { + resolve(is) + } + }) + }) + } + + core(path, options || {}, function (er, is) { + // ignore EACCES because that just means we aren't allowed to run it + if (er) { + if (er.code === 'EACCES' || options && options.ignoreErrors) { + er = null + is = false + } + } + cb(er, is) + }) +} + +function sync (path, options) { + // my kingdom for a filtered catch + try { + return core.sync(path, options || {}) + } catch (er) { + if (options && options.ignoreErrors || er.code === 'EACCES') { + return false + } else { + throw er + } + } +} diff --git a/_extensions/d2/node_modules/isexe/mode.js b/_extensions/d2/node_modules/isexe/mode.js new file mode 100644 index 00000000..1995ea4a --- /dev/null +++ b/_extensions/d2/node_modules/isexe/mode.js @@ -0,0 +1,41 @@ +module.exports = isexe +isexe.sync = sync + +var fs = require('fs') + +function isexe (path, options, cb) { + fs.stat(path, function (er, stat) { + cb(er, er ? false : checkStat(stat, options)) + }) +} + +function sync (path, options) { + return checkStat(fs.statSync(path), options) +} + +function checkStat (stat, options) { + return stat.isFile() && checkMode(stat, options) +} + +function checkMode (stat, options) { + var mod = stat.mode + var uid = stat.uid + var gid = stat.gid + + var myUid = options.uid !== undefined ? + options.uid : process.getuid && process.getuid() + var myGid = options.gid !== undefined ? + options.gid : process.getgid && process.getgid() + + var u = parseInt('100', 8) + var g = parseInt('010', 8) + var o = parseInt('001', 8) + var ug = u | g + + var ret = (mod & o) || + (mod & g) && gid === myGid || + (mod & u) && uid === myUid || + (mod & ug) && myUid === 0 + + return ret +} diff --git a/_extensions/d2/node_modules/isexe/package.json b/_extensions/d2/node_modules/isexe/package.json new file mode 100644 index 00000000..e4526894 --- /dev/null +++ b/_extensions/d2/node_modules/isexe/package.json @@ -0,0 +1,31 @@ +{ + "name": "isexe", + "version": "2.0.0", + "description": "Minimal module to check if a file is executable.", + "main": "index.js", + "directories": { + "test": "test" + }, + "devDependencies": { + "mkdirp": "^0.5.1", + "rimraf": "^2.5.0", + "tap": "^10.3.0" + }, + "scripts": { + "test": "tap test/*.js --100", + "preversion": "npm test", + "postversion": "npm publish", + "postpublish": "git push origin --all; git push origin --tags" + }, + "author": "Isaac Z. 
Schlueter (http://blog.izs.me/)", + "license": "ISC", + "repository": { + "type": "git", + "url": "git+https://github.com/isaacs/isexe.git" + }, + "keywords": [], + "bugs": { + "url": "https://github.com/isaacs/isexe/issues" + }, + "homepage": "https://github.com/isaacs/isexe#readme" +} diff --git a/_extensions/d2/node_modules/isexe/test/basic.js b/_extensions/d2/node_modules/isexe/test/basic.js new file mode 100644 index 00000000..d926df64 --- /dev/null +++ b/_extensions/d2/node_modules/isexe/test/basic.js @@ -0,0 +1,221 @@ +var t = require('tap') +var fs = require('fs') +var path = require('path') +var fixture = path.resolve(__dirname, 'fixtures') +var meow = fixture + '/meow.cat' +var mine = fixture + '/mine.cat' +var ours = fixture + '/ours.cat' +var fail = fixture + '/fail.false' +var noent = fixture + '/enoent.exe' +var mkdirp = require('mkdirp') +var rimraf = require('rimraf') + +var isWindows = process.platform === 'win32' +var hasAccess = typeof fs.access === 'function' +var winSkip = isWindows && 'windows' +var accessSkip = !hasAccess && 'no fs.access function' +var hasPromise = typeof Promise === 'function' +var promiseSkip = !hasPromise && 'no global Promise' + +function reset () { + delete require.cache[require.resolve('../')] + return require('../') +} + +t.test('setup fixtures', function (t) { + rimraf.sync(fixture) + mkdirp.sync(fixture) + fs.writeFileSync(meow, '#!/usr/bin/env cat\nmeow\n') + fs.chmodSync(meow, parseInt('0755', 8)) + fs.writeFileSync(fail, '#!/usr/bin/env false\n') + fs.chmodSync(fail, parseInt('0644', 8)) + fs.writeFileSync(mine, '#!/usr/bin/env cat\nmine\n') + fs.chmodSync(mine, parseInt('0744', 8)) + fs.writeFileSync(ours, '#!/usr/bin/env cat\nours\n') + fs.chmodSync(ours, parseInt('0754', 8)) + t.end() +}) + +t.test('promise', { skip: promiseSkip }, function (t) { + var isexe = reset() + t.test('meow async', function (t) { + isexe(meow).then(function (is) { + t.ok(is) + t.end() + }) + }) + t.test('fail async', function (t) { + isexe(fail).then(function (is) { + t.notOk(is) + t.end() + }) + }) + t.test('noent async', function (t) { + isexe(noent).catch(function (er) { + t.ok(er) + t.end() + }) + }) + t.test('noent ignore async', function (t) { + isexe(noent, { ignoreErrors: true }).then(function (is) { + t.notOk(is) + t.end() + }) + }) + t.end() +}) + +t.test('no promise', function (t) { + global.Promise = null + var isexe = reset() + t.throws('try to meow a promise', function () { + isexe(meow) + }) + t.end() +}) + +t.test('access', { skip: accessSkip || winSkip }, function (t) { + runTest(t) +}) + +t.test('mode', { skip: winSkip }, function (t) { + delete fs.access + delete fs.accessSync + var isexe = reset() + t.ok(isexe.sync(ours, { uid: 0, gid: 0 })) + t.ok(isexe.sync(mine, { uid: 0, gid: 0 })) + runTest(t) +}) + +t.test('windows', function (t) { + global.TESTING_WINDOWS = true + var pathExt = '.EXE;.CAT;.CMD;.COM' + t.test('pathExt option', function (t) { + runTest(t, { pathExt: '.EXE;.CAT;.CMD;.COM' }) + }) + t.test('pathExt env', function (t) { + process.env.PATHEXT = pathExt + runTest(t) + }) + t.test('no pathExt', function (t) { + // with a pathExt of '', any filename is fine. + // so the "fail" one would still pass. + runTest(t, { pathExt: '', skipFail: true }) + }) + t.test('pathext with empty entry', function (t) { + // with a pathExt of '', any filename is fine. + // so the "fail" one would still pass. 
+ runTest(t, { pathExt: ';' + pathExt, skipFail: true }) + }) + t.end() +}) + +t.test('cleanup', function (t) { + rimraf.sync(fixture) + t.end() +}) + +function runTest (t, options) { + var isexe = reset() + + var optionsIgnore = Object.create(options || {}) + optionsIgnore.ignoreErrors = true + + if (!options || !options.skipFail) { + t.notOk(isexe.sync(fail, options)) + } + t.notOk(isexe.sync(noent, optionsIgnore)) + if (!options) { + t.ok(isexe.sync(meow)) + } else { + t.ok(isexe.sync(meow, options)) + } + + t.ok(isexe.sync(mine, options)) + t.ok(isexe.sync(ours, options)) + t.throws(function () { + isexe.sync(noent, options) + }) + + t.test('meow async', function (t) { + if (!options) { + isexe(meow, function (er, is) { + if (er) { + throw er + } + t.ok(is) + t.end() + }) + } else { + isexe(meow, options, function (er, is) { + if (er) { + throw er + } + t.ok(is) + t.end() + }) + } + }) + + t.test('mine async', function (t) { + isexe(mine, options, function (er, is) { + if (er) { + throw er + } + t.ok(is) + t.end() + }) + }) + + t.test('ours async', function (t) { + isexe(ours, options, function (er, is) { + if (er) { + throw er + } + t.ok(is) + t.end() + }) + }) + + if (!options || !options.skipFail) { + t.test('fail async', function (t) { + isexe(fail, options, function (er, is) { + if (er) { + throw er + } + t.notOk(is) + t.end() + }) + }) + } + + t.test('noent async', function (t) { + isexe(noent, options, function (er, is) { + t.ok(er) + t.notOk(is) + t.end() + }) + }) + + t.test('noent ignore async', function (t) { + isexe(noent, optionsIgnore, function (er, is) { + if (er) { + throw er + } + t.notOk(is) + t.end() + }) + }) + + t.test('directory is not executable', function (t) { + isexe(__dirname, options, function (er, is) { + if (er) { + throw er + } + t.notOk(is) + t.end() + }) + }) + + t.end() +} diff --git a/_extensions/d2/node_modules/isexe/windows.js b/_extensions/d2/node_modules/isexe/windows.js new file mode 100644 index 00000000..34996734 --- /dev/null +++ b/_extensions/d2/node_modules/isexe/windows.js @@ -0,0 +1,42 @@ +module.exports = isexe +isexe.sync = sync + +var fs = require('fs') + +function checkPathExt (path, options) { + var pathext = options.pathExt !== undefined ? + options.pathExt : process.env.PATHEXT + + if (!pathext) { + return true + } + + pathext = pathext.split(';') + if (pathext.indexOf('') !== -1) { + return true + } + for (var i = 0; i < pathext.length; i++) { + var p = pathext[i].toLowerCase() + if (p && path.substr(-p.length).toLowerCase() === p) { + return true + } + } + return false +} + +function checkStat (stat, path, options) { + if (!stat.isSymbolicLink() && !stat.isFile()) { + return false + } + return checkPathExt(path, options) +} + +function isexe (path, options, cb) { + fs.stat(path, function (er, stat) { + cb(er, er ? 
false : checkStat(stat, path, options)) + }) +} + +function sync (path, options) { + return checkStat(fs.statSync(path), path, options) +} diff --git a/_extensions/d2/node_modules/kleur/colors.d.ts b/_extensions/d2/node_modules/kleur/colors.d.ts new file mode 100644 index 00000000..cab25c66 --- /dev/null +++ b/_extensions/d2/node_modules/kleur/colors.d.ts @@ -0,0 +1,38 @@ +declare function print(input: string | boolean | number): string; +declare function print(input: undefined | void): undefined; +declare function print(input: null): null; +type Colorize = typeof print; + +export declare const $: { enabled: boolean }; + +// Colors +export declare const black: Colorize; +export declare const red: Colorize; +export declare const green: Colorize; +export declare const yellow: Colorize; +export declare const blue: Colorize; +export declare const magenta: Colorize; +export declare const cyan: Colorize; +export declare const white: Colorize; +export declare const gray: Colorize; +export declare const grey: Colorize; + +// Backgrounds +export declare const bgBlack: Colorize; +export declare const bgRed: Colorize; +export declare const bgGreen: Colorize; +export declare const bgYellow: Colorize; +export declare const bgBlue: Colorize; +export declare const bgMagenta: Colorize; +export declare const bgCyan: Colorize; +export declare const bgWhite: Colorize; + +// Modifiers +export declare const reset: Colorize; +export declare const bold: Colorize; +export declare const dim: Colorize; +export declare const italic: Colorize; +export declare const underline: Colorize; +export declare const inverse: Colorize; +export declare const hidden: Colorize; +export declare const strikethrough: Colorize; diff --git a/_extensions/d2/node_modules/kleur/colors.js b/_extensions/d2/node_modules/kleur/colors.js new file mode 100644 index 00000000..89fb012d --- /dev/null +++ b/_extensions/d2/node_modules/kleur/colors.js @@ -0,0 +1,53 @@ +let FORCE_COLOR, NODE_DISABLE_COLORS, NO_COLOR, TERM, isTTY=true; +if (typeof process !== 'undefined') { + ({ FORCE_COLOR, NODE_DISABLE_COLORS, NO_COLOR, TERM } = process.env || {}); + isTTY = process.stdout && process.stdout.isTTY; +} + +const $ = exports.$ = { + enabled: !NODE_DISABLE_COLORS && NO_COLOR == null && TERM !== 'dumb' && ( + FORCE_COLOR != null && FORCE_COLOR !== '0' || isTTY + ) +} + +function init(x, y) { + let rgx = new RegExp(`\\x1b\\[${y}m`, 'g'); + let open = `\x1b[${x}m`, close = `\x1b[${y}m`; + + return function (txt) { + if (!$.enabled || txt == null) return txt; + return open + (!!~(''+txt).indexOf(close) ? 
txt.replace(rgx, close + open) : txt) + close; + }; +} + +// modifiers +exports.reset = init(0, 0); +exports.bold = init(1, 22); +exports.dim = init(2, 22); +exports.italic = init(3, 23); +exports.underline = init(4, 24); +exports.inverse = init(7, 27); +exports.hidden = init(8, 28); +exports.strikethrough = init(9, 29); + +// colors +exports.black = init(30, 39); +exports.red = init(31, 39); +exports.green = init(32, 39); +exports.yellow = init(33, 39); +exports.blue = init(34, 39); +exports.magenta = init(35, 39); +exports.cyan = init(36, 39); +exports.white = init(37, 39); +exports.gray = init(90, 39); +exports.grey = init(90, 39); + +// background colors +exports.bgBlack = init(40, 49); +exports.bgRed = init(41, 49); +exports.bgGreen = init(42, 49); +exports.bgYellow = init(43, 49); +exports.bgBlue = init(44, 49); +exports.bgMagenta = init(45, 49); +exports.bgCyan = init(46, 49); +exports.bgWhite = init(47, 49); diff --git a/_extensions/d2/node_modules/kleur/colors.mjs b/_extensions/d2/node_modules/kleur/colors.mjs new file mode 100644 index 00000000..e9feb83e --- /dev/null +++ b/_extensions/d2/node_modules/kleur/colors.mjs @@ -0,0 +1,53 @@ +let FORCE_COLOR, NODE_DISABLE_COLORS, NO_COLOR, TERM, isTTY=true; +if (typeof process !== 'undefined') { + ({ FORCE_COLOR, NODE_DISABLE_COLORS, NO_COLOR, TERM } = process.env || {}); + isTTY = process.stdout && process.stdout.isTTY; +} + +export const $ = { + enabled: !NODE_DISABLE_COLORS && NO_COLOR == null && TERM !== 'dumb' && ( + FORCE_COLOR != null && FORCE_COLOR !== '0' || isTTY + ) +} + +function init(x, y) { + let rgx = new RegExp(`\\x1b\\[${y}m`, 'g'); + let open = `\x1b[${x}m`, close = `\x1b[${y}m`; + + return function (txt) { + if (!$.enabled || txt == null) return txt; + return open + (!!~(''+txt).indexOf(close) ? 
txt.replace(rgx, close + open) : txt) + close; + }; +} + +// modifiers +export const reset = init(0, 0); +export const bold = init(1, 22); +export const dim = init(2, 22); +export const italic = init(3, 23); +export const underline = init(4, 24); +export const inverse = init(7, 27); +export const hidden = init(8, 28); +export const strikethrough = init(9, 29); + +// colors +export const black = init(30, 39); +export const red = init(31, 39); +export const green = init(32, 39); +export const yellow = init(33, 39); +export const blue = init(34, 39); +export const magenta = init(35, 39); +export const cyan = init(36, 39); +export const white = init(37, 39); +export const gray = init(90, 39); +export const grey = init(90, 39); + +// background colors +export const bgBlack = init(40, 49); +export const bgRed = init(41, 49); +export const bgGreen = init(42, 49); +export const bgYellow = init(43, 49); +export const bgBlue = init(44, 49); +export const bgMagenta = init(45, 49); +export const bgCyan = init(46, 49); +export const bgWhite = init(47, 49); diff --git a/_extensions/d2/node_modules/kleur/index.d.ts b/_extensions/d2/node_modules/kleur/index.d.ts new file mode 100644 index 00000000..fdc26ca9 --- /dev/null +++ b/_extensions/d2/node_modules/kleur/index.d.ts @@ -0,0 +1,45 @@ +// Originally by: Rogier Schouten +// Adapted by: Madhav Varshney +declare namespace kleur { + interface Color { + (x: string | number): string; + (): Kleur; + } + + interface Kleur { + // Colors + black: Color; + red: Color; + green: Color; + yellow: Color; + blue: Color; + magenta: Color; + cyan: Color; + white: Color; + gray: Color; + grey: Color; + + // Backgrounds + bgBlack: Color; + bgRed: Color; + bgGreen: Color; + bgYellow: Color; + bgBlue: Color; + bgMagenta: Color; + bgCyan: Color; + bgWhite: Color; + + // Modifiers + reset: Color; + bold: Color; + dim: Color; + italic: Color; + underline: Color; + inverse: Color; + hidden: Color; + strikethrough: Color; + } +} + +declare let kleur: kleur.Kleur & { enabled: boolean }; +export = kleur; diff --git a/_extensions/d2/node_modules/kleur/index.js b/_extensions/d2/node_modules/kleur/index.js new file mode 100644 index 00000000..5800b981 --- /dev/null +++ b/_extensions/d2/node_modules/kleur/index.js @@ -0,0 +1,110 @@ +'use strict'; + +let FORCE_COLOR, NODE_DISABLE_COLORS, NO_COLOR, TERM, isTTY=true; +if (typeof process !== 'undefined') { + ({ FORCE_COLOR, NODE_DISABLE_COLORS, NO_COLOR, TERM } = process.env || {}); + isTTY = process.stdout && process.stdout.isTTY; +} + +const $ = { + enabled: !NODE_DISABLE_COLORS && NO_COLOR == null && TERM !== 'dumb' && ( + FORCE_COLOR != null && FORCE_COLOR !== '0' || isTTY + ), + + // modifiers + reset: init(0, 0), + bold: init(1, 22), + dim: init(2, 22), + italic: init(3, 23), + underline: init(4, 24), + inverse: init(7, 27), + hidden: init(8, 28), + strikethrough: init(9, 29), + + // colors + black: init(30, 39), + red: init(31, 39), + green: init(32, 39), + yellow: init(33, 39), + blue: init(34, 39), + magenta: init(35, 39), + cyan: init(36, 39), + white: init(37, 39), + gray: init(90, 39), + grey: init(90, 39), + + // background colors + bgBlack: init(40, 49), + bgRed: init(41, 49), + bgGreen: init(42, 49), + bgYellow: init(43, 49), + bgBlue: init(44, 49), + bgMagenta: init(45, 49), + bgCyan: init(46, 49), + bgWhite: init(47, 49) +}; + +function run(arr, str) { + let i=0, tmp, beg='', end=''; + for (; i < arr.length; i++) { + tmp = arr[i]; + beg += tmp.open; + end += tmp.close; + if (!!~str.indexOf(tmp.close)) { + str = 
str.replace(tmp.rgx, tmp.close + tmp.open); + } + } + return beg + str + end; +} + +function chain(has, keys) { + let ctx = { has, keys }; + + ctx.reset = $.reset.bind(ctx); + ctx.bold = $.bold.bind(ctx); + ctx.dim = $.dim.bind(ctx); + ctx.italic = $.italic.bind(ctx); + ctx.underline = $.underline.bind(ctx); + ctx.inverse = $.inverse.bind(ctx); + ctx.hidden = $.hidden.bind(ctx); + ctx.strikethrough = $.strikethrough.bind(ctx); + + ctx.black = $.black.bind(ctx); + ctx.red = $.red.bind(ctx); + ctx.green = $.green.bind(ctx); + ctx.yellow = $.yellow.bind(ctx); + ctx.blue = $.blue.bind(ctx); + ctx.magenta = $.magenta.bind(ctx); + ctx.cyan = $.cyan.bind(ctx); + ctx.white = $.white.bind(ctx); + ctx.gray = $.gray.bind(ctx); + ctx.grey = $.grey.bind(ctx); + + ctx.bgBlack = $.bgBlack.bind(ctx); + ctx.bgRed = $.bgRed.bind(ctx); + ctx.bgGreen = $.bgGreen.bind(ctx); + ctx.bgYellow = $.bgYellow.bind(ctx); + ctx.bgBlue = $.bgBlue.bind(ctx); + ctx.bgMagenta = $.bgMagenta.bind(ctx); + ctx.bgCyan = $.bgCyan.bind(ctx); + ctx.bgWhite = $.bgWhite.bind(ctx); + + return ctx; +} + +function init(open, close) { + let blk = { + open: `\x1b[${open}m`, + close: `\x1b[${close}m`, + rgx: new RegExp(`\\x1b\\[${close}m`, 'g') + }; + return function (txt) { + if (this !== void 0 && this.has !== void 0) { + !!~this.has.indexOf(open) || (this.has.push(open),this.keys.push(blk)); + return txt === void 0 ? this : $.enabled ? run(this.keys, txt+'') : txt+''; + } + return txt === void 0 ? chain([open], [blk]) : $.enabled ? run([blk], txt+'') : txt+''; + }; +} + +module.exports = $; diff --git a/_extensions/d2/node_modules/kleur/index.mjs b/_extensions/d2/node_modules/kleur/index.mjs new file mode 100644 index 00000000..96dadad2 --- /dev/null +++ b/_extensions/d2/node_modules/kleur/index.mjs @@ -0,0 +1,110 @@ +'use strict'; + +let FORCE_COLOR, NODE_DISABLE_COLORS, NO_COLOR, TERM, isTTY=true; +if (typeof process !== 'undefined') { + ({ FORCE_COLOR, NODE_DISABLE_COLORS, NO_COLOR, TERM } = process.env || {}); + isTTY = process.stdout && process.stdout.isTTY; +} + +const $ = { + enabled: !NODE_DISABLE_COLORS && NO_COLOR == null && TERM !== 'dumb' && ( + FORCE_COLOR != null && FORCE_COLOR !== '0' || isTTY + ), + + // modifiers + reset: init(0, 0), + bold: init(1, 22), + dim: init(2, 22), + italic: init(3, 23), + underline: init(4, 24), + inverse: init(7, 27), + hidden: init(8, 28), + strikethrough: init(9, 29), + + // colors + black: init(30, 39), + red: init(31, 39), + green: init(32, 39), + yellow: init(33, 39), + blue: init(34, 39), + magenta: init(35, 39), + cyan: init(36, 39), + white: init(37, 39), + gray: init(90, 39), + grey: init(90, 39), + + // background colors + bgBlack: init(40, 49), + bgRed: init(41, 49), + bgGreen: init(42, 49), + bgYellow: init(43, 49), + bgBlue: init(44, 49), + bgMagenta: init(45, 49), + bgCyan: init(46, 49), + bgWhite: init(47, 49) +}; + +function run(arr, str) { + let i=0, tmp, beg='', end=''; + for (; i < arr.length; i++) { + tmp = arr[i]; + beg += tmp.open; + end += tmp.close; + if (!!~str.indexOf(tmp.close)) { + str = str.replace(tmp.rgx, tmp.close + tmp.open); + } + } + return beg + str + end; +} + +function chain(has, keys) { + let ctx = { has, keys }; + + ctx.reset = $.reset.bind(ctx); + ctx.bold = $.bold.bind(ctx); + ctx.dim = $.dim.bind(ctx); + ctx.italic = $.italic.bind(ctx); + ctx.underline = $.underline.bind(ctx); + ctx.inverse = $.inverse.bind(ctx); + ctx.hidden = $.hidden.bind(ctx); + ctx.strikethrough = $.strikethrough.bind(ctx); + + ctx.black = $.black.bind(ctx); + ctx.red = 
$.red.bind(ctx); + ctx.green = $.green.bind(ctx); + ctx.yellow = $.yellow.bind(ctx); + ctx.blue = $.blue.bind(ctx); + ctx.magenta = $.magenta.bind(ctx); + ctx.cyan = $.cyan.bind(ctx); + ctx.white = $.white.bind(ctx); + ctx.gray = $.gray.bind(ctx); + ctx.grey = $.grey.bind(ctx); + + ctx.bgBlack = $.bgBlack.bind(ctx); + ctx.bgRed = $.bgRed.bind(ctx); + ctx.bgGreen = $.bgGreen.bind(ctx); + ctx.bgYellow = $.bgYellow.bind(ctx); + ctx.bgBlue = $.bgBlue.bind(ctx); + ctx.bgMagenta = $.bgMagenta.bind(ctx); + ctx.bgCyan = $.bgCyan.bind(ctx); + ctx.bgWhite = $.bgWhite.bind(ctx); + + return ctx; +} + +function init(open, close) { + let blk = { + open: `\x1b[${open}m`, + close: `\x1b[${close}m`, + rgx: new RegExp(`\\x1b\\[${close}m`, 'g') + }; + return function (txt) { + if (this !== void 0 && this.has !== void 0) { + !!~this.has.indexOf(open) || (this.has.push(open),this.keys.push(blk)); + return txt === void 0 ? this : $.enabled ? run(this.keys, txt+'') : txt+''; + } + return txt === void 0 ? chain([open], [blk]) : $.enabled ? run([blk], txt+'') : txt+''; + }; +} + +export default $; diff --git a/_extensions/d2/node_modules/kleur/license b/_extensions/d2/node_modules/kleur/license new file mode 100644 index 00000000..a3f96f82 --- /dev/null +++ b/_extensions/d2/node_modules/kleur/license @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Luke Edwards (lukeed.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/_extensions/d2/node_modules/kleur/package.json b/_extensions/d2/node_modules/kleur/package.json new file mode 100644 index 00000000..5007c057 --- /dev/null +++ b/_extensions/d2/node_modules/kleur/package.json @@ -0,0 +1,51 @@ +{ + "name": "kleur", + "version": "4.1.5", + "repository": "lukeed/kleur", + "description": "The fastest Node.js library for formatting terminal text with ANSI colors~!", + "module": "index.mjs", + "types": "index.d.ts", + "main": "index.js", + "license": "MIT", + "exports": { + ".": { + "types": "./index.d.ts", + "import": "./index.mjs", + "require": "./index.js" + }, + "./colors": { + "types": "./colors.d.ts", + "import": "./colors.mjs", + "require": "./colors.js" + } + }, + "files": [ + "*.d.ts", + "colors.*", + "index.*" + ], + "author": { + "name": "Luke Edwards", + "email": "luke.edwards05@gmail.com", + "url": "https://lukeed.com" + }, + "scripts": { + "build": "node build", + "test": "uvu -r esm -i utils -i xyz" + }, + "engines": { + "node": ">=6" + }, + "keywords": [ + "ansi", + "cli", + "color", + "colors", + "console", + "terminal" + ], + "devDependencies": { + "esm": "3.2.25", + "uvu": "0.3.3" + } +} diff --git a/_extensions/d2/node_modules/kleur/readme.md b/_extensions/d2/node_modules/kleur/readme.md new file mode 100644 index 00000000..de7f5aa0 --- /dev/null +++ b/_extensions/d2/node_modules/kleur/readme.md @@ -0,0 +1,232 @@ +
+# kleur
+
+> The fastest Node.js library for formatting terminal text with ANSI colors~!
+ +## Features + +* No dependencies +* Super [lightweight](#load-time) & [performant](#performance) +* Supports [nested](#nested-methods) & [chained](#chained-methods) colors +* No `String.prototype` modifications +* Conditional [color support](#conditional-support) +* [Fully treeshakable](#individual-colors) +* Familiar [API](#api) + +--- + +As of `v3.0` the Chalk-style syntax (magical getter) is no longer used.
Please visit [History](#history) for migration paths supporting that syntax. + +--- + + +## Install + +``` +$ npm install --save kleur +``` + + +## Usage + +```js +import kleur from 'kleur'; + +// basic usage +kleur.red('red text'); + +// chained methods +kleur.blue().bold().underline('howdy partner'); + +// nested methods +kleur.bold(`${ white().bgRed('[ERROR]') } ${ kleur.red().italic('Something happened')}`); +``` + +### Chained Methods + +```js +const { bold, green } = require('kleur'); + +console.log(bold().red('this is a bold red message')); +console.log(bold().italic('this is a bold italicized message')); +console.log(bold().yellow().bgRed().italic('this is a bold yellow italicized message')); +console.log(green().bold().underline('this is a bold green underlined message')); +``` + + + +### Nested Methods + +```js +const { yellow, red, cyan } = require('kleur'); + +console.log(yellow(`foo ${red().bold('red')} bar ${cyan('cyan')} baz`)); +console.log(yellow('foo ' + red().bold('red') + ' bar ' + cyan('cyan') + ' baz')); +``` + + + + +### Conditional Support + +Toggle color support as needed; `kleur` includes simple auto-detection which may not cover all cases. + +> **Note:** Both `kleur` and `kleur/colors` share the same detection logic. + +```js +import kleur from 'kleur'; + +// manually disable +kleur.enabled = false; + +// or use another library to detect support +kleur.enabled = require('color-support').level > 0; + +console.log(kleur.red('I will only be colored red if the terminal supports colors')); +``` + +> **Important:**
Colors will be disabled automatically in non [TTY contexts](https://nodejs.org/api/process.html#process_a_note_on_process_i_o). For example, spawning another process or piping output into another process will disable colorization automatically. To force colors in your piped output, you may do so with the `FORCE_COLOR=1` environment variable: + +```sh +$ node app.js #=> COLORS +$ node app.js > log.txt #=> NO COLORS +$ FORCE_COLOR=1 node app.js > log.txt #=> COLORS +$ FORCE_COLOR=0 node app.js > log.txt #=> NO COLORS +``` + +## API + +Any `kleur` method returns a `String` when invoked with input; otherwise chaining is expected. + +> It's up to the developer to pass the output to destinations like `console.log`, `process.stdout.write`, etc. + +The methods below are grouped by type for legibility purposes only. They each can be [chained](#chained-methods) or [nested](#nested-methods) with one another. + +***Colors:*** +> black — red — green — yellow — blue — magenta — cyan — white — gray — grey + +***Backgrounds:*** +> bgBlack — bgRed — bgGreen — bgYellow — bgBlue — bgMagenta — bgCyan — bgWhite + +***Modifiers:*** +> reset — bold — dim — italic* — underline — inverse — hidden — strikethrough* + +* Not widely supported + + +## Individual Colors + +When you only need a few colors, it doesn't make sense to import _all_ of `kleur` because, as small as it is, `kleur` is not treeshakeable, and so most of its code will be doing nothing. In order to fix this, you can import from the `kleur/colors` submodule which _fully_ supports tree-shaking. + +The caveat with this approach is that color functions **are not** chainable~!
Each function receives and colorizes its input. You may combine colors, backgrounds, and modifiers by nesting function calls within other functions. + +```js +// or: import * as kleur from 'kleur/colors'; +import { red, underline, bgWhite } from 'kleur/colors'; + +red('red text'); +//~> kleur.red('red text'); + +underline(red('red underlined text')); +//~> kleur.underline().red('red underlined text'); + +bgWhite(underline(red('red underlined text w/ white background'))); +//~> kleur.bgWhite().underline().red('red underlined text w/ white background'); +``` + +> **Note:** All the same [colors, backgrounds, and modifiers](#api) are available. + +***Conditional Support*** + +The `kleur/colors` submodule also allows you to toggle color support, as needed.
+It includes the same initial assumptions as `kleur`, in an attempt to have colors enabled by default. + +Unlike `kleur`, this setting exists as `kleur.$.enabled` instead of `kleur.enabled`: + +```js +import * as kleur from 'kleur/colors'; +// or: import { $, red } from 'kleur/colors'; + +// manually disabled +kleur.$.enabled = false; + +// or use another library to detect support +kleur.$.enabled = require('color-support').level > 0; + +console.log(red('I will only be colored red if the terminal supports colors')); +``` + + +## Benchmarks + +> Using Node v10.13.0 + +### Load time + +``` +chalk :: 5.303ms +kleur :: 0.488ms +kleur/colors :: 0.369ms +ansi-colors :: 1.504ms +``` + +### Performance + +``` +# All Colors + ansi-colors x 177,625 ops/sec ±1.47% (92 runs sampled) + chalk x 611,907 ops/sec ±0.20% (92 runs sampled) + kleur x 742,509 ops/sec ±1.47% (93 runs sampled) + kleur/colors x 881,742 ops/sec ±0.19% (98 runs sampled) + +# Stacked colors + ansi-colors x 23,331 ops/sec ±1.81% (94 runs sampled) + chalk x 337,178 ops/sec ±0.20% (98 runs sampled) + kleur x 78,299 ops/sec ±1.01% (97 runs sampled) + kleur/colors x 104,431 ops/sec ±0.22% (97 runs sampled) + +# Nested colors + ansi-colors x 67,181 ops/sec ±1.15% (92 runs sampled) + chalk x 116,361 ops/sec ±0.63% (94 runs sampled) + kleur x 139,514 ops/sec ±0.76% (95 runs sampled) + kleur/colors x 145,716 ops/sec ±0.97% (97 runs sampled) +``` + + +## History + +This project originally forked [`ansi-colors`](https://github.com/doowb/ansi-colors). + +Beginning with `kleur@3.0`, the Chalk-style syntax (magical getter) has been replaced with function calls per key: + +```js +// Old: +c.red.bold.underline('old'); + +// New: +c.red().bold().underline('new'); +``` +> As I work more with Rust, the newer syntax feels so much better & more natural! + +If you prefer the old syntax, you may migrate to `ansi-colors` or newer `chalk` releases.
Versions below `kleur@3.0` have been officially deprecated. + + +## License + +MIT © [Luke Edwards](https://lukeed.com) diff --git a/_extensions/d2/node_modules/longest-streak/index.d.ts b/_extensions/d2/node_modules/longest-streak/index.d.ts new file mode 100644 index 00000000..560c8183 --- /dev/null +++ b/_extensions/d2/node_modules/longest-streak/index.d.ts @@ -0,0 +1,11 @@ +/** + * Get the count of the longest repeating streak of `substring` in `value`. + * + * @param {string} value + * Content to search in. + * @param {string} substring + * Substring to look for, typically one character. + * @returns {number} + * Count of most frequent adjacent `substring`s in `value`. + */ +export function longestStreak(value: string, substring: string): number diff --git a/_extensions/d2/node_modules/longest-streak/index.js b/_extensions/d2/node_modules/longest-streak/index.js new file mode 100644 index 00000000..67e726d4 --- /dev/null +++ b/_extensions/d2/node_modules/longest-streak/index.js @@ -0,0 +1,36 @@ +/** + * Get the count of the longest repeating streak of `substring` in `value`. + * + * @param {string} value + * Content to search in. + * @param {string} substring + * Substring to look for, typically one character. + * @returns {number} + * Count of most frequent adjacent `substring`s in `value`. + */ +export function longestStreak(value, substring) { + const source = String(value) + let index = source.indexOf(substring) + let expected = index + let count = 0 + let max = 0 + + if (typeof substring !== 'string') { + throw new TypeError('Expected substring') + } + + while (index !== -1) { + if (index === expected) { + if (++count > max) { + max = count + } + } else { + count = 1 + } + + expected = index + substring.length + index = source.indexOf(substring, expected) + } + + return max +} diff --git a/_extensions/d2/node_modules/longest-streak/license b/_extensions/d2/node_modules/longest-streak/license new file mode 100644 index 00000000..611b6758 --- /dev/null +++ b/_extensions/d2/node_modules/longest-streak/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2015 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
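As the longest-streak readme later in this patch notes, the function exists mainly to help serialize markdown ASTs: when wrapping a value in backticks or a code fence, the marker run has to be longer than any run already inside the value. A small usage sketch under that assumption; `fenceFor` is a hypothetical helper, not part of the package.

```js
import {longestStreak} from 'longest-streak'

// Hypothetical helper: pick a backtick fence longer than the longest
// backtick run inside the code, so the fence cannot be closed early.
function fenceFor(code) {
  const streak = longestStreak(code, '`')
  return '`'.repeat(Math.max(3, streak + 1))
}

const code = 'console.log(`hi ``there`` !`)'
const fence = fenceFor(code)
// longest backtick run in `code` is 2, so a 3-backtick fence is safe
console.log(`${fence}js\n${code}\n${fence}`)
```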
diff --git a/_extensions/d2/node_modules/longest-streak/package.json b/_extensions/d2/node_modules/longest-streak/package.json new file mode 100644 index 00000000..75d6e903 --- /dev/null +++ b/_extensions/d2/node_modules/longest-streak/package.json @@ -0,0 +1,73 @@ +{ + "name": "longest-streak", + "version": "3.1.0", + "description": "Count the longest repeating streak of a substring", + "license": "MIT", + "keywords": [ + "count", + "length", + "longest", + "repeating", + "streak", + "substring", + "character" + ], + "repository": "wooorm/longest-streak", + "bugs": "https://github.com/wooorm/longest-streak/issues", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "index.d.ts", + "index.js" + ], + "devDependencies": { + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.52.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/longest-streak/readme.md b/_extensions/d2/node_modules/longest-streak/readme.md new file mode 100644 index 00000000..db02bfcf --- /dev/null +++ b/_extensions/d2/node_modules/longest-streak/readme.md @@ -0,0 +1,150 @@ +# longest-streak + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] + +Get the count of the longest repeating streak of `substring` in `value`. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`longestStreak(value, substring)`](#longeststreakvalue-substring) +* [Types](#types) +* [Compatibility](#compatibility) +* [Security](#security) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This is a tiny package that finds the count of the longest adjacent repeating +substring. + +## When should I use this? + +This package is rather niche. +I use it for serializing markdown ASTs (particularly fenced code and math). + +You can use [`ccount`][ccount] if you need the total count of substrings +occuring in a value. + +## Install + +This package is [ESM only][esm]. 
+In Node.js (version 14.14+, 16.0+), install with [npm][]: + +```sh +npm install longest-streak +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {longestStreak} from 'https://esm.sh/longest-streak@3' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {longestStreak} from 'longest-streak' + +longestStreak('` foo `` bar `', '`') // => 2 +``` + +## API + +This package exports the identifier `longestStreak`. +There is no default export. + +### `longestStreak(value, substring)` + +Get the count of the longest repeating streak of `substring` in `value`. + +###### Parameters + +* `value` (`string`) — content to search in +* `substring` (`string`) — substring to look for, typically one character + +###### Returns + +Count of most frequent adjacent `substring`s in `value` (`number`). + +## Types + +This package is fully typed with [TypeScript][]. +It exports no additional types. + +## Compatibility + +This package is at least compatible with all maintained versions of Node.js. +As of now, that is Node.js 14.14+ and 16.0+. +It also works in Deno and modern browsers. + +## Security + +This package is safe. + +## Related + +* [`wooorm/ccount`](https://github.com/wooorm/ccount) + — count the total number of `substring`s in `value` +* [`wooorm/direction`](https://github.com/wooorm/direction) + — detect directionality: left-to-right, right-to-left, or neutral + +## Contribute + +Yes please! +See [How to Contribute to Open Source][contribute]. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/wooorm/longest-streak/workflows/main/badge.svg + +[build]: https://github.com/wooorm/longest-streak/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/wooorm/longest-streak.svg + +[coverage]: https://codecov.io/github/wooorm/longest-streak + +[downloads-badge]: https://img.shields.io/npm/dm/longest-streak.svg + +[downloads]: https://www.npmjs.com/package/longest-streak + +[size-badge]: https://img.shields.io/bundlephobia/minzip/longest-streak.svg + +[size]: https://bundlephobia.com/result?p=longest-streak + +[npm]: https://docs.npmjs.com/cli/install + +[esmsh]: https://esm.sh + +[license]: license + +[author]: https://wooorm.com + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[typescript]: https://www.typescriptlang.org + +[contribute]: https://opensource.guide/how-to-contribute/ + +[ccount]: https://github.com/wooorm/ccount diff --git a/_extensions/d2/node_modules/mdast-util-from-markdown/dev/index.d.ts b/_extensions/d2/node_modules/mdast-util-from-markdown/dev/index.d.ts new file mode 100644 index 00000000..b5d391fe --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-from-markdown/dev/index.d.ts @@ -0,0 +1,78 @@ +import type {OnEnterError} from './lib/index.js' + +export type { + CompileContext, + Encoding, + Extension, + Handle, + OnEnterError, + OnExitError, + Options, + Token, + Transform, + Value +} from './lib/index.js' + +/** + * Deprecated: use `OnEnterError`. + */ +// To do: next major: remove. +export type OnError = OnEnterError + +/** + * Interface of tracked data. + * + * When working on extensions that use more data, extend the corresponding + * interface to register their types: + * + * ```ts + * declare module 'mdast-util-from-markdown' { + * interface CompileData { + * // Register a new field. 
+ * mathFlowInside?: boolean | undefined + * } + * } + * ``` + */ +// eslint-disable-next-line @typescript-eslint/consistent-type-definitions +export interface CompileData { + /** + * Whether we’re inside a hard break. + */ + atHardBreak?: boolean | undefined + + /** + * Current character reference type. + */ + characterReferenceType?: + | 'characterReferenceMarkerHexadecimal' + | 'characterReferenceMarkerNumeric' + | undefined + + /** + * Whether a first list item value (`1` in `1. a`) is expected. + */ + expectingFirstListItemValue?: boolean | undefined + + /** + * Whether we’re in flow code. + */ + flowCodeInside?: boolean | undefined + + /** + * Whether we’re in a reference. + */ + inReference?: boolean | undefined + + /** + * Whether we’re expecting a line ending from a setext heading, which can be slurped. + */ + setextHeadingSlurpLineEnding?: boolean | undefined + + /** + * Current reference. + */ + referenceType?: 'collapsed' | 'full' | undefined +} + +export {fromMarkdown} from './lib/index.js' diff --git a/_extensions/d2/node_modules/mdast-util-from-markdown/dev/index.js b/_extensions/d2/node_modules/mdast-util-from-markdown/dev/index.js new file mode 100644 index 00000000..d8bcf034 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-from-markdown/dev/index.js @@ -0,0 +1,2 @@ +// Note: types exported from `index.d.ts`. +export {fromMarkdown} from './lib/index.js' diff --git a/_extensions/d2/node_modules/mdast-util-from-markdown/dev/lib/index.d.ts b/_extensions/d2/node_modules/mdast-util-from-markdown/dev/lib/index.d.ts new file mode 100644 index 00000000..e0f97b64 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-from-markdown/dev/lib/index.d.ts @@ -0,0 +1,184 @@ +/** + * @param value + * Markdown to parse. + * @param encoding + * Character encoding for when `value` is `Buffer`. + * @param options + * Configuration. + * @returns + * mdast tree. 
+ */ +export const fromMarkdown: (( + value: Value, + encoding: Encoding, + options?: Options | null | undefined +) => Root) & + ((value: Value, options?: Options | null | undefined) => Root) +export type Encoding = import('micromark-util-types').Encoding +export type Event = import('micromark-util-types').Event +export type ParseOptions = import('micromark-util-types').ParseOptions +export type Token = import('micromark-util-types').Token +export type TokenizeContext = import('micromark-util-types').TokenizeContext +export type Value = import('micromark-util-types').Value +export type UnistParent = import('unist').Parent +export type Point = import('unist').Point +export type PhrasingContent = import('mdast').PhrasingContent +export type StaticPhrasingContent = import('mdast').StaticPhrasingContent +export type Content = import('mdast').Content +export type Break = import('mdast').Break +export type Blockquote = import('mdast').Blockquote +export type Code = import('mdast').Code +export type Definition = import('mdast').Definition +export type Emphasis = import('mdast').Emphasis +export type Heading = import('mdast').Heading +export type HTML = import('mdast').HTML +export type Image = import('mdast').Image +export type ImageReference = import('mdast').ImageReference +export type InlineCode = import('mdast').InlineCode +export type Link = import('mdast').Link +export type LinkReference = import('mdast').LinkReference +export type List = import('mdast').List +export type ListItem = import('mdast').ListItem +export type Paragraph = import('mdast').Paragraph +export type Root = import('mdast').Root +export type Strong = import('mdast').Strong +export type Text = import('mdast').Text +export type ThematicBreak = import('mdast').ThematicBreak +export type ReferenceType = import('mdast').ReferenceType +export type CompileData = import('../index.js').CompileData +export type Node = Root | Content +export type Parent = Extract +export type Fragment = Omit & { + type: 'fragment' + children: Array +} +/** + * Extra transform, to change the AST afterwards. + */ +export type Transform = (tree: Root) => Root | undefined | null | void +/** + * Handle a token. + */ +export type Handle = (this: CompileContext, token: Token) => void +/** + * Token types mapping to handles + */ +export type Handles = Record +/** + * Handle the case where the `right` token is open, but it is closed (by the + * `left` token) or because we reached the end of the document. + */ +export type OnEnterError = ( + this: Omit, + left: Token | undefined, + right: Token +) => void +/** + * Handle the case where the `right` token is open but it is closed by + * exiting the `left` token. + */ +export type OnExitError = ( + this: Omit, + left: Token, + right: Token +) => void +/** + * Open token on the stack, with an optional error handler for when + * that token isn’t closed properly. + */ +export type TokenTuple = [Token, OnEnterError | undefined] +/** + * Configuration. + * + * We have our defaults, but extensions will add more. + */ +export type Config = { + /** + * Token types where line endings are used. + */ + canContainEols: Array + /** + * Opening handles. + */ + enter: Handles + /** + * Closing handles. + */ + exit: Handles + /** + * Tree transforms. + */ + transforms: Array +} +/** + * Change how markdown tokens from micromark are turned into mdast. + */ +export type Extension = Partial +/** + * mdast compiler context. + */ +export type CompileContext = { + /** + * Stack of nodes. + */ + stack: Array + /** + * Stack of tokens. 
+ */ + tokenStack: Array + /** + * Get data from the key/value store. + */ + getData: ( + key: Key + ) => import('../index.js').CompileData[Key] + /** + * Set data into the key/value store. + */ + setData: ( + key: Key_1, + value?: import('../index.js').CompileData[Key_1] | undefined + ) => void + /** + * Capture some of the output data. + */ + buffer: (this: CompileContext) => void + /** + * Stop capturing and access the output data. + */ + resume: (this: CompileContext) => string + /** + * Enter a token. + */ + enter: ( + this: CompileContext, + node: Kind, + token: Token, + onError?: OnEnterError + ) => Kind + /** + * Exit a token. + */ + exit: (this: CompileContext, token: Token, onError?: OnExitError) => Node + /** + * Get the string value of a token. + */ + sliceSerialize: TokenizeContext['sliceSerialize'] + /** + * Configuration. + */ + config: Config +} +/** + * Configuration for how to build mdast. + */ +export type FromMarkdownOptions = { + /** + * Extensions for this utility to change how tokens are turned into a tree. + */ + mdastExtensions?: Array> | null | undefined +} +/** + * Configuration. + */ +export type Options = ParseOptions & FromMarkdownOptions diff --git a/_extensions/d2/node_modules/mdast-util-from-markdown/dev/lib/index.js b/_extensions/d2/node_modules/mdast-util-from-markdown/dev/lib/index.js new file mode 100644 index 00000000..e8c18700 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-from-markdown/dev/lib/index.js @@ -0,0 +1,1468 @@ +/** + * @typedef {import('micromark-util-types').Encoding} Encoding + * @typedef {import('micromark-util-types').Event} Event + * @typedef {import('micromark-util-types').ParseOptions} ParseOptions + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext + * @typedef {import('micromark-util-types').Value} Value + * + * @typedef {import('unist').Parent} UnistParent + * @typedef {import('unist').Point} Point + * + * @typedef {import('mdast').PhrasingContent} PhrasingContent + * @typedef {import('mdast').StaticPhrasingContent} StaticPhrasingContent + * @typedef {import('mdast').Content} Content + * @typedef {import('mdast').Break} Break + * @typedef {import('mdast').Blockquote} Blockquote + * @typedef {import('mdast').Code} Code + * @typedef {import('mdast').Definition} Definition + * @typedef {import('mdast').Emphasis} Emphasis + * @typedef {import('mdast').Heading} Heading + * @typedef {import('mdast').HTML} HTML + * @typedef {import('mdast').Image} Image + * @typedef {import('mdast').ImageReference} ImageReference + * @typedef {import('mdast').InlineCode} InlineCode + * @typedef {import('mdast').Link} Link + * @typedef {import('mdast').LinkReference} LinkReference + * @typedef {import('mdast').List} List + * @typedef {import('mdast').ListItem} ListItem + * @typedef {import('mdast').Paragraph} Paragraph + * @typedef {import('mdast').Root} Root + * @typedef {import('mdast').Strong} Strong + * @typedef {import('mdast').Text} Text + * @typedef {import('mdast').ThematicBreak} ThematicBreak + * @typedef {import('mdast').ReferenceType} ReferenceType + * @typedef {import('../index.js').CompileData} CompileData + */ + +/** + * @typedef {Root | Content} Node + * @typedef {Extract} Parent + * + * @typedef {Omit & {type: 'fragment', children: Array}} Fragment + */ + +/** + * @callback Transform + * Extra transform, to change the AST afterwards. + * @param {Root} tree + * Tree to transform. 
+ * @returns {Root | undefined | null | void} + * New tree or nothing (in which case the current tree is used). + * + * @callback Handle + * Handle a token. + * @param {CompileContext} this + * Context. + * @param {Token} token + * Current token. + * @returns {void} + * Nothing. + * + * @typedef {Record} Handles + * Token types mapping to handles + * + * @callback OnEnterError + * Handle the case where the `right` token is open, but it is closed (by the + * `left` token) or because we reached the end of the document. + * @param {Omit} this + * Context. + * @param {Token | undefined} left + * Left token. + * @param {Token} right + * Right token. + * @returns {void} + * Nothing. + * + * @callback OnExitError + * Handle the case where the `right` token is open but it is closed by + * exiting the `left` token. + * @param {Omit} this + * Context. + * @param {Token} left + * Left token. + * @param {Token} right + * Right token. + * @returns {void} + * Nothing. + * + * @typedef {[Token, OnEnterError | undefined]} TokenTuple + * Open token on the stack, with an optional error handler for when + * that token isn’t closed properly. + */ + +/** + * @typedef Config + * Configuration. + * + * We have our defaults, but extensions will add more. + * @property {Array} canContainEols + * Token types where line endings are used. + * @property {Handles} enter + * Opening handles. + * @property {Handles} exit + * Closing handles. + * @property {Array} transforms + * Tree transforms. + * + * @typedef {Partial} Extension + * Change how markdown tokens from micromark are turned into mdast. + * + * @typedef CompileContext + * mdast compiler context. + * @property {Array} stack + * Stack of nodes. + * @property {Array} tokenStack + * Stack of tokens. + * @property {(key: Key) => CompileData[Key]} getData + * Get data from the key/value store. + * @property {(key: Key, value?: CompileData[Key]) => void} setData + * Set data into the key/value store. + * @property {(this: CompileContext) => void} buffer + * Capture some of the output data. + * @property {(this: CompileContext) => string} resume + * Stop capturing and access the output data. + * @property {(this: CompileContext, node: Kind, token: Token, onError?: OnEnterError) => Kind} enter + * Enter a token. + * @property {(this: CompileContext, token: Token, onError?: OnExitError) => Node} exit + * Exit a token. + * @property {TokenizeContext['sliceSerialize']} sliceSerialize + * Get the string value of a token. + * @property {Config} config + * Configuration. + * + * @typedef FromMarkdownOptions + * Configuration for how to build mdast. + * @property {Array> | null | undefined} [mdastExtensions] + * Extensions for this utility to change how tokens are turned into a tree. + * + * @typedef {ParseOptions & FromMarkdownOptions} Options + * Configuration. + */ + +// To do: micromark: create a registry of tokens? +// To do: next major: don’t return given `Node` from `enter`. +// To do: next major: remove setter/getter. 
+ +import {ok as assert} from 'uvu/assert' +import {toString} from 'mdast-util-to-string' +import {parse} from 'micromark/lib/parse.js' +import {preprocess} from 'micromark/lib/preprocess.js' +import {postprocess} from 'micromark/lib/postprocess.js' +import {decodeNumericCharacterReference} from 'micromark-util-decode-numeric-character-reference' +import {decodeString} from 'micromark-util-decode-string' +import {normalizeIdentifier} from 'micromark-util-normalize-identifier' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' +import {decodeNamedCharacterReference} from 'decode-named-character-reference' +import {stringifyPosition} from 'unist-util-stringify-position' + +const own = {}.hasOwnProperty + +/** + * @param value + * Markdown to parse. + * @param encoding + * Character encoding for when `value` is `Buffer`. + * @param options + * Configuration. + * @returns + * mdast tree. + */ +export const fromMarkdown = + /** + * @type {( + * ((value: Value, encoding: Encoding, options?: Options | null | undefined) => Root) & + * ((value: Value, options?: Options | null | undefined) => Root) + * )} + */ + ( + /** + * @param {Value} value + * @param {Encoding | Options | null | undefined} [encoding] + * @param {Options | null | undefined} [options] + * @returns {Root} + */ + function (value, encoding, options) { + if (typeof encoding !== 'string') { + options = encoding + encoding = undefined + } + + return compiler(options)( + postprocess( + // @ts-expect-error: micromark types need to accept `null`. + parse(options).document().write(preprocess()(value, encoding, true)) + ) + ) + } + ) + +/** + * Note this compiler only understand complete buffering, not streaming. 
+ * + * @param {Options | null | undefined} [options] + */ +function compiler(options) { + /** @type {Config} */ + const config = { + transforms: [], + canContainEols: ['emphasis', 'fragment', 'heading', 'paragraph', 'strong'], + enter: { + autolink: opener(link), + autolinkProtocol: onenterdata, + autolinkEmail: onenterdata, + atxHeading: opener(heading), + blockQuote: opener(blockQuote), + characterEscape: onenterdata, + characterReference: onenterdata, + codeFenced: opener(codeFlow), + codeFencedFenceInfo: buffer, + codeFencedFenceMeta: buffer, + codeIndented: opener(codeFlow, buffer), + codeText: opener(codeText, buffer), + codeTextData: onenterdata, + data: onenterdata, + codeFlowValue: onenterdata, + definition: opener(definition), + definitionDestinationString: buffer, + definitionLabelString: buffer, + definitionTitleString: buffer, + emphasis: opener(emphasis), + hardBreakEscape: opener(hardBreak), + hardBreakTrailing: opener(hardBreak), + htmlFlow: opener(html, buffer), + htmlFlowData: onenterdata, + htmlText: opener(html, buffer), + htmlTextData: onenterdata, + image: opener(image), + label: buffer, + link: opener(link), + listItem: opener(listItem), + listItemValue: onenterlistitemvalue, + listOrdered: opener(list, onenterlistordered), + listUnordered: opener(list), + paragraph: opener(paragraph), + reference: onenterreference, + referenceString: buffer, + resourceDestinationString: buffer, + resourceTitleString: buffer, + setextHeading: opener(heading), + strong: opener(strong), + thematicBreak: opener(thematicBreak) + }, + exit: { + atxHeading: closer(), + atxHeadingSequence: onexitatxheadingsequence, + autolink: closer(), + autolinkEmail: onexitautolinkemail, + autolinkProtocol: onexitautolinkprotocol, + blockQuote: closer(), + characterEscapeValue: onexitdata, + characterReferenceMarkerHexadecimal: onexitcharacterreferencemarker, + characterReferenceMarkerNumeric: onexitcharacterreferencemarker, + characterReferenceValue: onexitcharacterreferencevalue, + codeFenced: closer(onexitcodefenced), + codeFencedFence: onexitcodefencedfence, + codeFencedFenceInfo: onexitcodefencedfenceinfo, + codeFencedFenceMeta: onexitcodefencedfencemeta, + codeFlowValue: onexitdata, + codeIndented: closer(onexitcodeindented), + codeText: closer(onexitcodetext), + codeTextData: onexitdata, + data: onexitdata, + definition: closer(), + definitionDestinationString: onexitdefinitiondestinationstring, + definitionLabelString: onexitdefinitionlabelstring, + definitionTitleString: onexitdefinitiontitlestring, + emphasis: closer(), + hardBreakEscape: closer(onexithardbreak), + hardBreakTrailing: closer(onexithardbreak), + htmlFlow: closer(onexithtmlflow), + htmlFlowData: onexitdata, + htmlText: closer(onexithtmltext), + htmlTextData: onexitdata, + image: closer(onexitimage), + label: onexitlabel, + labelText: onexitlabeltext, + lineEnding: onexitlineending, + link: closer(onexitlink), + listItem: closer(), + listOrdered: closer(), + listUnordered: closer(), + paragraph: closer(), + referenceString: onexitreferencestring, + resourceDestinationString: onexitresourcedestinationstring, + resourceTitleString: onexitresourcetitlestring, + resource: onexitresource, + setextHeading: closer(onexitsetextheading), + setextHeadingLineSequence: onexitsetextheadinglinesequence, + setextHeadingText: onexitsetextheadingtext, + strong: closer(), + thematicBreak: closer() + } + } + + configure(config, (options || {}).mdastExtensions || []) + + /** @type {CompileData} */ + const data = {} + + return compile + + /** + * Turn 
micromark events into an mdast tree. + * + * @param {Array} events + * Events. + * @returns {Root} + * mdast tree. + */ + function compile(events) { + /** @type {Root} */ + let tree = {type: 'root', children: []} + /** @type {Omit} */ + const context = { + stack: [tree], + tokenStack: [], + config, + enter, + exit, + buffer, + resume, + setData, + getData + } + /** @type {Array} */ + const listStack = [] + let index = -1 + + while (++index < events.length) { + // We preprocess lists to add `listItem` tokens, and to infer whether + // items the list itself are spread out. + if ( + events[index][1].type === types.listOrdered || + events[index][1].type === types.listUnordered + ) { + if (events[index][0] === 'enter') { + listStack.push(index) + } else { + const tail = listStack.pop() + assert(typeof tail === 'number', 'expected list ot be open') + index = prepareList(events, tail, index) + } + } + } + + index = -1 + + while (++index < events.length) { + const handler = config[events[index][0]] + + if (own.call(handler, events[index][1].type)) { + handler[events[index][1].type].call( + Object.assign( + {sliceSerialize: events[index][2].sliceSerialize}, + context + ), + events[index][1] + ) + } + } + + // Handle tokens still being open. + if (context.tokenStack.length > 0) { + const tail = context.tokenStack[context.tokenStack.length - 1] + const handler = tail[1] || defaultOnError + handler.call(context, undefined, tail[0]) + } + + // Figure out `root` position. + tree.position = { + start: point( + events.length > 0 ? events[0][1].start : {line: 1, column: 1, offset: 0} + ), + end: point( + events.length > 0 + ? events[events.length - 2][1].end + : {line: 1, column: 1, offset: 0} + ) + } + + // Call transforms. + index = -1 + while (++index < config.transforms.length) { + tree = config.transforms[index](tree) || tree + } + + return tree + } + + /** + * @param {Array} events + * @param {number} start + * @param {number} length + * @returns {number} + */ + function prepareList(events, start, length) { + let index = start - 1 + let containerBalance = -1 + let listSpread = false + /** @type {Token | undefined} */ + let listItem + /** @type {number | undefined} */ + let lineIndex + /** @type {number | undefined} */ + let firstBlankLineIndex + /** @type {boolean | undefined} */ + let atMarker + + while (++index <= length) { + const event = events[index] + + if ( + event[1].type === types.listUnordered || + event[1].type === types.listOrdered || + event[1].type === types.blockQuote + ) { + if (event[0] === 'enter') { + containerBalance++ + } else { + containerBalance-- + } + + atMarker = undefined + } else if (event[1].type === types.lineEndingBlank) { + if (event[0] === 'enter') { + if ( + listItem && + !atMarker && + !containerBalance && + !firstBlankLineIndex + ) { + firstBlankLineIndex = index + } + + atMarker = undefined + } + } else if ( + event[1].type === types.linePrefix || + event[1].type === types.listItemValue || + event[1].type === types.listItemMarker || + event[1].type === types.listItemPrefix || + event[1].type === types.listItemPrefixWhitespace + ) { + // Empty. 
+ } else { + atMarker = undefined + } + + if ( + (!containerBalance && + event[0] === 'enter' && + event[1].type === types.listItemPrefix) || + (containerBalance === -1 && + event[0] === 'exit' && + (event[1].type === types.listUnordered || + event[1].type === types.listOrdered)) + ) { + if (listItem) { + let tailIndex = index + lineIndex = undefined + + while (tailIndex--) { + const tailEvent = events[tailIndex] + + if ( + tailEvent[1].type === types.lineEnding || + tailEvent[1].type === types.lineEndingBlank + ) { + if (tailEvent[0] === 'exit') continue + + if (lineIndex) { + events[lineIndex][1].type = types.lineEndingBlank + listSpread = true + } + + tailEvent[1].type = types.lineEnding + lineIndex = tailIndex + } else if ( + tailEvent[1].type === types.linePrefix || + tailEvent[1].type === types.blockQuotePrefix || + tailEvent[1].type === types.blockQuotePrefixWhitespace || + tailEvent[1].type === types.blockQuoteMarker || + tailEvent[1].type === types.listItemIndent + ) { + // Empty + } else { + break + } + } + + if ( + firstBlankLineIndex && + (!lineIndex || firstBlankLineIndex < lineIndex) + ) { + // @ts-expect-error Patched. + listItem._spread = true + } + + // Fix position. + listItem.end = Object.assign( + {}, + lineIndex ? events[lineIndex][1].start : event[1].end + ) + + events.splice(lineIndex || index, 0, ['exit', listItem, event[2]]) + index++ + length++ + } + + // Create a new list item. + if (event[1].type === types.listItemPrefix) { + listItem = { + type: 'listItem', + // @ts-expect-error Patched + _spread: false, + start: Object.assign({}, event[1].start) + } + // @ts-expect-error: `listItem` is most definitely defined, TS... + events.splice(index, 0, ['enter', listItem, event[2]]) + index++ + length++ + firstBlankLineIndex = undefined + atMarker = true + } + } + } + + // @ts-expect-error Patched. + events[start][1]._spread = listSpread + return length + } + + /** + * Set data. + * + * @template {keyof CompileData} Key + * Field type. + * @param {Key} key + * Key of field. + * @param {CompileData[Key]} [value] + * New value. + * @returns {void} + * Nothing. + */ + function setData(key, value) { + data[key] = value + } + + /** + * Get data. + * + * @template {keyof CompileData} Key + * Field type. + * @param {Key} key + * Key of field. + * @returns {CompileData[Key]} + * Value. + */ + function getData(key) { + return data[key] + } + + /** + * Create an opener handle. + * + * @param {(token: Token) => Node} create + * Create a node. + * @param {Handle} [and] + * Optional function to also run. + * @returns {Handle} + * Handle. + */ + function opener(create, and) { + return open + + /** + * @this {CompileContext} + * @param {Token} token + * @returns {void} + */ + function open(token) { + enter.call(this, create(token), token) + if (and) and.call(this, token) + } + } + + /** + * @this {CompileContext} + * @returns {void} + */ + function buffer() { + this.stack.push({type: 'fragment', children: []}) + } + + /** + * @template {Node} Kind + * Node type. + * @this {CompileContext} + * Context. + * @param {Kind} node + * Node to enter. + * @param {Token} token + * Corresponding token. + * @param {OnEnterError | undefined} [errorHandler] + * Handle the case where this token is open, but it is closed by something else. + * @returns {Kind} + * The given node. 
+ */ + function enter(node, token, errorHandler) { + const parent = this.stack[this.stack.length - 1] + assert(parent, 'expected `parent`') + assert('children' in parent, 'expected `parent`') + // @ts-expect-error: Assume `Node` can exist as a child of `parent`. + parent.children.push(node) + this.stack.push(node) + this.tokenStack.push([token, errorHandler]) + // @ts-expect-error: `end` will be patched later. + node.position = {start: point(token.start)} + return node + } + + /** + * Create a closer handle. + * + * @param {Handle} [and] + * Optional function to also run. + * @returns {Handle} + * Handle. + */ + function closer(and) { + return close + + /** + * @this {CompileContext} + * @param {Token} token + * @returns {void} + */ + function close(token) { + if (and) and.call(this, token) + exit.call(this, token) + } + } + + /** + * @this {CompileContext} + * Context. + * @param {Token} token + * Corresponding token. + * @param {OnExitError | undefined} [onExitError] + * Handle the case where another token is open. + * @returns {Node} + * The closed node. + */ + function exit(token, onExitError) { + const node = this.stack.pop() + assert(node, 'expected `node`') + const open = this.tokenStack.pop() + + if (!open) { + throw new Error( + 'Cannot close `' + + token.type + + '` (' + + stringifyPosition({start: token.start, end: token.end}) + + '): it’s not open' + ) + } else if (open[0].type !== token.type) { + if (onExitError) { + onExitError.call(this, token, open[0]) + } else { + const handler = open[1] || defaultOnError + handler.call(this, token, open[0]) + } + } + + assert(node.type !== 'fragment', 'unexpected fragment `exit`ed') + assert(node.position, 'expected `position` to be defined') + node.position.end = point(token.end) + return node + } + + /** + * @this {CompileContext} + * @returns {string} + */ + function resume() { + return toString(this.stack.pop()) + } + + // + // Handlers. + // + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onenterlistordered() { + setData('expectingFirstListItemValue', true) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onenterlistitemvalue(token) { + if (getData('expectingFirstListItemValue')) { + const ancestor = this.stack[this.stack.length - 2] + assert(ancestor, 'expected nodes on stack') + assert(ancestor.type === 'list', 'expected list on stack') + ancestor.start = Number.parseInt( + this.sliceSerialize(token), + constants.numericBaseDecimal + ) + setData('expectingFirstListItemValue') + } + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcodefencedfenceinfo() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'code', 'expected code on stack') + node.lang = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcodefencedfencemeta() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'code', 'expected code on stack') + node.meta = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcodefencedfence() { + // Exit if this is the closing fence. 
+ if (getData('flowCodeInside')) return + this.buffer() + setData('flowCodeInside', true) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcodefenced() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'code', 'expected code on stack') + + node.value = data.replace(/^(\r?\n|\r)|(\r?\n|\r)$/g, '') + setData('flowCodeInside') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcodeindented() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'code', 'expected code on stack') + + node.value = data.replace(/(\r?\n|\r)$/g, '') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitdefinitionlabelstring(token) { + const label = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'definition', 'expected definition on stack') + + node.label = label + node.identifier = normalizeIdentifier( + this.sliceSerialize(token) + ).toLowerCase() + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitdefinitiontitlestring() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'definition', 'expected definition on stack') + + node.title = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitdefinitiondestinationstring() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'definition', 'expected definition on stack') + + node.url = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitatxheadingsequence(token) { + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'heading', 'expected heading on stack') + + if (!node.depth) { + const depth = this.sliceSerialize(token).length + + assert( + depth === 1 || + depth === 2 || + depth === 3 || + depth === 4 || + depth === 5 || + depth === 6, + 'expected `depth` between `1` and `6`' + ) + + node.depth = depth + } + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitsetextheadingtext() { + setData('setextHeadingSlurpLineEnding', true) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitsetextheadinglinesequence(token) { + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'heading', 'expected heading on stack') + + node.depth = + this.sliceSerialize(token).charCodeAt(0) === codes.equalsTo ? 1 : 2 + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitsetextheading() { + setData('setextHeadingSlurpLineEnding') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onenterdata(token) { + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert('children' in node, 'expected parent on stack') + + let tail = node.children[node.children.length - 1] + + if (!tail || tail.type !== 'text') { + // Add a new text node. + tail = text() + // @ts-expect-error: we’ll add `end` later. + tail.position = {start: point(token.start)} + // @ts-expect-error: Assume `parent` accepts `text`. 
+ node.children.push(tail) + } + + this.stack.push(tail) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitdata(token) { + const tail = this.stack.pop() + assert(tail, 'expected a `node` to be on the stack') + assert('value' in tail, 'expected a `literal` to be on the stack') + assert(tail.position, 'expected `node` to have an open position') + tail.value += this.sliceSerialize(token) + tail.position.end = point(token.end) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitlineending(token) { + const context = this.stack[this.stack.length - 1] + assert(context, 'expected `node`') + + // If we’re at a hard break, include the line ending in there. + if (getData('atHardBreak')) { + assert('children' in context, 'expected `parent`') + const tail = context.children[context.children.length - 1] + assert(tail.position, 'expected tail to have a starting position') + tail.position.end = point(token.end) + setData('atHardBreak') + return + } + + if ( + !getData('setextHeadingSlurpLineEnding') && + config.canContainEols.includes(context.type) + ) { + onenterdata.call(this, token) + onexitdata.call(this, token) + } + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexithardbreak() { + setData('atHardBreak', true) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexithtmlflow() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'html', 'expected html on stack') + + node.value = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexithtmltext() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'html', 'expected html on stack') + + node.value = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitcodetext() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'inlineCode', 'expected inline code on stack') + + node.value = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitlink() { + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'link', 'expected link on stack') + + // Note: there are also `identifier` and `label` fields on this link node! + // These are used / cleaned here. + + // To do: clean. + if (getData('inReference')) { + /** @type {ReferenceType} */ + const referenceType = getData('referenceType') || 'shortcut' + + node.type += 'Reference' + // @ts-expect-error: mutate. + node.referenceType = referenceType + // @ts-expect-error: mutate. + delete node.url + delete node.title + } else { + // @ts-expect-error: mutate. + delete node.identifier + // @ts-expect-error: mutate. + delete node.label + } + + setData('referenceType') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitimage() { + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'image', 'expected image on stack') + + // Note: there are also `identifier` and `label` fields on this link node! + // These are used / cleaned here. + + // To do: clean. 
+ if (getData('inReference')) { + /** @type {ReferenceType} */ + const referenceType = getData('referenceType') || 'shortcut' + + node.type += 'Reference' + // @ts-expect-error: mutate. + node.referenceType = referenceType + // @ts-expect-error: mutate. + delete node.url + delete node.title + } else { + // @ts-expect-error: mutate. + delete node.identifier + // @ts-expect-error: mutate. + delete node.label + } + + setData('referenceType') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitlabeltext(token) { + const string = this.sliceSerialize(token) + const ancestor = this.stack[this.stack.length - 2] + assert(ancestor, 'expected ancestor on stack') + assert( + ancestor.type === 'image' || ancestor.type === 'link', + 'expected image or link on stack' + ) + + // @ts-expect-error: stash this on the node, as it might become a reference + // later. + ancestor.label = decodeString(string) + // @ts-expect-error: same as above. + ancestor.identifier = normalizeIdentifier(string).toLowerCase() + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitlabel() { + const fragment = this.stack[this.stack.length - 1] + assert(fragment, 'expected node on stack') + assert(fragment.type === 'fragment', 'expected fragment on stack') + const value = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert( + node.type === 'image' || node.type === 'link', + 'expected image or link on stack' + ) + + // Assume a reference. + setData('inReference', true) + + if (node.type === 'link') { + /** @type {Array} */ + // @ts-expect-error: Assume static phrasing content. + const children = fragment.children + + node.children = children + } else { + node.alt = value + } + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitresourcedestinationstring() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert( + node.type === 'image' || node.type === 'link', + 'expected image or link on stack' + ) + node.url = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitresourcetitlestring() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert( + node.type === 'image' || node.type === 'link', + 'expected image or link on stack' + ) + node.title = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitresource() { + setData('inReference') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onenterreference() { + setData('referenceType', 'collapsed') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitreferencestring(token) { + const label = this.resume() + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert( + node.type === 'image' || node.type === 'link', + 'expected image reference or link reference on stack' + ) + + // @ts-expect-error: stash this on the node, as it might become a reference + // later. + node.label = label + // @ts-expect-error: same as above. 
+ node.identifier = normalizeIdentifier( + this.sliceSerialize(token) + ).toLowerCase() + setData('referenceType', 'full') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitcharacterreferencemarker(token) { + assert( + token.type === 'characterReferenceMarkerNumeric' || + token.type === 'characterReferenceMarkerHexadecimal' + ) + setData('characterReferenceType', token.type) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcharacterreferencevalue(token) { + const data = this.sliceSerialize(token) + const type = getData('characterReferenceType') + /** @type {string} */ + let value + + if (type) { + value = decodeNumericCharacterReference( + data, + type === types.characterReferenceMarkerNumeric + ? constants.numericBaseDecimal + : constants.numericBaseHexadecimal + ) + setData('characterReferenceType') + } else { + const result = decodeNamedCharacterReference(data) + assert(result !== false, 'expected reference to decode') + value = result + } + + const tail = this.stack.pop() + assert(tail, 'expected `node`') + assert(tail.position, 'expected `node.position`') + assert('value' in tail, 'expected `node.value`') + tail.value += value + tail.position.end = point(token.end) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitautolinkprotocol(token) { + onexitdata.call(this, token) + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'link', 'expected link on stack') + + node.url = this.sliceSerialize(token) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitautolinkemail(token) { + onexitdata.call(this, token) + const node = this.stack[this.stack.length - 1] + assert(node, 'expected node on stack') + assert(node.type === 'link', 'expected link on stack') + + node.url = 'mailto:' + this.sliceSerialize(token) + } + + // + // Creaters. + // + + /** @returns {Blockquote} */ + function blockQuote() { + return {type: 'blockquote', children: []} + } + + /** @returns {Code} */ + function codeFlow() { + return {type: 'code', lang: null, meta: null, value: ''} + } + + /** @returns {InlineCode} */ + function codeText() { + return {type: 'inlineCode', value: ''} + } + + /** @returns {Definition} */ + function definition() { + return { + type: 'definition', + identifier: '', + label: null, + title: null, + url: '' + } + } + + /** @returns {Emphasis} */ + function emphasis() { + return {type: 'emphasis', children: []} + } + + /** @returns {Heading} */ + function heading() { + // @ts-expect-error `depth` will be set later. + return {type: 'heading', depth: undefined, children: []} + } + + /** @returns {Break} */ + function hardBreak() { + return {type: 'break'} + } + + /** @returns {HTML} */ + function html() { + return {type: 'html', value: ''} + } + + /** @returns {Image} */ + function image() { + return {type: 'image', title: null, url: '', alt: null} + } + + /** @returns {Link} */ + function link() { + return {type: 'link', title: null, url: '', children: []} + } + + /** + * @param {Token} token + * @returns {List} + */ + function list(token) { + return { + type: 'list', + ordered: token.type === 'listOrdered', + start: null, + // @ts-expect-error Patched. + spread: token._spread, + children: [] + } + } + + /** + * @param {Token} token + * @returns {ListItem} + */ + function listItem(token) { + return { + type: 'listItem', + // @ts-expect-error Patched. 
+ spread: token._spread, + checked: null, + children: [] + } + } + + /** @returns {Paragraph} */ + function paragraph() { + return {type: 'paragraph', children: []} + } + + /** @returns {Strong} */ + function strong() { + return {type: 'strong', children: []} + } + + /** @returns {Text} */ + function text() { + return {type: 'text', value: ''} + } + + /** @returns {ThematicBreak} */ + function thematicBreak() { + return {type: 'thematicBreak'} + } +} + +/** + * Copy a point-like value. + * + * @param {Point} d + * Point-like value. + * @returns {Point} + * unist point. + */ +function point(d) { + return {line: d.line, column: d.column, offset: d.offset} +} + +/** + * @param {Config} combined + * @param {Array>} extensions + * @returns {void} + */ +function configure(combined, extensions) { + let index = -1 + + while (++index < extensions.length) { + const value = extensions[index] + + if (Array.isArray(value)) { + configure(combined, value) + } else { + extension(combined, value) + } + } +} + +/** + * @param {Config} combined + * @param {Extension} extension + * @returns {void} + */ +function extension(combined, extension) { + /** @type {keyof Extension} */ + let key + + for (key in extension) { + if (own.call(extension, key)) { + if (key === 'canContainEols') { + const right = extension[key] + if (right) { + combined[key].push(...right) + } + } else if (key === 'transforms') { + const right = extension[key] + if (right) { + combined[key].push(...right) + } + } else if (key === 'enter' || key === 'exit') { + const right = extension[key] + if (right) { + Object.assign(combined[key], right) + } + } + } + } +} + +/** @type {OnEnterError} */ +function defaultOnError(left, right) { + if (left) { + throw new Error( + 'Cannot close `' + + left.type + + '` (' + + stringifyPosition({start: left.start, end: left.end}) + + '): a different token (`' + + right.type + + '`, ' + + stringifyPosition({start: right.start, end: right.end}) + + ') is open' + ) + } else { + throw new Error( + 'Cannot close document, a token (`' + + right.type + + '`, ' + + stringifyPosition({start: right.start, end: right.end}) + + ') is still open' + ) + } +} diff --git a/_extensions/d2/node_modules/mdast-util-from-markdown/index.d.ts b/_extensions/d2/node_modules/mdast-util-from-markdown/index.d.ts new file mode 100644 index 00000000..b5d391fe --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-from-markdown/index.d.ts @@ -0,0 +1,78 @@ +import type {OnEnterError} from './lib/index.js' + +export type { + CompileContext, + Encoding, + Extension, + Handle, + OnEnterError, + OnExitError, + Options, + Token, + Transform, + Value +} from './lib/index.js' + +/** + * Deprecated: use `OnEnterError`. + */ +// To do: next major: remove. +export type OnError = OnEnterError + +/** + * Interface of tracked data. + * + * When working on extensions that use more data, extend the corresponding + * interface to register their types: + * + * ```ts + * declare module 'mdast-util-from-markdown' { + * interface CompileData { + * // Register a new field. + * mathFlowInside?: boolean | undefined + * } + * } + * ``` + */ +// eslint-disable-next-line @typescript-eslint/consistent-type-definitions +export interface CompileData { + /** + * Whether we’re inside a hard break. + */ + atHardBreak?: boolean | undefined + + /** + * Current character reference type. + */ + characterReferenceType?: + | 'characterReferenceMarkerHexadecimal' + | 'characterReferenceMarkerNumeric' + | undefined + + /** + * Whether a first list item value (`1` in `1. 
a`) is expected. + */ + expectingFirstListItemValue?: boolean | undefined + + /** + * Whether we’re in flow code. + */ + flowCodeInside?: boolean | undefined + + /** + * Whether we’re in a reference. + */ + inReference?: boolean | undefined + + /** + * Whether we’re expecting a line ending from a setext heading, which can be slurped. + */ + setextHeadingSlurpLineEnding?: boolean | undefined + + /** + * Current reference. + */ + referenceType?: 'collapsed' | 'full' | undefined +} + +export {fromMarkdown} from './lib/index.js' diff --git a/_extensions/d2/node_modules/mdast-util-from-markdown/index.js b/_extensions/d2/node_modules/mdast-util-from-markdown/index.js new file mode 100644 index 00000000..d8bcf034 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-from-markdown/index.js @@ -0,0 +1,2 @@ +// Note: types exported from `index.d.ts`. +export {fromMarkdown} from './lib/index.js' diff --git a/_extensions/d2/node_modules/mdast-util-from-markdown/lib/index.d.ts b/_extensions/d2/node_modules/mdast-util-from-markdown/lib/index.d.ts new file mode 100644 index 00000000..e0f97b64 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-from-markdown/lib/index.d.ts @@ -0,0 +1,184 @@ +/** + * @param value + * Markdown to parse. + * @param encoding + * Character encoding for when `value` is `Buffer`. + * @param options + * Configuration. + * @returns + * mdast tree. + */ +export const fromMarkdown: (( + value: Value, + encoding: Encoding, + options?: Options | null | undefined +) => Root) & + ((value: Value, options?: Options | null | undefined) => Root) +export type Encoding = import('micromark-util-types').Encoding +export type Event = import('micromark-util-types').Event +export type ParseOptions = import('micromark-util-types').ParseOptions +export type Token = import('micromark-util-types').Token +export type TokenizeContext = import('micromark-util-types').TokenizeContext +export type Value = import('micromark-util-types').Value +export type UnistParent = import('unist').Parent +export type Point = import('unist').Point +export type PhrasingContent = import('mdast').PhrasingContent +export type StaticPhrasingContent = import('mdast').StaticPhrasingContent +export type Content = import('mdast').Content +export type Break = import('mdast').Break +export type Blockquote = import('mdast').Blockquote +export type Code = import('mdast').Code +export type Definition = import('mdast').Definition +export type Emphasis = import('mdast').Emphasis +export type Heading = import('mdast').Heading +export type HTML = import('mdast').HTML +export type Image = import('mdast').Image +export type ImageReference = import('mdast').ImageReference +export type InlineCode = import('mdast').InlineCode +export type Link = import('mdast').Link +export type LinkReference = import('mdast').LinkReference +export type List = import('mdast').List +export type ListItem = import('mdast').ListItem +export type Paragraph = import('mdast').Paragraph +export type Root = import('mdast').Root +export type Strong = import('mdast').Strong +export type Text = import('mdast').Text +export type ThematicBreak = import('mdast').ThematicBreak +export type ReferenceType = import('mdast').ReferenceType +export type CompileData = import('../index.js').CompileData +export type Node = Root | Content +export type Parent = Extract +export type Fragment = Omit & { + type: 'fragment' + children: Array +} +/** + * Extra transform, to change the AST afterwards. 
+ */ +export type Transform = (tree: Root) => Root | undefined | null | void +/** + * Handle a token. + */ +export type Handle = (this: CompileContext, token: Token) => void +/** + * Token types mapping to handles + */ +export type Handles = Record +/** + * Handle the case where the `right` token is open, but it is closed (by the + * `left` token) or because we reached the end of the document. + */ +export type OnEnterError = ( + this: Omit, + left: Token | undefined, + right: Token +) => void +/** + * Handle the case where the `right` token is open but it is closed by + * exiting the `left` token. + */ +export type OnExitError = ( + this: Omit, + left: Token, + right: Token +) => void +/** + * Open token on the stack, with an optional error handler for when + * that token isn’t closed properly. + */ +export type TokenTuple = [Token, OnEnterError | undefined] +/** + * Configuration. + * + * We have our defaults, but extensions will add more. + */ +export type Config = { + /** + * Token types where line endings are used. + */ + canContainEols: Array + /** + * Opening handles. + */ + enter: Handles + /** + * Closing handles. + */ + exit: Handles + /** + * Tree transforms. + */ + transforms: Array +} +/** + * Change how markdown tokens from micromark are turned into mdast. + */ +export type Extension = Partial +/** + * mdast compiler context. + */ +export type CompileContext = { + /** + * Stack of nodes. + */ + stack: Array + /** + * Stack of tokens. + */ + tokenStack: Array + /** + * Get data from the key/value store. + */ + getData: ( + key: Key + ) => import('../index.js').CompileData[Key] + /** + * Set data into the key/value store. + */ + setData: ( + key: Key_1, + value?: import('../index.js').CompileData[Key_1] | undefined + ) => void + /** + * Capture some of the output data. + */ + buffer: (this: CompileContext) => void + /** + * Stop capturing and access the output data. + */ + resume: (this: CompileContext) => string + /** + * Enter a token. + */ + enter: ( + this: CompileContext, + node: Kind, + token: Token, + onError?: OnEnterError + ) => Kind + /** + * Exit a token. + */ + exit: (this: CompileContext, token: Token, onError?: OnExitError) => Node + /** + * Get the string value of a token. + */ + sliceSerialize: TokenizeContext['sliceSerialize'] + /** + * Configuration. + */ + config: Config +} +/** + * Configuration for how to build mdast. + */ +export type FromMarkdownOptions = { + /** + * Extensions for this utility to change how tokens are turned into a tree. + */ + mdastExtensions?: Array> | null | undefined +} +/** + * Configuration. 
+ */ +export type Options = ParseOptions & FromMarkdownOptions diff --git a/_extensions/d2/node_modules/mdast-util-from-markdown/lib/index.js b/_extensions/d2/node_modules/mdast-util-from-markdown/lib/index.js new file mode 100644 index 00000000..e006f86c --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-from-markdown/lib/index.js @@ -0,0 +1,1390 @@ +/** + * @typedef {import('micromark-util-types').Encoding} Encoding + * @typedef {import('micromark-util-types').Event} Event + * @typedef {import('micromark-util-types').ParseOptions} ParseOptions + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext + * @typedef {import('micromark-util-types').Value} Value + * + * @typedef {import('unist').Parent} UnistParent + * @typedef {import('unist').Point} Point + * + * @typedef {import('mdast').PhrasingContent} PhrasingContent + * @typedef {import('mdast').StaticPhrasingContent} StaticPhrasingContent + * @typedef {import('mdast').Content} Content + * @typedef {import('mdast').Break} Break + * @typedef {import('mdast').Blockquote} Blockquote + * @typedef {import('mdast').Code} Code + * @typedef {import('mdast').Definition} Definition + * @typedef {import('mdast').Emphasis} Emphasis + * @typedef {import('mdast').Heading} Heading + * @typedef {import('mdast').HTML} HTML + * @typedef {import('mdast').Image} Image + * @typedef {import('mdast').ImageReference} ImageReference + * @typedef {import('mdast').InlineCode} InlineCode + * @typedef {import('mdast').Link} Link + * @typedef {import('mdast').LinkReference} LinkReference + * @typedef {import('mdast').List} List + * @typedef {import('mdast').ListItem} ListItem + * @typedef {import('mdast').Paragraph} Paragraph + * @typedef {import('mdast').Root} Root + * @typedef {import('mdast').Strong} Strong + * @typedef {import('mdast').Text} Text + * @typedef {import('mdast').ThematicBreak} ThematicBreak + * @typedef {import('mdast').ReferenceType} ReferenceType + * @typedef {import('../index.js').CompileData} CompileData + */ + +/** + * @typedef {Root | Content} Node + * @typedef {Extract} Parent + * + * @typedef {Omit & {type: 'fragment', children: Array}} Fragment + */ + +/** + * @callback Transform + * Extra transform, to change the AST afterwards. + * @param {Root} tree + * Tree to transform. + * @returns {Root | undefined | null | void} + * New tree or nothing (in which case the current tree is used). + * + * @callback Handle + * Handle a token. + * @param {CompileContext} this + * Context. + * @param {Token} token + * Current token. + * @returns {void} + * Nothing. + * + * @typedef {Record} Handles + * Token types mapping to handles + * + * @callback OnEnterError + * Handle the case where the `right` token is open, but it is closed (by the + * `left` token) or because we reached the end of the document. + * @param {Omit} this + * Context. + * @param {Token | undefined} left + * Left token. + * @param {Token} right + * Right token. + * @returns {void} + * Nothing. + * + * @callback OnExitError + * Handle the case where the `right` token is open but it is closed by + * exiting the `left` token. + * @param {Omit} this + * Context. + * @param {Token} left + * Left token. + * @param {Token} right + * Right token. + * @returns {void} + * Nothing. + * + * @typedef {[Token, OnEnterError | undefined]} TokenTuple + * Open token on the stack, with an optional error handler for when + * that token isn’t closed properly. + */ + +/** + * @typedef Config + * Configuration. 
+ * + * We have our defaults, but extensions will add more. + * @property {Array} canContainEols + * Token types where line endings are used. + * @property {Handles} enter + * Opening handles. + * @property {Handles} exit + * Closing handles. + * @property {Array} transforms + * Tree transforms. + * + * @typedef {Partial} Extension + * Change how markdown tokens from micromark are turned into mdast. + * + * @typedef CompileContext + * mdast compiler context. + * @property {Array} stack + * Stack of nodes. + * @property {Array} tokenStack + * Stack of tokens. + * @property {(key: Key) => CompileData[Key]} getData + * Get data from the key/value store. + * @property {(key: Key, value?: CompileData[Key]) => void} setData + * Set data into the key/value store. + * @property {(this: CompileContext) => void} buffer + * Capture some of the output data. + * @property {(this: CompileContext) => string} resume + * Stop capturing and access the output data. + * @property {(this: CompileContext, node: Kind, token: Token, onError?: OnEnterError) => Kind} enter + * Enter a token. + * @property {(this: CompileContext, token: Token, onError?: OnExitError) => Node} exit + * Exit a token. + * @property {TokenizeContext['sliceSerialize']} sliceSerialize + * Get the string value of a token. + * @property {Config} config + * Configuration. + * + * @typedef FromMarkdownOptions + * Configuration for how to build mdast. + * @property {Array> | null | undefined} [mdastExtensions] + * Extensions for this utility to change how tokens are turned into a tree. + * + * @typedef {ParseOptions & FromMarkdownOptions} Options + * Configuration. + */ + +// To do: micromark: create a registry of tokens? +// To do: next major: don’t return given `Node` from `enter`. +// To do: next major: remove setter/getter. + +import {toString} from 'mdast-util-to-string' +import {parse} from 'micromark/lib/parse.js' +import {preprocess} from 'micromark/lib/preprocess.js' +import {postprocess} from 'micromark/lib/postprocess.js' +import {decodeNumericCharacterReference} from 'micromark-util-decode-numeric-character-reference' +import {decodeString} from 'micromark-util-decode-string' +import {normalizeIdentifier} from 'micromark-util-normalize-identifier' +import {decodeNamedCharacterReference} from 'decode-named-character-reference' +import {stringifyPosition} from 'unist-util-stringify-position' +const own = {}.hasOwnProperty + +/** + * @param value + * Markdown to parse. + * @param encoding + * Character encoding for when `value` is `Buffer`. + * @param options + * Configuration. + * @returns + * mdast tree. + */ +export const fromMarkdown = + /** + * @type {( + * ((value: Value, encoding: Encoding, options?: Options | null | undefined) => Root) & + * ((value: Value, options?: Options | null | undefined) => Root) + * )} + */ + + /** + * @param {Value} value + * @param {Encoding | Options | null | undefined} [encoding] + * @param {Options | null | undefined} [options] + * @returns {Root} + */ + function (value, encoding, options) { + if (typeof encoding !== 'string') { + options = encoding + encoding = undefined + } + return compiler(options)( + postprocess( + // @ts-expect-error: micromark types need to accept `null`. + parse(options).document().write(preprocess()(value, encoding, true)) + ) + ) + } + +/** + * Note this compiler only understand complete buffering, not streaming. 
+ * + * @param {Options | null | undefined} [options] + */ +function compiler(options) { + /** @type {Config} */ + const config = { + transforms: [], + canContainEols: ['emphasis', 'fragment', 'heading', 'paragraph', 'strong'], + enter: { + autolink: opener(link), + autolinkProtocol: onenterdata, + autolinkEmail: onenterdata, + atxHeading: opener(heading), + blockQuote: opener(blockQuote), + characterEscape: onenterdata, + characterReference: onenterdata, + codeFenced: opener(codeFlow), + codeFencedFenceInfo: buffer, + codeFencedFenceMeta: buffer, + codeIndented: opener(codeFlow, buffer), + codeText: opener(codeText, buffer), + codeTextData: onenterdata, + data: onenterdata, + codeFlowValue: onenterdata, + definition: opener(definition), + definitionDestinationString: buffer, + definitionLabelString: buffer, + definitionTitleString: buffer, + emphasis: opener(emphasis), + hardBreakEscape: opener(hardBreak), + hardBreakTrailing: opener(hardBreak), + htmlFlow: opener(html, buffer), + htmlFlowData: onenterdata, + htmlText: opener(html, buffer), + htmlTextData: onenterdata, + image: opener(image), + label: buffer, + link: opener(link), + listItem: opener(listItem), + listItemValue: onenterlistitemvalue, + listOrdered: opener(list, onenterlistordered), + listUnordered: opener(list), + paragraph: opener(paragraph), + reference: onenterreference, + referenceString: buffer, + resourceDestinationString: buffer, + resourceTitleString: buffer, + setextHeading: opener(heading), + strong: opener(strong), + thematicBreak: opener(thematicBreak) + }, + exit: { + atxHeading: closer(), + atxHeadingSequence: onexitatxheadingsequence, + autolink: closer(), + autolinkEmail: onexitautolinkemail, + autolinkProtocol: onexitautolinkprotocol, + blockQuote: closer(), + characterEscapeValue: onexitdata, + characterReferenceMarkerHexadecimal: onexitcharacterreferencemarker, + characterReferenceMarkerNumeric: onexitcharacterreferencemarker, + characterReferenceValue: onexitcharacterreferencevalue, + codeFenced: closer(onexitcodefenced), + codeFencedFence: onexitcodefencedfence, + codeFencedFenceInfo: onexitcodefencedfenceinfo, + codeFencedFenceMeta: onexitcodefencedfencemeta, + codeFlowValue: onexitdata, + codeIndented: closer(onexitcodeindented), + codeText: closer(onexitcodetext), + codeTextData: onexitdata, + data: onexitdata, + definition: closer(), + definitionDestinationString: onexitdefinitiondestinationstring, + definitionLabelString: onexitdefinitionlabelstring, + definitionTitleString: onexitdefinitiontitlestring, + emphasis: closer(), + hardBreakEscape: closer(onexithardbreak), + hardBreakTrailing: closer(onexithardbreak), + htmlFlow: closer(onexithtmlflow), + htmlFlowData: onexitdata, + htmlText: closer(onexithtmltext), + htmlTextData: onexitdata, + image: closer(onexitimage), + label: onexitlabel, + labelText: onexitlabeltext, + lineEnding: onexitlineending, + link: closer(onexitlink), + listItem: closer(), + listOrdered: closer(), + listUnordered: closer(), + paragraph: closer(), + referenceString: onexitreferencestring, + resourceDestinationString: onexitresourcedestinationstring, + resourceTitleString: onexitresourcetitlestring, + resource: onexitresource, + setextHeading: closer(onexitsetextheading), + setextHeadingLineSequence: onexitsetextheadinglinesequence, + setextHeadingText: onexitsetextheadingtext, + strong: closer(), + thematicBreak: closer() + } + } + configure(config, (options || {}).mdastExtensions || []) + + /** @type {CompileData} */ + const data = {} + return compile + + /** + * Turn 
micromark events into an mdast tree. + * + * @param {Array} events + * Events. + * @returns {Root} + * mdast tree. + */ + function compile(events) { + /** @type {Root} */ + let tree = { + type: 'root', + children: [] + } + /** @type {Omit} */ + const context = { + stack: [tree], + tokenStack: [], + config, + enter, + exit, + buffer, + resume, + setData, + getData + } + /** @type {Array} */ + const listStack = [] + let index = -1 + while (++index < events.length) { + // We preprocess lists to add `listItem` tokens, and to infer whether + // items the list itself are spread out. + if ( + events[index][1].type === 'listOrdered' || + events[index][1].type === 'listUnordered' + ) { + if (events[index][0] === 'enter') { + listStack.push(index) + } else { + const tail = listStack.pop() + index = prepareList(events, tail, index) + } + } + } + index = -1 + while (++index < events.length) { + const handler = config[events[index][0]] + if (own.call(handler, events[index][1].type)) { + handler[events[index][1].type].call( + Object.assign( + { + sliceSerialize: events[index][2].sliceSerialize + }, + context + ), + events[index][1] + ) + } + } + + // Handle tokens still being open. + if (context.tokenStack.length > 0) { + const tail = context.tokenStack[context.tokenStack.length - 1] + const handler = tail[1] || defaultOnError + handler.call(context, undefined, tail[0]) + } + + // Figure out `root` position. + tree.position = { + start: point( + events.length > 0 + ? events[0][1].start + : { + line: 1, + column: 1, + offset: 0 + } + ), + end: point( + events.length > 0 + ? events[events.length - 2][1].end + : { + line: 1, + column: 1, + offset: 0 + } + ) + } + + // Call transforms. + index = -1 + while (++index < config.transforms.length) { + tree = config.transforms[index](tree) || tree + } + return tree + } + + /** + * @param {Array} events + * @param {number} start + * @param {number} length + * @returns {number} + */ + function prepareList(events, start, length) { + let index = start - 1 + let containerBalance = -1 + let listSpread = false + /** @type {Token | undefined} */ + let listItem + /** @type {number | undefined} */ + let lineIndex + /** @type {number | undefined} */ + let firstBlankLineIndex + /** @type {boolean | undefined} */ + let atMarker + while (++index <= length) { + const event = events[index] + if ( + event[1].type === 'listUnordered' || + event[1].type === 'listOrdered' || + event[1].type === 'blockQuote' + ) { + if (event[0] === 'enter') { + containerBalance++ + } else { + containerBalance-- + } + atMarker = undefined + } else if (event[1].type === 'lineEndingBlank') { + if (event[0] === 'enter') { + if ( + listItem && + !atMarker && + !containerBalance && + !firstBlankLineIndex + ) { + firstBlankLineIndex = index + } + atMarker = undefined + } + } else if ( + event[1].type === 'linePrefix' || + event[1].type === 'listItemValue' || + event[1].type === 'listItemMarker' || + event[1].type === 'listItemPrefix' || + event[1].type === 'listItemPrefixWhitespace' + ) { + // Empty. 
+ } else { + atMarker = undefined + } + if ( + (!containerBalance && + event[0] === 'enter' && + event[1].type === 'listItemPrefix') || + (containerBalance === -1 && + event[0] === 'exit' && + (event[1].type === 'listUnordered' || + event[1].type === 'listOrdered')) + ) { + if (listItem) { + let tailIndex = index + lineIndex = undefined + while (tailIndex--) { + const tailEvent = events[tailIndex] + if ( + tailEvent[1].type === 'lineEnding' || + tailEvent[1].type === 'lineEndingBlank' + ) { + if (tailEvent[0] === 'exit') continue + if (lineIndex) { + events[lineIndex][1].type = 'lineEndingBlank' + listSpread = true + } + tailEvent[1].type = 'lineEnding' + lineIndex = tailIndex + } else if ( + tailEvent[1].type === 'linePrefix' || + tailEvent[1].type === 'blockQuotePrefix' || + tailEvent[1].type === 'blockQuotePrefixWhitespace' || + tailEvent[1].type === 'blockQuoteMarker' || + tailEvent[1].type === 'listItemIndent' + ) { + // Empty + } else { + break + } + } + if ( + firstBlankLineIndex && + (!lineIndex || firstBlankLineIndex < lineIndex) + ) { + // @ts-expect-error Patched. + listItem._spread = true + } + + // Fix position. + listItem.end = Object.assign( + {}, + lineIndex ? events[lineIndex][1].start : event[1].end + ) + events.splice(lineIndex || index, 0, ['exit', listItem, event[2]]) + index++ + length++ + } + + // Create a new list item. + if (event[1].type === 'listItemPrefix') { + listItem = { + type: 'listItem', + // @ts-expect-error Patched + _spread: false, + start: Object.assign({}, event[1].start) + } + // @ts-expect-error: `listItem` is most definitely defined, TS... + events.splice(index, 0, ['enter', listItem, event[2]]) + index++ + length++ + firstBlankLineIndex = undefined + atMarker = true + } + } + } + + // @ts-expect-error Patched. + events[start][1]._spread = listSpread + return length + } + + /** + * Set data. + * + * @template {keyof CompileData} Key + * Field type. + * @param {Key} key + * Key of field. + * @param {CompileData[Key]} [value] + * New value. + * @returns {void} + * Nothing. + */ + function setData(key, value) { + data[key] = value + } + + /** + * Get data. + * + * @template {keyof CompileData} Key + * Field type. + * @param {Key} key + * Key of field. + * @returns {CompileData[Key]} + * Value. + */ + function getData(key) { + return data[key] + } + + /** + * Create an opener handle. + * + * @param {(token: Token) => Node} create + * Create a node. + * @param {Handle} [and] + * Optional function to also run. + * @returns {Handle} + * Handle. + */ + function opener(create, and) { + return open + + /** + * @this {CompileContext} + * @param {Token} token + * @returns {void} + */ + function open(token) { + enter.call(this, create(token), token) + if (and) and.call(this, token) + } + } + + /** + * @this {CompileContext} + * @returns {void} + */ + function buffer() { + this.stack.push({ + type: 'fragment', + children: [] + }) + } + + /** + * @template {Node} Kind + * Node type. + * @this {CompileContext} + * Context. + * @param {Kind} node + * Node to enter. + * @param {Token} token + * Corresponding token. + * @param {OnEnterError | undefined} [errorHandler] + * Handle the case where this token is open, but it is closed by something else. + * @returns {Kind} + * The given node. + */ + function enter(node, token, errorHandler) { + const parent = this.stack[this.stack.length - 1] + // @ts-expect-error: Assume `Node` can exist as a child of `parent`. 
+ parent.children.push(node) + this.stack.push(node) + this.tokenStack.push([token, errorHandler]) + // @ts-expect-error: `end` will be patched later. + node.position = { + start: point(token.start) + } + return node + } + + /** + * Create a closer handle. + * + * @param {Handle} [and] + * Optional function to also run. + * @returns {Handle} + * Handle. + */ + function closer(and) { + return close + + /** + * @this {CompileContext} + * @param {Token} token + * @returns {void} + */ + function close(token) { + if (and) and.call(this, token) + exit.call(this, token) + } + } + + /** + * @this {CompileContext} + * Context. + * @param {Token} token + * Corresponding token. + * @param {OnExitError | undefined} [onExitError] + * Handle the case where another token is open. + * @returns {Node} + * The closed node. + */ + function exit(token, onExitError) { + const node = this.stack.pop() + const open = this.tokenStack.pop() + if (!open) { + throw new Error( + 'Cannot close `' + + token.type + + '` (' + + stringifyPosition({ + start: token.start, + end: token.end + }) + + '): it’s not open' + ) + } else if (open[0].type !== token.type) { + if (onExitError) { + onExitError.call(this, token, open[0]) + } else { + const handler = open[1] || defaultOnError + handler.call(this, token, open[0]) + } + } + node.position.end = point(token.end) + return node + } + + /** + * @this {CompileContext} + * @returns {string} + */ + function resume() { + return toString(this.stack.pop()) + } + + // + // Handlers. + // + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onenterlistordered() { + setData('expectingFirstListItemValue', true) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onenterlistitemvalue(token) { + if (getData('expectingFirstListItemValue')) { + const ancestor = this.stack[this.stack.length - 2] + ancestor.start = Number.parseInt(this.sliceSerialize(token), 10) + setData('expectingFirstListItemValue') + } + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcodefencedfenceinfo() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + node.lang = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcodefencedfencemeta() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + node.meta = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcodefencedfence() { + // Exit if this is the closing fence. 
+ if (getData('flowCodeInside')) return + this.buffer() + setData('flowCodeInside', true) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcodefenced() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + node.value = data.replace(/^(\r?\n|\r)|(\r?\n|\r)$/g, '') + setData('flowCodeInside') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcodeindented() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + node.value = data.replace(/(\r?\n|\r)$/g, '') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitdefinitionlabelstring(token) { + const label = this.resume() + const node = this.stack[this.stack.length - 1] + node.label = label + node.identifier = normalizeIdentifier( + this.sliceSerialize(token) + ).toLowerCase() + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitdefinitiontitlestring() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + node.title = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitdefinitiondestinationstring() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + node.url = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitatxheadingsequence(token) { + const node = this.stack[this.stack.length - 1] + if (!node.depth) { + const depth = this.sliceSerialize(token).length + node.depth = depth + } + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitsetextheadingtext() { + setData('setextHeadingSlurpLineEnding', true) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitsetextheadinglinesequence(token) { + const node = this.stack[this.stack.length - 1] + node.depth = this.sliceSerialize(token).charCodeAt(0) === 61 ? 1 : 2 + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitsetextheading() { + setData('setextHeadingSlurpLineEnding') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onenterdata(token) { + const node = this.stack[this.stack.length - 1] + let tail = node.children[node.children.length - 1] + if (!tail || tail.type !== 'text') { + // Add a new text node. + tail = text() + // @ts-expect-error: we’ll add `end` later. + tail.position = { + start: point(token.start) + } + // @ts-expect-error: Assume `parent` accepts `text`. + node.children.push(tail) + } + this.stack.push(tail) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitdata(token) { + const tail = this.stack.pop() + tail.value += this.sliceSerialize(token) + tail.position.end = point(token.end) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitlineending(token) { + const context = this.stack[this.stack.length - 1] + // If we’re at a hard break, include the line ending in there. 
+ if (getData('atHardBreak')) { + const tail = context.children[context.children.length - 1] + tail.position.end = point(token.end) + setData('atHardBreak') + return + } + if ( + !getData('setextHeadingSlurpLineEnding') && + config.canContainEols.includes(context.type) + ) { + onenterdata.call(this, token) + onexitdata.call(this, token) + } + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexithardbreak() { + setData('atHardBreak', true) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexithtmlflow() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + node.value = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexithtmltext() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + node.value = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitcodetext() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + node.value = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitlink() { + const node = this.stack[this.stack.length - 1] + // Note: there are also `identifier` and `label` fields on this link node! + // These are used / cleaned here. + + // To do: clean. + if (getData('inReference')) { + /** @type {ReferenceType} */ + const referenceType = getData('referenceType') || 'shortcut' + node.type += 'Reference' + // @ts-expect-error: mutate. + node.referenceType = referenceType + // @ts-expect-error: mutate. + delete node.url + delete node.title + } else { + // @ts-expect-error: mutate. + delete node.identifier + // @ts-expect-error: mutate. + delete node.label + } + setData('referenceType') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitimage() { + const node = this.stack[this.stack.length - 1] + // Note: there are also `identifier` and `label` fields on this link node! + // These are used / cleaned here. + + // To do: clean. + if (getData('inReference')) { + /** @type {ReferenceType} */ + const referenceType = getData('referenceType') || 'shortcut' + node.type += 'Reference' + // @ts-expect-error: mutate. + node.referenceType = referenceType + // @ts-expect-error: mutate. + delete node.url + delete node.title + } else { + // @ts-expect-error: mutate. + delete node.identifier + // @ts-expect-error: mutate. + delete node.label + } + setData('referenceType') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitlabeltext(token) { + const string = this.sliceSerialize(token) + const ancestor = this.stack[this.stack.length - 2] + // @ts-expect-error: stash this on the node, as it might become a reference + // later. + ancestor.label = decodeString(string) + // @ts-expect-error: same as above. + ancestor.identifier = normalizeIdentifier(string).toLowerCase() + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitlabel() { + const fragment = this.stack[this.stack.length - 1] + const value = this.resume() + const node = this.stack[this.stack.length - 1] + // Assume a reference. + setData('inReference', true) + if (node.type === 'link') { + /** @type {Array} */ + // @ts-expect-error: Assume static phrasing content. 
+ const children = fragment.children + node.children = children + } else { + node.alt = value + } + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitresourcedestinationstring() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + node.url = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitresourcetitlestring() { + const data = this.resume() + const node = this.stack[this.stack.length - 1] + node.title = data + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitresource() { + setData('inReference') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onenterreference() { + setData('referenceType', 'collapsed') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitreferencestring(token) { + const label = this.resume() + const node = this.stack[this.stack.length - 1] + // @ts-expect-error: stash this on the node, as it might become a reference + // later. + node.label = label + // @ts-expect-error: same as above. + node.identifier = normalizeIdentifier( + this.sliceSerialize(token) + ).toLowerCase() + setData('referenceType', 'full') + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + + function onexitcharacterreferencemarker(token) { + setData('characterReferenceType', token.type) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitcharacterreferencevalue(token) { + const data = this.sliceSerialize(token) + const type = getData('characterReferenceType') + /** @type {string} */ + let value + if (type) { + value = decodeNumericCharacterReference( + data, + type === 'characterReferenceMarkerNumeric' ? 10 : 16 + ) + setData('characterReferenceType') + } else { + const result = decodeNamedCharacterReference(data) + value = result + } + const tail = this.stack.pop() + tail.value += value + tail.position.end = point(token.end) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitautolinkprotocol(token) { + onexitdata.call(this, token) + const node = this.stack[this.stack.length - 1] + node.url = this.sliceSerialize(token) + } + + /** + * @this {CompileContext} + * @type {Handle} + */ + function onexitautolinkemail(token) { + onexitdata.call(this, token) + const node = this.stack[this.stack.length - 1] + node.url = 'mailto:' + this.sliceSerialize(token) + } + + // + // Creaters. + // + + /** @returns {Blockquote} */ + function blockQuote() { + return { + type: 'blockquote', + children: [] + } + } + + /** @returns {Code} */ + function codeFlow() { + return { + type: 'code', + lang: null, + meta: null, + value: '' + } + } + + /** @returns {InlineCode} */ + function codeText() { + return { + type: 'inlineCode', + value: '' + } + } + + /** @returns {Definition} */ + function definition() { + return { + type: 'definition', + identifier: '', + label: null, + title: null, + url: '' + } + } + + /** @returns {Emphasis} */ + function emphasis() { + return { + type: 'emphasis', + children: [] + } + } + + /** @returns {Heading} */ + function heading() { + // @ts-expect-error `depth` will be set later. 
+ return { + type: 'heading', + depth: undefined, + children: [] + } + } + + /** @returns {Break} */ + function hardBreak() { + return { + type: 'break' + } + } + + /** @returns {HTML} */ + function html() { + return { + type: 'html', + value: '' + } + } + + /** @returns {Image} */ + function image() { + return { + type: 'image', + title: null, + url: '', + alt: null + } + } + + /** @returns {Link} */ + function link() { + return { + type: 'link', + title: null, + url: '', + children: [] + } + } + + /** + * @param {Token} token + * @returns {List} + */ + function list(token) { + return { + type: 'list', + ordered: token.type === 'listOrdered', + start: null, + // @ts-expect-error Patched. + spread: token._spread, + children: [] + } + } + + /** + * @param {Token} token + * @returns {ListItem} + */ + function listItem(token) { + return { + type: 'listItem', + // @ts-expect-error Patched. + spread: token._spread, + checked: null, + children: [] + } + } + + /** @returns {Paragraph} */ + function paragraph() { + return { + type: 'paragraph', + children: [] + } + } + + /** @returns {Strong} */ + function strong() { + return { + type: 'strong', + children: [] + } + } + + /** @returns {Text} */ + function text() { + return { + type: 'text', + value: '' + } + } + + /** @returns {ThematicBreak} */ + function thematicBreak() { + return { + type: 'thematicBreak' + } + } +} + +/** + * Copy a point-like value. + * + * @param {Point} d + * Point-like value. + * @returns {Point} + * unist point. + */ +function point(d) { + return { + line: d.line, + column: d.column, + offset: d.offset + } +} + +/** + * @param {Config} combined + * @param {Array>} extensions + * @returns {void} + */ +function configure(combined, extensions) { + let index = -1 + while (++index < extensions.length) { + const value = extensions[index] + if (Array.isArray(value)) { + configure(combined, value) + } else { + extension(combined, value) + } + } +} + +/** + * @param {Config} combined + * @param {Extension} extension + * @returns {void} + */ +function extension(combined, extension) { + /** @type {keyof Extension} */ + let key + for (key in extension) { + if (own.call(extension, key)) { + if (key === 'canContainEols') { + const right = extension[key] + if (right) { + combined[key].push(...right) + } + } else if (key === 'transforms') { + const right = extension[key] + if (right) { + combined[key].push(...right) + } + } else if (key === 'enter' || key === 'exit') { + const right = extension[key] + if (right) { + Object.assign(combined[key], right) + } + } + } + } +} + +/** @type {OnEnterError} */ +function defaultOnError(left, right) { + if (left) { + throw new Error( + 'Cannot close `' + + left.type + + '` (' + + stringifyPosition({ + start: left.start, + end: left.end + }) + + '): a different token (`' + + right.type + + '`, ' + + stringifyPosition({ + start: right.start, + end: right.end + }) + + ') is open' + ) + } else { + throw new Error( + 'Cannot close document, a token (`' + + right.type + + '`, ' + + stringifyPosition({ + start: right.start, + end: right.end + }) + + ') is still open' + ) + } +} diff --git a/_extensions/d2/node_modules/mdast-util-from-markdown/license b/_extensions/d2/node_modules/mdast-util-from-markdown/license new file mode 100644 index 00000000..39372356 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-from-markdown/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2020 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and 
associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/_extensions/d2/node_modules/mdast-util-from-markdown/package.json b/_extensions/d2/node_modules/mdast-util-from-markdown/package.json new file mode 100644 index 00000000..ab0aa159 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-from-markdown/package.json @@ -0,0 +1,120 @@ +{ + "name": "mdast-util-from-markdown", + "version": "1.3.0", + "description": "mdast utility to parse markdown", + "license": "MIT", + "keywords": [ + "unist", + "mdast", + "mdast-util", + "util", + "utility", + "markdown", + "markup", + "parse", + "syntax", + "tree", + "ast" + ], + "repository": "syntax-tree/mdast-util-from-markdown", + "bugs": "https://github.com/syntax-tree/mdast-util-from-markdown/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "dev/", + "lib/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "mdast-util-to-string": "^3.1.0", + "micromark": "^3.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-decode-string": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "unist-util-stringify-position": "^3.0.0", + "uvu": "^0.5.0" + }, + "devDependencies": { + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "commonmark.json": "^0.30.0", + "esbuild": "^0.17.0", + "gzip-size-cli": "^5.0.0", + "hast-util-from-html": "^1.0.0", + "hast-util-to-html": "^8.0.0", + "mdast-util-to-hast": "^12.0.0", + "micromark-build": "^1.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "terser": "^5.0.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.53.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && type-coverage && micromark-build && esbuild . --bundle --minify | terser | gzip-size --raw", + "format": "remark . -qfo && prettier . 
-w --loglevel warn && xo --fix", + "test-api": "node --conditions development test/index.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true, + "rules": { + "complexity": "off", + "n/file-extension-in-import": "off", + "unicorn/prefer-code-point": "off", + "unicorn/prefer-switch": "off", + "unicorn/prefer-node-protocol": "off" + }, + "overrides": [ + { + "files": "test/**/*.js", + "rules": { + "no-await-in-loop": "off" + } + } + ] + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/mdast-util-from-markdown/readme.md b/_extensions/d2/node_modules/mdast-util-from-markdown/readme.md new file mode 100644 index 00000000..bbf7adec --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-from-markdown/readme.md @@ -0,0 +1,544 @@ +# mdast-util-from-markdown + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +**[mdast][]** utility that turns markdown into a syntax tree. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`fromMarkdown(value[, encoding][, options])`](#frommarkdownvalue-encoding-options) + * [`CompileContext`](#compilecontext) + * [`CompileData`](#compiledata) + * [`Encoding`](#encoding) + * [`Extension`](#extension) + * [`Handle`](#handle) + * [`OnEnterError`](#onentererror) + * [`OnExitError`](#onexiterror) + * [`Options`](#options) + * [`Token`](#token) + * [`Transform`](#transform) + * [`Value`](#value) +* [List of extensions](#list-of-extensions) +* [Syntax](#syntax) +* [Syntax tree](#syntax-tree) +* [Types](#types) +* [Compatibility](#compatibility) +* [Security](#security) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This package is a utility that takes markdown input and turns it into an +[mdast][] syntax tree. + +This utility uses [`micromark`][micromark], which turns markdown into tokens, +and then turns those tokens into nodes. +This package is used inside [`remark-parse`][remark-parse], which focusses on +making it easier to transform content by abstracting these internals away. + +## When should I use this? + +If you want to handle syntax trees manually, use this. +When you *just* want to turn markdown into HTML, use [`micromark`][micromark] +instead. +For an easier time processing content, use the **[remark][]** ecosystem instead. + +You can combine this package with other packages to add syntax extensions to +markdown. +Notable examples that deeply integrate with this package are +[`mdast-util-gfm`][mdast-util-gfm], +[`mdast-util-mdx`][mdast-util-mdx], +[`mdast-util-frontmatter`][mdast-util-frontmatter], +[`mdast-util-math`][mdast-util-math], and +[`mdast-util-directive`][mdast-util-directive]. + +## Install + +This package is [ESM only][esm]. 
+In Node.js (version 14.14+ and 16.0+), install with [npm][]: + +```sh +npm install mdast-util-from-markdown +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {fromMarkdown} from 'https://esm.sh/mdast-util-from-markdown@1' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +Say we have the following markdown file `example.md`: + +```markdown +## Hello, *World*! +``` + +…and our module `example.js` looks as follows: + +```js +import fs from 'node:fs/promises' +import {fromMarkdown} from 'mdast-util-from-markdown' + +const doc = await fs.readFile('example.md') +const tree = fromMarkdown(doc) + +console.log(tree) +``` + +…now running `node example.js` yields (positional info removed for brevity): + +```js +{ + type: 'root', + children: [ + { + type: 'heading', + depth: 2, + children: [ + {type: 'text', value: 'Hello, '}, + {type: 'emphasis', children: [{type: 'text', value: 'World'}]}, + {type: 'text', value: '!'} + ] + } + ] +} +``` + +## API + +This package exports the identifier [`fromMarkdown`][api-frommarkdown]. +There is no default export. + +The export map supports the [`development` condition][development]. +Run `node --conditions development example.js` to get instrumented dev code. +Without this condition, production code is loaded. + +### `fromMarkdown(value[, encoding][, options])` + +Turn markdown into a syntax tree. + +###### Overloads + +* `(value: Value, encoding: Encoding, options?: Options) => Root` +* `(value: Value, options?: Options) => Root` + +###### Parameters + +* `value` ([`Value`][api-value]) + — markdown to parse +* `encoding` ([`Encoding`][api-encoding], default: `'utf8'`) + — [character encoding][character-encoding] for when `value` is + [`Buffer`][buffer] +* `options` ([`Options`][api-options], optional) + — configuration + +###### Returns + +mdast tree ([`Root`][root]). + +### `CompileContext` + +mdast compiler context (TypeScript type). + +###### Fields + +* `stack` ([`Array`][node]) + — stack of nodes +* `tokenStack` (`Array<[Token, OnEnterError | undefined]>`) + — stack of tokens +* `getData` (`(key: string) => unknown`) + — get data from the key/value store (see [`CompileData`][api-compiledata]) +* `setData` (`(key: string, value?: unknown) => void`) + — set data into the key/value store (see [`CompileData`][api-compiledata]) +* `buffer` (`() => void`) + — capture some of the output data +* `resume` (`() => string`) + — stop capturing and access the output data +* `enter` (`(node: Node, token: Token, onError?: OnEnterError) => Node`) + — enter a token +* `exit` (`(token: Token, onError?: OnExitError) => Node`) + — exit a token +* `sliceSerialize` (`(token: Token, expandTabs?: boolean) => string`) + — get the string value of a token +* `config` (`Required`) + — configuration + +### `CompileData` + +Interface of tracked data (TypeScript type). + +###### Type + +```ts +interface CompileData { /* see code */ } +``` + +When working on extensions that use more data, extend the corresponding +interface to register their types: + +```ts +declare module 'mdast-util-from-markdown' { + interface CompileData { + // Register a new field. + mathFlowInside?: boolean | undefined + } +} +``` + +### `Encoding` + +Encodings supported by the [`Buffer`][buffer] class (TypeScript type). + + + +See [`micromark`](https://github.com/micromark/micromark#api) for more info. + +###### Type + +```ts +type Encoding = 'utf8' | /* … */ +``` + +### `Extension` + +Change how markdown tokens from micromark are turned into mdast (TypeScript +type). 
+ +###### Properties + +* `canContainEols` (`Array`, optional) + — token types where line endings are used +* `enter` ([`Record`][api-handle], optional) + — opening handles +* `exit` ([`Record`][api-handle], optional) + — closing handles +* `transforms` ([`Array`][api-transform], optional) + — tree transforms + +### `Handle` + +Handle a token (TypeScript type). + +###### Parameters + +* `this` ([`CompileContext`][api-compilecontext]) + — context +* `token` ([`Token`][api-token]) + — current token + +###### Returns + +Nothing (`void`). + +### `OnEnterError` + +Handle the case where the `right` token is open, but it is closed (by the +`left` token) or because we reached the end of the document (TypeScript type). + +###### Parameters + +* `this` ([`CompileContext`][api-compilecontext]) + — context +* `left` ([`Token`][api-token] or `undefined`) + — left token +* `right` ([`Token`][api-token]) + — right token + +###### Returns + +Nothing (`void`). + +### `OnExitError` + +Handle the case where the `right` token is open but it is closed by +exiting the `left` token (TypeScript type). + +###### Parameters + +* `this` ([`CompileContext`][api-compilecontext]) + — context +* `left` ([`Token`][api-token]) + — left token +* `right` ([`Token`][api-token]) + — right token + +###### Returns + +Nothing (`void`). + +### `Options` + +Configuration (TypeScript type). + +###### Properties + +* `extensions` ([`Array`][micromark-extension], optional) + — micromark extensions to change how markdown is parsed +* `mdastExtensions` ([`Array>`][api-extension], + optional) + — extensions for this utility to change how tokens are turned into a tree + +### `Token` + +Token from micromark (TypeScript type). + + + +See [`micromark`](https://github.com/micromark/micromark#api) for more info. + +###### Type + +```ts +type Token = { /* … */ } +``` + +### `Transform` + +Extra transform, to change the AST afterwards (TypeScript type). + +###### Parameters + +* `tree` ([`Root`][root]) + — tree to transform + +###### Returns + +New tree ([`Root`][root]) or nothing (in which case the current tree is used). + +### `Value` + +Contents of the file (TypeScript type). + + + +See [`micromark`](https://github.com/micromark/micromark#api) for more info. 
+ +###### Type + +```ts +type Value = string | Uint8Array +``` + +## List of extensions + +* [`syntax-tree/mdast-util-directive`](https://github.com/syntax-tree/mdast-util-directive) + — directives +* [`syntax-tree/mdast-util-frontmatter`](https://github.com/syntax-tree/mdast-util-frontmatter) + — frontmatter (YAML, TOML, more) +* [`syntax-tree/mdast-util-gfm`](https://github.com/syntax-tree/mdast-util-gfm) + — GFM +* [`syntax-tree/mdast-util-gfm-autolink-literal`](https://github.com/syntax-tree/mdast-util-gfm-autolink-literal) + — GFM autolink literals +* [`syntax-tree/mdast-util-gfm-footnote`](https://github.com/syntax-tree/mdast-util-gfm-footnote) + — GFM footnotes +* [`syntax-tree/mdast-util-gfm-strikethrough`](https://github.com/syntax-tree/mdast-util-gfm-strikethrough) + — GFM strikethrough +* [`syntax-tree/mdast-util-gfm-table`](https://github.com/syntax-tree/mdast-util-gfm-table) + — GFM tables +* [`syntax-tree/mdast-util-gfm-task-list-item`](https://github.com/syntax-tree/mdast-util-gfm-task-list-item) + — GFM task list items +* [`syntax-tree/mdast-util-math`](https://github.com/syntax-tree/mdast-util-math) + — math +* [`syntax-tree/mdast-util-mdx`](https://github.com/syntax-tree/mdast-util-mdx) + — MDX +* [`syntax-tree/mdast-util-mdx-expression`](https://github.com/syntax-tree/mdast-util-mdx-expression) + — MDX expressions +* [`syntax-tree/mdast-util-mdx-jsx`](https://github.com/syntax-tree/mdast-util-mdx-jsx) + — MDX JSX +* [`syntax-tree/mdast-util-mdxjs-esm`](https://github.com/syntax-tree/mdast-util-mdxjs-esm) + — MDX ESM + +## Syntax + +Markdown is parsed according to CommonMark. +Extensions can add support for other syntax. +If you’re interested in extending markdown, +[more information is available in micromark’s readme][micromark-extend]. + +## Syntax tree + +The syntax tree is [mdast][]. + +## Types + +This package is fully typed with [TypeScript][]. +It exports the additional types [`CompileContext`][api-compilecontext], +[`CompileData`][api-compiledata], +[`Encoding`][api-encoding], +[`Extension`][api-extension], +[`Handle`][api-handle], +[`OnEnterError`][api-onentererror], +[`OnExitError`][api-onexiterror], +[`Options`][api-options], +[`Token`][api-token], +[`Transform`][api-transform], and +[`Value`][api-value]. + +## Compatibility + +Projects maintained by the unified collective are compatible with all maintained +versions of Node.js. +As of now, that is Node.js 14.14+ and 16.0+. +Our projects sometimes work with older versions, but this is not guaranteed. + +## Security + +As markdown is sometimes used for HTML, and improper use of HTML can open you up +to a [cross-site scripting (XSS)][xss] attack, use of `mdast-util-from-markdown` +can also be unsafe. +When going to HTML, use this utility in combination with +[`hast-util-sanitize`][hast-util-sanitize] to make the tree safe. + +## Related + +* [`syntax-tree/mdast-util-to-markdown`](https://github.com/syntax-tree/mdast-util-to-markdown) + — serialize mdast as markdown +* [`micromark/micromark`](https://github.com/micromark/micromark) + — parse markdown +* [`remarkjs/remark`](https://github.com/remarkjs/remark) + — process markdown + +## Contribute + +See [`contributing.md`][contributing] in [`syntax-tree/.github`][health] for +ways to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. 
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/syntax-tree/mdast-util-from-markdown/workflows/main/badge.svg + +[build]: https://github.com/syntax-tree/mdast-util-from-markdown/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/syntax-tree/mdast-util-from-markdown.svg + +[coverage]: https://codecov.io/github/syntax-tree/mdast-util-from-markdown + +[downloads-badge]: https://img.shields.io/npm/dm/mdast-util-from-markdown.svg + +[downloads]: https://www.npmjs.com/package/mdast-util-from-markdown + +[size-badge]: https://img.shields.io/bundlephobia/minzip/mdast-util-from-markdown.svg + +[size]: https://bundlephobia.com/result?p=mdast-util-from-markdown + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/syntax-tree/unist/discussions + +[npm]: https://docs.npmjs.com/cli/install + +[esmsh]: https://esm.sh + +[license]: license + +[author]: https://wooorm.com + +[health]: https://github.com/syntax-tree/.github + +[contributing]: https://github.com/syntax-tree/.github/blob/main/contributing.md + +[support]: https://github.com/syntax-tree/.github/blob/main/support.md + +[coc]: https://github.com/syntax-tree/.github/blob/main/code-of-conduct.md + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[typescript]: https://www.typescriptlang.org + +[mdast]: https://github.com/syntax-tree/mdast + +[node]: https://github.com/syntax-tree/mdast#nodes + +[mdast-util-gfm]: https://github.com/syntax-tree/mdast-util-gfm + +[mdast-util-mdx]: https://github.com/syntax-tree/mdast-util-mdx + +[mdast-util-frontmatter]: https://github.com/syntax-tree/mdast-util-frontmatter + +[mdast-util-math]: https://github.com/syntax-tree/mdast-util-math + +[mdast-util-directive]: https://github.com/syntax-tree/mdast-util-directive + +[root]: https://github.com/syntax-tree/mdast#root + +[character-encoding]: https://nodejs.org/api/buffer.html#buffer_buffers_and_character_encodings + +[buffer]: https://nodejs.org/api/buffer.html + +[xss]: https://en.wikipedia.org/wiki/Cross-site_scripting + +[hast-util-sanitize]: https://github.com/syntax-tree/hast-util-sanitize + +[micromark]: https://github.com/micromark/micromark + +[micromark-extension]: https://github.com/micromark/micromark#optionsextensions + +[micromark-extend]: https://github.com/micromark/micromark#extensions + +[remark]: https://github.com/remarkjs/remark + +[remark-parse]: https://github.com/remarkjs/remark/tree/main/packages/remark-parse + +[development]: https://nodejs.org/api/packages.html#packages_resolving_user_conditions + +[api-frommarkdown]: #frommarkdownvalue-encoding-options + +[api-compilecontext]: #compilecontext + +[api-compiledata]: #compiledata + +[api-encoding]: #encoding + +[api-extension]: #extension + +[api-handle]: #handle + +[api-onentererror]: #onentererror + +[api-onexiterror]: #onexiterror + +[api-options]: #options + +[api-token]: #token + +[api-transform]: #transform + +[api-value]: #value diff --git a/_extensions/d2/node_modules/mdast-util-phrasing/index.d.ts b/_extensions/d2/node_modules/mdast-util-phrasing/index.d.ts new file mode 100644 index 00000000..2627f17a --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-phrasing/index.d.ts @@ -0,0 +1 @@ +export {phrasing} from './lib/index.js' 
diff --git a/_extensions/d2/node_modules/mdast-util-phrasing/index.js b/_extensions/d2/node_modules/mdast-util-phrasing/index.js new file mode 100644 index 00000000..2627f17a --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-phrasing/index.js @@ -0,0 +1 @@ +export {phrasing} from './lib/index.js' diff --git a/_extensions/d2/node_modules/mdast-util-phrasing/lib/index.d.ts b/_extensions/d2/node_modules/mdast-util-phrasing/lib/index.d.ts new file mode 100644 index 00000000..e2435b8b --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-phrasing/lib/index.d.ts @@ -0,0 +1,12 @@ +/** + * Check if the given value is *phrasing content*. + * + * @param + * Thing to check, typically `Node`. + * @returns + * Whether `node` is phrasing content. + */ +export const phrasing: AssertPredicatePhrasing +export type PhrasingContent = import('mdast').PhrasingContent +export type AssertPredicatePhrasing = + import('unist-util-is').AssertPredicate diff --git a/_extensions/d2/node_modules/mdast-util-phrasing/lib/index.js b/_extensions/d2/node_modules/mdast-util-phrasing/lib/index.js new file mode 100644 index 00000000..b6d0f4a2 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-phrasing/lib/index.js @@ -0,0 +1,31 @@ +/** + * @typedef {import('mdast').PhrasingContent} PhrasingContent + * @typedef {import('unist-util-is').AssertPredicate} AssertPredicatePhrasing + */ + +import {convert} from 'unist-util-is' + +/** + * Check if the given value is *phrasing content*. + * + * @param + * Thing to check, typically `Node`. + * @returns + * Whether `value` is phrasing content. + */ +export const phrasing = /** @type {AssertPredicatePhrasing} */ ( + convert([ + 'break', + 'delete', + 'emphasis', + 'footnote', + 'footnoteReference', + 'image', + 'imageReference', + 'inlineCode', + 'link', + 'linkReference', + 'strong', + 'text' + ]) +) diff --git a/_extensions/d2/node_modules/mdast-util-phrasing/license b/_extensions/d2/node_modules/mdast-util-phrasing/license new file mode 100644 index 00000000..6883cb51 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-phrasing/license @@ -0,0 +1,23 @@ +(The MIT License) + +Copyright (c) 2017 Titus Wormer +Copyright (c) 2017 Victor Felder + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/_extensions/d2/node_modules/mdast-util-phrasing/package.json b/_extensions/d2/node_modules/mdast-util-phrasing/package.json new file mode 100644 index 00000000..c902220e --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-phrasing/package.json @@ -0,0 +1,78 @@ +{ + "name": "mdast-util-phrasing", + "version": "3.0.1", + "description": "mdast utility to check if a node is phrasing content", + "license": "MIT", + "keywords": [ + "unist", + "mdast", + "mdast=util", + "util", + "utility", + "markdown", + "phrasing" + ], + "repository": "syntax-tree/mdast-util-phrasing", + "bugs": "https://github.com/syntax-tree/mdast-util-phrasing/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Victor Felder (https://draft.li)", + "contributors": [ + "Victor Felder (https://draft.li)", + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "lib/", + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/mdast": "^3.0.0", + "unist-util-is": "^5.0.0" + }, + "devDependencies": { + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.53.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true + } +} diff --git a/_extensions/d2/node_modules/mdast-util-phrasing/readme.md b/_extensions/d2/node_modules/mdast-util-phrasing/readme.md new file mode 100644 index 00000000..d64cf7a9 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-phrasing/readme.md @@ -0,0 +1,195 @@ +# mdast-util-phrasing + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +[mdast][] utility to check if a node is phrasing content. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`phrasing(value)`](#phrasingvalue) +* [Types](#types) +* [Compatibility](#compatibility) +* [Security](#security) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This package is a tiny utility to check that a given [node][] is [phrasing +content][phrasing]. + +## When should I use this? + +This utility is typically useful if you’re making other utilities. +It uses [`unist-util-is`][unist-util-is], which you can use for your own checks. + +A different utility, [`hast-util-phrasing`][hast-util-phrasing], does the same +but on [hast][]. + +## Install + +This package is [ESM only][esm]. 
+In Node.js (version 14.14+ and 16.0+), install with [npm][]: + +```sh +npm install mdast-util-phrasing +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {phrasing} from 'https://esm.sh/mdast-util-phrasing@3' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {phrasing} from 'mdast-util-phrasing' + +phrasing({type: 'paragraph', children: [{type: 'text', value: 'Alpha'}]}) +// => false + +phrasing({type: 'strong', children: [{type: 'text', value: 'Delta'}]}) +// => true +``` + +## API + +This package exports the identifier [`phrasing`][api-phrasing]. +There is no default export. + +### `phrasing(value)` + +Check if the given value is *[phrasing content][phrasing]*. + +###### Parameters + +* `value` (`unknown`) + — thing to check, typically [`Node`][node] + +###### Returns + +Whether `value` is phrasing content (`boolean`). + +## Types + +This package is fully typed with [TypeScript][]. +It does not export extra types. + +## Compatibility + +Projects maintained by the unified collective are compatible with all maintained +versions of Node.js. +As of now, that is Node.js 14.14+ and 16.0+. +Our projects sometimes work with older versions, but this is not guaranteed. + +## Security + +Use of `mdast-util-phrasing` does not involve **[hast][]**, user content, or +change the tree, so there are no openings for [cross-site scripting (XSS)][xss] +attacks. + +## Related + +* [`hast-util-phrasing`](https://github.com/syntax-tree/hast-util-phrasing) + — check if a hast node is phrasing content +* [`unist-util-is`](https://github.com/syntax-tree/unist-util-is) + — check if a node passes a test + +## Contribute + +See [`contributing.md`][contributing] in [`syntax-tree/.github`][health] for +ways to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. 
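+
+As a loose sketch of the “making other utilities” case above (assuming
+`mdast-util-from-markdown` is also installed, purely to produce example
+input), `phrasing` can drive a simple tree walk:
+
+```js
+import {fromMarkdown} from 'mdast-util-from-markdown'
+import {phrasing} from 'mdast-util-phrasing'
+
+const tree = fromMarkdown('Some *emphasis* and `code`.')
+
+// Recursively collect the types of all phrasing nodes in the tree.
+function phrasingTypes(node) {
+  const own = phrasing(node) ? [node.type] : []
+  const children =
+    'children' in node ? node.children.flatMap(phrasingTypes) : []
+  return [...own, ...children]
+}
+
+console.log(phrasingTypes(tree))
+// => ['text', 'emphasis', 'text', 'text', 'inlineCode', 'text']
+```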
+ +## License + +[MIT][license] © [Victor Felder][author] + + + +[build-badge]: https://github.com/syntax-tree/mdast-util-phrasing/workflows/main/badge.svg + +[build]: https://github.com/syntax-tree/mdast-util-phrasing/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/syntax-tree/mdast-util-phrasing.svg + +[coverage]: https://codecov.io/github/syntax-tree/mdast-util-phrasing + +[downloads-badge]: https://img.shields.io/npm/dm/mdast-util-phrasing.svg + +[downloads]: https://www.npmjs.com/package/mdast-util-phrasing + +[size-badge]: https://img.shields.io/bundlephobia/minzip/mdast-util-phrasing.svg + +[size]: https://bundlephobia.com/result?p=mdast-util-phrasing + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/syntax-tree/unist/discussions + +[npm]: https://docs.npmjs.com/cli/install + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[typescript]: https://www.typescriptlang.org + +[license]: license + +[author]: https://draft.li + +[health]: https://github.com/syntax-tree/.github + +[contributing]: https://github.com/syntax-tree/.github/blob/main/contributing.md + +[support]: https://github.com/syntax-tree/.github/blob/main/support.md + +[coc]: https://github.com/syntax-tree/.github/blob/main/code-of-conduct.md + +[xss]: https://en.wikipedia.org/wiki/Cross-site_scripting + +[hast]: https://github.com/syntax-tree/hast + +[mdast]: https://github.com/syntax-tree/mdast + +[node]: https://github.com/syntax-tree/mdast#nodes + +[phrasing]: https://github.com/syntax-tree/mdast#phrasingcontent + +[unist-util-is]: https://github.com/syntax-tree/unist-util-is + +[hast-util-phrasing]: https://github.com/syntax-tree/hast-util-phrasing + +[api-phrasing]: #phrasingvalue diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/index.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/index.d.ts new file mode 100644 index 00000000..d1610b81 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/index.d.ts @@ -0,0 +1,327 @@ +import type {Info, State} from './lib/types.js' + +/** + * Interface of registered constructs. + * + * When working on extensions that use new constructs, extend the corresponding + * interface to register its name: + * + * ```ts + * declare module 'mdast-util-to-markdown' { + * interface ConstructNameMap { + * // Register a new construct name (value is used, key should match it). + * gfmStrikethrough: 'gfmStrikethrough' + * } + * } + * ``` + */ +// eslint-disable-next-line @typescript-eslint/consistent-type-definitions +export interface ConstructNameMap { + /** + * Whole autolink. + * + * ```markdown + * > | and + * ^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^ + * ``` + */ + autolink: 'autolink' + /** + * Whole block quote. + * + * ```markdown + * > | > a + * ^^^ + * > | b + * ^ + * ``` + */ + blockquote: 'blockquote' + /** + * Whole code (indented). + * + * ```markdown + * ␠␠␠␠console.log(1) + * ^^^^^^^^^^^^^^^^^^ + * ``` + */ + codeIndented: 'codeIndented' + /** + * Whole code (fenced). + * + * ````markdown + * > | ```js + * ^^^^^ + * > | console.log(1) + * ^^^^^^^^^^^^^^ + * > | ``` + * ^^^ + * ```` + */ + codeFenced: 'codeFenced' + /** + * Code (fenced) language, when fenced with grave accents. 
+ * + * ````markdown + * > | ```js + * ^^ + * | console.log(1) + * | ``` + * ```` + */ + codeFencedLangGraveAccent: 'codeFencedLangGraveAccent' + /** + * Code (fenced) language, when fenced with tildes. + * + * ````markdown + * > | ~~~js + * ^^ + * | console.log(1) + * | ~~~ + * ```` + */ + codeFencedLangTilde: 'codeFencedLangTilde' + /** + * Code (fenced) meta string, when fenced with grave accents. + * + * ````markdown + * > | ```js eval + * ^^^^ + * | console.log(1) + * | ``` + * ```` + */ + codeFencedMetaGraveAccent: 'codeFencedMetaGraveAccent' + /** + * Code (fenced) meta string, when fenced with tildes. + * + * ````markdown + * > | ~~~js eval + * ^^^^ + * | console.log(1) + * | ~~~ + * ```` + */ + codeFencedMetaTilde: 'codeFencedMetaTilde' + /** + * Whole definition. + * + * ```markdown + * > | [a]: b "c" + * ^^^^^^^^^^ + * ``` + */ + definition: 'definition' + /** + * Destination (literal) (occurs in definition, image, link). + * + * ```markdown + * > | [a]: "c" + * ^^^ + * > | a ![b]( "d") e + * ^^^ + * ``` + */ + destinationLiteral: 'destinationLiteral' + /** + * Destination (raw) (occurs in definition, image, link). + * + * ```markdown + * > | [a]: b "c" + * ^ + * > | a ![b](c "d") e + * ^ + * ``` + */ + destinationRaw: 'destinationRaw' + /** + * Emphasis. + * + * ```markdown + * > | *a* + * ^^^ + * ``` + */ + emphasis: 'emphasis' + /** + * Whole heading (atx). + * + * ```markdown + * > | # alpha + * ^^^^^^^ + * ``` + */ + headingAtx: 'headingAtx' + /** + * Whole heading (setext). + * + * ```markdown + * > | alpha + * ^^^^^ + * > | ===== + * ^^^^^ + * ``` + */ + headingSetext: 'headingSetext' + /** + * Whole image. + * + * ```markdown + * > | ![a](b) + * ^^^^^^^ + * > | ![c] + * ^^^^ + * ``` + */ + image: 'image' + /** + * Whole image reference. + * + * ```markdown + * > | ![a] + * ^^^^ + * ``` + */ + imageReference: 'imageReference' + /** + * Label (occurs in definitions, image reference, image, link reference, + * link). + * + * ```markdown + * > | [a]: b "c" + * ^^^ + * > | a [b] c + * ^^^ + * > | a ![b][c] d + * ^^^^ + * > | a [b](c) d + * ^^^ + * ``` + */ + label: 'label' + /** + * Whole link. + * + * ```markdown + * > | [a](b) + * ^^^^^^ + * > | [c] + * ^^^ + * ``` + */ + link: 'link' + /** + * Whole link reference. + * + * ```markdown + * > | [a] + * ^^^ + * ``` + */ + linkReference: 'linkReference' + /** + * List. + * + * ```markdown + * > | * a + * ^^^ + * > | 1. b + * ^^^^ + * ``` + */ + list: 'list' + /** + * List item. + * + * ```markdown + * > | * a + * ^^^ + * > | 1. b + * ^^^^ + * ``` + */ + listItem: 'listItem' + /** + * Paragraph. + * + * ```markdown + * > | a b + * ^^^ + * > | c. + * ^^ + * ``` + */ + paragraph: 'paragraph' + /** + * Phrasing (occurs in headings, paragraphs, etc). + * + * ```markdown + * > | a + * ^ + * ``` + */ + phrasing: 'phrasing' + /** + * Reference (occurs in image, link). + * + * ```markdown + * > | [a][] + * ^^ + * ``` + */ + reference: 'reference' + /** + * Strong. + * + * ```markdown + * > | **a** + * ^^^^^ + * ``` + */ + strong: 'strong' + /** + * Title using single quotes (occurs in definition, image, link). + * + * ```markdown + * > | [a](b 'c') + * ^^^ + * ``` + */ + titleApostrophe: 'titleApostrophe' + /** + * Title using double quotes (occurs in definition, image, link). + * + * ```markdown + * > | [a](b "c") + * ^^^ + * ``` + */ + titleQuote: 'titleQuote' +} + +/** + * Construct names for things generated by `mdast-util-to-markdown`. 
+ * + * This is an enum of strings, each being a semantic label, useful to know when + * serializing whether we’re for example in a double (`"`) or single (`'`) + * quoted title. + */ +export type ConstructName = ConstructNameMap[keyof ConstructNameMap] + +export {toMarkdown} from './lib/index.js' +export {handle as defaultHandlers} from './lib/handle/index.js' +export type { + Handle, + Handlers, + Info, + Join, + Map, + Options, + SafeConfig, + State, + Tracker, + Unsafe +} from './lib/types.js' +// Deprecated. +export type SafeOptions = Info +export type Context = State diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/index.js b/_extensions/d2/node_modules/mdast-util-to-markdown/index.js new file mode 100644 index 00000000..45ea7c72 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/index.js @@ -0,0 +1,2 @@ +export {toMarkdown} from './lib/index.js' +export {handle as defaultHandlers} from './lib/handle/index.js' diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/configure.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/configure.d.ts new file mode 100644 index 00000000..b5082a00 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/configure.d.ts @@ -0,0 +1,12 @@ +/** + * @typedef {import('./types.js').Options} Options + * @typedef {import('./types.js').State} State + */ +/** + * @param {State} base + * @param {Options} extension + * @returns {State} + */ +export function configure(base: State, extension: Options): State +export type Options = import('./types.js').Options +export type State = import('./types.js').State diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/configure.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/configure.js new file mode 100644 index 00000000..71d0415f --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/configure.js @@ -0,0 +1,39 @@ +/** + * @typedef {import('./types.js').Options} Options + * @typedef {import('./types.js').State} State + */ + +/** + * @param {State} base + * @param {Options} extension + * @returns {State} + */ +export function configure(base, extension) { + let index = -1 + /** @type {keyof Options} */ + let key + + // First do subextensions. + if (extension.extensions) { + while (++index < extension.extensions.length) { + configure(base, extension.extensions[index]) + } + } + + for (key in extension) { + if (key === 'extensions') { + // Empty. + } else if (key === 'unsafe' || key === 'join') { + /* c8 ignore next 2 */ + // @ts-expect-error: hush. + base[key] = [...(base[key] || []), ...(extension[key] || [])] + } else if (key === 'handlers') { + base[key] = Object.assign(base[key], extension[key] || {}) + } else { + // @ts-expect-error: hush. 
+ base.options[key] = extension[key] + } + } + + return base +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/blockquote.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/blockquote.d.ts new file mode 100644 index 00000000..92a67184 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/blockquote.d.ts @@ -0,0 +1,25 @@ +/** + * @typedef {import('mdast').Blockquote} Blockquote + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + * @typedef {import('../types.js').Map} Map + */ +/** + * @param {Blockquote} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function blockquote( + node: Blockquote, + _: Parent | undefined, + state: State, + info: Info +): string +export type Blockquote = import('mdast').Blockquote +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info +export type Map = import('../types.js').Map diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/blockquote.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/blockquote.js new file mode 100644 index 00000000..06e09827 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/blockquote.js @@ -0,0 +1,32 @@ +/** + * @typedef {import('mdast').Blockquote} Blockquote + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + * @typedef {import('../types.js').Map} Map + */ + +/** + * @param {Blockquote} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function blockquote(node, _, state, info) { + const exit = state.enter('blockquote') + const tracker = state.createTracker(info) + tracker.move('> ') + tracker.shift(2) + const value = state.indentLines( + state.containerFlow(node, tracker.current()), + map + ) + exit() + return value +} + +/** @type {Map} */ +function map(line, _, blank) { + return '>' + (blank ? 
'' : ' ') + line +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/break.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/break.d.ts new file mode 100644 index 00000000..85e6772a --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/break.d.ts @@ -0,0 +1,17 @@ +/** + * @param {Break} _ + * @param {Parent | undefined} _1 + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function hardBreak( + _: Break, + _1: Parent | undefined, + state: State, + info: Info +): string +export type Break = import('mdast').Break +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/break.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/break.js new file mode 100644 index 00000000..140952a6 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/break.js @@ -0,0 +1,32 @@ +/** + * @typedef {import('mdast').Break} Break + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +import {patternInScope} from '../util/pattern-in-scope.js' + +/** + * @param {Break} _ + * @param {Parent | undefined} _1 + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function hardBreak(_, _1, state, info) { + let index = -1 + + while (++index < state.unsafe.length) { + // If we can’t put eols in this construct (setext headings, tables), use a + // space instead. + if ( + state.unsafe[index].character === '\n' && + patternInScope(state.stack, state.unsafe[index]) + ) { + return /[ \t]/.test(info.before) ? 
'' : ' ' + } + } + + return '\\\n' +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/code.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/code.d.ts new file mode 100644 index 00000000..41327e10 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/code.d.ts @@ -0,0 +1,18 @@ +/** + * @param {Code} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function code( + node: Code, + _: Parent | undefined, + state: State, + info: Info +): string +export type Code = import('mdast').Code +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info +export type Map = import('../types.js').Map diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/code.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/code.js new file mode 100644 index 00000000..75ddd030 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/code.js @@ -0,0 +1,78 @@ +/** + * @typedef {import('mdast').Code} Code + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + * @typedef {import('../types.js').Map} Map + */ + +import {longestStreak} from 'longest-streak' +import {formatCodeAsIndented} from '../util/format-code-as-indented.js' +import {checkFence} from '../util/check-fence.js' + +/** + * @param {Code} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function code(node, _, state, info) { + const marker = checkFence(state) + const raw = node.value || '' + const suffix = marker === '`' ? 'GraveAccent' : 'Tilde' + + if (formatCodeAsIndented(node, state)) { + const exit = state.enter('codeIndented') + const value = state.indentLines(raw, map) + exit() + return value + } + + const tracker = state.createTracker(info) + const sequence = marker.repeat(Math.max(longestStreak(raw, marker) + 1, 3)) + const exit = state.enter('codeFenced') + let value = tracker.move(sequence) + + if (node.lang) { + const subexit = state.enter(`codeFencedLang${suffix}`) + value += tracker.move( + state.safe(node.lang, { + before: value, + after: ' ', + encode: ['`'], + ...tracker.current() + }) + ) + subexit() + } + + if (node.lang && node.meta) { + const subexit = state.enter(`codeFencedMeta${suffix}`) + value += tracker.move(' ') + value += tracker.move( + state.safe(node.meta, { + before: value, + after: '\n', + encode: ['`'], + ...tracker.current() + }) + ) + subexit() + } + + value += tracker.move('\n') + + if (raw) { + value += tracker.move(raw + '\n') + } + + value += tracker.move(sequence) + exit() + return value +} + +/** @type {Map} */ +function map(line, _, blank) { + return (blank ? 
'' : ' ') + line +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/definition.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/definition.d.ts new file mode 100644 index 00000000..8f91d872 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/definition.d.ts @@ -0,0 +1,17 @@ +/** + * @param {Definition} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function definition( + node: Definition, + _: Parent | undefined, + state: State, + info: Info +): string +export type Definition = import('mdast').Definition +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/definition.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/definition.js new file mode 100644 index 00000000..b55224d4 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/definition.js @@ -0,0 +1,78 @@ +/** + * @typedef {import('mdast').Definition} Definition + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +import {checkQuote} from '../util/check-quote.js' + +/** + * @param {Definition} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function definition(node, _, state, info) { + const quote = checkQuote(state) + const suffix = quote === '"' ? 'Quote' : 'Apostrophe' + const exit = state.enter('definition') + let subexit = state.enter('label') + const tracker = state.createTracker(info) + let value = tracker.move('[') + value += tracker.move( + state.safe(state.associationId(node), { + before: value, + after: ']', + ...tracker.current() + }) + ) + value += tracker.move(']: ') + + subexit() + + if ( + // If there’s no url, or… + !node.url || + // If there are control characters or whitespace. + /[\0- \u007F]/.test(node.url) + ) { + subexit = state.enter('destinationLiteral') + value += tracker.move('<') + value += tracker.move( + state.safe(node.url, {before: value, after: '>', ...tracker.current()}) + ) + value += tracker.move('>') + } else { + // No whitespace, raw is prettier. + subexit = state.enter('destinationRaw') + value += tracker.move( + state.safe(node.url, { + before: value, + after: node.title ? 
' ' : '\n', + ...tracker.current() + }) + ) + } + + subexit() + + if (node.title) { + subexit = state.enter(`title${suffix}`) + value += tracker.move(' ' + quote) + value += tracker.move( + state.safe(node.title, { + before: value, + after: quote, + ...tracker.current() + }) + ) + value += tracker.move(quote) + subexit() + } + + exit() + + return value +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/emphasis.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/emphasis.d.ts new file mode 100644 index 00000000..011debe7 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/emphasis.d.ts @@ -0,0 +1,32 @@ +/** + * @param {Emphasis} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function emphasis( + node: Emphasis, + _: Parent | undefined, + state: State, + info: Info +): string +export namespace emphasis { + export {emphasisPeek as peek} +} +export type Emphasis = import('mdast').Emphasis +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info +/** + * @param {Emphasis} _ + * @param {Parent | undefined} _1 + * @param {State} state + * @returns {string} + */ +declare function emphasisPeek( + _: Emphasis, + _1: Parent | undefined, + state: State +): string +export {} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/emphasis.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/emphasis.js new file mode 100644 index 00000000..8d3c6661 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/emphasis.js @@ -0,0 +1,48 @@ +/** + * @typedef {import('mdast').Emphasis} Emphasis + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +import {checkEmphasis} from '../util/check-emphasis.js' + +emphasis.peek = emphasisPeek + +// To do: there are cases where emphasis cannot “form” depending on the +// previous or next character of sequences. +// There’s no way around that though, except for injecting zero-width stuff. +// Do we need to safeguard against that? 
+/** + * @param {Emphasis} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function emphasis(node, _, state, info) { + const marker = checkEmphasis(state) + const exit = state.enter('emphasis') + const tracker = state.createTracker(info) + let value = tracker.move(marker) + value += tracker.move( + state.containerPhrasing(node, { + before: value, + after: marker, + ...tracker.current() + }) + ) + value += tracker.move(marker) + exit() + return value +} + +/** + * @param {Emphasis} _ + * @param {Parent | undefined} _1 + * @param {State} state + * @returns {string} + */ +function emphasisPeek(_, _1, state) { + return state.options.emphasis || '*' +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/heading.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/heading.d.ts new file mode 100644 index 00000000..79480ca3 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/heading.d.ts @@ -0,0 +1,17 @@ +/** + * @param {Heading} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function heading( + node: Heading, + _: Parent | undefined, + state: State, + info: Info +): string +export type Heading = import('mdast').Heading +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/heading.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/heading.js new file mode 100644 index 00000000..dd52c835 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/heading.js @@ -0,0 +1,80 @@ +/** + * @typedef {import('mdast').Heading} Heading + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +import {formatHeadingAsSetext} from '../util/format-heading-as-setext.js' + +/** + * @param {Heading} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function heading(node, _, state, info) { + const rank = Math.max(Math.min(6, node.depth || 1), 1) + const tracker = state.createTracker(info) + + if (formatHeadingAsSetext(node, state)) { + const exit = state.enter('headingSetext') + const subexit = state.enter('phrasing') + const value = state.containerPhrasing(node, { + ...tracker.current(), + before: '\n', + after: '\n' + }) + subexit() + exit() + + return ( + value + + '\n' + + (rank === 1 ? '=' : '-').repeat( + // The whole size… + value.length - + // Minus the position of the character after the last EOL (or + // 0 if there is none)… + (Math.max(value.lastIndexOf('\r'), value.lastIndexOf('\n')) + 1) + ) + ) + } + + const sequence = '#'.repeat(rank) + const exit = state.enter('headingAtx') + const subexit = state.enter('phrasing') + + // Note: for proper tracking, we should reset the output positions when there + // is no content returned, because then the space is not output. + // Practically, in that case, there is no content, so it doesn’t matter that + // we’ve tracked one too many characters. + tracker.move(sequence + ' ') + + let value = state.containerPhrasing(node, { + before: '# ', + after: '\n', + ...tracker.current() + }) + + if (/^[\t ]/.test(value)) { + // To do: what effect has the character reference on tracking? 
+ value = + '&#x' + + value.charCodeAt(0).toString(16).toUpperCase() + + ';' + + value.slice(1) + } + + value = value ? sequence + ' ' + value : sequence + + if (state.options.closeAtx) { + value += ' ' + sequence + } + + subexit() + exit() + + return value +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/html.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/html.d.ts new file mode 100644 index 00000000..4dcfb42f --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/html.d.ts @@ -0,0 +1,14 @@ +/** + * @param {HTML} node + * @returns {string} + */ +export function html(node: HTML): string +export namespace html { + export {htmlPeek as peek} +} +export type HTML = import('mdast').HTML +/** + * @returns {string} + */ +declare function htmlPeek(): string +export {} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/html.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/html.js new file mode 100644 index 00000000..34b0e688 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/html.js @@ -0,0 +1,20 @@ +/** + * @typedef {import('mdast').HTML} HTML + */ + +html.peek = htmlPeek + +/** + * @param {HTML} node + * @returns {string} + */ +export function html(node) { + return node.value || '' +} + +/** + * @returns {string} + */ +function htmlPeek() { + return '<' +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image-reference.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image-reference.d.ts new file mode 100644 index 00000000..e4b43a2f --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image-reference.d.ts @@ -0,0 +1,25 @@ +/** + * @param {ImageReference} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function imageReference( + node: ImageReference, + _: Parent | undefined, + state: State, + info: Info +): string +export namespace imageReference { + export {imageReferencePeek as peek} +} +export type ImageReference = import('mdast').ImageReference +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info +/** + * @returns {string} + */ +declare function imageReferencePeek(): string +export {} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image-reference.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image-reference.js new file mode 100644 index 00000000..f5f6c3d2 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image-reference.js @@ -0,0 +1,65 @@ +/** + * @typedef {import('mdast').ImageReference} ImageReference + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +imageReference.peek = imageReferencePeek + +/** + * @param {ImageReference} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function imageReference(node, _, state, info) { + const type = node.referenceType + const exit = state.enter('imageReference') + let subexit = state.enter('label') + const tracker = state.createTracker(info) + let value = tracker.move('![') + const alt = state.safe(node.alt, { + before: value, + after: ']', + ...tracker.current() + }) + value += tracker.move(alt + '][') + + 
subexit() + // Hide the fact that we’re in phrasing, because escapes don’t work. + const stack = state.stack + state.stack = [] + subexit = state.enter('reference') + // Note: for proper tracking, we should reset the output positions when we end + // up making a `shortcut` reference, because then there is no brace output. + // Practically, in that case, there is no content, so it doesn’t matter that + // we’ve tracked one too many characters. + const reference = state.safe(state.associationId(node), { + before: value, + after: ']', + ...tracker.current() + }) + subexit() + state.stack = stack + exit() + + if (type === 'full' || !alt || alt !== reference) { + value += tracker.move(reference + ']') + } else if (type === 'shortcut') { + // Remove the unwanted `[`. + value = value.slice(0, -1) + } else { + value += tracker.move(']') + } + + return value +} + +/** + * @returns {string} + */ +function imageReferencePeek() { + return '!' +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image.d.ts new file mode 100644 index 00000000..9bb8d1c0 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image.d.ts @@ -0,0 +1,25 @@ +/** + * @param {Image} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function image( + node: Image, + _: Parent | undefined, + state: State, + info: Info +): string +export namespace image { + export {imagePeek as peek} +} +export type Image = import('mdast').Image +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info +/** + * @returns {string} + */ +declare function imagePeek(): string +export {} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image.js new file mode 100644 index 00000000..e1aa03d9 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/image.js @@ -0,0 +1,84 @@ +/** + * @typedef {import('mdast').Image} Image + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +import {checkQuote} from '../util/check-quote.js' + +image.peek = imagePeek + +/** + * @param {Image} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function image(node, _, state, info) { + const quote = checkQuote(state) + const suffix = quote === '"' ? 'Quote' : 'Apostrophe' + const exit = state.enter('image') + let subexit = state.enter('label') + const tracker = state.createTracker(info) + let value = tracker.move('![') + value += tracker.move( + state.safe(node.alt, {before: value, after: ']', ...tracker.current()}) + ) + value += tracker.move('](') + + subexit() + + if ( + // If there’s no url but there is a title… + (!node.url && node.title) || + // If there are control characters or whitespace. + /[\0- \u007F]/.test(node.url) + ) { + subexit = state.enter('destinationLiteral') + value += tracker.move('<') + value += tracker.move( + state.safe(node.url, {before: value, after: '>', ...tracker.current()}) + ) + value += tracker.move('>') + } else { + // No whitespace, raw is prettier. 
+ subexit = state.enter('destinationRaw') + value += tracker.move( + state.safe(node.url, { + before: value, + after: node.title ? ' ' : ')', + ...tracker.current() + }) + ) + } + + subexit() + + if (node.title) { + subexit = state.enter(`title${suffix}`) + value += tracker.move(' ' + quote) + value += tracker.move( + state.safe(node.title, { + before: value, + after: quote, + ...tracker.current() + }) + ) + value += tracker.move(quote) + subexit() + } + + value += tracker.move(')') + exit() + + return value +} + +/** + * @returns {string} + */ +function imagePeek() { + return '!' +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/index.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/index.d.ts new file mode 100644 index 00000000..a437f6b1 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/index.d.ts @@ -0,0 +1,41 @@ +export namespace handle { + export {blockquote} + export {hardBreak as break} + export {code} + export {definition} + export {emphasis} + export {hardBreak} + export {heading} + export {html} + export {image} + export {imageReference} + export {inlineCode} + export {link} + export {linkReference} + export {list} + export {listItem} + export {paragraph} + export {root} + export {strong} + export {text} + export {thematicBreak} +} +import {blockquote} from './blockquote.js' +import {hardBreak} from './break.js' +import {code} from './code.js' +import {definition} from './definition.js' +import {emphasis} from './emphasis.js' +import {heading} from './heading.js' +import {html} from './html.js' +import {image} from './image.js' +import {imageReference} from './image-reference.js' +import {inlineCode} from './inline-code.js' +import {link} from './link.js' +import {linkReference} from './link-reference.js' +import {list} from './list.js' +import {listItem} from './list-item.js' +import {paragraph} from './paragraph.js' +import {root} from './root.js' +import {strong} from './strong.js' +import {text} from './text.js' +import {thematicBreak} from './thematic-break.js' diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/index.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/index.js new file mode 100644 index 00000000..f03686a3 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/index.js @@ -0,0 +1,45 @@ +import {blockquote} from './blockquote.js' +import {hardBreak} from './break.js' +import {code} from './code.js' +import {definition} from './definition.js' +import {emphasis} from './emphasis.js' +import {heading} from './heading.js' +import {html} from './html.js' +import {image} from './image.js' +import {imageReference} from './image-reference.js' +import {inlineCode} from './inline-code.js' +import {link} from './link.js' +import {linkReference} from './link-reference.js' +import {list} from './list.js' +import {listItem} from './list-item.js' +import {paragraph} from './paragraph.js' +import {root} from './root.js' +import {strong} from './strong.js' +import {text} from './text.js' +import {thematicBreak} from './thematic-break.js' + +/** + * Default (CommonMark) handlers. 
+ */ +export const handle = { + blockquote, + break: hardBreak, + code, + definition, + emphasis, + hardBreak, + heading, + html, + image, + imageReference, + inlineCode, + link, + linkReference, + list, + listItem, + paragraph, + root, + strong, + text, + thematicBreak +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/inline-code.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/inline-code.d.ts new file mode 100644 index 00000000..da33a12b --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/inline-code.d.ts @@ -0,0 +1,22 @@ +/** + * @param {InlineCode} node + * @param {Parent | undefined} _ + * @param {State} state + * @returns {string} + */ +export function inlineCode( + node: InlineCode, + _: Parent | undefined, + state: State +): string +export namespace inlineCode { + export {inlineCodePeek as peek} +} +export type InlineCode = import('mdast').InlineCode +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +/** + * @returns {string} + */ +declare function inlineCodePeek(): string +export {} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/inline-code.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/inline-code.js new file mode 100644 index 00000000..74d4beec --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/inline-code.js @@ -0,0 +1,79 @@ +/** + * @typedef {import('mdast').InlineCode} InlineCode + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + */ + +import {patternCompile} from '../util/pattern-compile.js' + +inlineCode.peek = inlineCodePeek + +/** + * @param {InlineCode} node + * @param {Parent | undefined} _ + * @param {State} state + * @returns {string} + */ +export function inlineCode(node, _, state) { + let value = node.value || '' + let sequence = '`' + let index = -1 + + // If there is a single grave accent on its own in the code, use a fence of + // two. + // If there are two in a row, use one. + while (new RegExp('(^|[^`])' + sequence + '([^`]|$)').test(value)) { + sequence += '`' + } + + // If this is not just spaces or eols (tabs don’t count), and either the + // first or last character are a space, eol, or tick, then pad with spaces. + if ( + /[^ \r\n]/.test(value) && + ((/^[ \r\n]/.test(value) && /[ \r\n]$/.test(value)) || /^`|`$/.test(value)) + ) { + value = ' ' + value + ' ' + } + + // We have a potential problem: certain characters after eols could result in + // blocks being seen. + // For example, if someone injected the string `'\n# b'`, then that would + // result in an ATX heading. + // We can’t escape characters in `inlineCode`, but because eols are + // transformed to spaces when going from markdown to HTML anyway, we can swap + // them out. + while (++index < state.unsafe.length) { + const pattern = state.unsafe[index] + const expression = patternCompile(pattern) + /** @type {RegExpExecArray | null} */ + let match + + // Only look for `atBreak`s. + // Btw: note that `atBreak` patterns will always start the regex at LF or + // CR. + if (!pattern.atBreak) continue + + while ((match = expression.exec(value))) { + let position = match.index + + // Support CRLF (patterns only look for one of the characters). 
+ if ( + value.charCodeAt(position) === 10 /* `\n` */ && + value.charCodeAt(position - 1) === 13 /* `\r` */ + ) { + position-- + } + + value = value.slice(0, position) + ' ' + value.slice(match.index + 1) + } + } + + return sequence + value + sequence +} + +/** + * @returns {string} + */ +function inlineCodePeek() { + return '`' +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link-reference.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link-reference.d.ts new file mode 100644 index 00000000..df193d8e --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link-reference.d.ts @@ -0,0 +1,25 @@ +/** + * @param {LinkReference} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function linkReference( + node: LinkReference, + _: Parent | undefined, + state: State, + info: Info +): string +export namespace linkReference { + export {linkReferencePeek as peek} +} +export type LinkReference = import('mdast').LinkReference +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info +/** + * @returns {string} + */ +declare function linkReferencePeek(): string +export {} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link-reference.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link-reference.js new file mode 100644 index 00000000..7fadca3c --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link-reference.js @@ -0,0 +1,65 @@ +/** + * @typedef {import('mdast').LinkReference} LinkReference + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +linkReference.peek = linkReferencePeek + +/** + * @param {LinkReference} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function linkReference(node, _, state, info) { + const type = node.referenceType + const exit = state.enter('linkReference') + let subexit = state.enter('label') + const tracker = state.createTracker(info) + let value = tracker.move('[') + const text = state.containerPhrasing(node, { + before: value, + after: ']', + ...tracker.current() + }) + value += tracker.move(text + '][') + + subexit() + // Hide the fact that we’re in phrasing, because escapes don’t work. + const stack = state.stack + state.stack = [] + subexit = state.enter('reference') + // Note: for proper tracking, we should reset the output positions when we end + // up making a `shortcut` reference, because then there is no brace output. + // Practically, in that case, there is no content, so it doesn’t matter that + // we’ve tracked one too many characters. + const reference = state.safe(state.associationId(node), { + before: value, + after: ']', + ...tracker.current() + }) + subexit() + state.stack = stack + exit() + + if (type === 'full' || !text || text !== reference) { + value += tracker.move(reference + ']') + } else if (type === 'shortcut') { + // Remove the unwanted `[`. 
+ value = value.slice(0, -1) + } else { + value += tracker.move(']') + } + + return value +} + +/** + * @returns {string} + */ +function linkReferencePeek() { + return '[' +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link.d.ts new file mode 100644 index 00000000..1f320424 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link.d.ts @@ -0,0 +1,33 @@ +/** + * @param {Link} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function link( + node: Link, + _: Parent | undefined, + state: State, + info: Info +): string +export namespace link { + export {linkPeek as peek} +} +export type Link = import('mdast').Link +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info +export type Exit = import('../types.js').Exit +/** + * @param {Link} node + * @param {Parent | undefined} _ + * @param {State} state + * @returns {string} + */ +declare function linkPeek( + node: Link, + _: Parent | undefined, + state: State +): string +export {} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link.js new file mode 100644 index 00000000..a057c0da --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/link.js @@ -0,0 +1,116 @@ +/** + * @typedef {import('mdast').Link} Link + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + * @typedef {import('../types.js').Exit} Exit + */ + +import {checkQuote} from '../util/check-quote.js' +import {formatLinkAsAutolink} from '../util/format-link-as-autolink.js' + +link.peek = linkPeek + +/** + * @param {Link} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function link(node, _, state, info) { + const quote = checkQuote(state) + const suffix = quote === '"' ? 'Quote' : 'Apostrophe' + const tracker = state.createTracker(info) + /** @type {Exit} */ + let exit + /** @type {Exit} */ + let subexit + + if (formatLinkAsAutolink(node, state)) { + // Hide the fact that we’re in phrasing, because escapes don’t work. + const stack = state.stack + state.stack = [] + exit = state.enter('autolink') + let value = tracker.move('<') + value += tracker.move( + state.containerPhrasing(node, { + before: value, + after: '>', + ...tracker.current() + }) + ) + value += tracker.move('>') + exit() + state.stack = stack + return value + } + + exit = state.enter('link') + subexit = state.enter('label') + let value = tracker.move('[') + value += tracker.move( + state.containerPhrasing(node, { + before: value, + after: '](', + ...tracker.current() + }) + ) + value += tracker.move('](') + subexit() + + if ( + // If there’s no url but there is a title… + (!node.url && node.title) || + // If there are control characters or whitespace. + /[\0- \u007F]/.test(node.url) + ) { + subexit = state.enter('destinationLiteral') + value += tracker.move('<') + value += tracker.move( + state.safe(node.url, {before: value, after: '>', ...tracker.current()}) + ) + value += tracker.move('>') + } else { + // No whitespace, raw is prettier. 
+ subexit = state.enter('destinationRaw') + value += tracker.move( + state.safe(node.url, { + before: value, + after: node.title ? ' ' : ')', + ...tracker.current() + }) + ) + } + + subexit() + + if (node.title) { + subexit = state.enter(`title${suffix}`) + value += tracker.move(' ' + quote) + value += tracker.move( + state.safe(node.title, { + before: value, + after: quote, + ...tracker.current() + }) + ) + value += tracker.move(quote) + subexit() + } + + value += tracker.move(')') + + exit() + return value +} + +/** + * @param {Link} node + * @param {Parent | undefined} _ + * @param {State} state + * @returns {string} + */ +function linkPeek(node, _, state) { + return formatLinkAsAutolink(node, state) ? '<' : '[' +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list-item.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list-item.d.ts new file mode 100644 index 00000000..34ef3684 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list-item.d.ts @@ -0,0 +1,18 @@ +/** + * @param {ListItem} node + * @param {Parent | undefined} parent + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function listItem( + node: ListItem, + parent: Parent | undefined, + state: State, + info: Info +): string +export type ListItem = import('mdast').ListItem +export type Map = import('../types.js').Map +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list-item.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list-item.js new file mode 100644 index 00000000..a76005d8 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list-item.js @@ -0,0 +1,65 @@ +/** + * @typedef {import('mdast').ListItem} ListItem + * @typedef {import('../types.js').Map} Map + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +import {checkBullet} from '../util/check-bullet.js' +import {checkListItemIndent} from '../util/check-list-item-indent.js' + +/** + * @param {ListItem} node + * @param {Parent | undefined} parent + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function listItem(node, parent, state, info) { + const listItemIndent = checkListItemIndent(state) + let bullet = state.bulletCurrent || checkBullet(state) + + // Add the marker value for ordered lists. + if (parent && parent.type === 'list' && parent.ordered) { + bullet = + (typeof parent.start === 'number' && parent.start > -1 + ? parent.start + : 1) + + (state.options.incrementListMarker === false + ? 0 + : parent.children.indexOf(node)) + + bullet + } + + let size = bullet.length + 1 + + if ( + listItemIndent === 'tab' || + (listItemIndent === 'mixed' && + ((parent && parent.type === 'list' && parent.spread) || node.spread)) + ) { + size = Math.ceil(size / 4) * 4 + } + + const tracker = state.createTracker(info) + tracker.move(bullet + ' '.repeat(size - bullet.length)) + tracker.shift(size) + const exit = state.enter('listItem') + const value = state.indentLines( + state.containerFlow(node, tracker.current()), + map + ) + exit() + + return value + + /** @type {Map} */ + function map(line, index, blank) { + if (index) { + return (blank ? '' : ' '.repeat(size)) + line + } + + return (blank ? 
bullet : bullet + ' '.repeat(size - bullet.length)) + line + } +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list.d.ts new file mode 100644 index 00000000..50c7807c --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list.d.ts @@ -0,0 +1,17 @@ +/** + * @param {List} node + * @param {Parent | undefined} parent + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function list( + node: List, + parent: Parent | undefined, + state: State, + info: Info +): string +export type List = import('mdast').List +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list.js new file mode 100644 index 00000000..f3d1e0fd --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/list.js @@ -0,0 +1,113 @@ +/** + * @typedef {import('mdast').List} List + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +import {checkBullet} from '../util/check-bullet.js' +import {checkBulletOther} from '../util/check-bullet-other.js' +import {checkBulletOrdered} from '../util/check-bullet-ordered.js' +import {checkBulletOrderedOther} from '../util/check-bullet-ordered-other.js' +import {checkRule} from '../util/check-rule.js' + +/** + * @param {List} node + * @param {Parent | undefined} parent + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function list(node, parent, state, info) { + const exit = state.enter('list') + const bulletCurrent = state.bulletCurrent + /** @type {string} */ + let bullet = node.ordered ? checkBulletOrdered(state) : checkBullet(state) + /** @type {string} */ + const bulletOther = node.ordered + ? checkBulletOrderedOther(state) + : checkBulletOther(state) + const bulletLastUsed = state.bulletLastUsed + let useDifferentMarker = false + + if ( + parent && + // Explicit `other` set. + (node.ordered + ? state.options.bulletOrderedOther + : state.options.bulletOther) && + bulletLastUsed && + bullet === bulletLastUsed + ) { + useDifferentMarker = true + } + + if (!node.ordered) { + const firstListItem = node.children ? node.children[0] : undefined + + // If there’s an empty first list item directly in two list items, + // we have to use a different bullet: + // + // ```markdown + // * - * + // ``` + // + // …because otherwise it would become one big thematic break. + if ( + // Bullet could be used as a thematic break marker: + (bullet === '*' || bullet === '-') && + // Empty first list item: + firstListItem && + (!firstListItem.children || !firstListItem.children[0]) && + // Directly in two other list items: + state.stack[state.stack.length - 1] === 'list' && + state.stack[state.stack.length - 2] === 'listItem' && + state.stack[state.stack.length - 3] === 'list' && + state.stack[state.stack.length - 4] === 'listItem' && + // That are each the first child. 
+ state.indexStack[state.indexStack.length - 1] === 0 && + state.indexStack[state.indexStack.length - 2] === 0 && + state.indexStack[state.indexStack.length - 3] === 0 + ) { + useDifferentMarker = true + } + + // If there’s a thematic break at the start of the first list item, + // we have to use a different bullet: + // + // ```markdown + // * --- + // ``` + // + // …because otherwise it would become one big thematic break. + if (checkRule(state) === bullet && firstListItem) { + let index = -1 + + while (++index < node.children.length) { + const item = node.children[index] + + if ( + item && + item.type === 'listItem' && + item.children && + item.children[0] && + item.children[0].type === 'thematicBreak' + ) { + useDifferentMarker = true + break + } + } + } + } + + if (useDifferentMarker) { + bullet = bulletOther + } + + state.bulletCurrent = bullet + const value = state.containerFlow(node, info) + state.bulletLastUsed = bullet + state.bulletCurrent = bulletCurrent + exit() + return value +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/paragraph.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/paragraph.d.ts new file mode 100644 index 00000000..77a91983 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/paragraph.d.ts @@ -0,0 +1,23 @@ +/** + * @typedef {import('mdast').Paragraph} Paragraph + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ +/** + * @param {Paragraph} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function paragraph( + node: Paragraph, + _: Parent | undefined, + state: State, + info: Info +): string +export type Paragraph = import('mdast').Paragraph +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/paragraph.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/paragraph.js new file mode 100644 index 00000000..5412ec16 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/paragraph.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('mdast').Paragraph} Paragraph + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +/** + * @param {Paragraph} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function paragraph(node, _, state, info) { + const exit = state.enter('paragraph') + const subexit = state.enter('phrasing') + const value = state.containerPhrasing(node, info) + subexit() + exit() + return value +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/root.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/root.d.ts new file mode 100644 index 00000000..e30b7d96 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/root.d.ts @@ -0,0 +1,17 @@ +/** + * @param {Root} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function root( + node: Root, + _: Parent | undefined, + state: State, + info: Info +): string +export type Root = import('mdast').Root +export type Parent = import('../types.js').Parent +export type 
State = import('../types.js').State +export type Info = import('../types.js').Info diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/root.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/root.js new file mode 100644 index 00000000..25839346 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/root.js @@ -0,0 +1,23 @@ +/** + * @typedef {import('mdast').Root} Root + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +import {phrasing} from 'mdast-util-phrasing' + +/** + * @param {Root} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function root(node, _, state, info) { + // Note: `html` nodes are ambiguous. + const hasPhrasing = node.children.some((d) => phrasing(d)) + const fn = hasPhrasing ? state.containerPhrasing : state.containerFlow + // @ts-expect-error: `root`s are supposed to have one type of content + return fn.call(state, node, info) +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/strong.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/strong.d.ts new file mode 100644 index 00000000..c175117a --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/strong.d.ts @@ -0,0 +1,32 @@ +/** + * @param {Strong} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function strong( + node: Strong, + _: Parent | undefined, + state: State, + info: Info +): string +export namespace strong { + export {strongPeek as peek} +} +export type Strong = import('mdast').Strong +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info +/** + * @param {Strong} _ + * @param {Parent | undefined} _1 + * @param {State} state + * @returns {string} + */ +declare function strongPeek( + _: Strong, + _1: Parent | undefined, + state: State +): string +export {} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/strong.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/strong.js new file mode 100644 index 00000000..a9b45f58 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/strong.js @@ -0,0 +1,48 @@ +/** + * @typedef {import('mdast').Strong} Strong + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +import {checkStrong} from '../util/check-strong.js' + +strong.peek = strongPeek + +// To do: there are cases where emphasis cannot “form” depending on the +// previous or next character of sequences. +// There’s no way around that though, except for injecting zero-width stuff. +// Do we need to safeguard against that? 
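+// For example (illustrative, with the default `*` marker), a `strong` node
+// whose only child is the text `hi` serializes as `**hi**`.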
+/** + * @param {Strong} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function strong(node, _, state, info) { + const marker = checkStrong(state) + const exit = state.enter('strong') + const tracker = state.createTracker(info) + let value = tracker.move(marker + marker) + value += tracker.move( + state.containerPhrasing(node, { + before: value, + after: marker, + ...tracker.current() + }) + ) + value += tracker.move(marker + marker) + exit() + return value +} + +/** + * @param {Strong} _ + * @param {Parent | undefined} _1 + * @param {State} state + * @returns {string} + */ +function strongPeek(_, _1, state) { + return state.options.strong || '*' +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/text.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/text.d.ts new file mode 100644 index 00000000..df8166cc --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/text.d.ts @@ -0,0 +1,23 @@ +/** + * @typedef {import('mdast').Text} Text + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ +/** + * @param {Text} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function text( + node: Text, + _: Parent | undefined, + state: State, + info: Info +): string +export type Text = import('mdast').Text +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type Info = import('../types.js').Info diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/text.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/text.js new file mode 100644 index 00000000..f7b182f2 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/text.js @@ -0,0 +1,17 @@ +/** + * @typedef {import('mdast').Text} Text + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Info} Info + */ + +/** + * @param {Text} node + * @param {Parent | undefined} _ + * @param {State} state + * @param {Info} info + * @returns {string} + */ +export function text(node, _, state, info) { + return state.safe(node.value, info) +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/thematic-break.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/thematic-break.d.ts new file mode 100644 index 00000000..d87d46e9 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/thematic-break.d.ts @@ -0,0 +1,14 @@ +/** + * @param {ThematicBreak} _ + * @param {Parent | undefined} _1 + * @param {State} state + * @returns {string} + */ +export function thematicBreak( + _: ThematicBreak, + _1: Parent | undefined, + state: State +): string +export type ThematicBreak = import('mdast').ThematicBreak +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/thematic-break.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/thematic-break.js new file mode 100644 index 00000000..b12bd7f2 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/handle/thematic-break.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('mdast').ThematicBreak} ThematicBreak + * @typedef 
{import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + */ + +import {checkRuleRepetition} from '../util/check-rule-repetition.js' +import {checkRule} from '../util/check-rule.js' + +/** + * @param {ThematicBreak} _ + * @param {Parent | undefined} _1 + * @param {State} state + * @returns {string} + */ +export function thematicBreak(_, _1, state) { + const value = ( + checkRule(state) + (state.options.ruleSpaces ? ' ' : '') + ).repeat(checkRuleRepetition(state)) + + return state.options.ruleSpaces ? value.slice(0, -1) : value +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/index.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/index.d.ts new file mode 100644 index 00000000..f58e3ed2 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/index.d.ts @@ -0,0 +1,25 @@ +/** + * Turn an mdast syntax tree into markdown. + * + * @param {Node} tree + * Tree to serialize. + * @param {Options} [options] + * Configuration (optional). + * @returns {string} + * Serialized markdown representing `tree`. + */ +export function toMarkdown( + tree: Node, + options?: import('./types.js').Options | undefined +): string +export type Enter = import('./types.js').Enter +export type Info = import('./types.js').Info +export type Join = import('./types.js').Join +export type FlowContent = import('./types.js').FlowContent +export type Node = import('./types.js').Node +export type Options = import('./types.js').Options +export type Parent = import('./types.js').Parent +export type PhrasingContent = import('./types.js').PhrasingContent +export type SafeConfig = import('./types.js').SafeConfig +export type State = import('./types.js').State +export type TrackFields = import('./types.js').TrackFields diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/index.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/index.js new file mode 100644 index 00000000..7f788716 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/index.js @@ -0,0 +1,188 @@ +/** + * @typedef {import('./types.js').Enter} Enter + * @typedef {import('./types.js').Info} Info + * @typedef {import('./types.js').Join} Join + * @typedef {import('./types.js').FlowContent} FlowContent + * @typedef {import('./types.js').Node} Node + * @typedef {import('./types.js').Options} Options + * @typedef {import('./types.js').Parent} Parent + * @typedef {import('./types.js').PhrasingContent} PhrasingContent + * @typedef {import('./types.js').SafeConfig} SafeConfig + * @typedef {import('./types.js').State} State + * @typedef {import('./types.js').TrackFields} TrackFields + */ + +import {zwitch} from 'zwitch' +import {configure} from './configure.js' +import {handle as handlers} from './handle/index.js' +import {join} from './join.js' +import {unsafe} from './unsafe.js' +import {association} from './util/association.js' +import {containerPhrasing} from './util/container-phrasing.js' +import {containerFlow} from './util/container-flow.js' +import {indentLines} from './util/indent-lines.js' +import {safe} from './util/safe.js' +import {track} from './util/track.js' + +/** + * Turn an mdast syntax tree into markdown. + * + * @param {Node} tree + * Tree to serialize. + * @param {Options} [options] + * Configuration (optional). + * @returns {string} + * Serialized markdown representing `tree`. 
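+ *
+ * For example (an illustrative sketch), serializing a tree that holds a
+ * single paragraph:
+ *
+ * ```js
+ * toMarkdown({
+ *   type: 'root',
+ *   children: [
+ *     {type: 'paragraph', children: [{type: 'text', value: 'hello'}]}
+ *   ]
+ * })
+ * // => 'hello\n'
+ * ```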
+ */ +export function toMarkdown(tree, options = {}) { + /** @type {State} */ + const state = { + enter, + indentLines, + associationId: association, + containerPhrasing: containerPhrasingBound, + containerFlow: containerFlowBound, + createTracker: track, + safe: safeBound, + stack: [], + unsafe: [], + join: [], + // @ts-expect-error: we’ll fill it next. + handlers: {}, + options: {}, + indexStack: [], + // @ts-expect-error: we’ll add `handle` later. + handle: undefined + } + + configure(state, {unsafe, join, handlers}) + configure(state, options) + + if (state.options.tightDefinitions) { + configure(state, {join: [joinDefinition]}) + } + + state.handle = zwitch('type', { + invalid, + unknown, + handlers: state.handlers + }) + + let result = state.handle(tree, undefined, state, { + before: '\n', + after: '\n', + now: {line: 1, column: 1}, + lineShift: 0 + }) + + if ( + result && + result.charCodeAt(result.length - 1) !== 10 && + result.charCodeAt(result.length - 1) !== 13 + ) { + result += '\n' + } + + return result + + /** @type {Enter} */ + function enter(name) { + state.stack.push(name) + return exit + + function exit() { + state.stack.pop() + } + } +} + +/** + * @param {unknown} value + * @returns {never} + */ +function invalid(value) { + throw new Error('Cannot handle value `' + value + '`, expected node') +} + +/** + * @param {unknown} node + * @returns {never} + */ +function unknown(node) { + // @ts-expect-error: fine. + throw new Error('Cannot handle unknown node `' + node.type + '`') +} + +/** @type {Join} */ +function joinDefinition(left, right) { + // No blank line between adjacent definitions. + if (left.type === 'definition' && left.type === right.type) { + return 0 + } +} + +/** + * Serialize the children of a parent that contains phrasing children. + * + * These children will be joined flush together. + * + * @this {State} + * Info passed around about the current state. + * @param {Parent & {children: Array}} parent + * Parent of flow nodes. + * @param {Info} info + * Info on where we are in the document we are generating. + * @returns {string} + * Serialized children, joined together. + */ +function containerPhrasingBound(parent, info) { + return containerPhrasing(parent, this, info) +} + +/** + * Serialize the children of a parent that contains flow children. + * + * These children will typically be joined by blank lines. + * What they are joined by exactly is defined by `Join` functions. + * + * @this {State} + * Info passed around about the current state. + * @param {Parent & {children: Array}} parent + * Parent of flow nodes. + * @param {TrackFields} info + * Info on where we are in the document we are generating. + * @returns {string} + * Serialized children, joined by (blank) lines. + */ +function containerFlowBound(parent, info) { + return containerFlow(parent, this, info) +} + +/** + * Make a string safe for embedding in markdown constructs. + * + * In markdown, almost all punctuation characters can, in certain cases, + * result in something. + * Whether they do is highly subjective to where they happen and in what + * they happen. + * + * To solve this, `mdast-util-to-markdown` tracks: + * + * * Characters before and after something; + * * What “constructs” we are in. + * + * This information is then used by this function to escape or encode + * special characters. + * + * @this {State} + * Info passed around about the current state. + * @param {string | null | undefined} value + * Raw value to make safe. + * @param {SafeConfig} config + * Configuration. 
+ * @returns {string} + * Serialized markdown safe for embedding. + */ +function safeBound(value, config) { + return safe(this, value, config) +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/join.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/join.d.ts new file mode 100644 index 00000000..bef5f730 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/join.d.ts @@ -0,0 +1,3 @@ +/** @type {Array} */ +export const join: Array +export type Join = import('./types.js').Join diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/join.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/join.js new file mode 100644 index 00000000..a1bbbd3d --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/join.js @@ -0,0 +1,51 @@ +/** + * @typedef {import('./types.js').Join} Join + */ + +import {formatCodeAsIndented} from './util/format-code-as-indented.js' +import {formatHeadingAsSetext} from './util/format-heading-as-setext.js' + +/** @type {Array} */ +export const join = [joinDefaults] + +/** @type {Join} */ +function joinDefaults(left, right, parent, state) { + // Indented code after list or another indented code. + if ( + right.type === 'code' && + formatCodeAsIndented(right, state) && + (left.type === 'list' || + (left.type === right.type && formatCodeAsIndented(left, state))) + ) { + return false + } + + // Two lists with the same marker. + if ( + left.type === 'list' && + left.type === right.type && + Boolean(left.ordered) === Boolean(right.ordered) && + !(left.ordered + ? state.options.bulletOrderedOther + : state.options.bulletOther) + ) { + return false + } + + // Join children of a list or an item. + // In which case, `parent` has a `spread` field. + if ('spread' in parent && typeof parent.spread === 'boolean') { + if ( + left.type === 'paragraph' && + // Two paragraphs. + (left.type === right.type || + right.type === 'definition' || + // Paragraph followed by a setext heading. + (right.type === 'heading' && formatHeadingAsSetext(right, state))) + ) { + return + } + + return parent.spread ? 1 : 0 + } +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/types.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/types.d.ts new file mode 100644 index 00000000..37dd6fd8 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/types.d.ts @@ -0,0 +1,466 @@ +export type UnistParent = import('unist').Parent +export type Point = import('unist').Point +export type Association = import('mdast').Association +export type Content = import('mdast').Content +export type ListContent = import('mdast').ListContent +export type PhrasingContent = import('mdast').PhrasingContent +export type Root = import('mdast').Root +export type TopLevelContent = import('mdast').TopLevelContent +export type ConstructName = import('../index.js').ConstructName +export type Node = Root | Content +export type Parent = Extract +export type FlowContent = TopLevelContent | ListContent +/** + * Info on where we are in the document we are generating. + */ +export type TrackFields = { + /** + * Current point. + */ + now: Point + /** + * Number of columns each line will be shifted by wrapping nodes. + */ + lineShift: number +} +/** + * Info on the characters that are around the current thing we are + * generating. + */ +export type SafeFields = { + /** + * Characters before this (guaranteed to be one, can be more). 
+ */ + before: string + /** + * Characters after this (guaranteed to be one, can be more). + */ + after: string +} +/** + * Info on the surrounding of the node that is serialized. + */ +export type Info = TrackFields & SafeFields +/** + * Get current tracked info. + */ +export type TrackCurrent = () => TrackFields +/** + * Define a relative increased line shift (the typical indent for lines). + */ +export type TrackShift = (value: number) => void +/** + * Move past some generated markdown. + */ +export type TrackMove = (value: string | null | undefined) => string +/** + * Track positional info in the output. + * + * This info isn’t used yet but such functionality will allow line wrapping, + * source maps, etc. + */ +export type Tracker = { + /** + * Get the current tracked info. + */ + current: TrackCurrent + /** + * Define an increased line shift (the typical indent for lines). + */ + shift: TrackShift + /** + * Move past some generated markdown. + */ + move: TrackMove +} +/** + * Track positional info in the output. + * + * This info isn’t used yet but such functionality will allow line wrapping, + * source maps, etc. + */ +export type CreateTracker = (info: TrackFields) => Tracker +/** + * Get an identifier from an association to match it to others. + * + * Associations are nodes that match to something else through an ID: + * . + * + * The `label` of an association is the string value: character escapes and + * references work, and casing is intact. + * The `identifier` is used to match one association to another: + * controversially, character escapes and references don’t work in this + * matching: `©` does not match `©`, and `\+` does not match `+`. + * + * But casing is ignored (and whitespace) is trimmed and collapsed: ` A\nb` + * matches `a b`. + * So, we do prefer the label when figuring out how we’re going to serialize: + * it has whitespace, casing, and we can ignore most useless character + * escapes and all character references. + */ +export type AssociationId = (node: Association) => string +/** + * Map function to pad a single line. + */ +export type Map = (value: string, line: number, blank: boolean) => string +/** + * Pad serialized markdown. + */ +export type IndentLines = (value: string, map: Map) => string +/** + * Serialize the children of a parent that contains phrasing children. + * + * These children will be joined flush together. + */ +export type ContainerPhrasing = ( + parent: Parent & { + children: Array + }, + info: Info +) => string +/** + * Serialize the children of a parent that contains flow children. + * + * These children will typically be joined by blank lines. + * What they are joined by exactly is defined by `Join` functions. + */ +export type ContainerFlow = ( + parent: Parent & { + children: Array + }, + info: TrackFields +) => string +/** + * Extra configuration for `safe` + */ +export type SafeEncodeFields = { + /** + * Extra characters that *must* be encoded (as character references) instead + * of escaped (character escapes). + * + * Only ASCII punctuation will use character escapes, so you never need to + * pass non-ASCII-punctuation here. + */ + encode?: Array | null | undefined +} +export type SafeConfig = SafeFields & SafeEncodeFields +/** + * Make a string safe for embedding in markdown constructs. + * + * In markdown, almost all punctuation characters can, in certain cases, + * result in something. + * Whether they do is highly subjective to where they happen and in what + * they happen. 
+ * + * To solve this, `mdast-util-to-markdown` tracks: + * + * * Characters before and after something; + * * What “constructs” we are in. + * + * This information is then used by this function to escape or encode + * special characters. + */ +export type Safe = ( + input: string | null | undefined, + config: SafeConfig +) => string +/** + * Enter something. + */ +export type Enter = (name: ConstructName) => Exit +/** + * Exit something. + */ +export type Exit = () => void +/** + * Info passed around about the current state. + */ +export type State = { + /** + * Stack of constructs we’re in. + */ + stack: Array + /** + * Positions of child nodes in their parents. + */ + indexStack: Array + /** + * Pad serialized markdown. + */ + indentLines: IndentLines + /** + * Get an identifier from an association to match it to others. + */ + associationId: AssociationId + /** + * Serialize the children of a parent that contains phrasing children. + */ + containerPhrasing: ContainerPhrasing + /** + * Serialize the children of a parent that contains flow children. + */ + containerFlow: ContainerFlow + /** + * Track positional info in the output. + */ + createTracker: CreateTracker + /** + * Serialize the children of a parent that contains flow children. + */ + safe: Safe + /** + * Enter a construct (returns a corresponding exit function). + */ + enter: Enter + /** + * Applied user configuration. + */ + options: Options + /** + * Applied unsafe patterns. + */ + unsafe: Array + /** + * Applied join handlers. + */ + join: Array + /** + * Call the configured handler for the given node. + */ + handle: Handle + /** + * Applied handlers. + */ + handlers: Handlers + /** + * List marker currently in use. + */ + bulletCurrent: string | undefined + /** + * List marker previously in use. + */ + bulletLastUsed: string | undefined +} +/** + * Handle a particular node. + */ +export type Handle = ( + node: any, + parent: Parent | undefined, + state: State, + Info: Info +) => string +/** + * Handle particular nodes. + * + * Each key is a node type, each value its corresponding handler. + */ +export type Handlers = Record +/** + * How to join two blocks. + * + * “Blocks” are typically joined by one blank line. + * Sometimes it’s nicer to have them flush next to each other, yet other + * times they cannot occur together at all. + * + * Join functions receive two adjacent siblings and their parent and what + * they return defines how many blank lines to use between them. + */ +export type Join = ( + left: Node, + right: Node, + parent: Parent, + state: State +) => boolean | null | undefined | void | number +/** + * Schema that defines when a character cannot occur. + */ +export type Unsafe = { + /** + * Single unsafe character. + */ + character: string + /** + * Constructs where this is bad. + */ + inConstruct?: ConstructName | Array | null | undefined + /** + * Constructs where this is fine again. + */ + notInConstruct?: ConstructName | Array | null | undefined + /** + * `character` is bad when this is before it (cannot be used together with + * `atBreak`). + */ + before?: string | null | undefined + /** + * `character` is bad when this is after it. + */ + after?: string | null | undefined + /** + * `character` is bad at a break (cannot be used together with `before`). + */ + atBreak?: boolean | null | undefined + /** + * The unsafe pattern (this whole object) compiled as a regex. + * + * This is internal and must not be defined. + */ + _compiled?: RegExp | null | undefined +} +/** + * Configuration (optional). 
+ */ +export type Options = { + /** + * Marker to use for bullets of items in unordered lists. + */ + bullet?: '-' | '*' | '+' | null | undefined + /** + * Marker to use in certain cases where the primary bullet doesn’t work. + * + * There are three cases where the primary bullet cannot be used: + * + * * When three list items are on their own, the last one is empty, and + * `bullet` is also a valid `rule`: `* - +`. + * This would turn into a thematic break if serialized with three primary + * bullets. + * As this is an edge case unlikely to appear in normal markdown, the + * last list item will be given a different bullet. + * * When a thematic break is the first child of one of the list items, and + * `bullet` is the same character as `rule`: `- ***`. + * This would turn into a single thematic break if serialized with + * primary bullets. + * As this is an edge case unlikely to appear in normal markdown this + * markup is always fixed, even if `bulletOther` is not passed + * * When two unordered lists appear next to each other: `* a\n- b`. + * CommonMark sees different bullets as different lists, but several + * markdown parsers parse it as one list. + * To solve for both, we instead inject an empty comment between the two + * lists: `* a\n\n* b`, but if `bulletOther` is given explicitly, + * it will be used instead + */ + bulletOther?: '-' | '*' | '+' | null | undefined + /** + * Marker to use for bullets of items in ordered lists. + */ + bulletOrdered?: '.' | ')' | null | undefined + /** + * Marker to use in certain cases where the primary bullet for ordered items + * doesn’t work. + * + * There is one case where the primary bullet for ordered items cannot be used: + * + * * When two ordered lists appear next to each other: `1. a\n2) b`. + * CommonMark added support for `)` as a marker, but other markdown + * parsers do not support it. + * To solve for both, we instead inject an empty comment between the two + * lists: `1. a\n\n1. b`, but if `bulletOrderedOther` is given + * explicitly, it will be used instead + */ + bulletOrderedOther?: '.' | ')' | null | undefined + /** + * Whether to add the same number of number signs (`#`) at the end of an ATX + * heading as the opening sequence. + */ + closeAtx?: boolean | null | undefined + /** + * Marker to use for emphasis. + */ + emphasis?: '_' | '*' | null | undefined + /** + * Marker to use for fenced code. + */ + fence?: '~' | '`' | null | undefined + /** + * Whether to use fenced code always. + * + * The default is to use fenced code if there is a language defined, if the + * code is empty, or if it starts or ends in blank lines. + */ + fences?: boolean | null | undefined + /** + * Whether to increment the counter of ordered lists items. + */ + incrementListMarker?: boolean | null | undefined + /** + * How to indent the content of list items. + * + * Either with the size of the bullet plus one space (when `'one'`), a tab + * stop (`'tab'`), or depending on the item and its parent list (`'mixed'`, + * uses `'one'` if the item and list are tight and `'tab'` otherwise). + */ + listItemIndent?: 'tab' | 'one' | 'mixed' | null | undefined + /** + * Marker to use for titles. + */ + quote?: '"' | "'" | null | undefined + /** + * Whether to always use resource links. + * + * The default is to use autolinks (``) when possible + * and resource links (`[text](url)`) otherwise. + */ + resourceLink?: boolean | null | undefined + /** + * Marker to use for thematic breaks. 
+ */ + rule?: '-' | '_' | '*' | null | undefined + /** + * Number of markers to use for thematic breaks. + */ + ruleRepetition?: number | null | undefined + /** + * Whether to add spaces between markers in thematic breaks. + */ + ruleSpaces?: boolean | null | undefined + /** + * Whether to use setext headings when possible. + * + * The default is to always use ATX headings (`# heading`) instead of setext + * headings (`heading\n=======`). + * Setext headings cannot be used for empty headings or headings with a rank + * of three or more. + */ + setext?: boolean | null | undefined + /** + * Marker to use for strong. + */ + strong?: '_' | '*' | null | undefined + /** + * Whether to join definitions without a blank line. + * + * The default is to add blank lines between any flow (“block”) construct. + * Turning this option on is a shortcut for a join function like so: + * + * ```js + * function joinTightDefinitions(left, right) { + * if (left.type === 'definition' && right.type === 'definition') { + * return 0 + * } + * } + * ``` + */ + tightDefinitions?: boolean | null | undefined + /** + * Handle particular nodes. + * + * Each key is a node type, each value its corresponding handler. + */ + handlers?: Partial | null | undefined + /** + * How to join blocks. + */ + join?: Array | null | undefined + /** + * Schemas that define when characters cannot occur. + */ + unsafe?: Array | null | undefined + /** + * List of extensions to include. + * + * Each `ToMarkdownExtension` is an object with the same interface as + * `Options` here. + */ + extensions?: Array | null | undefined +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/types.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/types.js new file mode 100644 index 00000000..ab0b9b5e --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/types.js @@ -0,0 +1,408 @@ +/** + * @typedef {import('unist').Parent} UnistParent + * @typedef {import('unist').Point} Point + * @typedef {import('mdast').Association} Association + * @typedef {import('mdast').Content} Content + * @typedef {import('mdast').ListContent} ListContent + * @typedef {import('mdast').PhrasingContent} PhrasingContent + * @typedef {import('mdast').Root} Root + * @typedef {import('mdast').TopLevelContent} TopLevelContent + * @typedef {import('../index.js').ConstructName} ConstructName + */ + +/** + * @typedef {Root | Content} Node + * @typedef {Extract} Parent + * @typedef {TopLevelContent | ListContent} FlowContent + * + * @typedef TrackFields + * Info on where we are in the document we are generating. + * @property {Point} now + * Current point. + * @property {number} lineShift + * Number of columns each line will be shifted by wrapping nodes. + * + * @typedef SafeFields + * Info on the characters that are around the current thing we are + * generating. + * @property {string} before + * Characters before this (guaranteed to be one, can be more). + * @property {string} after + * Characters after this (guaranteed to be one, can be more). + * + * @typedef {TrackFields & SafeFields} Info + * Info on the surrounding of the node that is serialized. + * + * @callback TrackCurrent + * Get current tracked info. + * @returns {TrackFields} + * Current tracked info. + * + * @callback TrackShift + * Define a relative increased line shift (the typical indent for lines). + * @param {number} value + * Relative increment in how much each line will be padded. + * @returns {void} + * Nothing. 
+ * + * @callback TrackMove + * Move past some generated markdown. + * @param {string | null | undefined} value + * Generated markdown. + * @returns {string} + * Given markdown. + * + * @typedef Tracker + * Track positional info in the output. + * + * This info isn’t used yet but such functionality will allow line wrapping, + * source maps, etc. + * @property {TrackCurrent} current + * Get the current tracked info. + * @property {TrackShift} shift + * Define an increased line shift (the typical indent for lines). + * @property {TrackMove} move + * Move past some generated markdown. + * + * @callback CreateTracker + * Track positional info in the output. + * + * This info isn’t used yet but such functionality will allow line wrapping, + * source maps, etc. + * @param {TrackFields} info + * Info on where we are in the document we are generating. + * @returns {Tracker} + * Tracker. + * + * @callback AssociationId + * Get an identifier from an association to match it to others. + * + * Associations are nodes that match to something else through an ID: + * . + * + * The `label` of an association is the string value: character escapes and + * references work, and casing is intact. + * The `identifier` is used to match one association to another: + * controversially, character escapes and references don’t work in this + * matching: `©` does not match `©`, and `\+` does not match `+`. + * + * But casing is ignored (and whitespace) is trimmed and collapsed: ` A\nb` + * matches `a b`. + * So, we do prefer the label when figuring out how we’re going to serialize: + * it has whitespace, casing, and we can ignore most useless character + * escapes and all character references. + * @param {Association} node + * Node that includes an association. + * @returns {string} + * ID. + * + * @callback Map + * Map function to pad a single line. + * @param {string} value + * A single line of serialized markdown. + * @param {number} line + * Line number relative to the fragment. + * @param {boolean} blank + * Whether the line is considered blank in markdown. + * @returns {string} + * Padded line. + * + * @callback IndentLines + * Pad serialized markdown. + * @param {string} value + * Whole fragment of serialized markdown. + * @param {Map} map + * Map function. + * @returns {string} + * Padded value. + * + * @callback ContainerPhrasing + * Serialize the children of a parent that contains phrasing children. + * + * These children will be joined flush together. + * @param {Parent & {children: Array}} parent + * Parent of flow nodes. + * @param {Info} info + * Info on where we are in the document we are generating. + * @returns {string} + * Serialized children, joined together. + * + * @callback ContainerFlow + * Serialize the children of a parent that contains flow children. + * + * These children will typically be joined by blank lines. + * What they are joined by exactly is defined by `Join` functions. + * @param {Parent & {children: Array}} parent + * Parent of flow nodes. + * @param {TrackFields} info + * Info on where we are in the document we are generating. + * @returns {string} + * Serialized children, joined by (blank) lines. + * + * @typedef SafeEncodeFields + * Extra configuration for `safe` + * @property {Array | null | undefined} [encode] + * Extra characters that *must* be encoded (as character references) instead + * of escaped (character escapes). + * + * Only ASCII punctuation will use character escapes, so you never need to + * pass non-ASCII-punctuation here. 
+ * + * @typedef {SafeFields & SafeEncodeFields} SafeConfig + * + * @callback Safe + * Make a string safe for embedding in markdown constructs. + * + * In markdown, almost all punctuation characters can, in certain cases, + * result in something. + * Whether they do is highly subjective to where they happen and in what + * they happen. + * + * To solve this, `mdast-util-to-markdown` tracks: + * + * * Characters before and after something; + * * What “constructs” we are in. + * + * This information is then used by this function to escape or encode + * special characters. + * @param {string | null | undefined} input + * Raw value to make safe. + * @param {SafeConfig} config + * Configuration. + * @returns {string} + * Serialized markdown safe for embedding. + * + * @callback Enter + * Enter something. + * @param {ConstructName} name + * Label, more similar to a micromark event than an mdast node type. + * @returns {Exit} + * Revert. + * + * @callback Exit + * Exit something. + * @returns {void} + * Nothing. + * + * @typedef State + * Info passed around about the current state. + * @property {Array} stack + * Stack of constructs we’re in. + * @property {Array} indexStack + * Positions of child nodes in their parents. + * @property {IndentLines} indentLines + * Pad serialized markdown. + * @property {AssociationId} associationId + * Get an identifier from an association to match it to others. + * @property {ContainerPhrasing} containerPhrasing + * Serialize the children of a parent that contains phrasing children. + * @property {ContainerFlow} containerFlow + * Serialize the children of a parent that contains flow children. + * @property {CreateTracker} createTracker + * Track positional info in the output. + * @property {Safe} safe + * Serialize the children of a parent that contains flow children. + * @property {Enter} enter + * Enter a construct (returns a corresponding exit function). + * @property {Options} options + * Applied user configuration. + * @property {Array} unsafe + * Applied unsafe patterns. + * @property {Array} join + * Applied join handlers. + * @property {Handle} handle + * Call the configured handler for the given node. + * @property {Handlers} handlers + * Applied handlers. + * @property {string | undefined} bulletCurrent + * List marker currently in use. + * @property {string | undefined} bulletLastUsed + * List marker previously in use. + * + * @callback Handle + * Handle a particular node. + * @param {any} node + * Expected mdast node. + * @param {Parent | undefined} parent + * Parent of `node`. + * @param {State} state + * Info passed around about the current state. + * @param {Info} Info + * Info on the surrounding of the node that is serialized. + * @returns {string} + * Serialized markdown representing `node`. + * + * @typedef {Record} Handlers + * Handle particular nodes. + * + * Each key is a node type, each value its corresponding handler. + * + * @callback Join + * How to join two blocks. + * + * “Blocks” are typically joined by one blank line. + * Sometimes it’s nicer to have them flush next to each other, yet other + * times they cannot occur together at all. + * + * Join functions receive two adjacent siblings and their parent and what + * they return defines how many blank lines to use between them. + * @param {Node} left + * First of two adjacent siblings. + * @param {Node} right + * Second of two adjacent siblings. + * @param {Parent} parent + * Parent of the two siblings. + * @param {State} state + * Info passed around about the current state. 
+ * @returns {boolean | null | undefined | void | number} + * How many blank lines to use between the siblings. + * + * Where `true` is as passing `1` and `false` means the nodes cannot be + * joined by a blank line, such as two adjacent block quotes or indented code + * after a list, in which case a comment will be injected to break them up: + * + * ```markdown + * > Quote 1 + * + * + * + * > Quote 2 + * ``` + * + * > 👉 **Note**: abusing this feature will break markdown. + * > One such example is when returning `0` for two paragraphs, which will + * > result in the text running together, and in the future to be seen as + * > one paragraph. + * + * @typedef Unsafe + * Schema that defines when a character cannot occur. + * @property {string} character + * Single unsafe character. + * @property {ConstructName | Array | null | undefined} [inConstruct] + * Constructs where this is bad. + * @property {ConstructName | Array | null | undefined} [notInConstruct] + * Constructs where this is fine again. + * @property {string | null | undefined} [before] + * `character` is bad when this is before it (cannot be used together with + * `atBreak`). + * @property {string | null | undefined} [after] + * `character` is bad when this is after it. + * @property {boolean | null | undefined} [atBreak] + * `character` is bad at a break (cannot be used together with `before`). + * @property {RegExp | null | undefined} [_compiled] + * The unsafe pattern (this whole object) compiled as a regex. + * + * This is internal and must not be defined. + * + * @typedef Options + * Configuration (optional). + * @property {'-' | '*' | '+' | null | undefined} [bullet='*'] + * Marker to use for bullets of items in unordered lists. + * @property {'-' | '*' | '+' | null | undefined} [bulletOther] + * Marker to use in certain cases where the primary bullet doesn’t work. + * + * There are three cases where the primary bullet cannot be used: + * + * * When three list items are on their own, the last one is empty, and + * `bullet` is also a valid `rule`: `* - +`. + * This would turn into a thematic break if serialized with three primary + * bullets. + * As this is an edge case unlikely to appear in normal markdown, the + * last list item will be given a different bullet. + * * When a thematic break is the first child of one of the list items, and + * `bullet` is the same character as `rule`: `- ***`. + * This would turn into a single thematic break if serialized with + * primary bullets. + * As this is an edge case unlikely to appear in normal markdown this + * markup is always fixed, even if `bulletOther` is not passed + * * When two unordered lists appear next to each other: `* a\n- b`. + * CommonMark sees different bullets as different lists, but several + * markdown parsers parse it as one list. + * To solve for both, we instead inject an empty comment between the two + * lists: `* a\n\n* b`, but if `bulletOther` is given explicitly, + * it will be used instead + * @property {'.' | ')' | null | undefined} [bulletOrdered='.'] + * Marker to use for bullets of items in ordered lists. + * @property {'.' | ')' | null | undefined} [bulletOrderedOther] + * Marker to use in certain cases where the primary bullet for ordered items + * doesn’t work. + * + * There is one case where the primary bullet for ordered items cannot be used: + * + * * When two ordered lists appear next to each other: `1. a\n2) b`. + * CommonMark added support for `)` as a marker, but other markdown + * parsers do not support it. 
+ * To solve for both, we instead inject an empty comment between the two + * lists: `1. a\n\n1. b`, but if `bulletOrderedOther` is given + * explicitly, it will be used instead + * @property {boolean | null | undefined} [closeAtx=false] + * Whether to add the same number of number signs (`#`) at the end of an ATX + * heading as the opening sequence. + * @property {'_' | '*' | null | undefined} [emphasis='*'] + * Marker to use for emphasis. + * @property {'~' | '`' | null | undefined} [fence='`'] + * Marker to use for fenced code. + * @property {boolean | null | undefined} [fences=false] + * Whether to use fenced code always. + * + * The default is to use fenced code if there is a language defined, if the + * code is empty, or if it starts or ends in blank lines. + * @property {boolean | null | undefined} [incrementListMarker=true] + * Whether to increment the counter of ordered lists items. + * @property {'tab' | 'one' | 'mixed' | null | undefined} [listItemIndent='tab'] + * How to indent the content of list items. + * + * Either with the size of the bullet plus one space (when `'one'`), a tab + * stop (`'tab'`), or depending on the item and its parent list (`'mixed'`, + * uses `'one'` if the item and list are tight and `'tab'` otherwise). + * @property {'"' | "'" | null | undefined} [quote='"'] + * Marker to use for titles. + * @property {boolean | null | undefined} [resourceLink=false] + * Whether to always use resource links. + * + * The default is to use autolinks (``) when possible + * and resource links (`[text](url)`) otherwise. + * @property {'-' | '_' | '*' | null | undefined} [rule='*'] + * Marker to use for thematic breaks. + * @property {number | null | undefined} [ruleRepetition=3] + * Number of markers to use for thematic breaks. + * @property {boolean | null | undefined} [ruleSpaces=false] + * Whether to add spaces between markers in thematic breaks. + * @property {boolean | null | undefined} [setext=false] + * Whether to use setext headings when possible. + * + * The default is to always use ATX headings (`# heading`) instead of setext + * headings (`heading\n=======`). + * Setext headings cannot be used for empty headings or headings with a rank + * of three or more. + * @property {'_' | '*' | null | undefined} [strong='*'] + * Marker to use for strong. + * @property {boolean | null | undefined} [tightDefinitions=false] + * Whether to join definitions without a blank line. + * + * The default is to add blank lines between any flow (“block”) construct. + * Turning this option on is a shortcut for a join function like so: + * + * ```js + * function joinTightDefinitions(left, right) { + * if (left.type === 'definition' && right.type === 'definition') { + * return 0 + * } + * } + * ``` + * @property {Partial | null | undefined} [handlers={}] + * Handle particular nodes. + * + * Each key is a node type, each value its corresponding handler. + * @property {Array | null | undefined} [join=[]] + * How to join blocks. + * @property {Array | null | undefined} [unsafe=[]] + * Schemas that define when characters cannot occur. + * @property {Array | null | undefined} [extensions=[]] + * List of extensions to include. + * + * Each `ToMarkdownExtension` is an object with the same interface as + * `Options` here. 
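+ *
+ * (Illustrative note: the options above are what `toMarkdown` accepts as its
+ * second argument, for example
+ * `toMarkdown(tree, {bullet: '-', listItemIndent: 'one'})`.)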
+ */ + +export {} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/unsafe.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/unsafe.d.ts new file mode 100644 index 00000000..b609a569 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/unsafe.d.ts @@ -0,0 +1,4 @@ +/** @type {Array} */ +export const unsafe: Array +export type Unsafe = import('./types.js').Unsafe +export type ConstructName = import('./types.js').ConstructName diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/unsafe.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/unsafe.js new file mode 100644 index 00000000..74c0a269 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/unsafe.js @@ -0,0 +1,147 @@ +/** + * @typedef {import('./types.js').Unsafe} Unsafe + * @typedef {import('./types.js').ConstructName} ConstructName + */ + +/** + * List of constructs that occur in phrasing (paragraphs, headings), but cannot + * contain things like attention (emphasis, strong), images, or links. + * So they sort of cancel each other out. + * Note: could use a better name. + * + * @type {Array} + */ +const fullPhrasingSpans = [ + 'autolink', + 'destinationLiteral', + 'destinationRaw', + 'reference', + 'titleQuote', + 'titleApostrophe' +] + +/** @type {Array} */ +export const unsafe = [ + {character: '\t', after: '[\\r\\n]', inConstruct: 'phrasing'}, + {character: '\t', before: '[\\r\\n]', inConstruct: 'phrasing'}, + { + character: '\t', + inConstruct: ['codeFencedLangGraveAccent', 'codeFencedLangTilde'] + }, + { + character: '\r', + inConstruct: [ + 'codeFencedLangGraveAccent', + 'codeFencedLangTilde', + 'codeFencedMetaGraveAccent', + 'codeFencedMetaTilde', + 'destinationLiteral', + 'headingAtx' + ] + }, + { + character: '\n', + inConstruct: [ + 'codeFencedLangGraveAccent', + 'codeFencedLangTilde', + 'codeFencedMetaGraveAccent', + 'codeFencedMetaTilde', + 'destinationLiteral', + 'headingAtx' + ] + }, + {character: ' ', after: '[\\r\\n]', inConstruct: 'phrasing'}, + {character: ' ', before: '[\\r\\n]', inConstruct: 'phrasing'}, + { + character: ' ', + inConstruct: ['codeFencedLangGraveAccent', 'codeFencedLangTilde'] + }, + // An exclamation mark can start an image, if it is followed by a link or + // a link reference. + { + character: '!', + after: '\\[', + inConstruct: 'phrasing', + notInConstruct: fullPhrasingSpans + }, + // A quote can break out of a title. + {character: '"', inConstruct: 'titleQuote'}, + // A number sign could start an ATX heading if it starts a line. + {atBreak: true, character: '#'}, + {character: '#', inConstruct: 'headingAtx', after: '(?:[\r\n]|$)'}, + // Dollar sign and percentage are not used in markdown. + // An ampersand could start a character reference. + {character: '&', after: '[#A-Za-z]', inConstruct: 'phrasing'}, + // An apostrophe can break out of a title. + {character: "'", inConstruct: 'titleApostrophe'}, + // A left paren could break out of a destination raw. + {character: '(', inConstruct: 'destinationRaw'}, + // A left paren followed by `]` could make something into a link or image. + { + before: '\\]', + character: '(', + inConstruct: 'phrasing', + notInConstruct: fullPhrasingSpans + }, + // A right paren could start a list item or break out of a destination + // raw. + {atBreak: true, before: '\\d+', character: ')'}, + {character: ')', inConstruct: 'destinationRaw'}, + // An asterisk can start thematic breaks, list items, emphasis, strong. 
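+  // (For example, a text node reading `*hi*` is escaped to `\*hi\*` so it is
+  // not parsed back as emphasis.)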
+ {atBreak: true, character: '*', after: '(?:[ \t\r\n*])'}, + {character: '*', inConstruct: 'phrasing', notInConstruct: fullPhrasingSpans}, + // A plus sign could start a list item. + {atBreak: true, character: '+', after: '(?:[ \t\r\n])'}, + // A dash can start thematic breaks, list items, and setext heading + // underlines. + {atBreak: true, character: '-', after: '(?:[ \t\r\n-])'}, + // A dot could start a list item. + {atBreak: true, before: '\\d+', character: '.', after: '(?:[ \t\r\n]|$)'}, + // Slash, colon, and semicolon are not used in markdown for constructs. + // A less than can start html (flow or text) or an autolink. + // HTML could start with an exclamation mark (declaration, cdata, comment), + // slash (closing tag), question mark (instruction), or a letter (tag). + // An autolink also starts with a letter. + // Finally, it could break out of a destination literal. + {atBreak: true, character: '<', after: '[!/?A-Za-z]'}, + { + character: '<', + after: '[!/?A-Za-z]', + inConstruct: 'phrasing', + notInConstruct: fullPhrasingSpans + }, + {character: '<', inConstruct: 'destinationLiteral'}, + // An equals to can start setext heading underlines. + {atBreak: true, character: '='}, + // A greater than can start block quotes and it can break out of a + // destination literal. + {atBreak: true, character: '>'}, + {character: '>', inConstruct: 'destinationLiteral'}, + // Question mark and at sign are not used in markdown for constructs. + // A left bracket can start definitions, references, labels, + {atBreak: true, character: '['}, + {character: '[', inConstruct: 'phrasing', notInConstruct: fullPhrasingSpans}, + {character: '[', inConstruct: ['label', 'reference']}, + // A backslash can start an escape (when followed by punctuation) or a + // hard break (when followed by an eol). + // Note: typical escapes are handled in `safe`! + {character: '\\', after: '[\\r\\n]', inConstruct: 'phrasing'}, + // A right bracket can exit labels. + {character: ']', inConstruct: ['label', 'reference']}, + // Caret is not used in markdown for constructs. + // An underscore can start emphasis, strong, or a thematic break. + {atBreak: true, character: '_'}, + {character: '_', inConstruct: 'phrasing', notInConstruct: fullPhrasingSpans}, + // A grave accent can start code (fenced or text), or it can break out of + // a grave accent code fence. + {atBreak: true, character: '`'}, + { + character: '`', + inConstruct: ['codeFencedLangGraveAccent', 'codeFencedMetaGraveAccent'] + }, + {character: '`', inConstruct: 'phrasing', notInConstruct: fullPhrasingSpans}, + // Left brace, vertical bar, right brace are not used in markdown for + // constructs. + // A tilde can start code (fenced). 
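+  // (For example, a paragraph starting with `~~~` gets its first tilde
+  // escaped so it is not read as a code fence.)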
+ {atBreak: true, character: '~'} +] diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/association.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/association.d.ts new file mode 100644 index 00000000..b8093219 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/association.d.ts @@ -0,0 +1,2 @@ +export function association(node: import('mdast').Association): string +export type AssociationId = import('../types.js').AssociationId diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/association.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/association.js new file mode 100644 index 00000000..e2fec6b2 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/association.js @@ -0,0 +1,33 @@ +/** + * @typedef {import('../types.js').AssociationId} AssociationId + */ + +import {decodeString} from 'micromark-util-decode-string' + +/** + * Get an identifier from an association to match it to others. + * + * Associations are nodes that match to something else through an ID: + * . + * + * The `label` of an association is the string value: character escapes and + * references work, and casing is intact. + * The `identifier` is used to match one association to another: + * controversially, character escapes and references don’t work in this + * matching: `©` does not match `©`, and `\+` does not match `+`. + * + * But casing is ignored (and whitespace) is trimmed and collapsed: ` A\nb` + * matches `a b`. + * So, we do prefer the label when figuring out how we’re going to serialize: + * it has whitespace, casing, and we can ignore most useless character + * escapes and all character references. + * + * @type {AssociationId} + */ +export function association(node) { + if (node.label || !node.identifier) { + return node.label || '' + } + + return decodeString(node.identifier) +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered-other.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered-other.d.ts new file mode 100644 index 00000000..fcf50425 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered-other.d.ts @@ -0,0 +1,9 @@ +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkBulletOrderedOther( + state: State +): Exclude +export type State = import('../types.js').State +export type Options = import('../types.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered-other.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered-other.js new file mode 100644 index 00000000..dc5696f1 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered-other.js @@ -0,0 +1,39 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ + +import {checkBulletOrdered} from './check-bullet-ordered.js' + +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkBulletOrderedOther(state) { + const bulletOrdered = checkBulletOrdered(state) + const bulletOrderedOther = state.options.bulletOrderedOther + + if (!bulletOrderedOther) { + return bulletOrdered === '.' ? ')' : '.' + } + + if (bulletOrderedOther !== '.' 
&& bulletOrderedOther !== ')') { + throw new Error( + 'Cannot serialize items with `' + + bulletOrderedOther + + '` for `options.bulletOrderedOther`, expected `*`, `+`, or `-`' + ) + } + + if (bulletOrderedOther === bulletOrdered) { + throw new Error( + 'Expected `bulletOrdered` (`' + + bulletOrdered + + '`) and `bulletOrderedOther` (`' + + bulletOrderedOther + + '`) to be different' + ) + } + + return bulletOrderedOther +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered.d.ts new file mode 100644 index 00000000..3aa2ffd5 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered.d.ts @@ -0,0 +1,13 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkBulletOrdered( + state: State +): Exclude +export type State = import('../types.js').State +export type Options = import('../types.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered.js new file mode 100644 index 00000000..fa615427 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-ordered.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ + +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkBulletOrdered(state) { + const marker = state.options.bulletOrdered || '.' + + if (marker !== '.' && marker !== ')') { + throw new Error( + 'Cannot serialize items with `' + + marker + + '` for `options.bulletOrdered`, expected `.` or `)`' + ) + } + + return marker +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-other.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-other.d.ts new file mode 100644 index 00000000..3280cc7f --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-other.d.ts @@ -0,0 +1,9 @@ +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkBulletOther( + state: State +): Exclude +export type State = import('../types.js').State +export type Options = import('../types.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-other.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-other.js new file mode 100644 index 00000000..2e7a12af --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet-other.js @@ -0,0 +1,39 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ + +import {checkBullet} from './check-bullet.js' + +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkBulletOther(state) { + const bullet = checkBullet(state) + const bulletOther = state.options.bulletOther + + if (!bulletOther) { + return bullet === '*' ? 
'-' : '*' + } + + if (bulletOther !== '*' && bulletOther !== '+' && bulletOther !== '-') { + throw new Error( + 'Cannot serialize items with `' + + bulletOther + + '` for `options.bulletOther`, expected `*`, `+`, or `-`' + ) + } + + if (bulletOther === bullet) { + throw new Error( + 'Expected `bullet` (`' + + bullet + + '`) and `bulletOther` (`' + + bulletOther + + '`) to be different' + ) + } + + return bulletOther +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet.d.ts new file mode 100644 index 00000000..0f4a2d3b --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet.d.ts @@ -0,0 +1,13 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkBullet( + state: State +): Exclude +export type State = import('../types.js').State +export type Options = import('../types.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet.js new file mode 100644 index 00000000..89588fd2 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-bullet.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ + +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkBullet(state) { + const marker = state.options.bullet || '*' + + if (marker !== '*' && marker !== '+' && marker !== '-') { + throw new Error( + 'Cannot serialize items with `' + + marker + + '` for `options.bullet`, expected `*`, `+`, or `-`' + ) + } + + return marker +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-emphasis.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-emphasis.d.ts new file mode 100644 index 00000000..32933588 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-emphasis.d.ts @@ -0,0 +1,13 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkEmphasis( + state: State +): Exclude +export type State = import('../types.js').State +export type Options = import('../types.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-emphasis.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-emphasis.js new file mode 100644 index 00000000..4bb5272d --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-emphasis.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ + +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkEmphasis(state) { + const marker = state.options.emphasis || '*' + + if (marker !== '*' && marker !== '_') { + throw new Error( + 'Cannot serialize emphasis with `' + + marker + + '` for `options.emphasis`, expected `*`, or `_`' + ) + } + + return marker +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-fence.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-fence.d.ts new file mode 100644 index 00000000..9ec5e321 --- 
/dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-fence.d.ts @@ -0,0 +1,13 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkFence( + state: State +): Exclude +export type State = import('../types.js').State +export type Options = import('../types.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-fence.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-fence.js new file mode 100644 index 00000000..3b92d048 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-fence.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ + +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkFence(state) { + const marker = state.options.fence || '`' + + if (marker !== '`' && marker !== '~') { + throw new Error( + 'Cannot serialize code with `' + + marker + + '` for `options.fence`, expected `` ` `` or `~`' + ) + } + + return marker +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-list-item-indent.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-list-item-indent.d.ts new file mode 100644 index 00000000..8a7d81d1 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-list-item-indent.d.ts @@ -0,0 +1,13 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkListItemIndent( + state: State +): Exclude +export type State = import('../types.js').State +export type Options = import('../types.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-list-item-indent.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-list-item-indent.js new file mode 100644 index 00000000..97f25e63 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-list-item-indent.js @@ -0,0 +1,28 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ + +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkListItemIndent(state) { + const style = state.options.listItemIndent || 'tab' + + // To do: remove in a major. + // @ts-expect-error: deprecated. 
+ if (style === 1 || style === '1') { + return 'one' + } + + if (style !== 'tab' && style !== 'one' && style !== 'mixed') { + throw new Error( + 'Cannot serialize items with `' + + style + + '` for `options.listItemIndent`, expected `tab`, `one`, or `mixed`' + ) + } + + return style +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-quote.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-quote.d.ts new file mode 100644 index 00000000..5e30e968 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-quote.d.ts @@ -0,0 +1,13 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkQuote( + state: State +): Exclude +export type State = import('../types.js').State +export type Options = import('../types.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-quote.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-quote.js new file mode 100644 index 00000000..a79060e3 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-quote.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ + +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkQuote(state) { + const marker = state.options.quote || '"' + + if (marker !== '"' && marker !== "'") { + throw new Error( + 'Cannot serialize title with `' + + marker + + '` for `options.quote`, expected `"`, or `\'`' + ) + } + + return marker +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule-repetition.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule-repetition.d.ts new file mode 100644 index 00000000..bc0865ac --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule-repetition.d.ts @@ -0,0 +1,13 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkRuleRepetition( + state: State +): Exclude +export type State = import('../types.js').State +export type Options = import('../types.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule-repetition.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule-repetition.js new file mode 100644 index 00000000..7b75bfb6 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule-repetition.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ + +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkRuleRepetition(state) { + const repetition = state.options.ruleRepetition || 3 + + if (repetition < 3) { + throw new Error( + 'Cannot serialize rules with repetition `' + + repetition + + '` for `options.ruleRepetition`, expected `3` or more' + ) + } + + return repetition +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule.d.ts new file mode 100644 index 00000000..14fc87b4 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule.d.ts @@ -0,0 +1,13 @@ 
+/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkRule( + state: State +): Exclude +export type State = import('../types.js').State +export type Options = import('../types.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule.js new file mode 100644 index 00000000..b8ab7e4d --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-rule.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ + +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkRule(state) { + const marker = state.options.rule || '*' + + if (marker !== '*' && marker !== '-' && marker !== '_') { + throw new Error( + 'Cannot serialize rules with `' + + marker + + '` for `options.rule`, expected `*`, `-`, or `_`' + ) + } + + return marker +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-strong.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-strong.d.ts new file mode 100644 index 00000000..d712dae8 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-strong.d.ts @@ -0,0 +1,13 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkStrong( + state: State +): Exclude +export type State = import('../types.js').State +export type Options = import('../types.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-strong.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-strong.js new file mode 100644 index 00000000..be0a4ccd --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/check-strong.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').Options} Options + */ + +/** + * @param {State} state + * @returns {Exclude} + */ +export function checkStrong(state) { + const marker = state.options.strong || '*' + + if (marker !== '*' && marker !== '_') { + throw new Error( + 'Cannot serialize strong with `' + + marker + + '` for `options.strong`, expected `*`, or `_`' + ) + } + + return marker +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-flow.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-flow.d.ts new file mode 100644 index 00000000..49d66a08 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-flow.d.ts @@ -0,0 +1,29 @@ +/** + * @typedef {import('../types.js').FlowContent} FlowContent + * @typedef {import('../types.js').Node} Node + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').TrackFields} TrackFields + */ +/** + * @param {Parent & {children: Array}} parent + * Parent of flow nodes. + * @param {State} state + * Info passed around about the current state. + * @param {TrackFields} info + * Info on where we are in the document we are generating. + * @returns {string} + * Serialized children, joined by (blank) lines. 
+ */ +export function containerFlow( + parent: import('../types.js').Parent & { + children: Array + }, + state: State, + info: TrackFields +): string +export type FlowContent = import('../types.js').FlowContent +export type Node = import('../types.js').Node +export type Parent = import('../types.js').Parent +export type State = import('../types.js').State +export type TrackFields = import('../types.js').TrackFields diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-flow.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-flow.js new file mode 100644 index 00000000..4a895037 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-flow.js @@ -0,0 +1,87 @@ +/** + * @typedef {import('../types.js').FlowContent} FlowContent + * @typedef {import('../types.js').Node} Node + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').TrackFields} TrackFields + */ + +/** + * @param {Parent & {children: Array}} parent + * Parent of flow nodes. + * @param {State} state + * Info passed around about the current state. + * @param {TrackFields} info + * Info on where we are in the document we are generating. + * @returns {string} + * Serialized children, joined by (blank) lines. + */ +export function containerFlow(parent, state, info) { + const indexStack = state.indexStack + const children = parent.children || [] + const tracker = state.createTracker(info) + /** @type {Array} */ + const results = [] + let index = -1 + + indexStack.push(-1) + + while (++index < children.length) { + const child = children[index] + + indexStack[indexStack.length - 1] = index + + results.push( + tracker.move( + state.handle(child, parent, state, { + before: '\n', + after: '\n', + ...tracker.current() + }) + ) + ) + + if (child.type !== 'list') { + state.bulletLastUsed = undefined + } + + if (index < children.length - 1) { + results.push( + tracker.move(between(child, children[index + 1], parent, state)) + ) + } + } + + indexStack.pop() + + return results.join('') +} + +/** + * @param {Node} left + * @param {Node} right + * @param {Parent} parent + * @param {State} state + * @returns {string} + */ +function between(left, right, parent, state) { + let index = state.join.length + + while (index--) { + const result = state.join[index](left, right, parent, state) + + if (result === true || result === 1) { + break + } + + if (typeof result === 'number') { + return '\n'.repeat(1 + result) + } + + if (result === false) { + return '\n\n\n\n' + } + } + + return '\n\n' +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-phrasing.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-phrasing.d.ts new file mode 100644 index 00000000..fb003867 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-phrasing.d.ts @@ -0,0 +1,33 @@ +/** + * @typedef {import('../types.js').Handle} Handle + * @typedef {import('../types.js').Info} Info + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').PhrasingContent} PhrasingContent + * @typedef {import('../types.js').State} State + */ +/** + * Serialize the children of a parent that contains phrasing children. + * + * These children will be joined flush together. + * + * @param {Parent & {children: Array}} parent + * Parent of flow nodes. + * @param {State} state + * Info passed around about the current state. 
+ * @param {Info} info + * Info on where we are in the document we are generating. + * @returns {string} + * Serialized children, joined together. + */ +export function containerPhrasing( + parent: import('../types.js').Parent & { + children: Array + }, + state: State, + info: Info +): string +export type Handle = import('../types.js').Handle +export type Info = import('../types.js').Info +export type Parent = import('../types.js').Parent +export type PhrasingContent = import('../types.js').PhrasingContent +export type State = import('../types.js').State diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-phrasing.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-phrasing.js new file mode 100644 index 00000000..c42871d5 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/container-phrasing.js @@ -0,0 +1,97 @@ +/** + * @typedef {import('../types.js').Handle} Handle + * @typedef {import('../types.js').Info} Info + * @typedef {import('../types.js').Parent} Parent + * @typedef {import('../types.js').PhrasingContent} PhrasingContent + * @typedef {import('../types.js').State} State + */ + +/** + * Serialize the children of a parent that contains phrasing children. + * + * These children will be joined flush together. + * + * @param {Parent & {children: Array}} parent + * Parent of flow nodes. + * @param {State} state + * Info passed around about the current state. + * @param {Info} info + * Info on where we are in the document we are generating. + * @returns {string} + * Serialized children, joined together. + */ +export function containerPhrasing(parent, state, info) { + const indexStack = state.indexStack + const children = parent.children || [] + /** @type {Array} */ + const results = [] + let index = -1 + let before = info.before + + indexStack.push(-1) + let tracker = state.createTracker(info) + + while (++index < children.length) { + const child = children[index] + /** @type {string} */ + let after + + indexStack[indexStack.length - 1] = index + + if (index + 1 < children.length) { + /** @type {Handle} */ + // @ts-expect-error: hush, it’s actually a `zwitch`. + let handle = state.handle.handlers[children[index + 1].type] + /** @type {Handle} */ + // @ts-expect-error: hush, it’s actually a `zwitch`. + if (handle && handle.peek) handle = handle.peek + after = handle + ? handle(children[index + 1], parent, state, { + before: '', + after: '', + ...tracker.current() + }).charAt(0) + : '' + } else { + after = info.after + } + + // In some cases, html (text) can be found in phrasing right after an eol. + // When we’d serialize that, in most cases that would be seen as html + // (flow). + // As we can’t escape or so to prevent it from happening, we take a somewhat + // reasonable approach: replace that eol with a space. + // See: + if ( + results.length > 0 && + (before === '\r' || before === '\n') && + child.type === 'html' + ) { + results[results.length - 1] = results[results.length - 1].replace( + /(\r?\n|\r)$/, + ' ' + ) + before = ' ' + + // To do: does this work to reset tracker? 
+ tracker = state.createTracker(info) + tracker.move(results.join('')) + } + + results.push( + tracker.move( + state.handle(child, parent, state, { + ...tracker.current(), + before, + after + }) + ) + ) + + before = results[results.length - 1].slice(-1) + } + + indexStack.pop() + + return results.join('') +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-code-as-indented.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-code-as-indented.d.ts new file mode 100644 index 00000000..a6153fef --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-code-as-indented.d.ts @@ -0,0 +1,12 @@ +/** + * @typedef {import('mdast').Code} Code + * @typedef {import('../types.js').State} State + */ +/** + * @param {Code} node + * @param {State} state + * @returns {boolean} + */ +export function formatCodeAsIndented(node: Code, state: State): boolean +export type Code = import('mdast').Code +export type State = import('../types.js').State diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-code-as-indented.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-code-as-indented.js new file mode 100644 index 00000000..9d004e28 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-code-as-indented.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('mdast').Code} Code + * @typedef {import('../types.js').State} State + */ + +/** + * @param {Code} node + * @param {State} state + * @returns {boolean} + */ +export function formatCodeAsIndented(node, state) { + return Boolean( + !state.options.fences && + node.value && + // If there’s no info… + !node.lang && + // And there’s a non-whitespace character… + /[^ \r\n]/.test(node.value) && + // And the value doesn’t start or end in a blank… + !/^[\t ]*(?:[\r\n]|$)|(?:^|[\r\n])[\t ]*$/.test(node.value) + ) +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-heading-as-setext.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-heading-as-setext.d.ts new file mode 100644 index 00000000..290a9b40 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-heading-as-setext.d.ts @@ -0,0 +1,8 @@ +/** + * @param {Heading} node + * @param {State} state + * @returns {boolean} + */ +export function formatHeadingAsSetext(node: Heading, state: State): boolean +export type Heading = import('mdast').Heading +export type State = import('../types.js').State diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-heading-as-setext.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-heading-as-setext.js new file mode 100644 index 00000000..899c902e --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-heading-as-setext.js @@ -0,0 +1,34 @@ +/** + * @typedef {import('mdast').Heading} Heading + * @typedef {import('../types.js').State} State + */ + +import {visit, EXIT} from 'unist-util-visit' +import {toString} from 'mdast-util-to-string' + +/** + * @param {Heading} node + * @param {State} state + * @returns {boolean} + */ +export function formatHeadingAsSetext(node, state) { + let literalWithBreak = false + + // Look for literals with a line break. 
+ // Note that this also + visit(node, (node) => { + if ( + ('value' in node && /\r?\n|\r/.test(node.value)) || + node.type === 'break' + ) { + literalWithBreak = true + return EXIT + } + }) + + return Boolean( + (!node.depth || node.depth < 3) && + toString(node) && + (state.options.setext || literalWithBreak) + ) +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-link-as-autolink.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-link-as-autolink.d.ts new file mode 100644 index 00000000..9116a473 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-link-as-autolink.d.ts @@ -0,0 +1,8 @@ +/** + * @param {Link} node + * @param {State} state + * @returns {boolean} + */ +export function formatLinkAsAutolink(node: Link, state: State): boolean +export type Link = import('mdast').Link +export type State = import('../types.js').State diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-link-as-autolink.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-link-as-autolink.js new file mode 100644 index 00000000..fd94e44c --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/format-link-as-autolink.js @@ -0,0 +1,34 @@ +/** + * @typedef {import('mdast').Link} Link + * @typedef {import('../types.js').State} State + */ + +import {toString} from 'mdast-util-to-string' + +/** + * @param {Link} node + * @param {State} state + * @returns {boolean} + */ +export function formatLinkAsAutolink(node, state) { + const raw = toString(node) + + return Boolean( + !state.options.resourceLink && + // If there’s a url… + node.url && + // And there’s a no title… + !node.title && + // And the content of `node` is a single text node… + node.children && + node.children.length === 1 && + node.children[0].type === 'text' && + // And if the url is the same as the content… + (raw === node.url || 'mailto:' + raw === node.url) && + // And that starts w/ a protocol… + /^[a-z][a-z+.-]+:/i.test(node.url) && + // And that doesn’t contain ASCII control codes (character escapes and + // references don’t work), space, or angle brackets… + !/[\0- <>\u007F]/.test(node.url) + ) +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/indent-lines.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/indent-lines.d.ts new file mode 100644 index 00000000..04390c54 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/indent-lines.d.ts @@ -0,0 +1,5 @@ +export function indentLines( + value: string, + map: import('../types.js').Map +): string +export type IndentLines = import('../types.js').IndentLines diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/indent-lines.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/indent-lines.js new file mode 100644 index 00000000..3c1c6535 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/indent-lines.js @@ -0,0 +1,35 @@ +/** + * @typedef {import('../types.js').IndentLines} IndentLines + */ + +const eol = /\r?\n|\r/g + +/** + * @type {IndentLines} + */ +export function indentLines(value, map) { + /** @type {Array} */ + const result = [] + let start = 0 + let line = 0 + /** @type {RegExpExecArray | null} */ + let match + + while ((match = eol.exec(value))) { + one(value.slice(start, match.index)) + result.push(match[0]) + start = match.index + match[0].length + line++ + } + + one(value.slice(start)) + + return result.join('') + 
+ /** + * @param {string} value + */ + function one(value) { + result.push(map(value, line, !value)) + } +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-compile.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-compile.d.ts new file mode 100644 index 00000000..3af4914a --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-compile.d.ts @@ -0,0 +1,9 @@ +/** + * @typedef {import('../types.js').Unsafe} Unsafe + */ +/** + * @param {Unsafe} pattern + * @returns {RegExp} + */ +export function patternCompile(pattern: Unsafe): RegExp +export type Unsafe = import('../types.js').Unsafe diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-compile.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-compile.js new file mode 100644 index 00000000..278d2125 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-compile.js @@ -0,0 +1,25 @@ +/** + * @typedef {import('../types.js').Unsafe} Unsafe + */ + +/** + * @param {Unsafe} pattern + * @returns {RegExp} + */ +export function patternCompile(pattern) { + if (!pattern._compiled) { + const before = + (pattern.atBreak ? '[\\r\\n][\\t ]*' : '') + + (pattern.before ? '(?:' + pattern.before + ')' : '') + + pattern._compiled = new RegExp( + (before ? '(' + before + ')' : '') + + (/[|\\{}()[\]^$+*?.-]/.test(pattern.character) ? '\\' : '') + + pattern.character + + (pattern.after ? '(?:' + pattern.after + ')' : ''), + 'g' + ) + } + + return pattern._compiled +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-in-scope.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-in-scope.d.ts new file mode 100644 index 00000000..2dc49c85 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-in-scope.d.ts @@ -0,0 +1,15 @@ +/** + * @typedef {import('../types.js').Unsafe} Unsafe + * @typedef {import('../types.js').ConstructName} ConstructName + */ +/** + * @param {Array} stack + * @param {Unsafe} pattern + * @returns {boolean} + */ +export function patternInScope( + stack: Array, + pattern: Unsafe +): boolean +export type Unsafe = import('../types.js').Unsafe +export type ConstructName = import('../types.js').ConstructName diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-in-scope.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-in-scope.js new file mode 100644 index 00000000..19da059a --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/pattern-in-scope.js @@ -0,0 +1,42 @@ +/** + * @typedef {import('../types.js').Unsafe} Unsafe + * @typedef {import('../types.js').ConstructName} ConstructName + */ + +/** + * @param {Array} stack + * @param {Unsafe} pattern + * @returns {boolean} + */ +export function patternInScope(stack, pattern) { + return ( + listInScope(stack, pattern.inConstruct, true) && + !listInScope(stack, pattern.notInConstruct, false) + ) +} + +/** + * @param {Array} stack + * @param {Unsafe['inConstruct']} list + * @param {boolean} none + * @returns {boolean} + */ +function listInScope(stack, list, none) { + if (typeof list === 'string') { + list = [list] + } + + if (!list || list.length === 0) { + return none + } + + let index = -1 + + while (++index < list.length) { + if (stack.includes(list[index])) { + return true + } + } + + return false +} diff --git 
a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/safe.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/safe.d.ts new file mode 100644 index 00000000..6b3f42ff --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/safe.d.ts @@ -0,0 +1,32 @@ +/** + * Make a string safe for embedding in markdown constructs. + * + * In markdown, almost all punctuation characters can, in certain cases, + * result in something. + * Whether they do is highly subjective to where they happen and in what + * they happen. + * + * To solve this, `mdast-util-to-markdown` tracks: + * + * * Characters before and after something; + * * What “constructs” we are in. + * + * This information is then used by this function to escape or encode + * special characters. + * + * @param {State} state + * Info passed around about the current state. + * @param {string | null | undefined} input + * Raw value to make safe. + * @param {SafeConfig} config + * Configuration. + * @returns {string} + * Serialized markdown safe for embedding. + */ +export function safe( + state: State, + input: string | null | undefined, + config: SafeConfig +): string +export type State = import('../types.js').State +export type SafeConfig = import('../types.js').SafeConfig diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/safe.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/safe.js new file mode 100644 index 00000000..a1f53e1a --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/safe.js @@ -0,0 +1,177 @@ +/** + * @typedef {import('../types.js').State} State + * @typedef {import('../types.js').SafeConfig} SafeConfig + */ + +import {patternCompile} from './pattern-compile.js' +import {patternInScope} from './pattern-in-scope.js' + +/** + * Make a string safe for embedding in markdown constructs. + * + * In markdown, almost all punctuation characters can, in certain cases, + * result in something. + * Whether they do is highly subjective to where they happen and in what + * they happen. + * + * To solve this, `mdast-util-to-markdown` tracks: + * + * * Characters before and after something; + * * What “constructs” we are in. + * + * This information is then used by this function to escape or encode + * special characters. + * + * @param {State} state + * Info passed around about the current state. + * @param {string | null | undefined} input + * Raw value to make safe. + * @param {SafeConfig} config + * Configuration. + * @returns {string} + * Serialized markdown safe for embedding. + */ +export function safe(state, input, config) { + const value = (config.before || '') + (input || '') + (config.after || '') + /** @type {Array} */ + const positions = [] + /** @type {Array} */ + const result = [] + /** @type {Record} */ + const infos = {} + let index = -1 + + while (++index < state.unsafe.length) { + const pattern = state.unsafe[index] + + if (!patternInScope(state.stack, pattern)) { + continue + } + + const expression = patternCompile(pattern) + /** @type {RegExpExecArray | null} */ + let match + + while ((match = expression.exec(value))) { + const before = 'before' in pattern || Boolean(pattern.atBreak) + const after = 'after' in pattern + const position = match.index + (before ? 
match[1].length : 0) + + if (positions.includes(position)) { + if (infos[position].before && !before) { + infos[position].before = false + } + + if (infos[position].after && !after) { + infos[position].after = false + } + } else { + positions.push(position) + infos[position] = {before, after} + } + } + } + + positions.sort(numerical) + + let start = config.before ? config.before.length : 0 + const end = value.length - (config.after ? config.after.length : 0) + index = -1 + + while (++index < positions.length) { + const position = positions[index] + + // Character before or after matched: + if (position < start || position >= end) { + continue + } + + // If this character is supposed to be escaped because it has a condition on + // the next character, and the next character is definitly being escaped, + // then skip this escape. + if ( + (position + 1 < end && + positions[index + 1] === position + 1 && + infos[position].after && + !infos[position + 1].before && + !infos[position + 1].after) || + (positions[index - 1] === position - 1 && + infos[position].before && + !infos[position - 1].before && + !infos[position - 1].after) + ) { + continue + } + + if (start !== position) { + // If we have to use a character reference, an ampersand would be more + // correct, but as backslashes only care about punctuation, either will + // do the trick + result.push(escapeBackslashes(value.slice(start, position), '\\')) + } + + start = position + + if ( + /[!-/:-@[-`{-~]/.test(value.charAt(position)) && + (!config.encode || !config.encode.includes(value.charAt(position))) + ) { + // Character escape. + result.push('\\') + } else { + // Character reference. + result.push( + '&#x' + value.charCodeAt(position).toString(16).toUpperCase() + ';' + ) + start++ + } + } + + result.push(escapeBackslashes(value.slice(start, end), config.after)) + + return result.join('') +} + +/** + * @param {number} a + * @param {number} b + * @returns {number} + */ +function numerical(a, b) { + return a - b +} + +/** + * @param {string} value + * @param {string} after + * @returns {string} + */ +function escapeBackslashes(value, after) { + const expression = /\\(?=[!-/:-@[-`{-~])/g + /** @type {Array} */ + const positions = [] + /** @type {Array} */ + const results = [] + const whole = value + after + let index = -1 + let start = 0 + /** @type {RegExpExecArray | null} */ + let match + + while ((match = expression.exec(whole))) { + positions.push(match.index) + } + + while (++index < positions.length) { + if (start !== positions[index]) { + results.push(value.slice(start, positions[index])) + } + + results.push('\\') + start = positions[index] + } + + results.push(value.slice(start)) + + return results.join('') +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/track.d.ts b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/track.d.ts new file mode 100644 index 00000000..78d1d859 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/track.d.ts @@ -0,0 +1,7 @@ +export function track( + info: import('../types.js').TrackFields +): import('../types.js').Tracker +export type CreateTracker = import('../types.js').CreateTracker +export type TrackCurrent = import('../types.js').TrackCurrent +export type TrackMove = import('../types.js').TrackMove +export type TrackShift = import('../types.js').TrackShift diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/track.js b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/track.js new file mode 100644 index 
00000000..b88ef39b --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/lib/util/track.js @@ -0,0 +1,58 @@ +/** + * @typedef {import('../types.js').CreateTracker} CreateTracker + * @typedef {import('../types.js').TrackCurrent} TrackCurrent + * @typedef {import('../types.js').TrackMove} TrackMove + * @typedef {import('../types.js').TrackShift} TrackShift + */ + +/** + * Track positional info in the output. + * + * @type {CreateTracker} + */ +export function track(config) { + // Defaults are used to prevent crashes when older utilities somehow activate + // this code. + /* c8 ignore next 5 */ + const options = config || {} + const now = options.now || {} + let lineShift = options.lineShift || 0 + let line = now.line || 1 + let column = now.column || 1 + + return {move, current, shift} + + /** + * Get the current tracked info. + * + * @type {TrackCurrent} + */ + function current() { + return {now: {line, column}, lineShift} + } + + /** + * Define an increased line shift (the typical indent for lines). + * + * @type {TrackShift} + */ + function shift(value) { + lineShift += value + } + + /** + * Move past some generated markdown. + * + * @type {TrackMove} + */ + function move(input) { + // eslint-disable-next-line unicorn/prefer-default-parameters + const value = input || '' + const chunks = value.split(/\r?\n|\r/g) + const tail = chunks[chunks.length - 1] + line += chunks.length - 1 + column = + chunks.length === 1 ? column + tail.length : 1 + tail.length + lineShift + return value + } +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/license b/_extensions/d2/node_modules/mdast-util-to-markdown/license new file mode 100644 index 00000000..39372356 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2020 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/package.json b/_extensions/d2/node_modules/mdast-util-to-markdown/package.json new file mode 100644 index 00000000..dc263420 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/package.json @@ -0,0 +1,100 @@ +{ + "name": "mdast-util-to-markdown", + "version": "1.5.0", + "description": "mdast utility to serialize markdown", + "license": "MIT", + "keywords": [ + "unist", + "mdast", + "mdast-util", + "util", + "utility", + "markdown", + "markup", + "serialize", + "stringify", + "compile", + "syntax", + "tree", + "ast" + ], + "repository": "syntax-tree/mdast-util-to-markdown", + "bugs": "https://github.com/syntax-tree/mdast-util-to-markdown/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "lib/", + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^3.0.0", + "mdast-util-to-string": "^3.0.0", + "micromark-util-decode-string": "^1.0.0", + "unist-util-visit": "^4.0.0", + "zwitch": "^2.0.0" + }, + "devDependencies": { + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "mdast-util-from-markdown": "^1.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "unist-util-remove-position": "^4.0.0", + "xo": "^0.53.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --conditions development test/index.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true, + "rules": { + "complexity": "off", + "unicorn/prefer-code-point": "off", + "unicorn/prefer-switch": "off" + } + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true, + "ignoreFiles": [ + "lib/types.d.ts" + ] + } +} diff --git a/_extensions/d2/node_modules/mdast-util-to-markdown/readme.md b/_extensions/d2/node_modules/mdast-util-to-markdown/readme.md new file mode 100644 index 00000000..4a174a39 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-markdown/readme.md @@ -0,0 +1,732 @@ +# mdast-util-to-markdown + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +**[mdast][]** utility that turns a syntax tree into markdown. 
+ +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`toMarkdown(tree[, options])`](#tomarkdowntree-options) + * [`defaultHandlers`](#defaulthandlers) + * [`ConstructName`](#constructname) + * [`ConstructNameMap`](#constructnamemap) + * [`Handle`](#handle) + * [`Handlers`](#handlers) + * [`Info`](#info) + * [`Join`](#join) + * [`Map`](#map) + * [`Options`](#options) + * [`SafeConfig`](#safeconfig) + * [`State`](#state) + * [`Tracker`](#tracker) + * [`Unsafe`](#unsafe) +* [List of extensions](#list-of-extensions) +* [Syntax](#syntax) +* [Syntax tree](#syntax-tree) +* [Types](#types) +* [Security](#security) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This package is a utility that takes an [mdast][] syntax tree as input and turns +it into serialized markdown. + +This utility is a low level project. +It’s used in [`remark-stringify`][remark-stringify], which focusses on making it +easier to transform content by abstracting these internals away. + +## When should I use this? + +If you want to handle syntax trees manually, use this. +For an easier time processing content, use the **[remark][]** ecosystem instead. + +You can combine this utility with other utilities to add syntax extensions. +Notable examples that deeply integrate with it are +[`mdast-util-gfm`][mdast-util-gfm], +[`mdast-util-mdx`][mdast-util-mdx], +[`mdast-util-frontmatter`][mdast-util-frontmatter], +[`mdast-util-math`][mdast-util-math], and +[`mdast-util-directive`][mdast-util-directive]. + +## Install + +This package is [ESM only][esm]. +In Node.js (version 14.14+, or 16.0+), install with [npm][]: + +```sh +npm install mdast-util-to-markdown +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {toMarkdown} from 'https://esm.sh/mdast-util-to-markdown@1' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +Say our module `example.js` looks as follows: + +```js +import {toMarkdown} from 'mdast-util-to-markdown' + +/** @type {import('mdast').Root} */ +const tree = { + type: 'root', + children: [ + { + type: 'blockquote', + children: [ + {type: 'thematicBreak'}, + { + type: 'paragraph', + children: [ + {type: 'text', value: '- a\nb !'}, + { + type: 'link', + url: 'example.com', + children: [{type: 'text', value: 'd'}] + } + ] + } + ] + } + ] +} + +console.log(toMarkdown(tree)) +``` + +…now running `node example.js` yields: + +```markdown +> *** +> +> \- a +> b \![d](example.com) +``` + +> 👉 **Note**: observe the properly escaped characters which would otherwise +> turn into a list and image respectively. + +## API + +This package exports the identifiers `toMarkdown` and `defaultHandlers`. +There is no default export. + +### `toMarkdown(tree[, options])` + +Turn an **[mdast][]** syntax tree into markdown. + +###### Parameters + +* `tree` ([`Node`][node]) + — tree to serialize +* `options` ([`Options`][options], optional) + — configuration + +###### Returns + +Serialized markdown representing `tree` (`string`). + +### `defaultHandlers` + +Default (CommonMark) handlers ([`Handlers`][handlers]). + +### `ConstructName` + +Construct names for things generated by `mdast-util-to-markdown` (TypeScript +type). + +This is an enum of strings, each being a semantic label, useful to know when +serializing whether we’re for example in a double (`"`) or single (`'`) quoted +title. 
+ +###### Type + +```ts +type ConstructName = ConstructNameMap[keyof ConstructNameMap] +``` + +### `ConstructNameMap` + +Interface of registered constructs (TypeScript type). + +###### Type + +```ts +interface ConstructNameMap { /* see code */ } +``` + +When working on extensions that use new constructs, extend the corresponding +interface to register its name: + +```ts +declare module 'mdast-util-to-markdown' { + interface ConstructNameMap { + // Register a new construct name (value is used, key should match it). + gfmStrikethrough: 'gfmStrikethrough' + } +} +``` + +### `Handle` + +Handle a particular node (TypeScript type). + +###### Parameters + +* `node` (`any`) + — expected mdast node +* `parent` ([`Node`][node], optional) + — parent of `node` +* `state` ([`State`][state]) + — info passed around about the current state +* `info` ([`Info`][info]) + — info on the surrounding of the node that is serialized + +###### Returns + +Serialized markdown representing `node` (`string`). + +### `Handlers` + +Handle particular nodes (TypeScript type). + +Each key is a node type (`Node['type']`), each value its corresponding handler +([`Handle`][handle]). + +###### Type + +```ts +type Handlers = Record +``` + +### `Info` + +Info on the surrounding of the node that is serialized (TypeScript type). + +###### Fields + +* `now` ([`Point`][point]) + — current point +* `lineShift` (`number`) + — number of columns each line will be shifted by wrapping nodes +* `before` (`string`) + — characters before this (guaranteed to be one, can be more) +* `after` (`string`) + — characters after this (guaranteed to be one, can be more) + +### `Join` + +How to join two blocks (TypeScript type). + +“Blocks” are typically joined by one blank line. +Sometimes it’s nicer to have them flush next to each other, yet other times +they cannot occur together at all. + +Join functions receive two adjacent siblings and their parent and what they +return defines how many blank lines to use between them. + +###### Parameters + +* `left` ([`Node`][node]) + — first of two adjacent siblings +* `right` ([`Node`][node]) + — second of two adjacent siblings +* `parent` ([`Node`][node]) + — parent of the two siblings +* `state` ([`State`][state]) + — info passed around about the current state + +###### Returns + +How many blank lines to use between the siblings (`boolean`, `number`, +optional). + +Where `true` is as passing `1` and `false` means the nodes cannot be +joined by a blank line, such as two adjacent block quotes or indented code +after a list, in which case a comment will be injected to break them up: + +```markdown +> Quote 1 + + + +> Quote 2 +``` + +> 👉 **Note**: abusing this feature will break markdown. +> One such example is when returning `0` for two paragraphs, which will result +> in the text running together, and in the future to be seen as one paragraph. + +### `Map` + +Map function to pad a single line (TypeScript type). + +###### Parameters + +* `value` (`string`) + — a single line of serialized markdown +* `line` (`number`) + — line number relative to the fragment +* `blank` (`boolean`) + — whether the line is considered blank in markdown + +###### Returns + +Padded line (`string`). + +### `Options` + +Configuration (TypeScript type). + +##### Fields + +The following fields influence how markdown is serialized. + +###### `options.bullet` + +Marker to use for bullets of items in unordered lists (`'*'`, `'+'`, or `'-'`, +default: `'*'`). 
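+
+A minimal sketch of switching the bullet marker (assuming the usual mdast `list` / `listItem` / `paragraph` / `text` node shapes):
+
+```js
+import {toMarkdown} from 'mdast-util-to-markdown'
+
+// A tiny unordered list: two items, each a paragraph holding a text node.
+const tree = {
+  type: 'list',
+  ordered: false,
+  children: [
+    {
+      type: 'listItem',
+      children: [{type: 'paragraph', children: [{type: 'text', value: 'a'}]}]
+    },
+    {
+      type: 'listItem',
+      children: [{type: 'paragraph', children: [{type: 'text', value: 'b'}]}]
+    }
+  ]
+}
+
+// Items are bulleted with `-` instead of the default `*`.
+console.log(toMarkdown(tree, {bullet: '-'}))
+```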
+ +###### `options.bulletOther` + +Marker to use in certain cases where the primary bullet doesn’t work (`'*'`, +`'+'`, or `'-'`, default: depends). + +There are three cases where the primary bullet cannot be used: + +* When three list items are on their own, the last one is empty, and `bullet` + is also a valid `rule`: `* - +`. + This would turn into a thematic break if serialized with three primary + bullets. + As this is an edge case unlikely to appear in normal markdown, the last list + item will be given a different bullet. +* When a thematic break is the first child of one of the list items, and + `bullet` is the same character as `rule`: `- ***`. + This would turn into a single thematic break if serialized with primary + bullets. + As this is an edge case unlikely to appear in normal markdown this markup is + always fixed, even if `bulletOther` is not passed +* When two unordered lists appear next to each other: `* a\n- b`. + CommonMark sees different bullets as different lists, but several markdown + parsers parse it as one list. + To solve for both, we instead inject an empty comment between the two lists: + `* a\n\n* b`, but if `bulletOther` is given explicitly, it will be + used instead + +###### `options.bulletOrdered` + +Marker to use for bullets of items in ordered lists (`'.'` or `')'`, default: +`'.'`). + +###### `options.bulletOrderedOther` + +Marker to use in certain cases where the primary bullet for ordered items +doesn’t work (`'.'` or `')'`, default: none). + +There is one case where the primary bullet for ordered items cannot be used: + +* When two ordered lists appear next to each other: `1. a\n2) b`. + CommonMark added support for `)` as a marker, but other markdown parsers + do not support it. + To solve for both, we instead inject an empty comment between the two lists: + `1. a\n\n1. b`, but if `bulletOrderedOther` is given explicitly, it + will be used instead + +###### `options.closeAtx` + +Whether to add the same number of number signs (`#`) at the end of an ATX +heading as the opening sequence (`boolean`, default: `false`). + +###### `options.emphasis` + +Marker to use for emphasis (`'*'` or `'_'`, default: `'*'`). + +###### `options.fence` + +Marker to use for fenced code (``'`'`` or `'~'`, default: ``'`'``). + +###### `options.fences` + +Whether to use fenced code always (`boolean`, default: `false`). +The default is to use fenced code if there is a language defined, if the code is +empty, or if it starts or ends in blank lines. + +###### `options.incrementListMarker` + +Whether to increment the counter of ordered lists items (`boolean`, default: +`true`). + +###### `options.listItemIndent` + +How to indent the content of list items (`'one'`, `'tab'`, or `'mixed'`, +default: `'tab'`). +Either with the size of the bullet plus one space (when `'one'`), a tab stop +(`'tab'`), or depending on the item and its parent list (`'mixed'`, uses `'one'` +if the item and list are tight and `'tab'` otherwise). + +###### `options.quote` + +Marker to use for titles (`'"'` or `"'"`, default: `'"'`). + +###### `options.resourceLink` + +Whether to always use resource links (`boolean`, default: `false`). +The default is to use autolinks (``) when possible +and resource links (`[text](url)`) otherwise. + +###### `options.rule` + +Marker to use for thematic breaks (`'*'`, `'-'`, or `'_'`, default: `'*'`). + +###### `options.ruleRepetition` + +Number of markers to use for thematic breaks (`number`, default: `3`, min: `3`). 
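+
+A rough sketch combining a few of the options above for fenced code and thematic breaks (exact whitespace may vary):
+
+```js
+import {toMarkdown} from 'mdast-util-to-markdown'
+
+const tree = {
+  type: 'root',
+  children: [
+    {type: 'code', lang: 'js', value: 'console.log(1)'},
+    {type: 'thematicBreak'}
+  ]
+}
+
+// The code block is fenced with `~~~` and the thematic break uses five dashes.
+console.log(toMarkdown(tree, {fence: '~', rule: '-', ruleRepetition: 5}))
+```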
+
+###### `options.ruleSpaces`
+
+Whether to add spaces between markers in thematic breaks (`boolean`, default:
+`false`).
+
+###### `options.setext`
+
+Whether to use setext headings when possible (`boolean`, default: `false`).
+The default is to always use ATX headings (`# heading`) instead of setext
+headings (`heading\n=======`).
+Setext headings cannot be used for empty headings or headings with a rank of
+three or more.
+
+###### `options.strong`
+
+Marker to use for strong (`'*'` or `'_'`, default: `'*'`).
+
+###### `options.tightDefinitions`
+
+Whether to join definitions without a blank line (`boolean`, default: `false`).
+
+The default is to add blank lines between any flow (“block”) construct.
+Turning this option on is a shortcut for a [`Join`][join] function like so:
+
+```js
+function joinTightDefinitions(left, right) {
+  if (left.type === 'definition' && right.type === 'definition') {
+    return 0
+  }
+}
+```
+
+###### `options.handlers`
+
+Handle particular nodes ([`Handlers`][handlers], optional).
+
+###### `options.join`
+
+How to join blocks ([`Array<Join>`][join], optional).
+
+###### `options.unsafe`
+
+Schemas that define when characters cannot occur ([`Array<Unsafe>`][unsafe],
+optional).
+
+###### `options.extensions`
+
+List of extensions (`Array<Options>`, default: `[]`).
+Each extension is an object with the same interface as `Options` itself.
+
+### `SafeConfig`
+
+Configuration passed to `state.safe` (TypeScript type).
+
+###### Fields
+
+* `before` (`string`)
+  — characters before this (guaranteed to be one, can be more)
+* `after` (`string`)
+  — characters after this (guaranteed to be one, can be more)
+* `encode` (`Array<string>`, optional)
+  — extra characters that *must* be encoded (as character references) instead
+  of escaped (character escapes).
+  Only ASCII punctuation will use character escapes, so you never need to
+  pass non-ASCII punctuation here
+
+### `State`
+
+Info passed around about the current state (TypeScript type).
+
+###### Fields
+
+* `stack` ([`Array<ConstructName>`][constructname])
+  — stack of constructs we’re in
+* `indexStack` (`Array<number>`)
+  — positions of child nodes in their parents
+* `associationId` (`(node: Association) => string`)
+  — get an identifier from an association to match it to others (see
+  [`Association`][association])
+* `enter` (`(construct: ConstructName) => () => void`)
+  — enter a construct (returns a corresponding exit function)
+  (see [`ConstructName`][constructname])
+* `indentLines` (`(value: string, map: Map) => string`)
+  — pad serialized markdown (see [`Map`][map])
+* `containerFlow` (`(parent: Node, info: Info) => string`)
+  — serialize flow children (see [`Info`][info])
+* `containerPhrasing` (`(parent: Node, info: Info) => string`)
+  — serialize phrasing children (see [`Info`][info])
+* `createTracker` (`(info: Info) => Tracker`)
+  — track positional info in the output (see [`Info`][info],
+  [`Tracker`][tracker])
+* `safe` (`(value: string, config: SafeConfig) => string`)
+  — make a string safe for embedding (see [`SafeConfig`][safeconfig])
+* `options` ([`Options`][options])
+  — applied user configuration
+* `unsafe` ([`Array<Unsafe>`][unsafe])
+  — applied unsafe patterns
+* `join` ([`Array<Join>`][join])
+  — applied join handlers
+* `handle` ([`Handle`][handle])
+  — call the configured handler for the given node
+* `handlers` ([`Handlers`][handlers])
+  — applied handlers
+* `bulletCurrent` (`string` or `undefined`)
+  — list marker currently in use
+* `bulletLastUsed` (`string` or `undefined`)
+  — list marker previously in use
+
+### `Tracker`
+
+Track positional info in the output (TypeScript type).
+
+This info isn’t used yet but such functionality will allow line wrapping,
+source maps, etc.
+
+###### Fields
+
+* `current` (`() => Info`)
+  — get current tracked info
+* `shift` (`(value: number) => void`)
+  — define a relative increased line shift (the typical indent for lines)
+* `move` (`(value: string) => string`)
+  — move past some generated markdown
+
+### `Unsafe`
+
+Schema that defines when a character cannot occur (TypeScript type).
+
+###### Fields
+
+* `character` (`string`)
+  — single unsafe character
+* `inConstruct` ([`ConstructName`][constructname], `Array<ConstructName>`,
+  optional)
+  — constructs where this is bad
+* `notInConstruct` ([`ConstructName`][constructname], `Array<ConstructName>`,
+  optional)
+  — constructs where this is fine again
+* `before` (`string`, optional)
+  — `character` is bad when this is before it (cannot be used together with
+  `atBreak`)
+* `after` (`string`, optional)
+  — `character` is bad when this is after it
+* `atBreak` (`boolean`, optional)
+  — `character` is bad at a break (cannot be used together with `before`)
+
+## List of extensions
+
+* [`syntax-tree/mdast-util-directive`](https://github.com/syntax-tree/mdast-util-directive)
+  — directives
+* [`syntax-tree/mdast-util-frontmatter`](https://github.com/syntax-tree/mdast-util-frontmatter)
+  — frontmatter (YAML, TOML, more)
+* [`syntax-tree/mdast-util-gfm`](https://github.com/syntax-tree/mdast-util-gfm)
+  — GFM
+* [`syntax-tree/mdast-util-gfm-autolink-literal`](https://github.com/syntax-tree/mdast-util-gfm-autolink-literal)
+  — GFM autolink literals
+* [`syntax-tree/mdast-util-gfm-footnote`](https://github.com/syntax-tree/mdast-util-gfm-footnote)
+  — GFM footnotes
+* [`syntax-tree/mdast-util-gfm-strikethrough`](https://github.com/syntax-tree/mdast-util-gfm-strikethrough)
+  — GFM strikethrough
+* [`syntax-tree/mdast-util-gfm-table`](https://github.com/syntax-tree/mdast-util-gfm-table)
+  — GFM tables
+* [`syntax-tree/mdast-util-gfm-task-list-item`](https://github.com/syntax-tree/mdast-util-gfm-task-list-item)
+  — GFM task list items
+* [`syntax-tree/mdast-util-math`](https://github.com/syntax-tree/mdast-util-math)
+  — math
+* [`syntax-tree/mdast-util-mdx`](https://github.com/syntax-tree/mdast-util-mdx)
+  — MDX
+* [`syntax-tree/mdast-util-mdx-expression`](https://github.com/syntax-tree/mdast-util-mdx-expression)
+  — MDX expressions
+* [`syntax-tree/mdast-util-mdx-jsx`](https://github.com/syntax-tree/mdast-util-mdx-jsx)
+  — MDX JSX
+* [`syntax-tree/mdast-util-mdxjs-esm`](https://github.com/syntax-tree/mdast-util-mdxjs-esm)
+  — MDX ESM
+
+## Syntax
+
+Markdown is serialized according to CommonMark, but care is taken to format in
+such a way that the resulting markdown should work with most markdown parsers.
+Extensions can add support for custom syntax.
+
+## Syntax tree
+
+The syntax tree is [mdast][].
+
+## Types
+
+This package is fully typed with [TypeScript][].
+It exports the additional types `ConstructName`, `ConstructNameMap`, `Handle`,
+`Handlers`, `Info`, `Join`, `Map`, `Options`, `SafeConfig`, `State`, and
+`Unsafe`.
+
+## Security
+
+`mdast-util-to-markdown` will do its best to serialize markdown to match the
+syntax tree, but complete roundtripping is impossible given that any value
+could be injected into the tree.
+
+As markdown is sometimes used for HTML, and improper use of HTML can open you
+up to a [cross-site scripting (XSS)][xss] attack, use of
+`mdast-util-to-markdown` and parsing it again later could potentially be
+unsafe.
+When parsing markdown afterwards and then going to HTML, use something like
+[`hast-util-sanitize`][hast-util-sanitize] to make the tree safe.
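+
+A minimal sketch of that last recommendation (added here, not part of the
+upstream readme), assuming the usual companion utilities
+`mdast-util-from-markdown`, `mdast-util-to-hast`, `hast-util-sanitize`, and
+`hast-util-to-html`:
+
+```js
+import {fromMarkdown} from 'mdast-util-from-markdown'
+import {toHast} from 'mdast-util-to-hast'
+import {sanitize} from 'hast-util-sanitize'
+import {toHtml} from 'hast-util-to-html'
+
+// `value` stands in for markdown that was serialized earlier and whose tree
+// may have had untrusted content injected into it.
+const value = '# Hello, *world*!'
+
+// Parse again, transform to hast, and sanitize before generating HTML.
+const safeTree = sanitize(toHast(fromMarkdown(value)))
+
+console.log(toHtml(safeTree))
+```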
+ +## Related + +* [`syntax-tree/mdast-util-from-markdown`](https://github.com/syntax-tree/mdast-util-from-markdown) + — parse markdown to mdast +* [`micromark/micromark`](https://github.com/micromark/micromark) + — parse markdown +* [`remarkjs/remark`](https://github.com/remarkjs/remark) + — process markdown + +## Contribute + +See [`contributing.md`][contributing] in [`syntax-tree/.github`][health] for +ways to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/syntax-tree/mdast-util-to-markdown/workflows/main/badge.svg + +[build]: https://github.com/syntax-tree/mdast-util-to-markdown/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/syntax-tree/mdast-util-to-markdown.svg + +[coverage]: https://codecov.io/github/syntax-tree/mdast-util-to-markdown + +[downloads-badge]: https://img.shields.io/npm/dm/mdast-util-to-markdown.svg + +[downloads]: https://www.npmjs.com/package/mdast-util-to-markdown + +[size-badge]: https://img.shields.io/bundlephobia/minzip/mdast-util-to-markdown.svg + +[size]: https://bundlephobia.com/result?p=mdast-util-to-markdown + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/syntax-tree/unist/discussions + +[npm]: https://docs.npmjs.com/cli/install + +[esmsh]: https://esm.sh + +[license]: license + +[author]: https://wooorm.com + +[health]: https://github.com/syntax-tree/.github + +[contributing]: https://github.com/syntax-tree/.github/blob/main/contributing.md + +[support]: https://github.com/syntax-tree/.github/blob/main/support.md + +[coc]: https://github.com/syntax-tree/.github/blob/main/code-of-conduct.md + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[typescript]: https://www.typescriptlang.org + +[xss]: https://en.wikipedia.org/wiki/Cross-site_scripting + +[hast-util-sanitize]: https://github.com/syntax-tree/hast-util-sanitize + +[point]: https://github.com/syntax-tree/unist#point + +[mdast]: https://github.com/syntax-tree/mdast + +[node]: https://github.com/syntax-tree/mdast#nodes + +[association]: https://github.com/syntax-tree/mdast#association + +[mdast-util-gfm]: https://github.com/syntax-tree/mdast-util-gfm + +[mdast-util-mdx]: https://github.com/syntax-tree/mdast-util-mdx + +[mdast-util-frontmatter]: https://github.com/syntax-tree/mdast-util-frontmatter + +[mdast-util-math]: https://github.com/syntax-tree/mdast-util-math + +[mdast-util-directive]: https://github.com/syntax-tree/mdast-util-directive + +[remark]: https://github.com/remarkjs/remark + +[remark-stringify]: https://github.com/remarkjs/remark/tree/main/packages/remark-stringify + +[constructname]: #constructname + +[handle]: #handle + +[handlers]: #handlers + +[info]: #info + +[join]: #join + +[map]: #map + +[options]: #options + +[safeconfig]: #safeconfig + +[state]: #state + +[tracker]: #tracker + +[unsafe]: #unsafe diff --git a/_extensions/d2/node_modules/mdast-util-to-string/index.d.ts b/_extensions/d2/node_modules/mdast-util-to-string/index.d.ts new file mode 100644 index 00000000..d4a39692 --- /dev/null +++ 
b/_extensions/d2/node_modules/mdast-util-to-string/index.d.ts @@ -0,0 +1,2 @@ +export {toString} from './lib/index.js' +export type Options = import('./lib/index.js').Options diff --git a/_extensions/d2/node_modules/mdast-util-to-string/index.js b/_extensions/d2/node_modules/mdast-util-to-string/index.js new file mode 100644 index 00000000..8674f309 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-string/index.js @@ -0,0 +1,5 @@ +/** + * @typedef {import('./lib/index.js').Options} Options + */ + +export {toString} from './lib/index.js' diff --git a/_extensions/d2/node_modules/mdast-util-to-string/lib/index.d.ts b/_extensions/d2/node_modules/mdast-util-to-string/lib/index.d.ts new file mode 100644 index 00000000..9c8f5eea --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-string/lib/index.d.ts @@ -0,0 +1,31 @@ +/** + * Get the text content of a node or list of nodes. + * + * Prefers the node’s plain-text fields, otherwise serializes its children, + * and if the given value is an array, serialize the nodes in it. + * + * @param {unknown} value + * Thing to serialize, typically `Node`. + * @param {Options | null | undefined} [options] + * Configuration (optional). + * @returns {string} + * Serialized `value`. + */ +export function toString( + value: unknown, + options?: Options | null | undefined +): string +export type Node = import('mdast').Root | import('mdast').Content +/** + * Configuration (optional). + */ +export type Options = { + /** + * Whether to use `alt` for `image`s. + */ + includeImageAlt?: boolean | null | undefined + /** + * Whether to use `value` of HTML. + */ + includeHtml?: boolean | null | undefined +} diff --git a/_extensions/d2/node_modules/mdast-util-to-string/lib/index.js b/_extensions/d2/node_modules/mdast-util-to-string/lib/index.js new file mode 100644 index 00000000..7e792e24 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-string/lib/index.js @@ -0,0 +1,108 @@ +/** + * @typedef {import('mdast').Root|import('mdast').Content} Node + * + * @typedef Options + * Configuration (optional). + * @property {boolean | null | undefined} [includeImageAlt=true] + * Whether to use `alt` for `image`s. + * @property {boolean | null | undefined} [includeHtml=true] + * Whether to use `value` of HTML. + */ + +/** @type {Options} */ +const emptyOptions = {} + +/** + * Get the text content of a node or list of nodes. + * + * Prefers the node’s plain-text fields, otherwise serializes its children, + * and if the given value is an array, serialize the nodes in it. + * + * @param {unknown} value + * Thing to serialize, typically `Node`. + * @param {Options | null | undefined} [options] + * Configuration (optional). + * @returns {string} + * Serialized `value`. + */ +export function toString(value, options) { + const settings = options || emptyOptions + const includeImageAlt = + typeof settings.includeImageAlt === 'boolean' + ? settings.includeImageAlt + : true + const includeHtml = + typeof settings.includeHtml === 'boolean' ? settings.includeHtml : true + + return one(value, includeImageAlt, includeHtml) +} + +/** + * One node or several nodes. + * + * @param {unknown} value + * Thing to serialize. + * @param {boolean} includeImageAlt + * Include image `alt`s. + * @param {boolean} includeHtml + * Include HTML. + * @returns {string} + * Serialized node. + */ +function one(value, includeImageAlt, includeHtml) { + if (node(value)) { + if ('value' in value) { + return value.type === 'html' && !includeHtml ? 
'' : value.value + } + + if (includeImageAlt && 'alt' in value && value.alt) { + return value.alt + } + + if ('children' in value) { + return all(value.children, includeImageAlt, includeHtml) + } + } + + if (Array.isArray(value)) { + return all(value, includeImageAlt, includeHtml) + } + + return '' +} + +/** + * Serialize a list of nodes. + * + * @param {Array} values + * Thing to serialize. + * @param {boolean} includeImageAlt + * Include image `alt`s. + * @param {boolean} includeHtml + * Include HTML. + * @returns {string} + * Serialized nodes. + */ +function all(values, includeImageAlt, includeHtml) { + /** @type {Array} */ + const result = [] + let index = -1 + + while (++index < values.length) { + result[index] = one(values[index], includeImageAlt, includeHtml) + } + + return result.join('') +} + +/** + * Check if `value` looks like a node. + * + * @param {unknown} value + * Thing. + * @returns {value is Node} + * Whether `value` is a node. + */ +function node(value) { + return Boolean(value && typeof value === 'object') +} diff --git a/_extensions/d2/node_modules/mdast-util-to-string/license b/_extensions/d2/node_modules/mdast-util-to-string/license new file mode 100644 index 00000000..32e7a3d9 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-string/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2015 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/_extensions/d2/node_modules/mdast-util-to-string/package.json b/_extensions/d2/node_modules/mdast-util-to-string/package.json new file mode 100644 index 00000000..682e5042 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-string/package.json @@ -0,0 +1,79 @@ +{ + "name": "mdast-util-to-string", + "version": "3.2.0", + "description": "mdast utility to get the plain text content of a node", + "license": "MIT", + "keywords": [ + "unist", + "mdast", + "mdast-util", + "util", + "utility", + "markdown", + "node", + "string", + "serialize" + ], + "repository": "syntax-tree/mdast-util-to-string", + "bugs": "https://github.com/syntax-tree/mdast-util-to-string/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "lib/", + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/mdast": "^3.0.0" + }, + "devDependencies": { + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "tape": "^5.0.0", + "type-coverage": "^2.0.0", + "typescript": "^5.0.0", + "xo": "^0.53.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true + } +} diff --git a/_extensions/d2/node_modules/mdast-util-to-string/readme.md b/_extensions/d2/node_modules/mdast-util-to-string/readme.md new file mode 100644 index 00000000..552bc4e2 --- /dev/null +++ b/_extensions/d2/node_modules/mdast-util-to-string/readme.md @@ -0,0 +1,216 @@ +# mdast-util-to-string + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +[mdast][] utility to get the text content of a node. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`toString(value[, options])`](#tostringvalue-options) + * [`Options`](#options) +* [Types](#types) +* [Compatibility](#compatibility) +* [Security](#security) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This package is a tiny utility that gets the textual content of a node. + +## When should I use this? + +This utility is useful when you have a node, say a heading, and want to get the +text inside it. + +This package does not serialize markdown, that’s what +[`mdast-util-to-markdown`][mdast-util-to-markdown] does. 
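+
+For instance (an added sketch, not part of the upstream readme), getting the
+text of a single heading node; the node literal below is only an assumed
+example value:
+
+```js
+import {toString} from 'mdast-util-to-string'
+
+// mdast for the heading `# Hello, *world*!`.
+const heading = {
+  type: 'heading',
+  depth: 1,
+  children: [
+    {type: 'text', value: 'Hello, '},
+    {type: 'emphasis', children: [{type: 'text', value: 'world'}]},
+    {type: 'text', value: '!'}
+  ]
+}
+
+console.log(toString(heading)) // => 'Hello, world!'
+```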
+
+Similar packages, [`hast-util-to-string`][hast-util-to-string] and
+[`hast-util-to-text`][hast-util-to-text], do the same but on [hast][].
+
+## Install
+
+This package is [ESM only][esm].
+In Node.js (version 14.14+ and 16.0+), install with [npm][]:
+
+```sh
+npm install mdast-util-to-string
+```
+
+In Deno with [`esm.sh`][esmsh]:
+
+```js
+import {toString} from 'https://esm.sh/mdast-util-to-string@3'
+```
+
+In browsers with [`esm.sh`][esmsh]:
+
+```html
+<script type="module">
+  import {toString} from 'https://esm.sh/mdast-util-to-string@3?bundle'
+</script>
+```
+
+## Use
+
+```js
+import {fromMarkdown} from 'mdast-util-from-markdown'
+import {toString} from 'mdast-util-to-string'
+
+const tree = fromMarkdown('Some _emphasis_, **importance**, and `code`.')
+
+console.log(toString(tree)) // => 'Some emphasis, importance, and code.'
+```
+
+## API
+
+This package exports the identifier [`toString`][api-tostring].
+There is no default export.
+
+### `toString(value[, options])`
+
+Get the text content of a node or list of nodes.
+
+Prefers the node’s plain-text fields, otherwise serializes its children,
+and if the given value is an array, serializes the nodes in it.
+
+###### Parameters
+
+* `value` (`unknown`)
+  — thing to serialize, typically [`Node`][node]
+* `options` ([`Options`][api-options], optional)
+  — configuration
+
+###### Returns
+
+Serialized `value` (`string`).
+
+### `Options`
+
+Configuration (TypeScript type).
+
+###### Fields
+
+* `includeImageAlt` (`boolean`, default: `true`)
+  — whether to use `alt` for `image`s
+* `includeHtml` (`boolean`, default: `true`)
+  — whether to use `value` of HTML
+
+## Types
+
+This package is fully typed with [TypeScript][].
+It exports the additional type [`Options`][api-options].
+
+## Compatibility
+
+Projects maintained by the unified collective are compatible with all
+maintained versions of Node.js.
+As of now, that is Node.js 14.14+ and 16.0+.
+Our projects sometimes work with older versions, but this is not guaranteed.
+
+## Security
+
+Use of `mdast-util-to-string` does not involve **[hast][]**, user content, or
+changes to the tree, so there are no openings for
+[cross-site scripting (XSS)][xss] attacks.
+
+## Related
+
+* [`hast-util-to-string`](https://github.com/wooorm/rehype-minify/tree/main/packages/hast-util-to-string)
+  — get text content in hast
+* [`hast-util-to-text`](https://github.com/syntax-tree/hast-util-to-text)
+  — get text content in hast according to the `innerText` algorithm
+
+## Contribute
+
+See [`contributing.md`][contributing] in [`syntax-tree/.github`][health] for
+ways to get started.
+See [`support.md`][support] for ways to get help.
+
+This project has a [code of conduct][coc].
+By interacting with this repository, organization, or community you agree to
+abide by its terms.
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/syntax-tree/mdast-util-to-string/workflows/main/badge.svg + +[build]: https://github.com/syntax-tree/mdast-util-to-string/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/syntax-tree/mdast-util-to-string.svg + +[coverage]: https://codecov.io/github/syntax-tree/mdast-util-to-string + +[downloads-badge]: https://img.shields.io/npm/dm/mdast-util-to-string.svg + +[downloads]: https://www.npmjs.com/package/mdast-util-to-string + +[size-badge]: https://img.shields.io/bundlephobia/minzip/mdast-util-to-string.svg + +[size]: https://bundlephobia.com/result?p=mdast-util-to-string + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/syntax-tree/unist/discussions + +[npm]: https://docs.npmjs.com/cli/install + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[typescript]: https://www.typescriptlang.org + +[license]: license + +[author]: https://wooorm.com + +[health]: https://github.com/syntax-tree/.github + +[contributing]: https://github.com/syntax-tree/.github/blob/main/contributing.md + +[support]: https://github.com/syntax-tree/.github/blob/main/support.md + +[coc]: https://github.com/syntax-tree/.github/blob/main/code-of-conduct.md + +[mdast]: https://github.com/syntax-tree/mdast + +[mdast-util-to-markdown]: https://github.com/syntax-tree/mdast-util-to-markdown + +[hast]: https://github.com/syntax-tree/hast + +[hast-util-to-string]: https://github.com/rehypejs/rehype-minify/tree/main/packages/hast-util-to-string + +[hast-util-to-text]: https://github.com/syntax-tree/hast-util-to-text + +[node]: https://github.com/syntax-tree/mdast#nodes + +[xss]: https://en.wikipedia.org/wiki/Cross-site_scripting + +[api-tostring]: #tostringvalue-options + +[api-options]: #options diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/index.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/index.d.ts new file mode 100644 index 00000000..f9143e09 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/index.d.ts @@ -0,0 +1,22 @@ +export {attention} from './lib/attention.js' +export {autolink} from './lib/autolink.js' +export {blankLine} from './lib/blank-line.js' +export {blockQuote} from './lib/block-quote.js' +export {characterEscape} from './lib/character-escape.js' +export {characterReference} from './lib/character-reference.js' +export {codeFenced} from './lib/code-fenced.js' +export {codeIndented} from './lib/code-indented.js' +export {codeText} from './lib/code-text.js' +export {content} from './lib/content.js' +export {definition} from './lib/definition.js' +export {hardBreakEscape} from './lib/hard-break-escape.js' +export {headingAtx} from './lib/heading-atx.js' +export {htmlFlow} from './lib/html-flow.js' +export {htmlText} from './lib/html-text.js' +export {labelEnd} from './lib/label-end.js' +export {labelStartImage} from './lib/label-start-image.js' +export {labelStartLink} from './lib/label-start-link.js' +export {lineEnding} from './lib/line-ending.js' +export {list} from './lib/list.js' +export {setextUnderline} from './lib/setext-underline.js' +export {thematicBreak} from './lib/thematic-break.js' diff --git 
a/_extensions/d2/node_modules/micromark-core-commonmark/dev/index.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/index.js new file mode 100644 index 00000000..f9143e09 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/index.js @@ -0,0 +1,22 @@ +export {attention} from './lib/attention.js' +export {autolink} from './lib/autolink.js' +export {blankLine} from './lib/blank-line.js' +export {blockQuote} from './lib/block-quote.js' +export {characterEscape} from './lib/character-escape.js' +export {characterReference} from './lib/character-reference.js' +export {codeFenced} from './lib/code-fenced.js' +export {codeIndented} from './lib/code-indented.js' +export {codeText} from './lib/code-text.js' +export {content} from './lib/content.js' +export {definition} from './lib/definition.js' +export {hardBreakEscape} from './lib/hard-break-escape.js' +export {headingAtx} from './lib/heading-atx.js' +export {htmlFlow} from './lib/html-flow.js' +export {htmlText} from './lib/html-text.js' +export {labelEnd} from './lib/label-end.js' +export {labelStartImage} from './lib/label-start-image.js' +export {labelStartLink} from './lib/label-start-link.js' +export {lineEnding} from './lib/line-ending.js' +export {list} from './lib/list.js' +export {setextUnderline} from './lib/setext-underline.js' +export {thematicBreak} from './lib/thematic-break.js' diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/attention.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/attention.d.ts new file mode 100644 index 00000000..7f5a1c5a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/attention.d.ts @@ -0,0 +1,10 @@ +/** @type {Construct} */ +export const attention: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Resolver = import('micromark-util-types').Resolver +export type State = import('micromark-util-types').State +export type Token = import('micromark-util-types').Token +export type Event = import('micromark-util-types').Event +export type Code = import('micromark-util-types').Code +export type Point = import('micromark-util-types').Point diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/attention.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/attention.js new file mode 100644 index 00000000..457f358b --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/attention.js @@ -0,0 +1,262 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').Event} Event + * @typedef {import('micromark-util-types').Code} Code + * @typedef {import('micromark-util-types').Point} Point + */ + +import {ok as assert} from 'uvu/assert' +import {push, splice} from 'micromark-util-chunked' +import {classifyCharacter} from 'micromark-util-classify-character' +import {resolveAll} from 'micromark-util-resolve-all' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const attention = { + name: 'attention', 
+ tokenize: tokenizeAttention, + resolveAll: resolveAllAttention +} + +/** + * Take all events and resolve attention to emphasis or strong. + * + * @type {Resolver} + */ +function resolveAllAttention(events, context) { + let index = -1 + /** @type {number} */ + let open + /** @type {Token} */ + let group + /** @type {Token} */ + let text + /** @type {Token} */ + let openingSequence + /** @type {Token} */ + let closingSequence + /** @type {number} */ + let use + /** @type {Event[]} */ + let nextEvents + /** @type {number} */ + let offset + + // Walk through all events. + // + // Note: performance of this is fine on an mb of normal markdown, but it’s + // a bottleneck for malicious stuff. + while (++index < events.length) { + // Find a token that can close. + if ( + events[index][0] === 'enter' && + events[index][1].type === 'attentionSequence' && + events[index][1]._close + ) { + open = index + + // Now walk back to find an opener. + while (open--) { + // Find a token that can open the closer. + if ( + events[open][0] === 'exit' && + events[open][1].type === 'attentionSequence' && + events[open][1]._open && + // If the markers are the same: + context.sliceSerialize(events[open][1]).charCodeAt(0) === + context.sliceSerialize(events[index][1]).charCodeAt(0) + ) { + // If the opening can close or the closing can open, + // and the close size *is not* a multiple of three, + // but the sum of the opening and closing size *is* multiple of three, + // then don’t match. + if ( + (events[open][1]._close || events[index][1]._open) && + (events[index][1].end.offset - events[index][1].start.offset) % 3 && + !( + (events[open][1].end.offset - + events[open][1].start.offset + + events[index][1].end.offset - + events[index][1].start.offset) % + 3 + ) + ) { + continue + } + + // Number of markers to use from the sequence. + use = + events[open][1].end.offset - events[open][1].start.offset > 1 && + events[index][1].end.offset - events[index][1].start.offset > 1 + ? 2 + : 1 + + const start = Object.assign({}, events[open][1].end) + const end = Object.assign({}, events[index][1].start) + movePoint(start, -use) + movePoint(end, use) + + openingSequence = { + type: use > 1 ? types.strongSequence : types.emphasisSequence, + start, + end: Object.assign({}, events[open][1].end) + } + closingSequence = { + type: use > 1 ? types.strongSequence : types.emphasisSequence, + start: Object.assign({}, events[index][1].start), + end + } + text = { + type: use > 1 ? types.strongText : types.emphasisText, + start: Object.assign({}, events[open][1].end), + end: Object.assign({}, events[index][1].start) + } + group = { + type: use > 1 ? types.strong : types.emphasis, + start: Object.assign({}, openingSequence.start), + end: Object.assign({}, closingSequence.end) + } + + events[open][1].end = Object.assign({}, openingSequence.start) + events[index][1].start = Object.assign({}, closingSequence.end) + + nextEvents = [] + + // If there are more markers in the opening, add them before. + if (events[open][1].end.offset - events[open][1].start.offset) { + nextEvents = push(nextEvents, [ + ['enter', events[open][1], context], + ['exit', events[open][1], context] + ]) + } + + // Opening. + nextEvents = push(nextEvents, [ + ['enter', group, context], + ['enter', openingSequence, context], + ['exit', openingSequence, context], + ['enter', text, context] + ]) + + // Between. + nextEvents = push( + nextEvents, + resolveAll( + context.parser.constructs.insideSpan.null, + events.slice(open + 1, index), + context + ) + ) + + // Closing. 
+ nextEvents = push(nextEvents, [ + ['exit', text, context], + ['enter', closingSequence, context], + ['exit', closingSequence, context], + ['exit', group, context] + ]) + + // If there are more markers in the closing, add them after. + if (events[index][1].end.offset - events[index][1].start.offset) { + offset = 2 + nextEvents = push(nextEvents, [ + ['enter', events[index][1], context], + ['exit', events[index][1], context] + ]) + } else { + offset = 0 + } + + splice(events, open - 1, index - open + 3, nextEvents) + + index = open + nextEvents.length - offset - 2 + break + } + } + } + } + + // Remove remaining sequences. + index = -1 + + while (++index < events.length) { + if (events[index][1].type === 'attentionSequence') { + events[index][1].type = 'data' + } + } + + return events +} + +/** @type {Tokenizer} */ +function tokenizeAttention(effects, ok) { + const attentionMarkers = this.parser.constructs.attentionMarkers.null + const previous = this.previous + const before = classifyCharacter(previous) + + /** @type {NonNullable} */ + let marker + + return start + + /** @type {State} */ + function start(code) { + assert( + code === codes.asterisk || code === codes.underscore, + 'expected asterisk or underscore' + ) + effects.enter('attentionSequence') + marker = code + return sequence(code) + } + + /** @type {State} */ + function sequence(code) { + if (code === marker) { + effects.consume(code) + return sequence + } + + const token = effects.exit('attentionSequence') + const after = classifyCharacter(code) + + const open = + !after || + (after === constants.characterGroupPunctuation && before) || + attentionMarkers.includes(code) + const close = + !before || + (before === constants.characterGroupPunctuation && after) || + attentionMarkers.includes(previous) + + token._open = Boolean( + marker === codes.asterisk ? open : open && (before || !close) + ) + token._close = Boolean( + marker === codes.asterisk ? close : close && (after || !open) + ) + return ok(code) + } +} + +/** + * Move a point a bit. + * + * Note: `move` only works inside lines! It’s not possible to move past other + * chunks (replacement characters, tabs, or line endings). 
+ * + * @param {Point} point + * @param {number} offset + * @returns {void} + */ +function movePoint(point, offset) { + point.column += offset + point.offset += offset + point._bufferIndex += offset +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/autolink.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/autolink.d.ts new file mode 100644 index 00000000..02690867 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/autolink.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const autolink: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/autolink.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/autolink.js new file mode 100644 index 00000000..7dd7ed92 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/autolink.js @@ -0,0 +1,159 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import { + asciiAlpha, + asciiAlphanumeric, + asciiAtext, + asciiControl +} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const autolink = {name: 'autolink', tokenize: tokenizeAutolink} + +/** @type {Tokenizer} */ +function tokenizeAutolink(effects, ok, nok) { + let size = 1 + + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.lessThan, 'expected `<`') + effects.enter(types.autolink) + effects.enter(types.autolinkMarker) + effects.consume(code) + effects.exit(types.autolinkMarker) + effects.enter(types.autolinkProtocol) + return open + } + + /** @type {State} */ + function open(code) { + if (asciiAlpha(code)) { + effects.consume(code) + return schemeOrEmailAtext + } + + return asciiAtext(code) ? emailAtext(code) : nok(code) + } + + /** @type {State} */ + function schemeOrEmailAtext(code) { + return code === codes.plusSign || + code === codes.dash || + code === codes.dot || + asciiAlphanumeric(code) + ? 
schemeInsideOrEmailAtext(code) + : emailAtext(code) + } + + /** @type {State} */ + function schemeInsideOrEmailAtext(code) { + if (code === codes.colon) { + effects.consume(code) + return urlInside + } + + if ( + (code === codes.plusSign || + code === codes.dash || + code === codes.dot || + asciiAlphanumeric(code)) && + size++ < constants.autolinkSchemeSizeMax + ) { + effects.consume(code) + return schemeInsideOrEmailAtext + } + + return emailAtext(code) + } + + /** @type {State} */ + function urlInside(code) { + if (code === codes.greaterThan) { + effects.exit(types.autolinkProtocol) + return end(code) + } + + if ( + code === codes.eof || + code === codes.space || + code === codes.lessThan || + asciiControl(code) + ) { + return nok(code) + } + + effects.consume(code) + return urlInside + } + + /** @type {State} */ + function emailAtext(code) { + if (code === codes.atSign) { + effects.consume(code) + size = 0 + return emailAtSignOrDot + } + + if (asciiAtext(code)) { + effects.consume(code) + return emailAtext + } + + return nok(code) + } + + /** @type {State} */ + function emailAtSignOrDot(code) { + return asciiAlphanumeric(code) ? emailLabel(code) : nok(code) + } + + /** @type {State} */ + function emailLabel(code) { + if (code === codes.dot) { + effects.consume(code) + size = 0 + return emailAtSignOrDot + } + + if (code === codes.greaterThan) { + // Exit, then change the type. + effects.exit(types.autolinkProtocol).type = types.autolinkEmail + return end(code) + } + + return emailValue(code) + } + + /** @type {State} */ + function emailValue(code) { + if ( + (code === codes.dash || asciiAlphanumeric(code)) && + size++ < constants.autolinkDomainSizeMax + ) { + effects.consume(code) + return code === codes.dash ? emailValue : emailLabel + } + + return nok(code) + } + + /** @type {State} */ + function end(code) { + assert(code === codes.greaterThan, 'expected `>`') + effects.enter(types.autolinkMarker) + effects.consume(code) + effects.exit(types.autolinkMarker) + effects.exit(types.autolink) + return ok + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/blank-line.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/blank-line.d.ts new file mode 100644 index 00000000..fcacec69 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/blank-line.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const blankLine: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/blank-line.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/blank-line.js new file mode 100644 index 00000000..7ae3a21c --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/blank-line.js @@ -0,0 +1,23 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ + +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const blankLine = {tokenize: tokenizeBlankLine, partial: true} + +/** @type {Tokenizer} */ +function tokenizeBlankLine(effects, ok, nok) { 
+ return factorySpace(effects, afterWhitespace, types.linePrefix) + + /** @type {State} */ + function afterWhitespace(code) { + return code === codes.eof || markdownLineEnding(code) ? ok(code) : nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/block-quote.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/block-quote.d.ts new file mode 100644 index 00000000..a86d5d29 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/block-quote.d.ts @@ -0,0 +1,6 @@ +/** @type {Construct} */ +export const blockQuote: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Exiter = import('micromark-util-types').Exiter +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/block-quote.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/block-quote.js new file mode 100644 index 00000000..7ae10957 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/block-quote.js @@ -0,0 +1,81 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Exiter} Exiter + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import {factorySpace} from 'micromark-factory-space' +import {markdownSpace} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const blockQuote = { + name: 'blockQuote', + tokenize: tokenizeBlockQuoteStart, + continuation: {tokenize: tokenizeBlockQuoteContinuation}, + exit +} + +/** @type {Tokenizer} */ +function tokenizeBlockQuoteStart(effects, ok, nok) { + const self = this + + return start + + /** @type {State} */ + function start(code) { + if (code === codes.greaterThan) { + const state = self.containerState + + assert(state, 'expected `containerState` to be defined in container') + + if (!state.open) { + effects.enter(types.blockQuote, {_container: true}) + state.open = true + } + + effects.enter(types.blockQuotePrefix) + effects.enter(types.blockQuoteMarker) + effects.consume(code) + effects.exit(types.blockQuoteMarker) + return after + } + + return nok(code) + } + + /** @type {State} */ + function after(code) { + if (markdownSpace(code)) { + effects.enter(types.blockQuotePrefixWhitespace) + effects.consume(code) + effects.exit(types.blockQuotePrefixWhitespace) + effects.exit(types.blockQuotePrefix) + return ok + } + + effects.exit(types.blockQuotePrefix) + return ok(code) + } +} + +/** @type {Tokenizer} */ +function tokenizeBlockQuoteContinuation(effects, ok, nok) { + return factorySpace( + effects, + effects.attempt(blockQuote, ok, nok), + types.linePrefix, + this.parser.constructs.disable.null.includes('codeIndented') + ? 
undefined + : constants.tabSize + ) +} + +/** @type {Exiter} */ +function exit(effects) { + effects.exit(types.blockQuote) +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-escape.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-escape.d.ts new file mode 100644 index 00000000..dd4cec72 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-escape.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const characterEscape: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-escape.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-escape.js new file mode 100644 index 00000000..deeea8ad --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-escape.js @@ -0,0 +1,44 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import {asciiPunctuation} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const characterEscape = { + name: 'characterEscape', + tokenize: tokenizeCharacterEscape +} + +/** @type {Tokenizer} */ +function tokenizeCharacterEscape(effects, ok, nok) { + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.backslash, 'expected `\\`') + effects.enter(types.characterEscape) + effects.enter(types.escapeMarker) + effects.consume(code) + effects.exit(types.escapeMarker) + return open + } + + /** @type {State} */ + function open(code) { + if (asciiPunctuation(code)) { + effects.enter(types.characterEscapeValue) + effects.consume(code) + effects.exit(types.characterEscapeValue) + effects.exit(types.characterEscape) + return ok + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-reference.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-reference.d.ts new file mode 100644 index 00000000..76035826 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-reference.d.ts @@ -0,0 +1,7 @@ +/** @type {Construct} */ +export const characterReference: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-reference.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-reference.js new file mode 100644 index 00000000..3738b617 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/character-reference.js @@ -0,0 +1,109 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Token} Token + * @typedef 
{import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ + +import {ok as assert} from 'uvu/assert' +import {decodeNamedCharacterReference} from 'decode-named-character-reference' +import { + asciiAlphanumeric, + asciiDigit, + asciiHexDigit +} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const characterReference = { + name: 'characterReference', + tokenize: tokenizeCharacterReference +} + +/** @type {Tokenizer} */ +function tokenizeCharacterReference(effects, ok, nok) { + const self = this + let size = 0 + /** @type {number} */ + let max + /** @type {(code: Code) => code is number} */ + let test + + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.ampersand, 'expected `&`') + effects.enter(types.characterReference) + effects.enter(types.characterReferenceMarker) + effects.consume(code) + effects.exit(types.characterReferenceMarker) + return open + } + + /** @type {State} */ + function open(code) { + if (code === codes.numberSign) { + effects.enter(types.characterReferenceMarkerNumeric) + effects.consume(code) + effects.exit(types.characterReferenceMarkerNumeric) + return numeric + } + + effects.enter(types.characterReferenceValue) + max = constants.characterReferenceNamedSizeMax + test = asciiAlphanumeric + return value(code) + } + + /** @type {State} */ + function numeric(code) { + if (code === codes.uppercaseX || code === codes.lowercaseX) { + effects.enter(types.characterReferenceMarkerHexadecimal) + effects.consume(code) + effects.exit(types.characterReferenceMarkerHexadecimal) + effects.enter(types.characterReferenceValue) + max = constants.characterReferenceHexadecimalSizeMax + test = asciiHexDigit + return value + } + + effects.enter(types.characterReferenceValue) + max = constants.characterReferenceDecimalSizeMax + test = asciiDigit + return value(code) + } + + /** @type {State} */ + function value(code) { + /** @type {Token} */ + let token + + if (code === codes.semicolon && size) { + token = effects.exit(types.characterReferenceValue) + + if ( + test === asciiAlphanumeric && + !decodeNamedCharacterReference(self.sliceSerialize(token)) + ) { + return nok(code) + } + + effects.enter(types.characterReferenceMarker) + effects.consume(code) + effects.exit(types.characterReferenceMarker) + effects.exit(types.characterReference) + return ok + } + + if (test(code) && size++ < max) { + effects.consume(code) + return value + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-fenced.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-fenced.d.ts new file mode 100644 index 00000000..d0843f5b --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-fenced.d.ts @@ -0,0 +1,6 @@ +/** @type {Construct} */ +export const codeFenced: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-fenced.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-fenced.js new file mode 100644 index 00000000..184fa52d --- /dev/null +++ 
b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-fenced.js @@ -0,0 +1,234 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ + +import {ok as assert} from 'uvu/assert' +import {factorySpace} from 'micromark-factory-space' +import { + markdownLineEnding, + markdownLineEndingOrSpace +} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const codeFenced = { + name: 'codeFenced', + tokenize: tokenizeCodeFenced, + concrete: true +} + +/** @type {Tokenizer} */ +function tokenizeCodeFenced(effects, ok, nok) { + const self = this + /** @type {Construct} */ + const closingFenceConstruct = {tokenize: tokenizeClosingFence, partial: true} + /** @type {Construct} */ + const nonLazyLine = {tokenize: tokenizeNonLazyLine, partial: true} + const tail = this.events[this.events.length - 1] + const initialPrefix = + tail && tail[1].type === types.linePrefix + ? tail[2].sliceSerialize(tail[1], true).length + : 0 + let sizeOpen = 0 + /** @type {NonNullable} */ + let marker + + return start + + /** @type {State} */ + function start(code) { + assert( + code === codes.graveAccent || code === codes.tilde, + 'expected `` ` `` or `~`' + ) + effects.enter(types.codeFenced) + effects.enter(types.codeFencedFence) + effects.enter(types.codeFencedFenceSequence) + marker = code + return sequenceOpen(code) + } + + /** @type {State} */ + function sequenceOpen(code) { + if (code === marker) { + effects.consume(code) + sizeOpen++ + return sequenceOpen + } + + effects.exit(types.codeFencedFenceSequence) + return sizeOpen < constants.codeFencedSequenceSizeMin + ? nok(code) + : factorySpace(effects, infoOpen, types.whitespace)(code) + } + + /** @type {State} */ + function infoOpen(code) { + if (code === codes.eof || markdownLineEnding(code)) { + return openAfter(code) + } + + effects.enter(types.codeFencedFenceInfo) + effects.enter(types.chunkString, {contentType: constants.contentTypeString}) + return info(code) + } + + /** @type {State} */ + function info(code) { + if (code === codes.eof || markdownLineEndingOrSpace(code)) { + effects.exit(types.chunkString) + effects.exit(types.codeFencedFenceInfo) + return factorySpace(effects, infoAfter, types.whitespace)(code) + } + + if (code === codes.graveAccent && code === marker) return nok(code) + effects.consume(code) + return info + } + + /** @type {State} */ + function infoAfter(code) { + if (code === codes.eof || markdownLineEnding(code)) { + return openAfter(code) + } + + effects.enter(types.codeFencedFenceMeta) + effects.enter(types.chunkString, {contentType: constants.contentTypeString}) + return meta(code) + } + + /** @type {State} */ + function meta(code) { + if (code === codes.eof || markdownLineEnding(code)) { + effects.exit(types.chunkString) + effects.exit(types.codeFencedFenceMeta) + return openAfter(code) + } + + if (code === codes.graveAccent && code === marker) return nok(code) + effects.consume(code) + return meta + } + + /** @type {State} */ + function openAfter(code) { + effects.exit(types.codeFencedFence) + return self.interrupt ? 
ok(code) : contentStart(code) + } + + /** @type {State} */ + function contentStart(code) { + if (code === codes.eof) { + return after(code) + } + + if (markdownLineEnding(code)) { + return effects.attempt( + nonLazyLine, + effects.attempt( + closingFenceConstruct, + after, + initialPrefix + ? factorySpace( + effects, + contentStart, + types.linePrefix, + initialPrefix + 1 + ) + : contentStart + ), + after + )(code) + } + + effects.enter(types.codeFlowValue) + return contentContinue(code) + } + + /** @type {State} */ + function contentContinue(code) { + if (code === codes.eof || markdownLineEnding(code)) { + effects.exit(types.codeFlowValue) + return contentStart(code) + } + + effects.consume(code) + return contentContinue + } + + /** @type {State} */ + function after(code) { + effects.exit(types.codeFenced) + return ok(code) + } + + /** @type {Tokenizer} */ + function tokenizeNonLazyLine(effects, ok, nok) { + const self = this + + return start + + /** @type {State} */ + function start(code) { + assert(markdownLineEnding(code), 'expected eol') + effects.enter(types.lineEnding) + effects.consume(code) + effects.exit(types.lineEnding) + return lineStart + } + + /** @type {State} */ + function lineStart(code) { + return self.parser.lazy[self.now().line] ? nok(code) : ok(code) + } + } + + /** @type {Tokenizer} */ + function tokenizeClosingFence(effects, ok, nok) { + let size = 0 + + return factorySpace( + effects, + closingSequenceStart, + types.linePrefix, + this.parser.constructs.disable.null.includes('codeIndented') + ? undefined + : constants.tabSize + ) + + /** @type {State} */ + function closingSequenceStart(code) { + effects.enter(types.codeFencedFence) + effects.enter(types.codeFencedFenceSequence) + return closingSequence(code) + } + + /** @type {State} */ + function closingSequence(code) { + if (code === marker) { + effects.consume(code) + size++ + return closingSequence + } + + if (size < sizeOpen) return nok(code) + effects.exit(types.codeFencedFenceSequence) + return factorySpace(effects, closingSequenceEnd, types.whitespace)(code) + } + + /** @type {State} */ + function closingSequenceEnd(code) { + if (code === codes.eof || markdownLineEnding(code)) { + effects.exit(types.codeFencedFence) + return ok(code) + } + + return nok(code) + } + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-indented.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-indented.d.ts new file mode 100644 index 00000000..2259ccb5 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-indented.d.ts @@ -0,0 +1,7 @@ +/** @type {Construct} */ +export const codeIndented: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Resolver = import('micromark-util-types').Resolver +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-indented.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-indented.js new file mode 100644 index 00000000..d92900ab --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-indented.js @@ -0,0 +1,121 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Resolver} Resolver + * 
@typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').State} State + */ + +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const codeIndented = { + name: 'codeIndented', + tokenize: tokenizeCodeIndented +} + +/** @type {Construct} */ +const indentedContent = {tokenize: tokenizeIndentedContent, partial: true} + +/** @type {Tokenizer} */ +function tokenizeCodeIndented(effects, ok, nok) { + const self = this + return start + + /** @type {State} */ + function start(code) { + effects.enter(types.codeIndented) + return factorySpace( + effects, + afterStartPrefix, + types.linePrefix, + constants.tabSize + 1 + )(code) + } + + /** @type {State} */ + function afterStartPrefix(code) { + const tail = self.events[self.events.length - 1] + return tail && + tail[1].type === types.linePrefix && + tail[2].sliceSerialize(tail[1], true).length >= constants.tabSize + ? afterPrefix(code) + : nok(code) + } + + /** @type {State} */ + function afterPrefix(code) { + if (code === codes.eof) { + return after(code) + } + + if (markdownLineEnding(code)) { + return effects.attempt(indentedContent, afterPrefix, after)(code) + } + + effects.enter(types.codeFlowValue) + return content(code) + } + + /** @type {State} */ + function content(code) { + if (code === codes.eof || markdownLineEnding(code)) { + effects.exit(types.codeFlowValue) + return afterPrefix(code) + } + + effects.consume(code) + return content + } + + /** @type {State} */ + function after(code) { + effects.exit(types.codeIndented) + return ok(code) + } +} + +/** @type {Tokenizer} */ +function tokenizeIndentedContent(effects, ok, nok) { + const self = this + + return start + + /** @type {State} */ + function start(code) { + // If this is a lazy line, it can’t be code. + if (self.parser.lazy[self.now().line]) { + return nok(code) + } + + if (markdownLineEnding(code)) { + effects.enter(types.lineEnding) + effects.consume(code) + effects.exit(types.lineEnding) + return start + } + + return factorySpace( + effects, + afterPrefix, + types.linePrefix, + constants.tabSize + 1 + )(code) + } + + /** @type {State} */ + function afterPrefix(code) { + const tail = self.events[self.events.length - 1] + return tail && + tail[1].type === types.linePrefix && + tail[2].sliceSerialize(tail[1], true).length >= constants.tabSize + ? ok(code) + : markdownLineEnding(code) + ? 
start(code) + : nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-text.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-text.d.ts new file mode 100644 index 00000000..619a420d --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-text.d.ts @@ -0,0 +1,8 @@ +/** @type {Construct} */ +export const codeText: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Previous = import('micromark-util-types').Previous +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-text.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-text.js new file mode 100644 index 00000000..46de06d9 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/code-text.js @@ -0,0 +1,200 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Previous} Previous + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import {markdownLineEnding} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const codeText = { + name: 'codeText', + tokenize: tokenizeCodeText, + resolve: resolveCodeText, + previous +} + +/** @type {Resolver} */ +function resolveCodeText(events) { + let tailExitIndex = events.length - 4 + let headEnterIndex = 3 + /** @type {number} */ + let index + /** @type {number|undefined} */ + let enter + + // If we start and end with an EOL or a space. + if ( + (events[headEnterIndex][1].type === types.lineEnding || + events[headEnterIndex][1].type === 'space') && + (events[tailExitIndex][1].type === types.lineEnding || + events[tailExitIndex][1].type === 'space') + ) { + index = headEnterIndex + + // And we have data. + while (++index < tailExitIndex) { + if (events[index][1].type === types.codeTextData) { + // Then we have padding. + events[headEnterIndex][1].type = types.codeTextPadding + events[tailExitIndex][1].type = types.codeTextPadding + headEnterIndex += 2 + tailExitIndex -= 2 + break + } + } + } + + // Merge adjacent spaces and data. + index = headEnterIndex - 1 + tailExitIndex++ + + while (++index <= tailExitIndex) { + if (enter === undefined) { + if ( + index !== tailExitIndex && + events[index][1].type !== types.lineEnding + ) { + enter = index + } + } else if ( + index === tailExitIndex || + events[index][1].type === types.lineEnding + ) { + events[enter][1].type = types.codeTextData + + if (index !== enter + 2) { + events[enter][1].end = events[index - 1][1].end + events.splice(enter + 2, index - enter - 2) + tailExitIndex -= index - enter - 2 + index = enter + 2 + } + + enter = undefined + } + } + + return events +} + +/** @type {Previous} */ +function previous(code) { + // If there is a previous code, there will always be a tail. 
+ return ( + code !== codes.graveAccent || + this.events[this.events.length - 1][1].type === types.characterEscape + ) +} + +/** @type {Tokenizer} */ +function tokenizeCodeText(effects, ok, nok) { + const self = this + let sizeOpen = 0 + /** @type {number} */ + let size + /** @type {Token} */ + let token + + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.graveAccent, 'expected `` ` ``') + assert(previous.call(self, self.previous), 'expected correct previous') + effects.enter(types.codeText) + effects.enter(types.codeTextSequence) + return openingSequence(code) + } + + /** @type {State} */ + function openingSequence(code) { + if (code === codes.graveAccent) { + effects.consume(code) + sizeOpen++ + return openingSequence + } + + effects.exit(types.codeTextSequence) + return gap(code) + } + + /** @type {State} */ + function gap(code) { + // EOF. + if (code === codes.eof) { + return nok(code) + } + + // Closing fence? + // Could also be data. + if (code === codes.graveAccent) { + token = effects.enter(types.codeTextSequence) + size = 0 + return closingSequence(code) + } + + // Tabs don’t work, and virtual spaces don’t make sense. + if (code === codes.space) { + effects.enter('space') + effects.consume(code) + effects.exit('space') + return gap + } + + if (markdownLineEnding(code)) { + effects.enter(types.lineEnding) + effects.consume(code) + effects.exit(types.lineEnding) + return gap + } + + // Data. + effects.enter(types.codeTextData) + return data(code) + } + + // In code. + /** @type {State} */ + function data(code) { + if ( + code === codes.eof || + code === codes.space || + code === codes.graveAccent || + markdownLineEnding(code) + ) { + effects.exit(types.codeTextData) + return gap(code) + } + + effects.consume(code) + return data + } + + // Closing fence. + /** @type {State} */ + function closingSequence(code) { + // More. + if (code === codes.graveAccent) { + effects.consume(code) + size++ + return closingSequence + } + + // Done! + if (size === sizeOpen) { + effects.exit(types.codeTextSequence) + effects.exit(types.codeText) + return ok(code) + } + + // More or less accents: mark as data. + token.type = types.codeTextData + return data(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/content.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/content.d.ts new file mode 100644 index 00000000..0bfe34f1 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/content.d.ts @@ -0,0 +1,10 @@ +/** + * No name because it must not be turned off. 
+ * @type {Construct} + */ +export const content: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/content.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/content.js new file mode 100644 index 00000000..a07cb64f --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/content.js @@ -0,0 +1,133 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' +import {subtokenize} from 'micromark-util-subtokenize' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** + * No name because it must not be turned off. + * @type {Construct} + */ +export const content = {tokenize: tokenizeContent, resolve: resolveContent} + +/** @type {Construct} */ +const continuationConstruct = {tokenize: tokenizeContinuation, partial: true} + +/** + * Content is transparent: it’s parsed right now. That way, definitions are also + * parsed right now: before text in paragraphs (specifically, media) are parsed. + * + * @type {Resolver} + */ +function resolveContent(events) { + subtokenize(events) + return events +} + +/** @type {Tokenizer} */ +function tokenizeContent(effects, ok) { + /** @type {Token} */ + let previous + + return start + + /** @type {State} */ + function start(code) { + assert( + code !== codes.eof && !markdownLineEnding(code), + 'expected no eof or eol' + ) + + effects.enter(types.content) + previous = effects.enter(types.chunkContent, { + contentType: constants.contentTypeContent + }) + return data(code) + } + + /** @type {State} */ + function data(code) { + if (code === codes.eof) { + return contentEnd(code) + } + + if (markdownLineEnding(code)) { + return effects.check( + continuationConstruct, + contentContinue, + contentEnd + )(code) + } + + // Data. 
+ effects.consume(code) + return data + } + + /** @type {State} */ + function contentEnd(code) { + effects.exit(types.chunkContent) + effects.exit(types.content) + return ok(code) + } + + /** @type {State} */ + function contentContinue(code) { + assert(markdownLineEnding(code), 'expected eol') + effects.consume(code) + effects.exit(types.chunkContent) + previous.next = effects.enter(types.chunkContent, { + contentType: constants.contentTypeContent, + previous + }) + previous = previous.next + return data + } +} + +/** @type {Tokenizer} */ +function tokenizeContinuation(effects, ok, nok) { + const self = this + + return startLookahead + + /** @type {State} */ + function startLookahead(code) { + assert(markdownLineEnding(code), 'expected a line ending') + effects.exit(types.chunkContent) + effects.enter(types.lineEnding) + effects.consume(code) + effects.exit(types.lineEnding) + return factorySpace(effects, prefixed, types.linePrefix) + } + + /** @type {State} */ + function prefixed(code) { + if (code === codes.eof || markdownLineEnding(code)) { + return nok(code) + } + + const tail = self.events[self.events.length - 1] + + if ( + !self.parser.constructs.disable.null.includes('codeIndented') && + tail && + tail[1].type === types.linePrefix && + tail[2].sliceSerialize(tail[1], true).length >= constants.tabSize + ) { + return ok(code) + } + + return effects.interrupt(self.parser.constructs.flow, nok, ok)(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/definition.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/definition.d.ts new file mode 100644 index 00000000..19b9227a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/definition.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const definition: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/definition.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/definition.js new file mode 100644 index 00000000..f99c08eb --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/definition.js @@ -0,0 +1,135 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import {factoryDestination} from 'micromark-factory-destination' +import {factoryLabel} from 'micromark-factory-label' +import {factorySpace} from 'micromark-factory-space' +import {factoryTitle} from 'micromark-factory-title' +import {factoryWhitespace} from 'micromark-factory-whitespace' +import {normalizeIdentifier} from 'micromark-util-normalize-identifier' +import { + markdownLineEnding, + markdownLineEndingOrSpace +} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const definition = {name: 'definition', tokenize: tokenizeDefinition} + +/** @type {Construct} */ +const titleConstruct = {tokenize: tokenizeTitle, partial: true} + +/** @type {Tokenizer} */ +function tokenizeDefinition(effects, ok, nok) { + const self = this + /** @type {string} */ + let identifier + + return start + + /** @type {State} */ + function 
start(code) { + assert(code === codes.leftSquareBracket, 'expected `[`') + effects.enter(types.definition) + return factoryLabel.call( + self, + effects, + labelAfter, + nok, + types.definitionLabel, + types.definitionLabelMarker, + types.definitionLabelString + )(code) + } + + /** @type {State} */ + function labelAfter(code) { + identifier = normalizeIdentifier( + self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1) + ) + + if (code === codes.colon) { + effects.enter(types.definitionMarker) + effects.consume(code) + effects.exit(types.definitionMarker) + + // Note: blank lines can’t exist in content. + return factoryWhitespace( + effects, + factoryDestination( + effects, + effects.attempt( + titleConstruct, + factorySpace(effects, after, types.whitespace), + factorySpace(effects, after, types.whitespace) + ), + nok, + types.definitionDestination, + types.definitionDestinationLiteral, + types.definitionDestinationLiteralMarker, + types.definitionDestinationRaw, + types.definitionDestinationString + ) + ) + } + + return nok(code) + } + + /** @type {State} */ + function after(code) { + if (code === codes.eof || markdownLineEnding(code)) { + effects.exit(types.definition) + + if (!self.parser.defined.includes(identifier)) { + self.parser.defined.push(identifier) + } + + return ok(code) + } + + return nok(code) + } +} + +/** @type {Tokenizer} */ +function tokenizeTitle(effects, ok, nok) { + return start + + /** @type {State} */ + function start(code) { + return markdownLineEndingOrSpace(code) + ? factoryWhitespace(effects, before)(code) + : nok(code) + } + + /** @type {State} */ + function before(code) { + if ( + code === codes.quotationMark || + code === codes.apostrophe || + code === codes.leftParenthesis + ) { + return factoryTitle( + effects, + factorySpace(effects, after, types.whitespace), + nok, + types.definitionTitle, + types.definitionTitleMarker, + types.definitionTitleString + )(code) + } + + return nok(code) + } + + /** @type {State} */ + function after(code) { + return code === codes.eof || markdownLineEnding(code) ? 
ok(code) : nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/hard-break-escape.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/hard-break-escape.d.ts new file mode 100644 index 00000000..f140079b --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/hard-break-escape.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const hardBreakEscape: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/hard-break-escape.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/hard-break-escape.js new file mode 100644 index 00000000..73828f3c --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/hard-break-escape.js @@ -0,0 +1,41 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import {markdownLineEnding} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const hardBreakEscape = { + name: 'hardBreakEscape', + tokenize: tokenizeHardBreakEscape +} + +/** @type {Tokenizer} */ +function tokenizeHardBreakEscape(effects, ok, nok) { + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.backslash, 'expected `\\`') + effects.enter(types.hardBreakEscape) + effects.enter(types.escapeMarker) + effects.consume(code) + return open + } + + /** @type {State} */ + function open(code) { + if (markdownLineEnding(code)) { + effects.exit(types.escapeMarker) + effects.exit(types.hardBreakEscape) + return ok(code) + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/heading-atx.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/heading-atx.d.ts new file mode 100644 index 00000000..3a252315 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/heading-atx.d.ts @@ -0,0 +1,7 @@ +/** @type {Construct} */ +export const headingAtx: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/heading-atx.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/heading-atx.js new file mode 100644 index 00000000..e9449f2a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/heading-atx.js @@ -0,0 +1,162 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import {factorySpace} from 'micromark-factory-space' +import { + markdownLineEnding, + 
markdownLineEndingOrSpace, + markdownSpace +} from 'micromark-util-character' +import {splice} from 'micromark-util-chunked' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const headingAtx = { + name: 'headingAtx', + tokenize: tokenizeHeadingAtx, + resolve: resolveHeadingAtx +} + +/** @type {Resolver} */ +function resolveHeadingAtx(events, context) { + let contentEnd = events.length - 2 + let contentStart = 3 + /** @type {Token} */ + let content + /** @type {Token} */ + let text + + // Prefix whitespace, part of the opening. + if (events[contentStart][1].type === types.whitespace) { + contentStart += 2 + } + + // Suffix whitespace, part of the closing. + if ( + contentEnd - 2 > contentStart && + events[contentEnd][1].type === types.whitespace + ) { + contentEnd -= 2 + } + + if ( + events[contentEnd][1].type === types.atxHeadingSequence && + (contentStart === contentEnd - 1 || + (contentEnd - 4 > contentStart && + events[contentEnd - 2][1].type === types.whitespace)) + ) { + contentEnd -= contentStart + 1 === contentEnd ? 2 : 4 + } + + if (contentEnd > contentStart) { + content = { + type: types.atxHeadingText, + start: events[contentStart][1].start, + end: events[contentEnd][1].end + } + text = { + type: types.chunkText, + start: events[contentStart][1].start, + end: events[contentEnd][1].end, + // @ts-expect-error Constants are fine to assign. + contentType: constants.contentTypeText + } + + splice(events, contentStart, contentEnd - contentStart + 1, [ + ['enter', content, context], + ['enter', text, context], + ['exit', text, context], + ['exit', content, context] + ]) + } + + return events +} + +/** @type {Tokenizer} */ +function tokenizeHeadingAtx(effects, ok, nok) { + const self = this + let size = 0 + + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.numberSign, 'expected `#`') + effects.enter(types.atxHeading) + effects.enter(types.atxHeadingSequence) + return fenceOpenInside(code) + } + + /** @type {State} */ + function fenceOpenInside(code) { + if ( + code === codes.numberSign && + size++ < constants.atxHeadingOpeningFenceSizeMax + ) { + effects.consume(code) + return fenceOpenInside + } + + if (code === codes.eof || markdownLineEndingOrSpace(code)) { + effects.exit(types.atxHeadingSequence) + return self.interrupt ? 
ok(code) : headingBreak(code) + } + + return nok(code) + } + + /** @type {State} */ + function headingBreak(code) { + if (code === codes.numberSign) { + effects.enter(types.atxHeadingSequence) + return sequence(code) + } + + if (code === codes.eof || markdownLineEnding(code)) { + effects.exit(types.atxHeading) + return ok(code) + } + + if (markdownSpace(code)) { + return factorySpace(effects, headingBreak, types.whitespace)(code) + } + + effects.enter(types.atxHeadingText) + return data(code) + } + + /** @type {State} */ + function sequence(code) { + if (code === codes.numberSign) { + effects.consume(code) + return sequence + } + + effects.exit(types.atxHeadingSequence) + return headingBreak(code) + } + + /** @type {State} */ + function data(code) { + if ( + code === codes.eof || + code === codes.numberSign || + markdownLineEndingOrSpace(code) + ) { + effects.exit(types.atxHeadingText) + return headingBreak(code) + } + + effects.consume(code) + return data + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-flow.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-flow.d.ts new file mode 100644 index 00000000..434d6c60 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-flow.d.ts @@ -0,0 +1,7 @@ +/** @type {Construct} */ +export const htmlFlow: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-flow.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-flow.js new file mode 100644 index 00000000..18dd82d0 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-flow.js @@ -0,0 +1,573 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ + +import {ok as assert} from 'uvu/assert' +import { + asciiAlpha, + asciiAlphanumeric, + markdownLineEnding, + markdownLineEndingOrSpace, + markdownSpace +} from 'micromark-util-character' +import {htmlBlockNames, htmlRawNames} from 'micromark-util-html-tag-name' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' +import {blankLine} from './blank-line.js' + +/** @type {Construct} */ +export const htmlFlow = { + name: 'htmlFlow', + tokenize: tokenizeHtmlFlow, + resolveTo: resolveToHtmlFlow, + concrete: true +} + +/** @type {Construct} */ +const nextBlankConstruct = {tokenize: tokenizeNextBlank, partial: true} + +/** @type {Resolver} */ +function resolveToHtmlFlow(events) { + let index = events.length + + while (index--) { + if ( + events[index][0] === 'enter' && + events[index][1].type === types.htmlFlow + ) { + break + } + } + + if (index > 1 && events[index - 2][1].type === types.linePrefix) { + // Add the prefix start to the HTML token. + events[index][1].start = events[index - 2][1].start + // Add the prefix start to the HTML line token. 
+ events[index + 1][1].start = events[index - 2][1].start + // Remove the line prefix. + events.splice(index - 2, 2) + } + + return events +} + +/** @type {Tokenizer} */ +function tokenizeHtmlFlow(effects, ok, nok) { + const self = this + /** @type {number} */ + let kind + /** @type {boolean} */ + let startTag + /** @type {string} */ + let buffer + /** @type {number} */ + let index + /** @type {Code} */ + let marker + + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.lessThan, 'expected `<`') + effects.enter(types.htmlFlow) + effects.enter(types.htmlFlowData) + effects.consume(code) + return open + } + + /** @type {State} */ + function open(code) { + if (code === codes.exclamationMark) { + effects.consume(code) + return declarationStart + } + + if (code === codes.slash) { + effects.consume(code) + return tagCloseStart + } + + if (code === codes.questionMark) { + effects.consume(code) + kind = constants.htmlInstruction + // While we’re in an instruction instead of a declaration, we’re on a `?` + // right now, so we do need to search for `>`, similar to declarations. + return self.interrupt ? ok : continuationDeclarationInside + } + + if (asciiAlpha(code)) { + effects.consume(code) + buffer = String.fromCharCode(code) + startTag = true + return tagName + } + + return nok(code) + } + + /** @type {State} */ + function declarationStart(code) { + if (code === codes.dash) { + effects.consume(code) + kind = constants.htmlComment + return commentOpenInside + } + + if (code === codes.leftSquareBracket) { + effects.consume(code) + kind = constants.htmlCdata + buffer = constants.cdataOpeningString + index = 0 + return cdataOpenInside + } + + if (asciiAlpha(code)) { + effects.consume(code) + kind = constants.htmlDeclaration + return self.interrupt ? ok : continuationDeclarationInside + } + + return nok(code) + } + + /** @type {State} */ + function commentOpenInside(code) { + if (code === codes.dash) { + effects.consume(code) + return self.interrupt ? ok : continuationDeclarationInside + } + + return nok(code) + } + + /** @type {State} */ + function cdataOpenInside(code) { + if (code === buffer.charCodeAt(index++)) { + effects.consume(code) + return index === buffer.length + ? self.interrupt + ? ok + : continuation + : cdataOpenInside + } + + return nok(code) + } + + /** @type {State} */ + function tagCloseStart(code) { + if (asciiAlpha(code)) { + effects.consume(code) + buffer = String.fromCharCode(code) + return tagName + } + + return nok(code) + } + + /** @type {State} */ + function tagName(code) { + if ( + code === codes.eof || + code === codes.slash || + code === codes.greaterThan || + markdownLineEndingOrSpace(code) + ) { + if ( + code !== codes.slash && + startTag && + htmlRawNames.includes(buffer.toLowerCase()) + ) { + kind = constants.htmlRaw + return self.interrupt ? ok(code) : continuation(code) + } + + if (htmlBlockNames.includes(buffer.toLowerCase())) { + kind = constants.htmlBasic + + if (code === codes.slash) { + effects.consume(code) + return basicSelfClosing + } + + return self.interrupt ? ok(code) : continuation(code) + } + + kind = constants.htmlComplete + // Do not support complete HTML when interrupting + return self.interrupt && !self.parser.lazy[self.now().line] + ? nok(code) + : startTag + ? 
completeAttributeNameBefore(code) + : completeClosingTagAfter(code) + } + + if (code === codes.dash || asciiAlphanumeric(code)) { + effects.consume(code) + buffer += String.fromCharCode(code) + return tagName + } + + return nok(code) + } + + /** @type {State} */ + function basicSelfClosing(code) { + if (code === codes.greaterThan) { + effects.consume(code) + return self.interrupt ? ok : continuation + } + + return nok(code) + } + + /** @type {State} */ + function completeClosingTagAfter(code) { + if (markdownSpace(code)) { + effects.consume(code) + return completeClosingTagAfter + } + + return completeEnd(code) + } + + /** @type {State} */ + function completeAttributeNameBefore(code) { + if (code === codes.slash) { + effects.consume(code) + return completeEnd + } + + if (code === codes.colon || code === codes.underscore || asciiAlpha(code)) { + effects.consume(code) + return completeAttributeName + } + + if (markdownSpace(code)) { + effects.consume(code) + return completeAttributeNameBefore + } + + return completeEnd(code) + } + + /** @type {State} */ + function completeAttributeName(code) { + if ( + code === codes.dash || + code === codes.dot || + code === codes.colon || + code === codes.underscore || + asciiAlphanumeric(code) + ) { + effects.consume(code) + return completeAttributeName + } + + return completeAttributeNameAfter(code) + } + + /** @type {State} */ + function completeAttributeNameAfter(code) { + if (code === codes.equalsTo) { + effects.consume(code) + return completeAttributeValueBefore + } + + if (markdownSpace(code)) { + effects.consume(code) + return completeAttributeNameAfter + } + + return completeAttributeNameBefore(code) + } + + /** @type {State} */ + function completeAttributeValueBefore(code) { + if ( + code === codes.eof || + code === codes.lessThan || + code === codes.equalsTo || + code === codes.greaterThan || + code === codes.graveAccent + ) { + return nok(code) + } + + if (code === codes.quotationMark || code === codes.apostrophe) { + effects.consume(code) + marker = code + return completeAttributeValueQuoted + } + + if (markdownSpace(code)) { + effects.consume(code) + return completeAttributeValueBefore + } + + marker = null + return completeAttributeValueUnquoted(code) + } + + /** @type {State} */ + function completeAttributeValueQuoted(code) { + if (code === codes.eof || markdownLineEnding(code)) { + return nok(code) + } + + if (code === marker) { + effects.consume(code) + return completeAttributeValueQuotedAfter + } + + effects.consume(code) + return completeAttributeValueQuoted + } + + /** @type {State} */ + function completeAttributeValueUnquoted(code) { + if ( + code === codes.eof || + code === codes.quotationMark || + code === codes.apostrophe || + code === codes.lessThan || + code === codes.equalsTo || + code === codes.greaterThan || + code === codes.graveAccent || + markdownLineEndingOrSpace(code) + ) { + return completeAttributeNameAfter(code) + } + + effects.consume(code) + return completeAttributeValueUnquoted + } + + /** @type {State} */ + function completeAttributeValueQuotedAfter(code) { + if ( + code === codes.slash || + code === codes.greaterThan || + markdownSpace(code) + ) { + return completeAttributeNameBefore(code) + } + + return nok(code) + } + + /** @type {State} */ + function completeEnd(code) { + if (code === codes.greaterThan) { + effects.consume(code) + return completeAfter + } + + return nok(code) + } + + /** @type {State} */ + function completeAfter(code) { + if (markdownSpace(code)) { + effects.consume(code) + return completeAfter 
+ } + + return code === codes.eof || markdownLineEnding(code) + ? continuation(code) + : nok(code) + } + + /** @type {State} */ + function continuation(code) { + if (code === codes.dash && kind === constants.htmlComment) { + effects.consume(code) + return continuationCommentInside + } + + if (code === codes.lessThan && kind === constants.htmlRaw) { + effects.consume(code) + return continuationRawTagOpen + } + + if (code === codes.greaterThan && kind === constants.htmlDeclaration) { + effects.consume(code) + return continuationClose + } + + if (code === codes.questionMark && kind === constants.htmlInstruction) { + effects.consume(code) + return continuationDeclarationInside + } + + if (code === codes.rightSquareBracket && kind === constants.htmlCdata) { + effects.consume(code) + return continuationCharacterDataInside + } + + if ( + markdownLineEnding(code) && + (kind === constants.htmlBasic || kind === constants.htmlComplete) + ) { + return effects.check( + nextBlankConstruct, + continuationClose, + continuationAtLineEnding + )(code) + } + + if (code === codes.eof || markdownLineEnding(code)) { + return continuationAtLineEnding(code) + } + + effects.consume(code) + return continuation + } + + /** @type {State} */ + function continuationAtLineEnding(code) { + effects.exit(types.htmlFlowData) + return htmlContinueStart(code) + } + + /** @type {State} */ + function htmlContinueStart(code) { + if (code === codes.eof) { + return done(code) + } + + if (markdownLineEnding(code)) { + return effects.attempt( + {tokenize: htmlLineEnd, partial: true}, + htmlContinueStart, + done + )(code) + } + + effects.enter(types.htmlFlowData) + return continuation(code) + } + + /** @type {Tokenizer} */ + function htmlLineEnd(effects, ok, nok) { + return start + + /** @type {State} */ + function start(code) { + assert(markdownLineEnding(code), 'expected eol') + effects.enter(types.lineEnding) + effects.consume(code) + effects.exit(types.lineEnding) + return lineStart + } + + /** @type {State} */ + function lineStart(code) { + return self.parser.lazy[self.now().line] ? nok(code) : ok(code) + } + } + + /** @type {State} */ + function continuationCommentInside(code) { + if (code === codes.dash) { + effects.consume(code) + return continuationDeclarationInside + } + + return continuation(code) + } + + /** @type {State} */ + function continuationRawTagOpen(code) { + if (code === codes.slash) { + effects.consume(code) + buffer = '' + return continuationRawEndTag + } + + return continuation(code) + } + + /** @type {State} */ + function continuationRawEndTag(code) { + if ( + code === codes.greaterThan && + htmlRawNames.includes(buffer.toLowerCase()) + ) { + effects.consume(code) + return continuationClose + } + + if (asciiAlpha(code) && buffer.length < constants.htmlRawSizeMax) { + effects.consume(code) + buffer += String.fromCharCode(code) + return continuationRawEndTag + } + + return continuation(code) + } + + /** @type {State} */ + function continuationCharacterDataInside(code) { + if (code === codes.rightSquareBracket) { + effects.consume(code) + return continuationDeclarationInside + } + + return continuation(code) + } + + /** @type {State} */ + function continuationDeclarationInside(code) { + if (code === codes.greaterThan) { + effects.consume(code) + return continuationClose + } + + // More dashes. 
+ if (code === codes.dash && kind === constants.htmlComment) { + effects.consume(code) + return continuationDeclarationInside + } + + return continuation(code) + } + + /** @type {State} */ + function continuationClose(code) { + if (code === codes.eof || markdownLineEnding(code)) { + effects.exit(types.htmlFlowData) + return done(code) + } + + effects.consume(code) + return continuationClose + } + + /** @type {State} */ + function done(code) { + effects.exit(types.htmlFlow) + return ok(code) + } +} + +/** @type {Tokenizer} */ +function tokenizeNextBlank(effects, ok, nok) { + return start + + /** @type {State} */ + function start(code) { + assert(markdownLineEnding(code), 'expected a line ending') + effects.exit(types.htmlFlowData) + effects.enter(types.lineEndingBlank) + effects.consume(code) + effects.exit(types.lineEndingBlank) + return effects.attempt(blankLine, ok, nok) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-text.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-text.d.ts new file mode 100644 index 00000000..74c71bf4 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-text.d.ts @@ -0,0 +1,6 @@ +/** @type {Construct} */ +export const htmlText: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-text.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-text.js new file mode 100644 index 00000000..46f3ba42 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/html-text.js @@ -0,0 +1,489 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ + +import {ok as assert} from 'uvu/assert' +import {factorySpace} from 'micromark-factory-space' +import { + asciiAlpha, + asciiAlphanumeric, + markdownLineEnding, + markdownLineEndingOrSpace, + markdownSpace +} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const htmlText = {name: 'htmlText', tokenize: tokenizeHtmlText} + +/** @type {Tokenizer} */ +function tokenizeHtmlText(effects, ok, nok) { + const self = this + /** @type {NonNullable|undefined} */ + let marker + /** @type {string} */ + let buffer + /** @type {number} */ + let index + /** @type {State} */ + let returnState + + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.lessThan, 'expected `<`') + effects.enter(types.htmlText) + effects.enter(types.htmlTextData) + effects.consume(code) + return open + } + + /** @type {State} */ + function open(code) { + if (code === codes.exclamationMark) { + effects.consume(code) + return declarationOpen + } + + if (code === codes.slash) { + effects.consume(code) + return tagCloseStart + } + + if (code === codes.questionMark) { + effects.consume(code) + return instruction + } + + if (asciiAlpha(code)) { + effects.consume(code) + return tagOpen + } + + return nok(code) + } + + /** @type {State} */ + 
function declarationOpen(code) { + if (code === codes.dash) { + effects.consume(code) + return commentOpen + } + + if (code === codes.leftSquareBracket) { + effects.consume(code) + buffer = constants.cdataOpeningString + index = 0 + return cdataOpen + } + + if (asciiAlpha(code)) { + effects.consume(code) + return declaration + } + + return nok(code) + } + + /** @type {State} */ + function commentOpen(code) { + if (code === codes.dash) { + effects.consume(code) + return commentStart + } + + return nok(code) + } + + /** @type {State} */ + function commentStart(code) { + if (code === codes.eof || code === codes.greaterThan) { + return nok(code) + } + + if (code === codes.dash) { + effects.consume(code) + return commentStartDash + } + + return comment(code) + } + + /** @type {State} */ + function commentStartDash(code) { + if (code === codes.eof || code === codes.greaterThan) { + return nok(code) + } + + return comment(code) + } + + /** @type {State} */ + function comment(code) { + if (code === codes.eof) { + return nok(code) + } + + if (code === codes.dash) { + effects.consume(code) + return commentClose + } + + if (markdownLineEnding(code)) { + returnState = comment + return atLineEnding(code) + } + + effects.consume(code) + return comment + } + + /** @type {State} */ + function commentClose(code) { + if (code === codes.dash) { + effects.consume(code) + return end + } + + return comment(code) + } + + /** @type {State} */ + function cdataOpen(code) { + if (code === buffer.charCodeAt(index++)) { + effects.consume(code) + return index === buffer.length ? cdata : cdataOpen + } + + return nok(code) + } + + /** @type {State} */ + function cdata(code) { + if (code === codes.eof) { + return nok(code) + } + + if (code === codes.rightSquareBracket) { + effects.consume(code) + return cdataClose + } + + if (markdownLineEnding(code)) { + returnState = cdata + return atLineEnding(code) + } + + effects.consume(code) + return cdata + } + + /** @type {State} */ + function cdataClose(code) { + if (code === codes.rightSquareBracket) { + effects.consume(code) + return cdataEnd + } + + return cdata(code) + } + + /** @type {State} */ + function cdataEnd(code) { + if (code === codes.greaterThan) { + return end(code) + } + + if (code === codes.rightSquareBracket) { + effects.consume(code) + return cdataEnd + } + + return cdata(code) + } + + /** @type {State} */ + function declaration(code) { + if (code === codes.eof || code === codes.greaterThan) { + return end(code) + } + + if (markdownLineEnding(code)) { + returnState = declaration + return atLineEnding(code) + } + + effects.consume(code) + return declaration + } + + /** @type {State} */ + function instruction(code) { + if (code === codes.eof) { + return nok(code) + } + + if (code === codes.questionMark) { + effects.consume(code) + return instructionClose + } + + if (markdownLineEnding(code)) { + returnState = instruction + return atLineEnding(code) + } + + effects.consume(code) + return instruction + } + + /** @type {State} */ + function instructionClose(code) { + return code === codes.greaterThan ? 
end(code) : instruction(code) + } + + /** @type {State} */ + function tagCloseStart(code) { + if (asciiAlpha(code)) { + effects.consume(code) + return tagClose + } + + return nok(code) + } + + /** @type {State} */ + function tagClose(code) { + if (code === codes.dash || asciiAlphanumeric(code)) { + effects.consume(code) + return tagClose + } + + return tagCloseBetween(code) + } + + /** @type {State} */ + function tagCloseBetween(code) { + if (markdownLineEnding(code)) { + returnState = tagCloseBetween + return atLineEnding(code) + } + + if (markdownSpace(code)) { + effects.consume(code) + return tagCloseBetween + } + + return end(code) + } + + /** @type {State} */ + function tagOpen(code) { + if (code === codes.dash || asciiAlphanumeric(code)) { + effects.consume(code) + return tagOpen + } + + if ( + code === codes.slash || + code === codes.greaterThan || + markdownLineEndingOrSpace(code) + ) { + return tagOpenBetween(code) + } + + return nok(code) + } + + /** @type {State} */ + function tagOpenBetween(code) { + if (code === codes.slash) { + effects.consume(code) + return end + } + + if (code === codes.colon || code === codes.underscore || asciiAlpha(code)) { + effects.consume(code) + return tagOpenAttributeName + } + + if (markdownLineEnding(code)) { + returnState = tagOpenBetween + return atLineEnding(code) + } + + if (markdownSpace(code)) { + effects.consume(code) + return tagOpenBetween + } + + return end(code) + } + + /** @type {State} */ + function tagOpenAttributeName(code) { + if ( + code === codes.dash || + code === codes.dot || + code === codes.colon || + code === codes.underscore || + asciiAlphanumeric(code) + ) { + effects.consume(code) + return tagOpenAttributeName + } + + return tagOpenAttributeNameAfter(code) + } + + /** @type {State} */ + function tagOpenAttributeNameAfter(code) { + if (code === codes.equalsTo) { + effects.consume(code) + return tagOpenAttributeValueBefore + } + + if (markdownLineEnding(code)) { + returnState = tagOpenAttributeNameAfter + return atLineEnding(code) + } + + if (markdownSpace(code)) { + effects.consume(code) + return tagOpenAttributeNameAfter + } + + return tagOpenBetween(code) + } + + /** @type {State} */ + function tagOpenAttributeValueBefore(code) { + if ( + code === codes.eof || + code === codes.lessThan || + code === codes.equalsTo || + code === codes.greaterThan || + code === codes.graveAccent + ) { + return nok(code) + } + + if (code === codes.quotationMark || code === codes.apostrophe) { + effects.consume(code) + marker = code + return tagOpenAttributeValueQuoted + } + + if (markdownLineEnding(code)) { + returnState = tagOpenAttributeValueBefore + return atLineEnding(code) + } + + if (markdownSpace(code)) { + effects.consume(code) + return tagOpenAttributeValueBefore + } + + effects.consume(code) + marker = undefined + return tagOpenAttributeValueUnquoted + } + + /** @type {State} */ + function tagOpenAttributeValueQuoted(code) { + if (code === marker) { + effects.consume(code) + return tagOpenAttributeValueQuotedAfter + } + + if (code === codes.eof) { + return nok(code) + } + + if (markdownLineEnding(code)) { + returnState = tagOpenAttributeValueQuoted + return atLineEnding(code) + } + + effects.consume(code) + return tagOpenAttributeValueQuoted + } + + /** @type {State} */ + function tagOpenAttributeValueQuotedAfter(code) { + if ( + code === codes.greaterThan || + code === codes.slash || + markdownLineEndingOrSpace(code) + ) { + return tagOpenBetween(code) + } + + return nok(code) + } + + /** @type {State} */ + function 
tagOpenAttributeValueUnquoted(code) { + if ( + code === codes.eof || + code === codes.quotationMark || + code === codes.apostrophe || + code === codes.lessThan || + code === codes.equalsTo || + code === codes.graveAccent + ) { + return nok(code) + } + + if (code === codes.greaterThan || markdownLineEndingOrSpace(code)) { + return tagOpenBetween(code) + } + + effects.consume(code) + return tagOpenAttributeValueUnquoted + } + + // We can’t have blank lines in content, so no need to worry about empty + // tokens. + /** @type {State} */ + function atLineEnding(code) { + assert(returnState, 'expected return state') + assert(markdownLineEnding(code), 'expected eol') + effects.exit(types.htmlTextData) + effects.enter(types.lineEnding) + effects.consume(code) + effects.exit(types.lineEnding) + return factorySpace( + effects, + afterPrefix, + types.linePrefix, + self.parser.constructs.disable.null.includes('codeIndented') + ? undefined + : constants.tabSize + ) + } + + /** @type {State} */ + function afterPrefix(code) { + effects.enter(types.htmlTextData) + return returnState(code) + } + + /** @type {State} */ + function end(code) { + if (code === codes.greaterThan) { + effects.consume(code) + effects.exit(types.htmlTextData) + effects.exit(types.htmlText) + return ok + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-end.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-end.d.ts new file mode 100644 index 00000000..11cbcded --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-end.d.ts @@ -0,0 +1,9 @@ +/** @type {Construct} */ +export const labelEnd: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Event = import('micromark-util-types').Event +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-end.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-end.js new file mode 100644 index 00000000..a08f879d --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-end.js @@ -0,0 +1,384 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Event} Event + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ + +import {ok as assert} from 'uvu/assert' +import {factoryDestination} from 'micromark-factory-destination' +import {factoryLabel} from 'micromark-factory-label' +import {factoryTitle} from 'micromark-factory-title' +import {factoryWhitespace} from 'micromark-factory-whitespace' +import {markdownLineEndingOrSpace} from 'micromark-util-character' +import {push, splice} from 'micromark-util-chunked' +import {normalizeIdentifier} from 'micromark-util-normalize-identifier' +import {resolveAll} from 'micromark-util-resolve-all' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 
'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const labelEnd = { + name: 'labelEnd', + tokenize: tokenizeLabelEnd, + resolveTo: resolveToLabelEnd, + resolveAll: resolveAllLabelEnd +} + +/** @type {Construct} */ +const resourceConstruct = {tokenize: tokenizeResource} +/** @type {Construct} */ +const fullReferenceConstruct = {tokenize: tokenizeFullReference} +/** @type {Construct} */ +const collapsedReferenceConstruct = {tokenize: tokenizeCollapsedReference} + +/** @type {Resolver} */ +function resolveAllLabelEnd(events) { + let index = -1 + /** @type {Token} */ + let token + + while (++index < events.length) { + token = events[index][1] + + if ( + token.type === types.labelImage || + token.type === types.labelLink || + token.type === types.labelEnd + ) { + // Remove the marker. + events.splice(index + 1, token.type === types.labelImage ? 4 : 2) + token.type = types.data + index++ + } + } + + return events +} + +/** @type {Resolver} */ +function resolveToLabelEnd(events, context) { + let index = events.length + let offset = 0 + /** @type {Token} */ + let token + /** @type {number|undefined} */ + let open + /** @type {number|undefined} */ + let close + /** @type {Event[]} */ + let media + + // Find an opening. + while (index--) { + token = events[index][1] + + if (open) { + // If we see another link, or inactive link label, we’ve been here before. + if ( + token.type === types.link || + (token.type === types.labelLink && token._inactive) + ) { + break + } + + // Mark other link openings as inactive, as we can’t have links in + // links. + if (events[index][0] === 'enter' && token.type === types.labelLink) { + token._inactive = true + } + } else if (close) { + if ( + events[index][0] === 'enter' && + (token.type === types.labelImage || token.type === types.labelLink) && + !token._balanced + ) { + open = index + + if (token.type !== types.labelLink) { + offset = 2 + break + } + } + } else if (token.type === types.labelEnd) { + close = index + } + } + + assert(open !== undefined, '`open` is supposed to be found') + assert(close !== undefined, '`close` is supposed to be found') + + const group = { + type: events[open][1].type === types.labelLink ? types.link : types.image, + start: Object.assign({}, events[open][1].start), + end: Object.assign({}, events[events.length - 1][1].end) + } + + const label = { + type: types.label, + start: Object.assign({}, events[open][1].start), + end: Object.assign({}, events[close][1].end) + } + + const text = { + type: types.labelText, + start: Object.assign({}, events[open + offset + 2][1].end), + end: Object.assign({}, events[close - 2][1].start) + } + + media = [ + ['enter', group, context], + ['enter', label, context] + ] + + // Opening marker. + media = push(media, events.slice(open + 1, open + offset + 3)) + + // Text open. + media = push(media, [['enter', text, context]]) + + // Between. + media = push( + media, + resolveAll( + context.parser.constructs.insideSpan.null, + events.slice(open + offset + 4, close - 3), + context + ) + ) + + // Text close, marker close, label close. + media = push(media, [ + ['exit', text, context], + events[close - 2], + events[close - 1], + ['exit', label, context] + ]) + + // Reference, resource, or so. + media = push(media, events.slice(close + 1)) + + // Media close. 
+ media = push(media, [['exit', group, context]]) + + splice(events, open, events.length, media) + + return events +} + +/** @type {Tokenizer} */ +function tokenizeLabelEnd(effects, ok, nok) { + const self = this + let index = self.events.length + /** @type {Token} */ + let labelStart + /** @type {boolean} */ + let defined + + // Find an opening. + while (index--) { + if ( + (self.events[index][1].type === types.labelImage || + self.events[index][1].type === types.labelLink) && + !self.events[index][1]._balanced + ) { + labelStart = self.events[index][1] + break + } + } + + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.rightSquareBracket, 'expected `]`') + + if (!labelStart) { + return nok(code) + } + + // It’s a balanced bracket, but contains a link. + if (labelStart._inactive) return balanced(code) + defined = self.parser.defined.includes( + normalizeIdentifier( + self.sliceSerialize({start: labelStart.end, end: self.now()}) + ) + ) + effects.enter(types.labelEnd) + effects.enter(types.labelMarker) + effects.consume(code) + effects.exit(types.labelMarker) + effects.exit(types.labelEnd) + return afterLabelEnd + } + + /** @type {State} */ + function afterLabelEnd(code) { + // Resource: `[asd](fgh)`. + if (code === codes.leftParenthesis) { + return effects.attempt( + resourceConstruct, + ok, + defined ? ok : balanced + )(code) + } + + // Collapsed (`[asd][]`) or full (`[asd][fgh]`) reference? + if (code === codes.leftSquareBracket) { + return effects.attempt( + fullReferenceConstruct, + ok, + defined + ? effects.attempt(collapsedReferenceConstruct, ok, balanced) + : balanced + )(code) + } + + // Shortcut reference: `[asd]`? + return defined ? ok(code) : balanced(code) + } + + /** @type {State} */ + function balanced(code) { + labelStart._balanced = true + return nok(code) + } +} + +/** @type {Tokenizer} */ +function tokenizeResource(effects, ok, nok) { + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.leftParenthesis, 'expected left paren') + effects.enter(types.resource) + effects.enter(types.resourceMarker) + effects.consume(code) + effects.exit(types.resourceMarker) + return factoryWhitespace(effects, open) + } + + /** @type {State} */ + function open(code) { + if (code === codes.rightParenthesis) { + return end(code) + } + + return factoryDestination( + effects, + destinationAfter, + nok, + types.resourceDestination, + types.resourceDestinationLiteral, + types.resourceDestinationLiteralMarker, + types.resourceDestinationRaw, + types.resourceDestinationString, + constants.linkResourceDestinationBalanceMax + )(code) + } + + /** @type {State} */ + function destinationAfter(code) { + return markdownLineEndingOrSpace(code) + ? 
factoryWhitespace(effects, between)(code) + : end(code) + } + + /** @type {State} */ + function between(code) { + if ( + code === codes.quotationMark || + code === codes.apostrophe || + code === codes.leftParenthesis + ) { + return factoryTitle( + effects, + factoryWhitespace(effects, end), + nok, + types.resourceTitle, + types.resourceTitleMarker, + types.resourceTitleString + )(code) + } + + return end(code) + } + + /** @type {State} */ + function end(code) { + if (code === codes.rightParenthesis) { + effects.enter(types.resourceMarker) + effects.consume(code) + effects.exit(types.resourceMarker) + effects.exit(types.resource) + return ok + } + + return nok(code) + } +} + +/** @type {Tokenizer} */ +function tokenizeFullReference(effects, ok, nok) { + const self = this + + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.leftSquareBracket, 'expected left bracket') + return factoryLabel.call( + self, + effects, + afterLabel, + nok, + types.reference, + types.referenceMarker, + types.referenceString + )(code) + } + + /** @type {State} */ + function afterLabel(code) { + return self.parser.defined.includes( + normalizeIdentifier( + self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1) + ) + ) + ? ok(code) + : nok(code) + } +} + +/** @type {Tokenizer} */ +function tokenizeCollapsedReference(effects, ok, nok) { + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.leftSquareBracket, 'expected left bracket') + effects.enter(types.reference) + effects.enter(types.referenceMarker) + effects.consume(code) + effects.exit(types.referenceMarker) + return open + } + + /** @type {State} */ + function open(code) { + if (code === codes.rightSquareBracket) { + effects.enter(types.referenceMarker) + effects.consume(code) + effects.exit(types.referenceMarker) + effects.exit(types.reference) + return ok + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-image.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-image.d.ts new file mode 100644 index 00000000..302d8905 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-image.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const labelStartImage: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-image.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-image.js new file mode 100644 index 00000000..1c64efc4 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-image.js @@ -0,0 +1,60 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import {codes} from 'micromark-util-symbol/codes.js' +import {types} from 'micromark-util-symbol/types.js' +import {labelEnd} from './label-end.js' + +/** @type {Construct} */ +export const labelStartImage = { + name: 'labelStartImage', + tokenize: tokenizeLabelStartImage, + resolveAll: labelEnd.resolveAll +} + +/** @type {Tokenizer} */ +function tokenizeLabelStartImage(effects, ok, nok) { + const self = this + 
+ return start + + /** @type {State} */ + function start(code) { + assert(code === codes.exclamationMark, 'expected `!`') + effects.enter(types.labelImage) + effects.enter(types.labelImageMarker) + effects.consume(code) + effects.exit(types.labelImageMarker) + return open + } + + /** @type {State} */ + function open(code) { + if (code === codes.leftSquareBracket) { + effects.enter(types.labelMarker) + effects.consume(code) + effects.exit(types.labelMarker) + effects.exit(types.labelImage) + return after + } + + return nok(code) + } + + /** @type {State} */ + function after(code) { + /* To do: remove in the future once we’ve switched from + * `micromark-extension-footnote` to `micromark-extension-gfm-footnote`, + * which doesn’t need this */ + /* Hidden footnotes hook */ + /* c8 ignore next 3 */ + return code === codes.caret && + '_hiddenFootnoteSupport' in self.parser.constructs + ? nok(code) + : ok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-link.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-link.d.ts new file mode 100644 index 00000000..5a757d36 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-link.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const labelStartLink: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-link.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-link.js new file mode 100644 index 00000000..1c14479c --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/label-start-link.js @@ -0,0 +1,48 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import {codes} from 'micromark-util-symbol/codes.js' +import {types} from 'micromark-util-symbol/types.js' +import {labelEnd} from './label-end.js' + +/** @type {Construct} */ +export const labelStartLink = { + name: 'labelStartLink', + tokenize: tokenizeLabelStartLink, + resolveAll: labelEnd.resolveAll +} + +/** @type {Tokenizer} */ +function tokenizeLabelStartLink(effects, ok, nok) { + const self = this + + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.leftSquareBracket, 'expected `[`') + effects.enter(types.labelLink) + effects.enter(types.labelMarker) + effects.consume(code) + effects.exit(types.labelMarker) + effects.exit(types.labelLink) + return after + } + + /** @type {State} */ + function after(code) { + /* To do: remove in the future once we’ve switched from + * `micromark-extension-footnote` to `micromark-extension-gfm-footnote`, + * which doesn’t need this */ + /* Hidden footnotes hook. */ + /* c8 ignore next 3 */ + return code === codes.caret && + '_hiddenFootnoteSupport' in self.parser.constructs + ? 
nok(code) + : ok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/line-ending.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/line-ending.d.ts new file mode 100644 index 00000000..3d9fdf9e --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/line-ending.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const lineEnding: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/line-ending.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/line-ending.js new file mode 100644 index 00000000..24b7db6d --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/line-ending.js @@ -0,0 +1,27 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const lineEnding = {name: 'lineEnding', tokenize: tokenizeLineEnding} + +/** @type {Tokenizer} */ +function tokenizeLineEnding(effects, ok) { + return start + + /** @type {State} */ + function start(code) { + assert(markdownLineEnding(code), 'expected eol') + effects.enter(types.lineEnding) + effects.consume(code) + effects.exit(types.lineEnding) + return factorySpace(effects, ok, types.linePrefix) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/list.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/list.d.ts new file mode 100644 index 00000000..6a138dbe --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/list.d.ts @@ -0,0 +1,16 @@ +/** @type {Construct} */ +export const list: Construct +export type Construct = import('micromark-util-types').Construct +export type TokenizeContext = import('micromark-util-types').TokenizeContext +export type Exiter = import('micromark-util-types').Exiter +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code +export type ListContainerState = Record & { + marker: Code + type: string + size: number +} +export type TokenizeContextWithState = TokenizeContext & { + containerState: ListContainerState +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/list.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/list.js new file mode 100644 index 00000000..63e10447 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/list.js @@ -0,0 +1,276 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext + * @typedef {import('micromark-util-types').Exiter} Exiter + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ + +/** + * @typedef {Record & {marker: Code, type: string, size: number}} ListContainerState + * 
@typedef {TokenizeContext & {containerState: ListContainerState}} TokenizeContextWithState + */ + +import {ok as assert} from 'uvu/assert' +import {factorySpace} from 'micromark-factory-space' +import {asciiDigit, markdownSpace} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' +import {blankLine} from './blank-line.js' +import {thematicBreak} from './thematic-break.js' + +/** @type {Construct} */ +export const list = { + name: 'list', + tokenize: tokenizeListStart, + continuation: {tokenize: tokenizeListContinuation}, + exit: tokenizeListEnd +} + +/** @type {Construct} */ +const listItemPrefixWhitespaceConstruct = { + tokenize: tokenizeListItemPrefixWhitespace, + partial: true +} + +/** @type {Construct} */ +const indentConstruct = {tokenize: tokenizeIndent, partial: true} + +/** + * @type {Tokenizer} + * @this {TokenizeContextWithState} + */ +function tokenizeListStart(effects, ok, nok) { + const self = this + const tail = self.events[self.events.length - 1] + let initialSize = + tail && tail[1].type === types.linePrefix + ? tail[2].sliceSerialize(tail[1], true).length + : 0 + let size = 0 + + return start + + /** @type {State} */ + function start(code) { + const kind = + self.containerState.type || + (code === codes.asterisk || code === codes.plusSign || code === codes.dash + ? types.listUnordered + : types.listOrdered) + + if ( + kind === types.listUnordered + ? !self.containerState.marker || code === self.containerState.marker + : asciiDigit(code) + ) { + if (!self.containerState.type) { + self.containerState.type = kind + effects.enter(kind, {_container: true}) + } + + if (kind === types.listUnordered) { + effects.enter(types.listItemPrefix) + return code === codes.asterisk || code === codes.dash + ? effects.check(thematicBreak, nok, atMarker)(code) + : atMarker(code) + } + + if (!self.interrupt || code === codes.digit1) { + effects.enter(types.listItemPrefix) + effects.enter(types.listItemValue) + return inside(code) + } + } + + return nok(code) + } + + /** @type {State} */ + function inside(code) { + if (asciiDigit(code) && ++size < constants.listItemValueSizeMax) { + effects.consume(code) + return inside + } + + if ( + (!self.interrupt || size < 2) && + (self.containerState.marker + ? code === self.containerState.marker + : code === codes.rightParenthesis || code === codes.dot) + ) { + effects.exit(types.listItemValue) + return atMarker(code) + } + + return nok(code) + } + + /** + * @type {State} + **/ + function atMarker(code) { + assert(code !== codes.eof, 'eof (`null`) is not a marker') + effects.enter(types.listItemMarker) + effects.consume(code) + effects.exit(types.listItemMarker) + self.containerState.marker = self.containerState.marker || code + return effects.check( + blankLine, + // Can’t be empty when interrupting. + self.interrupt ? 
nok : onBlank, + effects.attempt( + listItemPrefixWhitespaceConstruct, + endOfPrefix, + otherPrefix + ) + ) + } + + /** @type {State} */ + function onBlank(code) { + self.containerState.initialBlankLine = true + initialSize++ + return endOfPrefix(code) + } + + /** @type {State} */ + function otherPrefix(code) { + if (markdownSpace(code)) { + effects.enter(types.listItemPrefixWhitespace) + effects.consume(code) + effects.exit(types.listItemPrefixWhitespace) + return endOfPrefix + } + + return nok(code) + } + + /** @type {State} */ + function endOfPrefix(code) { + self.containerState.size = + initialSize + + self.sliceSerialize(effects.exit(types.listItemPrefix), true).length + return ok(code) + } +} + +/** + * @type {Tokenizer} + * @this {TokenizeContextWithState} + */ +function tokenizeListContinuation(effects, ok, nok) { + const self = this + + self.containerState._closeFlow = undefined + + return effects.check(blankLine, onBlank, notBlank) + + /** @type {State} */ + function onBlank(code) { + self.containerState.furtherBlankLines = + self.containerState.furtherBlankLines || + self.containerState.initialBlankLine + + // We have a blank line. + // Still, try to consume at most the items size. + return factorySpace( + effects, + ok, + types.listItemIndent, + self.containerState.size + 1 + )(code) + } + + /** @type {State} */ + function notBlank(code) { + if (self.containerState.furtherBlankLines || !markdownSpace(code)) { + self.containerState.furtherBlankLines = undefined + self.containerState.initialBlankLine = undefined + return notInCurrentItem(code) + } + + self.containerState.furtherBlankLines = undefined + self.containerState.initialBlankLine = undefined + return effects.attempt(indentConstruct, ok, notInCurrentItem)(code) + } + + /** @type {State} */ + function notInCurrentItem(code) { + // While we do continue, we signal that the flow should be closed. + self.containerState._closeFlow = true + // As we’re closing flow, we’re no longer interrupting. + self.interrupt = undefined + return factorySpace( + effects, + effects.attempt(list, ok, nok), + types.linePrefix, + self.parser.constructs.disable.null.includes('codeIndented') + ? undefined + : constants.tabSize + )(code) + } +} + +/** + * @type {Tokenizer} + * @this {TokenizeContextWithState} + */ +function tokenizeIndent(effects, ok, nok) { + const self = this + + return factorySpace( + effects, + afterPrefix, + types.listItemIndent, + self.containerState.size + 1 + ) + + /** @type {State} */ + function afterPrefix(code) { + const tail = self.events[self.events.length - 1] + return tail && + tail[1].type === types.listItemIndent && + tail[2].sliceSerialize(tail[1], true).length === self.containerState.size + ? ok(code) + : nok(code) + } +} + +/** + * @type {Exiter} + * @this {TokenizeContextWithState} + */ +function tokenizeListEnd(effects) { + effects.exit(this.containerState.type) +} + +/** + * @type {Tokenizer} + * @this {TokenizeContextWithState} + */ +function tokenizeListItemPrefixWhitespace(effects, ok, nok) { + const self = this + + return factorySpace( + effects, + afterPrefix, + types.listItemPrefixWhitespace, + self.parser.constructs.disable.null.includes('codeIndented') + ? undefined + : constants.tabSize + 1 + ) + + /** @type {State} */ + function afterPrefix(code) { + const tail = self.events[self.events.length - 1] + + return !markdownSpace(code) && + tail && + tail[1].type === types.listItemPrefixWhitespace + ? 
ok(code) + : nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/setext-underline.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/setext-underline.d.ts new file mode 100644 index 00000000..7dc65882 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/setext-underline.d.ts @@ -0,0 +1,7 @@ +/** @type {Construct} */ +export const setextUnderline: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/setext-underline.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/setext-underline.js new file mode 100644 index 00000000..0662ac71 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/setext-underline.js @@ -0,0 +1,148 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ + +import {ok as assert} from 'uvu/assert' +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const setextUnderline = { + name: 'setextUnderline', + tokenize: tokenizeSetextUnderline, + resolveTo: resolveToSetextUnderline +} + +/** @type {Resolver} */ +function resolveToSetextUnderline(events, context) { + let index = events.length + /** @type {number|undefined} */ + let content + /** @type {number|undefined} */ + let text + /** @type {number|undefined} */ + let definition + + // Find the opening of the content. + // It’ll always exist: we don’t tokenize if it isn’t there. + while (index--) { + if (events[index][0] === 'enter') { + if (events[index][1].type === types.content) { + content = index + break + } + + if (events[index][1].type === types.paragraph) { + text = index + } + } + // Exit + else { + if (events[index][1].type === types.content) { + // Remove the content end (if needed we’ll add it later) + events.splice(index, 1) + } + + if (!definition && events[index][1].type === types.definition) { + definition = index + } + } + } + + assert(text !== undefined, 'expected a `text` index to be found') + assert(content !== undefined, 'expected a `text` index to be found') + + const heading = { + type: types.setextHeading, + start: Object.assign({}, events[text][1].start), + end: Object.assign({}, events[events.length - 1][1].end) + } + + // Change the paragraph to setext heading text. + events[text][1].type = types.setextHeadingText + + // If we have definitions in the content, we’ll keep on having content, + // but we need move it. + if (definition) { + events.splice(text, 0, ['enter', heading, context]) + events.splice(definition + 1, 0, ['exit', events[content][1], context]) + events[content][1].end = Object.assign({}, events[definition][1].end) + } else { + events[content][1] = heading + } + + // Add the heading exit at the end. 
+ events.push(['exit', heading, context]) + + return events +} + +/** @type {Tokenizer} */ +function tokenizeSetextUnderline(effects, ok, nok) { + const self = this + let index = self.events.length + /** @type {NonNullable} */ + let marker + /** @type {boolean} */ + let paragraph + + // Find an opening. + while (index--) { + // Skip enter/exit of line ending, line prefix, and content. + // We can now either have a definition or a paragraph. + if ( + self.events[index][1].type !== types.lineEnding && + self.events[index][1].type !== types.linePrefix && + self.events[index][1].type !== types.content + ) { + paragraph = self.events[index][1].type === types.paragraph + break + } + } + + return start + + /** @type {State} */ + function start(code) { + assert( + code === codes.dash || code === codes.equalsTo, + 'expected `=` or `-`' + ) + + if (!self.parser.lazy[self.now().line] && (self.interrupt || paragraph)) { + effects.enter(types.setextHeadingLine) + effects.enter(types.setextHeadingLineSequence) + marker = code + return closingSequence(code) + } + + return nok(code) + } + + /** @type {State} */ + function closingSequence(code) { + if (code === marker) { + effects.consume(code) + return closingSequence + } + + effects.exit(types.setextHeadingLineSequence) + return factorySpace(effects, closingSequenceEnd, types.lineSuffix)(code) + } + + /** @type {State} */ + function closingSequenceEnd(code) { + if (code === codes.eof || markdownLineEnding(code)) { + effects.exit(types.setextHeadingLine) + return ok(code) + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/thematic-break.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/thematic-break.d.ts new file mode 100644 index 00000000..c5992e04 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/thematic-break.d.ts @@ -0,0 +1,6 @@ +/** @type {Construct} */ +export const thematicBreak: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/thematic-break.js b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/thematic-break.js new file mode 100644 index 00000000..8cd82db6 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/dev/lib/thematic-break.js @@ -0,0 +1,76 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ + +import {ok as assert} from 'uvu/assert' +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding, markdownSpace} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** @type {Construct} */ +export const thematicBreak = { + name: 'thematicBreak', + tokenize: tokenizeThematicBreak +} + +/** @type {Tokenizer} */ +function tokenizeThematicBreak(effects, ok, nok) { + let size = 0 + /** @type {NonNullable} */ + let marker + + return start + + /** @type {State} */ + function start(code) { + assert( + code === codes.asterisk || + code === codes.dash || + code 
=== codes.underscore, + 'expected `*`, `-`, or `_`' + ) + + effects.enter(types.thematicBreak) + marker = code + return atBreak(code) + } + + /** @type {State} */ + function atBreak(code) { + if (code === marker) { + effects.enter(types.thematicBreakSequence) + return sequence(code) + } + + if (markdownSpace(code)) { + return factorySpace(effects, atBreak, types.whitespace)(code) + } + + if ( + size < constants.thematicBreakMarkerCountMin || + (code !== codes.eof && !markdownLineEnding(code)) + ) { + return nok(code) + } + + effects.exit(types.thematicBreak) + return ok(code) + } + + /** @type {State} */ + function sequence(code) { + if (code === marker) { + effects.consume(code) + size++ + return sequence + } + + effects.exit(types.thematicBreakSequence) + return atBreak(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/index.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/index.d.ts new file mode 100644 index 00000000..f9143e09 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/index.d.ts @@ -0,0 +1,22 @@ +export {attention} from './lib/attention.js' +export {autolink} from './lib/autolink.js' +export {blankLine} from './lib/blank-line.js' +export {blockQuote} from './lib/block-quote.js' +export {characterEscape} from './lib/character-escape.js' +export {characterReference} from './lib/character-reference.js' +export {codeFenced} from './lib/code-fenced.js' +export {codeIndented} from './lib/code-indented.js' +export {codeText} from './lib/code-text.js' +export {content} from './lib/content.js' +export {definition} from './lib/definition.js' +export {hardBreakEscape} from './lib/hard-break-escape.js' +export {headingAtx} from './lib/heading-atx.js' +export {htmlFlow} from './lib/html-flow.js' +export {htmlText} from './lib/html-text.js' +export {labelEnd} from './lib/label-end.js' +export {labelStartImage} from './lib/label-start-image.js' +export {labelStartLink} from './lib/label-start-link.js' +export {lineEnding} from './lib/line-ending.js' +export {list} from './lib/list.js' +export {setextUnderline} from './lib/setext-underline.js' +export {thematicBreak} from './lib/thematic-break.js' diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/index.js b/_extensions/d2/node_modules/micromark-core-commonmark/index.js new file mode 100644 index 00000000..f9143e09 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/index.js @@ -0,0 +1,22 @@ +export {attention} from './lib/attention.js' +export {autolink} from './lib/autolink.js' +export {blankLine} from './lib/blank-line.js' +export {blockQuote} from './lib/block-quote.js' +export {characterEscape} from './lib/character-escape.js' +export {characterReference} from './lib/character-reference.js' +export {codeFenced} from './lib/code-fenced.js' +export {codeIndented} from './lib/code-indented.js' +export {codeText} from './lib/code-text.js' +export {content} from './lib/content.js' +export {definition} from './lib/definition.js' +export {hardBreakEscape} from './lib/hard-break-escape.js' +export {headingAtx} from './lib/heading-atx.js' +export {htmlFlow} from './lib/html-flow.js' +export {htmlText} from './lib/html-text.js' +export {labelEnd} from './lib/label-end.js' +export {labelStartImage} from './lib/label-start-image.js' +export {labelStartLink} from './lib/label-start-link.js' +export {lineEnding} from './lib/line-ending.js' +export {list} from './lib/list.js' +export {setextUnderline} from './lib/setext-underline.js' +export 
{thematicBreak} from './lib/thematic-break.js' diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/attention.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/attention.d.ts new file mode 100644 index 00000000..7f5a1c5a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/attention.d.ts @@ -0,0 +1,10 @@ +/** @type {Construct} */ +export const attention: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Resolver = import('micromark-util-types').Resolver +export type State = import('micromark-util-types').State +export type Token = import('micromark-util-types').Token +export type Event = import('micromark-util-types').Event +export type Code = import('micromark-util-types').Code +export type Point = import('micromark-util-types').Point diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/attention.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/attention.js new file mode 100644 index 00000000..698a21d2 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/attention.js @@ -0,0 +1,235 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').Event} Event + * @typedef {import('micromark-util-types').Code} Code + * @typedef {import('micromark-util-types').Point} Point + */ +import {push, splice} from 'micromark-util-chunked' +import {classifyCharacter} from 'micromark-util-classify-character' +import {resolveAll} from 'micromark-util-resolve-all' + +/** @type {Construct} */ +export const attention = { + name: 'attention', + tokenize: tokenizeAttention, + resolveAll: resolveAllAttention +} +/** + * Take all events and resolve attention to emphasis or strong. + * + * @type {Resolver} + */ + +function resolveAllAttention(events, context) { + let index = -1 + /** @type {number} */ + + let open + /** @type {Token} */ + + let group + /** @type {Token} */ + + let text + /** @type {Token} */ + + let openingSequence + /** @type {Token} */ + + let closingSequence + /** @type {number} */ + + let use + /** @type {Event[]} */ + + let nextEvents + /** @type {number} */ + + let offset // Walk through all events. + // + // Note: performance of this is fine on an mb of normal markdown, but it’s + // a bottleneck for malicious stuff. + + while (++index < events.length) { + // Find a token that can close. + if ( + events[index][0] === 'enter' && + events[index][1].type === 'attentionSequence' && + events[index][1]._close + ) { + open = index // Now walk back to find an opener. + + while (open--) { + // Find a token that can open the closer. + if ( + events[open][0] === 'exit' && + events[open][1].type === 'attentionSequence' && + events[open][1]._open && // If the markers are the same: + context.sliceSerialize(events[open][1]).charCodeAt(0) === + context.sliceSerialize(events[index][1]).charCodeAt(0) + ) { + // If the opening can close or the closing can open, + // and the close size *is not* a multiple of three, + // but the sum of the opening and closing size *is* multiple of three, + // then don’t match. 
+ if ( + (events[open][1]._close || events[index][1]._open) && + (events[index][1].end.offset - events[index][1].start.offset) % 3 && + !( + (events[open][1].end.offset - + events[open][1].start.offset + + events[index][1].end.offset - + events[index][1].start.offset) % + 3 + ) + ) { + continue + } // Number of markers to use from the sequence. + + use = + events[open][1].end.offset - events[open][1].start.offset > 1 && + events[index][1].end.offset - events[index][1].start.offset > 1 + ? 2 + : 1 + const start = Object.assign({}, events[open][1].end) + const end = Object.assign({}, events[index][1].start) + movePoint(start, -use) + movePoint(end, use) + openingSequence = { + type: use > 1 ? 'strongSequence' : 'emphasisSequence', + start, + end: Object.assign({}, events[open][1].end) + } + closingSequence = { + type: use > 1 ? 'strongSequence' : 'emphasisSequence', + start: Object.assign({}, events[index][1].start), + end + } + text = { + type: use > 1 ? 'strongText' : 'emphasisText', + start: Object.assign({}, events[open][1].end), + end: Object.assign({}, events[index][1].start) + } + group = { + type: use > 1 ? 'strong' : 'emphasis', + start: Object.assign({}, openingSequence.start), + end: Object.assign({}, closingSequence.end) + } + events[open][1].end = Object.assign({}, openingSequence.start) + events[index][1].start = Object.assign({}, closingSequence.end) + nextEvents = [] // If there are more markers in the opening, add them before. + + if (events[open][1].end.offset - events[open][1].start.offset) { + nextEvents = push(nextEvents, [ + ['enter', events[open][1], context], + ['exit', events[open][1], context] + ]) + } // Opening. + + nextEvents = push(nextEvents, [ + ['enter', group, context], + ['enter', openingSequence, context], + ['exit', openingSequence, context], + ['enter', text, context] + ]) // Between. + + nextEvents = push( + nextEvents, + resolveAll( + context.parser.constructs.insideSpan.null, + events.slice(open + 1, index), + context + ) + ) // Closing. + + nextEvents = push(nextEvents, [ + ['exit', text, context], + ['enter', closingSequence, context], + ['exit', closingSequence, context], + ['exit', group, context] + ]) // If there are more markers in the closing, add them after. + + if (events[index][1].end.offset - events[index][1].start.offset) { + offset = 2 + nextEvents = push(nextEvents, [ + ['enter', events[index][1], context], + ['exit', events[index][1], context] + ]) + } else { + offset = 0 + } + + splice(events, open - 1, index - open + 3, nextEvents) + index = open + nextEvents.length - offset - 2 + break + } + } + } + } // Remove remaining sequences. 
+ + index = -1 + + while (++index < events.length) { + if (events[index][1].type === 'attentionSequence') { + events[index][1].type = 'data' + } + } + + return events +} +/** @type {Tokenizer} */ + +function tokenizeAttention(effects, ok) { + const attentionMarkers = this.parser.constructs.attentionMarkers.null + const previous = this.previous + const before = classifyCharacter(previous) + /** @type {NonNullable} */ + + let marker + return start + /** @type {State} */ + + function start(code) { + effects.enter('attentionSequence') + marker = code + return sequence(code) + } + /** @type {State} */ + + function sequence(code) { + if (code === marker) { + effects.consume(code) + return sequence + } + + const token = effects.exit('attentionSequence') + const after = classifyCharacter(code) + const open = + !after || (after === 2 && before) || attentionMarkers.includes(code) + const close = + !before || (before === 2 && after) || attentionMarkers.includes(previous) + token._open = Boolean(marker === 42 ? open : open && (before || !close)) + token._close = Boolean(marker === 42 ? close : close && (after || !open)) + return ok(code) + } +} +/** + * Move a point a bit. + * + * Note: `move` only works inside lines! It’s not possible to move past other + * chunks (replacement characters, tabs, or line endings). + * + * @param {Point} point + * @param {number} offset + * @returns {void} + */ + +function movePoint(point, offset) { + point.column += offset + point.offset += offset + point._bufferIndex += offset +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/autolink.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/autolink.d.ts new file mode 100644 index 00000000..02690867 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/autolink.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const autolink: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/autolink.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/autolink.js new file mode 100644 index 00000000..4dc39059 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/autolink.js @@ -0,0 +1,140 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ +import { + asciiAlpha, + asciiAlphanumeric, + asciiAtext, + asciiControl +} from 'micromark-util-character' + +/** @type {Construct} */ +export const autolink = { + name: 'autolink', + tokenize: tokenizeAutolink +} +/** @type {Tokenizer} */ + +function tokenizeAutolink(effects, ok, nok) { + let size = 1 + return start + /** @type {State} */ + + function start(code) { + effects.enter('autolink') + effects.enter('autolinkMarker') + effects.consume(code) + effects.exit('autolinkMarker') + effects.enter('autolinkProtocol') + return open + } + /** @type {State} */ + + function open(code) { + if (asciiAlpha(code)) { + effects.consume(code) + return schemeOrEmailAtext + } + + return asciiAtext(code) ? emailAtext(code) : nok(code) + } + /** @type {State} */ + + function schemeOrEmailAtext(code) { + return code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code) + ? 
schemeInsideOrEmailAtext(code) + : emailAtext(code) + } + /** @type {State} */ + + function schemeInsideOrEmailAtext(code) { + if (code === 58) { + effects.consume(code) + return urlInside + } + + if ( + (code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)) && + size++ < 32 + ) { + effects.consume(code) + return schemeInsideOrEmailAtext + } + + return emailAtext(code) + } + /** @type {State} */ + + function urlInside(code) { + if (code === 62) { + effects.exit('autolinkProtocol') + return end(code) + } + + if (code === null || code === 32 || code === 60 || asciiControl(code)) { + return nok(code) + } + + effects.consume(code) + return urlInside + } + /** @type {State} */ + + function emailAtext(code) { + if (code === 64) { + effects.consume(code) + size = 0 + return emailAtSignOrDot + } + + if (asciiAtext(code)) { + effects.consume(code) + return emailAtext + } + + return nok(code) + } + /** @type {State} */ + + function emailAtSignOrDot(code) { + return asciiAlphanumeric(code) ? emailLabel(code) : nok(code) + } + /** @type {State} */ + + function emailLabel(code) { + if (code === 46) { + effects.consume(code) + size = 0 + return emailAtSignOrDot + } + + if (code === 62) { + // Exit, then change the type. + effects.exit('autolinkProtocol').type = 'autolinkEmail' + return end(code) + } + + return emailValue(code) + } + /** @type {State} */ + + function emailValue(code) { + if ((code === 45 || asciiAlphanumeric(code)) && size++ < 63) { + effects.consume(code) + return code === 45 ? emailValue : emailLabel + } + + return nok(code) + } + /** @type {State} */ + + function end(code) { + effects.enter('autolinkMarker') + effects.consume(code) + effects.exit('autolinkMarker') + effects.exit('autolink') + return ok + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/blank-line.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/blank-line.d.ts new file mode 100644 index 00000000..fcacec69 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/blank-line.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const blankLine: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/blank-line.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/blank-line.js new file mode 100644 index 00000000..801927fd --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/blank-line.js @@ -0,0 +1,23 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' + +/** @type {Construct} */ +export const blankLine = { + tokenize: tokenizeBlankLine, + partial: true +} +/** @type {Tokenizer} */ + +function tokenizeBlankLine(effects, ok, nok) { + return factorySpace(effects, afterWhitespace, 'linePrefix') + /** @type {State} */ + + function afterWhitespace(code) { + return code === null || markdownLineEnding(code) ? 
ok(code) : nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/block-quote.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/block-quote.d.ts new file mode 100644 index 00000000..a86d5d29 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/block-quote.d.ts @@ -0,0 +1,6 @@ +/** @type {Construct} */ +export const blockQuote: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Exiter = import('micromark-util-types').Exiter +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/block-quote.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/block-quote.js new file mode 100644 index 00000000..8aaa78df --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/block-quote.js @@ -0,0 +1,75 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Exiter} Exiter + * @typedef {import('micromark-util-types').State} State + */ +import {factorySpace} from 'micromark-factory-space' +import {markdownSpace} from 'micromark-util-character' + +/** @type {Construct} */ +export const blockQuote = { + name: 'blockQuote', + tokenize: tokenizeBlockQuoteStart, + continuation: { + tokenize: tokenizeBlockQuoteContinuation + }, + exit +} +/** @type {Tokenizer} */ + +function tokenizeBlockQuoteStart(effects, ok, nok) { + const self = this + return start + /** @type {State} */ + + function start(code) { + if (code === 62) { + const state = self.containerState + + if (!state.open) { + effects.enter('blockQuote', { + _container: true + }) + state.open = true + } + + effects.enter('blockQuotePrefix') + effects.enter('blockQuoteMarker') + effects.consume(code) + effects.exit('blockQuoteMarker') + return after + } + + return nok(code) + } + /** @type {State} */ + + function after(code) { + if (markdownSpace(code)) { + effects.enter('blockQuotePrefixWhitespace') + effects.consume(code) + effects.exit('blockQuotePrefixWhitespace') + effects.exit('blockQuotePrefix') + return ok + } + + effects.exit('blockQuotePrefix') + return ok(code) + } +} +/** @type {Tokenizer} */ + +function tokenizeBlockQuoteContinuation(effects, ok, nok) { + return factorySpace( + effects, + effects.attempt(blockQuote, ok, nok), + 'linePrefix', + this.parser.constructs.disable.null.includes('codeIndented') ? 
undefined : 4 + ) +} +/** @type {Exiter} */ + +function exit(effects) { + effects.exit('blockQuote') +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-escape.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-escape.d.ts new file mode 100644 index 00000000..dd4cec72 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-escape.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const characterEscape: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-escape.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-escape.js new file mode 100644 index 00000000..6eea11db --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-escape.js @@ -0,0 +1,39 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ +import {asciiPunctuation} from 'micromark-util-character' + +/** @type {Construct} */ +export const characterEscape = { + name: 'characterEscape', + tokenize: tokenizeCharacterEscape +} +/** @type {Tokenizer} */ + +function tokenizeCharacterEscape(effects, ok, nok) { + return start + /** @type {State} */ + + function start(code) { + effects.enter('characterEscape') + effects.enter('escapeMarker') + effects.consume(code) + effects.exit('escapeMarker') + return open + } + /** @type {State} */ + + function open(code) { + if (asciiPunctuation(code)) { + effects.enter('characterEscapeValue') + effects.consume(code) + effects.exit('characterEscapeValue') + effects.exit('characterEscape') + return ok + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-reference.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-reference.d.ts new file mode 100644 index 00000000..76035826 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-reference.d.ts @@ -0,0 +1,7 @@ +/** @type {Construct} */ +export const characterReference: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-reference.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-reference.js new file mode 100644 index 00000000..bb9b1010 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/character-reference.js @@ -0,0 +1,104 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ +import {decodeNamedCharacterReference} from 'decode-named-character-reference' +import { + asciiAlphanumeric, + asciiDigit, + asciiHexDigit +} from 'micromark-util-character' + +/** @type {Construct} */ +export 
const characterReference = { + name: 'characterReference', + tokenize: tokenizeCharacterReference +} +/** @type {Tokenizer} */ + +function tokenizeCharacterReference(effects, ok, nok) { + const self = this + let size = 0 + /** @type {number} */ + + let max + /** @type {(code: Code) => code is number} */ + + let test + return start + /** @type {State} */ + + function start(code) { + effects.enter('characterReference') + effects.enter('characterReferenceMarker') + effects.consume(code) + effects.exit('characterReferenceMarker') + return open + } + /** @type {State} */ + + function open(code) { + if (code === 35) { + effects.enter('characterReferenceMarkerNumeric') + effects.consume(code) + effects.exit('characterReferenceMarkerNumeric') + return numeric + } + + effects.enter('characterReferenceValue') + max = 31 + test = asciiAlphanumeric + return value(code) + } + /** @type {State} */ + + function numeric(code) { + if (code === 88 || code === 120) { + effects.enter('characterReferenceMarkerHexadecimal') + effects.consume(code) + effects.exit('characterReferenceMarkerHexadecimal') + effects.enter('characterReferenceValue') + max = 6 + test = asciiHexDigit + return value + } + + effects.enter('characterReferenceValue') + max = 7 + test = asciiDigit + return value(code) + } + /** @type {State} */ + + function value(code) { + /** @type {Token} */ + let token + + if (code === 59 && size) { + token = effects.exit('characterReferenceValue') + + if ( + test === asciiAlphanumeric && + !decodeNamedCharacterReference(self.sliceSerialize(token)) + ) { + return nok(code) + } + + effects.enter('characterReferenceMarker') + effects.consume(code) + effects.exit('characterReferenceMarker') + effects.exit('characterReference') + return ok + } + + if (test(code) && size++ < max) { + effects.consume(code) + return value + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-fenced.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-fenced.d.ts new file mode 100644 index 00000000..d0843f5b --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-fenced.d.ts @@ -0,0 +1,6 @@ +/** @type {Construct} */ +export const codeFenced: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-fenced.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-fenced.js new file mode 100644 index 00000000..49b1624f --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-fenced.js @@ -0,0 +1,234 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ +import {factorySpace} from 'micromark-factory-space' +import { + markdownLineEnding, + markdownLineEndingOrSpace +} from 'micromark-util-character' + +/** @type {Construct} */ +export const codeFenced = { + name: 'codeFenced', + tokenize: tokenizeCodeFenced, + concrete: true +} +/** @type {Tokenizer} */ + +function tokenizeCodeFenced(effects, ok, nok) { + const self = this + /** @type {Construct} */ + + const closingFenceConstruct = { + tokenize: tokenizeClosingFence, + partial: 
true + } + /** @type {Construct} */ + + const nonLazyLine = { + tokenize: tokenizeNonLazyLine, + partial: true + } + const tail = this.events[this.events.length - 1] + const initialPrefix = + tail && tail[1].type === 'linePrefix' + ? tail[2].sliceSerialize(tail[1], true).length + : 0 + let sizeOpen = 0 + /** @type {NonNullable} */ + + let marker + return start + /** @type {State} */ + + function start(code) { + effects.enter('codeFenced') + effects.enter('codeFencedFence') + effects.enter('codeFencedFenceSequence') + marker = code + return sequenceOpen(code) + } + /** @type {State} */ + + function sequenceOpen(code) { + if (code === marker) { + effects.consume(code) + sizeOpen++ + return sequenceOpen + } + + effects.exit('codeFencedFenceSequence') + return sizeOpen < 3 + ? nok(code) + : factorySpace(effects, infoOpen, 'whitespace')(code) + } + /** @type {State} */ + + function infoOpen(code) { + if (code === null || markdownLineEnding(code)) { + return openAfter(code) + } + + effects.enter('codeFencedFenceInfo') + effects.enter('chunkString', { + contentType: 'string' + }) + return info(code) + } + /** @type {State} */ + + function info(code) { + if (code === null || markdownLineEndingOrSpace(code)) { + effects.exit('chunkString') + effects.exit('codeFencedFenceInfo') + return factorySpace(effects, infoAfter, 'whitespace')(code) + } + + if (code === 96 && code === marker) return nok(code) + effects.consume(code) + return info + } + /** @type {State} */ + + function infoAfter(code) { + if (code === null || markdownLineEnding(code)) { + return openAfter(code) + } + + effects.enter('codeFencedFenceMeta') + effects.enter('chunkString', { + contentType: 'string' + }) + return meta(code) + } + /** @type {State} */ + + function meta(code) { + if (code === null || markdownLineEnding(code)) { + effects.exit('chunkString') + effects.exit('codeFencedFenceMeta') + return openAfter(code) + } + + if (code === 96 && code === marker) return nok(code) + effects.consume(code) + return meta + } + /** @type {State} */ + + function openAfter(code) { + effects.exit('codeFencedFence') + return self.interrupt ? ok(code) : contentStart(code) + } + /** @type {State} */ + + function contentStart(code) { + if (code === null) { + return after(code) + } + + if (markdownLineEnding(code)) { + return effects.attempt( + nonLazyLine, + effects.attempt( + closingFenceConstruct, + after, + initialPrefix + ? factorySpace( + effects, + contentStart, + 'linePrefix', + initialPrefix + 1 + ) + : contentStart + ), + after + )(code) + } + + effects.enter('codeFlowValue') + return contentContinue(code) + } + /** @type {State} */ + + function contentContinue(code) { + if (code === null || markdownLineEnding(code)) { + effects.exit('codeFlowValue') + return contentStart(code) + } + + effects.consume(code) + return contentContinue + } + /** @type {State} */ + + function after(code) { + effects.exit('codeFenced') + return ok(code) + } + /** @type {Tokenizer} */ + + function tokenizeNonLazyLine(effects, ok, nok) { + const self = this + return start + /** @type {State} */ + + function start(code) { + effects.enter('lineEnding') + effects.consume(code) + effects.exit('lineEnding') + return lineStart + } + /** @type {State} */ + + function lineStart(code) { + return self.parser.lazy[self.now().line] ? 
nok(code) : ok(code) + } + } + /** @type {Tokenizer} */ + + function tokenizeClosingFence(effects, ok, nok) { + let size = 0 + return factorySpace( + effects, + closingSequenceStart, + 'linePrefix', + this.parser.constructs.disable.null.includes('codeIndented') + ? undefined + : 4 + ) + /** @type {State} */ + + function closingSequenceStart(code) { + effects.enter('codeFencedFence') + effects.enter('codeFencedFenceSequence') + return closingSequence(code) + } + /** @type {State} */ + + function closingSequence(code) { + if (code === marker) { + effects.consume(code) + size++ + return closingSequence + } + + if (size < sizeOpen) return nok(code) + effects.exit('codeFencedFenceSequence') + return factorySpace(effects, closingSequenceEnd, 'whitespace')(code) + } + /** @type {State} */ + + function closingSequenceEnd(code) { + if (code === null || markdownLineEnding(code)) { + effects.exit('codeFencedFence') + return ok(code) + } + + return nok(code) + } + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-indented.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-indented.d.ts new file mode 100644 index 00000000..2259ccb5 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-indented.d.ts @@ -0,0 +1,7 @@ +/** @type {Construct} */ +export const codeIndented: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Resolver = import('micromark-util-types').Resolver +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-indented.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-indented.js new file mode 100644 index 00000000..7f17930e --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-indented.js @@ -0,0 +1,109 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').State} State + */ +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' + +/** @type {Construct} */ +export const codeIndented = { + name: 'codeIndented', + tokenize: tokenizeCodeIndented +} +/** @type {Construct} */ + +const indentedContent = { + tokenize: tokenizeIndentedContent, + partial: true +} +/** @type {Tokenizer} */ + +function tokenizeCodeIndented(effects, ok, nok) { + const self = this + return start + /** @type {State} */ + + function start(code) { + effects.enter('codeIndented') + return factorySpace(effects, afterStartPrefix, 'linePrefix', 4 + 1)(code) + } + /** @type {State} */ + + function afterStartPrefix(code) { + const tail = self.events[self.events.length - 1] + return tail && + tail[1].type === 'linePrefix' && + tail[2].sliceSerialize(tail[1], true).length >= 4 + ? 
afterPrefix(code) + : nok(code) + } + /** @type {State} */ + + function afterPrefix(code) { + if (code === null) { + return after(code) + } + + if (markdownLineEnding(code)) { + return effects.attempt(indentedContent, afterPrefix, after)(code) + } + + effects.enter('codeFlowValue') + return content(code) + } + /** @type {State} */ + + function content(code) { + if (code === null || markdownLineEnding(code)) { + effects.exit('codeFlowValue') + return afterPrefix(code) + } + + effects.consume(code) + return content + } + /** @type {State} */ + + function after(code) { + effects.exit('codeIndented') + return ok(code) + } +} +/** @type {Tokenizer} */ + +function tokenizeIndentedContent(effects, ok, nok) { + const self = this + return start + /** @type {State} */ + + function start(code) { + // If this is a lazy line, it can’t be code. + if (self.parser.lazy[self.now().line]) { + return nok(code) + } + + if (markdownLineEnding(code)) { + effects.enter('lineEnding') + effects.consume(code) + effects.exit('lineEnding') + return start + } + + return factorySpace(effects, afterPrefix, 'linePrefix', 4 + 1)(code) + } + /** @type {State} */ + + function afterPrefix(code) { + const tail = self.events[self.events.length - 1] + return tail && + tail[1].type === 'linePrefix' && + tail[2].sliceSerialize(tail[1], true).length >= 4 + ? ok(code) + : markdownLineEnding(code) + ? start(code) + : nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-text.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-text.d.ts new file mode 100644 index 00000000..619a420d --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-text.d.ts @@ -0,0 +1,8 @@ +/** @type {Construct} */ +export const codeText: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Previous = import('micromark-util-types').Previous +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-text.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-text.js new file mode 100644 index 00000000..e76393f6 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/code-text.js @@ -0,0 +1,186 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Previous} Previous + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').State} State + */ +import {markdownLineEnding} from 'micromark-util-character' + +/** @type {Construct} */ +export const codeText = { + name: 'codeText', + tokenize: tokenizeCodeText, + resolve: resolveCodeText, + previous +} +/** @type {Resolver} */ + +function resolveCodeText(events) { + let tailExitIndex = events.length - 4 + let headEnterIndex = 3 + /** @type {number} */ + + let index + /** @type {number|undefined} */ + + let enter // If we start and end with an EOL or a space. 
+ + if ( + (events[headEnterIndex][1].type === 'lineEnding' || + events[headEnterIndex][1].type === 'space') && + (events[tailExitIndex][1].type === 'lineEnding' || + events[tailExitIndex][1].type === 'space') + ) { + index = headEnterIndex // And we have data. + + while (++index < tailExitIndex) { + if (events[index][1].type === 'codeTextData') { + // Then we have padding. + events[headEnterIndex][1].type = 'codeTextPadding' + events[tailExitIndex][1].type = 'codeTextPadding' + headEnterIndex += 2 + tailExitIndex -= 2 + break + } + } + } // Merge adjacent spaces and data. + + index = headEnterIndex - 1 + tailExitIndex++ + + while (++index <= tailExitIndex) { + if (enter === undefined) { + if (index !== tailExitIndex && events[index][1].type !== 'lineEnding') { + enter = index + } + } else if ( + index === tailExitIndex || + events[index][1].type === 'lineEnding' + ) { + events[enter][1].type = 'codeTextData' + + if (index !== enter + 2) { + events[enter][1].end = events[index - 1][1].end + events.splice(enter + 2, index - enter - 2) + tailExitIndex -= index - enter - 2 + index = enter + 2 + } + + enter = undefined + } + } + + return events +} +/** @type {Previous} */ + +function previous(code) { + // If there is a previous code, there will always be a tail. + return ( + code !== 96 || + this.events[this.events.length - 1][1].type === 'characterEscape' + ) +} +/** @type {Tokenizer} */ + +function tokenizeCodeText(effects, ok, nok) { + const self = this + let sizeOpen = 0 + /** @type {number} */ + + let size + /** @type {Token} */ + + let token + return start + /** @type {State} */ + + function start(code) { + effects.enter('codeText') + effects.enter('codeTextSequence') + return openingSequence(code) + } + /** @type {State} */ + + function openingSequence(code) { + if (code === 96) { + effects.consume(code) + sizeOpen++ + return openingSequence + } + + effects.exit('codeTextSequence') + return gap(code) + } + /** @type {State} */ + + function gap(code) { + // EOF. + if (code === null) { + return nok(code) + } // Closing fence? + // Could also be data. + + if (code === 96) { + token = effects.enter('codeTextSequence') + size = 0 + return closingSequence(code) + } // Tabs don’t work, and virtual spaces don’t make sense. + + if (code === 32) { + effects.enter('space') + effects.consume(code) + effects.exit('space') + return gap + } + + if (markdownLineEnding(code)) { + effects.enter('lineEnding') + effects.consume(code) + effects.exit('lineEnding') + return gap + } // Data. + + effects.enter('codeTextData') + return data(code) + } // In code. + + /** @type {State} */ + + function data(code) { + if ( + code === null || + code === 32 || + code === 96 || + markdownLineEnding(code) + ) { + effects.exit('codeTextData') + return gap(code) + } + + effects.consume(code) + return data + } // Closing fence. + + /** @type {State} */ + + function closingSequence(code) { + // More. + if (code === 96) { + effects.consume(code) + size++ + return closingSequence + } // Done! + + if (size === sizeOpen) { + effects.exit('codeTextSequence') + effects.exit('codeText') + return ok(code) + } // More or less accents: mark as data. 
+ + token.type = 'codeTextData' + return data(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/content.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/content.d.ts new file mode 100644 index 00000000..0bfe34f1 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/content.d.ts @@ -0,0 +1,10 @@ +/** + * No name because it must not be turned off. + * @type {Construct} + */ +export const content: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/content.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/content.js new file mode 100644 index 00000000..67ea3263 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/content.js @@ -0,0 +1,124 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').State} State + */ +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' +import {subtokenize} from 'micromark-util-subtokenize' + +/** + * No name because it must not be turned off. + * @type {Construct} + */ +export const content = { + tokenize: tokenizeContent, + resolve: resolveContent +} +/** @type {Construct} */ + +const continuationConstruct = { + tokenize: tokenizeContinuation, + partial: true +} +/** + * Content is transparent: it’s parsed right now. That way, definitions are also + * parsed right now: before text in paragraphs (specifically, media) are parsed. + * + * @type {Resolver} + */ + +function resolveContent(events) { + subtokenize(events) + return events +} +/** @type {Tokenizer} */ + +function tokenizeContent(effects, ok) { + /** @type {Token} */ + let previous + return start + /** @type {State} */ + + function start(code) { + effects.enter('content') + previous = effects.enter('chunkContent', { + contentType: 'content' + }) + return data(code) + } + /** @type {State} */ + + function data(code) { + if (code === null) { + return contentEnd(code) + } + + if (markdownLineEnding(code)) { + return effects.check( + continuationConstruct, + contentContinue, + contentEnd + )(code) + } // Data. 
+ + effects.consume(code) + return data + } + /** @type {State} */ + + function contentEnd(code) { + effects.exit('chunkContent') + effects.exit('content') + return ok(code) + } + /** @type {State} */ + + function contentContinue(code) { + effects.consume(code) + effects.exit('chunkContent') + previous.next = effects.enter('chunkContent', { + contentType: 'content', + previous + }) + previous = previous.next + return data + } +} +/** @type {Tokenizer} */ + +function tokenizeContinuation(effects, ok, nok) { + const self = this + return startLookahead + /** @type {State} */ + + function startLookahead(code) { + effects.exit('chunkContent') + effects.enter('lineEnding') + effects.consume(code) + effects.exit('lineEnding') + return factorySpace(effects, prefixed, 'linePrefix') + } + /** @type {State} */ + + function prefixed(code) { + if (code === null || markdownLineEnding(code)) { + return nok(code) + } + + const tail = self.events[self.events.length - 1] + + if ( + !self.parser.constructs.disable.null.includes('codeIndented') && + tail && + tail[1].type === 'linePrefix' && + tail[2].sliceSerialize(tail[1], true).length >= 4 + ) { + return ok(code) + } + + return effects.interrupt(self.parser.constructs.flow, nok, ok)(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/definition.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/definition.d.ts new file mode 100644 index 00000000..19b9227a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/definition.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const definition: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/definition.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/definition.js new file mode 100644 index 00000000..a0ed5715 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/definition.js @@ -0,0 +1,131 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ +import {factoryDestination} from 'micromark-factory-destination' +import {factoryLabel} from 'micromark-factory-label' +import {factorySpace} from 'micromark-factory-space' +import {factoryTitle} from 'micromark-factory-title' +import {factoryWhitespace} from 'micromark-factory-whitespace' +import {normalizeIdentifier} from 'micromark-util-normalize-identifier' +import { + markdownLineEnding, + markdownLineEndingOrSpace +} from 'micromark-util-character' + +/** @type {Construct} */ +export const definition = { + name: 'definition', + tokenize: tokenizeDefinition +} +/** @type {Construct} */ + +const titleConstruct = { + tokenize: tokenizeTitle, + partial: true +} +/** @type {Tokenizer} */ + +function tokenizeDefinition(effects, ok, nok) { + const self = this + /** @type {string} */ + + let identifier + return start + /** @type {State} */ + + function start(code) { + effects.enter('definition') + return factoryLabel.call( + self, + effects, + labelAfter, + nok, + 'definitionLabel', + 'definitionLabelMarker', + 'definitionLabelString' + )(code) + } + /** @type {State} */ + + function labelAfter(code) { + identifier = normalizeIdentifier( + self.sliceSerialize(self.events[self.events.length - 
1][1]).slice(1, -1) + ) + + if (code === 58) { + effects.enter('definitionMarker') + effects.consume(code) + effects.exit('definitionMarker') // Note: blank lines can’t exist in content. + + return factoryWhitespace( + effects, + factoryDestination( + effects, + effects.attempt( + titleConstruct, + factorySpace(effects, after, 'whitespace'), + factorySpace(effects, after, 'whitespace') + ), + nok, + 'definitionDestination', + 'definitionDestinationLiteral', + 'definitionDestinationLiteralMarker', + 'definitionDestinationRaw', + 'definitionDestinationString' + ) + ) + } + + return nok(code) + } + /** @type {State} */ + + function after(code) { + if (code === null || markdownLineEnding(code)) { + effects.exit('definition') + + if (!self.parser.defined.includes(identifier)) { + self.parser.defined.push(identifier) + } + + return ok(code) + } + + return nok(code) + } +} +/** @type {Tokenizer} */ + +function tokenizeTitle(effects, ok, nok) { + return start + /** @type {State} */ + + function start(code) { + return markdownLineEndingOrSpace(code) + ? factoryWhitespace(effects, before)(code) + : nok(code) + } + /** @type {State} */ + + function before(code) { + if (code === 34 || code === 39 || code === 40) { + return factoryTitle( + effects, + factorySpace(effects, after, 'whitespace'), + nok, + 'definitionTitle', + 'definitionTitleMarker', + 'definitionTitleString' + )(code) + } + + return nok(code) + } + /** @type {State} */ + + function after(code) { + return code === null || markdownLineEnding(code) ? ok(code) : nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/hard-break-escape.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/hard-break-escape.d.ts new file mode 100644 index 00000000..f140079b --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/hard-break-escape.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const hardBreakEscape: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/hard-break-escape.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/hard-break-escape.js new file mode 100644 index 00000000..bc23b005 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/hard-break-escape.js @@ -0,0 +1,36 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ +import {markdownLineEnding} from 'micromark-util-character' + +/** @type {Construct} */ +export const hardBreakEscape = { + name: 'hardBreakEscape', + tokenize: tokenizeHardBreakEscape +} +/** @type {Tokenizer} */ + +function tokenizeHardBreakEscape(effects, ok, nok) { + return start + /** @type {State} */ + + function start(code) { + effects.enter('hardBreakEscape') + effects.enter('escapeMarker') + effects.consume(code) + return open + } + /** @type {State} */ + + function open(code) { + if (markdownLineEnding(code)) { + effects.exit('escapeMarker') + effects.exit('hardBreakEscape') + return ok(code) + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/heading-atx.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/heading-atx.d.ts new file mode 100644 index 00000000..3a252315 --- /dev/null 
+++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/heading-atx.d.ts @@ -0,0 +1,7 @@ +/** @type {Construct} */ +export const headingAtx: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/heading-atx.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/heading-atx.js new file mode 100644 index 00000000..8712d7ef --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/heading-atx.js @@ -0,0 +1,147 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').State} State + */ +import {factorySpace} from 'micromark-factory-space' +import { + markdownLineEnding, + markdownLineEndingOrSpace, + markdownSpace +} from 'micromark-util-character' +import {splice} from 'micromark-util-chunked' + +/** @type {Construct} */ +export const headingAtx = { + name: 'headingAtx', + tokenize: tokenizeHeadingAtx, + resolve: resolveHeadingAtx +} +/** @type {Resolver} */ + +function resolveHeadingAtx(events, context) { + let contentEnd = events.length - 2 + let contentStart = 3 + /** @type {Token} */ + + let content + /** @type {Token} */ + + let text // Prefix whitespace, part of the opening. + + if (events[contentStart][1].type === 'whitespace') { + contentStart += 2 + } // Suffix whitespace, part of the closing. + + if ( + contentEnd - 2 > contentStart && + events[contentEnd][1].type === 'whitespace' + ) { + contentEnd -= 2 + } + + if ( + events[contentEnd][1].type === 'atxHeadingSequence' && + (contentStart === contentEnd - 1 || + (contentEnd - 4 > contentStart && + events[contentEnd - 2][1].type === 'whitespace')) + ) { + contentEnd -= contentStart + 1 === contentEnd ? 2 : 4 + } + + if (contentEnd > contentStart) { + content = { + type: 'atxHeadingText', + start: events[contentStart][1].start, + end: events[contentEnd][1].end + } + text = { + type: 'chunkText', + start: events[contentStart][1].start, + end: events[contentEnd][1].end, + // @ts-expect-error Constants are fine to assign. + contentType: 'text' + } + splice(events, contentStart, contentEnd - contentStart + 1, [ + ['enter', content, context], + ['enter', text, context], + ['exit', text, context], + ['exit', content, context] + ]) + } + + return events +} +/** @type {Tokenizer} */ + +function tokenizeHeadingAtx(effects, ok, nok) { + const self = this + let size = 0 + return start + /** @type {State} */ + + function start(code) { + effects.enter('atxHeading') + effects.enter('atxHeadingSequence') + return fenceOpenInside(code) + } + /** @type {State} */ + + function fenceOpenInside(code) { + if (code === 35 && size++ < 6) { + effects.consume(code) + return fenceOpenInside + } + + if (code === null || markdownLineEndingOrSpace(code)) { + effects.exit('atxHeadingSequence') + return self.interrupt ? 
ok(code) : headingBreak(code) + } + + return nok(code) + } + /** @type {State} */ + + function headingBreak(code) { + if (code === 35) { + effects.enter('atxHeadingSequence') + return sequence(code) + } + + if (code === null || markdownLineEnding(code)) { + effects.exit('atxHeading') + return ok(code) + } + + if (markdownSpace(code)) { + return factorySpace(effects, headingBreak, 'whitespace')(code) + } + + effects.enter('atxHeadingText') + return data(code) + } + /** @type {State} */ + + function sequence(code) { + if (code === 35) { + effects.consume(code) + return sequence + } + + effects.exit('atxHeadingSequence') + return headingBreak(code) + } + /** @type {State} */ + + function data(code) { + if (code === null || code === 35 || markdownLineEndingOrSpace(code)) { + effects.exit('atxHeadingText') + return headingBreak(code) + } + + effects.consume(code) + return data + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-flow.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-flow.d.ts new file mode 100644 index 00000000..434d6c60 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-flow.d.ts @@ -0,0 +1,7 @@ +/** @type {Construct} */ +export const htmlFlow: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-flow.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-flow.js new file mode 100644 index 00000000..fde0babb --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-flow.js @@ -0,0 +1,561 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ +import { + asciiAlpha, + asciiAlphanumeric, + markdownLineEnding, + markdownLineEndingOrSpace, + markdownSpace +} from 'micromark-util-character' +import {htmlBlockNames, htmlRawNames} from 'micromark-util-html-tag-name' +import {blankLine} from './blank-line.js' +/** @type {Construct} */ + +export const htmlFlow = { + name: 'htmlFlow', + tokenize: tokenizeHtmlFlow, + resolveTo: resolveToHtmlFlow, + concrete: true +} +/** @type {Construct} */ + +const nextBlankConstruct = { + tokenize: tokenizeNextBlank, + partial: true +} +/** @type {Resolver} */ + +function resolveToHtmlFlow(events) { + let index = events.length + + while (index--) { + if (events[index][0] === 'enter' && events[index][1].type === 'htmlFlow') { + break + } + } + + if (index > 1 && events[index - 2][1].type === 'linePrefix') { + // Add the prefix start to the HTML token. + events[index][1].start = events[index - 2][1].start // Add the prefix start to the HTML line token. + + events[index + 1][1].start = events[index - 2][1].start // Remove the line prefix. 
+ + events.splice(index - 2, 2) + } + + return events +} +/** @type {Tokenizer} */ + +function tokenizeHtmlFlow(effects, ok, nok) { + const self = this + /** @type {number} */ + + let kind + /** @type {boolean} */ + + let startTag + /** @type {string} */ + + let buffer + /** @type {number} */ + + let index + /** @type {Code} */ + + let marker + return start + /** @type {State} */ + + function start(code) { + effects.enter('htmlFlow') + effects.enter('htmlFlowData') + effects.consume(code) + return open + } + /** @type {State} */ + + function open(code) { + if (code === 33) { + effects.consume(code) + return declarationStart + } + + if (code === 47) { + effects.consume(code) + return tagCloseStart + } + + if (code === 63) { + effects.consume(code) + kind = 3 // While we’re in an instruction instead of a declaration, we’re on a `?` + // right now, so we do need to search for `>`, similar to declarations. + + return self.interrupt ? ok : continuationDeclarationInside + } + + if (asciiAlpha(code)) { + effects.consume(code) + buffer = String.fromCharCode(code) + startTag = true + return tagName + } + + return nok(code) + } + /** @type {State} */ + + function declarationStart(code) { + if (code === 45) { + effects.consume(code) + kind = 2 + return commentOpenInside + } + + if (code === 91) { + effects.consume(code) + kind = 5 + buffer = 'CDATA[' + index = 0 + return cdataOpenInside + } + + if (asciiAlpha(code)) { + effects.consume(code) + kind = 4 + return self.interrupt ? ok : continuationDeclarationInside + } + + return nok(code) + } + /** @type {State} */ + + function commentOpenInside(code) { + if (code === 45) { + effects.consume(code) + return self.interrupt ? ok : continuationDeclarationInside + } + + return nok(code) + } + /** @type {State} */ + + function cdataOpenInside(code) { + if (code === buffer.charCodeAt(index++)) { + effects.consume(code) + return index === buffer.length + ? self.interrupt + ? ok + : continuation + : cdataOpenInside + } + + return nok(code) + } + /** @type {State} */ + + function tagCloseStart(code) { + if (asciiAlpha(code)) { + effects.consume(code) + buffer = String.fromCharCode(code) + return tagName + } + + return nok(code) + } + /** @type {State} */ + + function tagName(code) { + if ( + code === null || + code === 47 || + code === 62 || + markdownLineEndingOrSpace(code) + ) { + if ( + code !== 47 && + startTag && + htmlRawNames.includes(buffer.toLowerCase()) + ) { + kind = 1 + return self.interrupt ? ok(code) : continuation(code) + } + + if (htmlBlockNames.includes(buffer.toLowerCase())) { + kind = 6 + + if (code === 47) { + effects.consume(code) + return basicSelfClosing + } + + return self.interrupt ? ok(code) : continuation(code) + } + + kind = 7 // Do not support complete HTML when interrupting + + return self.interrupt && !self.parser.lazy[self.now().line] + ? nok(code) + : startTag + ? completeAttributeNameBefore(code) + : completeClosingTagAfter(code) + } + + if (code === 45 || asciiAlphanumeric(code)) { + effects.consume(code) + buffer += String.fromCharCode(code) + return tagName + } + + return nok(code) + } + /** @type {State} */ + + function basicSelfClosing(code) { + if (code === 62) { + effects.consume(code) + return self.interrupt ? 
ok : continuation + } + + return nok(code) + } + /** @type {State} */ + + function completeClosingTagAfter(code) { + if (markdownSpace(code)) { + effects.consume(code) + return completeClosingTagAfter + } + + return completeEnd(code) + } + /** @type {State} */ + + function completeAttributeNameBefore(code) { + if (code === 47) { + effects.consume(code) + return completeEnd + } + + if (code === 58 || code === 95 || asciiAlpha(code)) { + effects.consume(code) + return completeAttributeName + } + + if (markdownSpace(code)) { + effects.consume(code) + return completeAttributeNameBefore + } + + return completeEnd(code) + } + /** @type {State} */ + + function completeAttributeName(code) { + if ( + code === 45 || + code === 46 || + code === 58 || + code === 95 || + asciiAlphanumeric(code) + ) { + effects.consume(code) + return completeAttributeName + } + + return completeAttributeNameAfter(code) + } + /** @type {State} */ + + function completeAttributeNameAfter(code) { + if (code === 61) { + effects.consume(code) + return completeAttributeValueBefore + } + + if (markdownSpace(code)) { + effects.consume(code) + return completeAttributeNameAfter + } + + return completeAttributeNameBefore(code) + } + /** @type {State} */ + + function completeAttributeValueBefore(code) { + if ( + code === null || + code === 60 || + code === 61 || + code === 62 || + code === 96 + ) { + return nok(code) + } + + if (code === 34 || code === 39) { + effects.consume(code) + marker = code + return completeAttributeValueQuoted + } + + if (markdownSpace(code)) { + effects.consume(code) + return completeAttributeValueBefore + } + + marker = null + return completeAttributeValueUnquoted(code) + } + /** @type {State} */ + + function completeAttributeValueQuoted(code) { + if (code === null || markdownLineEnding(code)) { + return nok(code) + } + + if (code === marker) { + effects.consume(code) + return completeAttributeValueQuotedAfter + } + + effects.consume(code) + return completeAttributeValueQuoted + } + /** @type {State} */ + + function completeAttributeValueUnquoted(code) { + if ( + code === null || + code === 34 || + code === 39 || + code === 60 || + code === 61 || + code === 62 || + code === 96 || + markdownLineEndingOrSpace(code) + ) { + return completeAttributeNameAfter(code) + } + + effects.consume(code) + return completeAttributeValueUnquoted + } + /** @type {State} */ + + function completeAttributeValueQuotedAfter(code) { + if (code === 47 || code === 62 || markdownSpace(code)) { + return completeAttributeNameBefore(code) + } + + return nok(code) + } + /** @type {State} */ + + function completeEnd(code) { + if (code === 62) { + effects.consume(code) + return completeAfter + } + + return nok(code) + } + /** @type {State} */ + + function completeAfter(code) { + if (markdownSpace(code)) { + effects.consume(code) + return completeAfter + } + + return code === null || markdownLineEnding(code) + ? 
continuation(code) + : nok(code) + } + /** @type {State} */ + + function continuation(code) { + if (code === 45 && kind === 2) { + effects.consume(code) + return continuationCommentInside + } + + if (code === 60 && kind === 1) { + effects.consume(code) + return continuationRawTagOpen + } + + if (code === 62 && kind === 4) { + effects.consume(code) + return continuationClose + } + + if (code === 63 && kind === 3) { + effects.consume(code) + return continuationDeclarationInside + } + + if (code === 93 && kind === 5) { + effects.consume(code) + return continuationCharacterDataInside + } + + if (markdownLineEnding(code) && (kind === 6 || kind === 7)) { + return effects.check( + nextBlankConstruct, + continuationClose, + continuationAtLineEnding + )(code) + } + + if (code === null || markdownLineEnding(code)) { + return continuationAtLineEnding(code) + } + + effects.consume(code) + return continuation + } + /** @type {State} */ + + function continuationAtLineEnding(code) { + effects.exit('htmlFlowData') + return htmlContinueStart(code) + } + /** @type {State} */ + + function htmlContinueStart(code) { + if (code === null) { + return done(code) + } + + if (markdownLineEnding(code)) { + return effects.attempt( + { + tokenize: htmlLineEnd, + partial: true + }, + htmlContinueStart, + done + )(code) + } + + effects.enter('htmlFlowData') + return continuation(code) + } + /** @type {Tokenizer} */ + + function htmlLineEnd(effects, ok, nok) { + return start + /** @type {State} */ + + function start(code) { + effects.enter('lineEnding') + effects.consume(code) + effects.exit('lineEnding') + return lineStart + } + /** @type {State} */ + + function lineStart(code) { + return self.parser.lazy[self.now().line] ? nok(code) : ok(code) + } + } + /** @type {State} */ + + function continuationCommentInside(code) { + if (code === 45) { + effects.consume(code) + return continuationDeclarationInside + } + + return continuation(code) + } + /** @type {State} */ + + function continuationRawTagOpen(code) { + if (code === 47) { + effects.consume(code) + buffer = '' + return continuationRawEndTag + } + + return continuation(code) + } + /** @type {State} */ + + function continuationRawEndTag(code) { + if (code === 62 && htmlRawNames.includes(buffer.toLowerCase())) { + effects.consume(code) + return continuationClose + } + + if (asciiAlpha(code) && buffer.length < 8) { + effects.consume(code) + buffer += String.fromCharCode(code) + return continuationRawEndTag + } + + return continuation(code) + } + /** @type {State} */ + + function continuationCharacterDataInside(code) { + if (code === 93) { + effects.consume(code) + return continuationDeclarationInside + } + + return continuation(code) + } + /** @type {State} */ + + function continuationDeclarationInside(code) { + if (code === 62) { + effects.consume(code) + return continuationClose + } // More dashes. 
+ + if (code === 45 && kind === 2) { + effects.consume(code) + return continuationDeclarationInside + } + + return continuation(code) + } + /** @type {State} */ + + function continuationClose(code) { + if (code === null || markdownLineEnding(code)) { + effects.exit('htmlFlowData') + return done(code) + } + + effects.consume(code) + return continuationClose + } + /** @type {State} */ + + function done(code) { + effects.exit('htmlFlow') + return ok(code) + } +} +/** @type {Tokenizer} */ + +function tokenizeNextBlank(effects, ok, nok) { + return start + /** @type {State} */ + + function start(code) { + effects.exit('htmlFlowData') + effects.enter('lineEndingBlank') + effects.consume(code) + effects.exit('lineEndingBlank') + return effects.attempt(blankLine, ok, nok) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-text.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-text.d.ts new file mode 100644 index 00000000..74c71bf4 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-text.d.ts @@ -0,0 +1,6 @@ +/** @type {Construct} */ +export const htmlText: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-text.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-text.js new file mode 100644 index 00000000..f324a5b8 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/html-text.js @@ -0,0 +1,479 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ +import {factorySpace} from 'micromark-factory-space' +import { + asciiAlpha, + asciiAlphanumeric, + markdownLineEnding, + markdownLineEndingOrSpace, + markdownSpace +} from 'micromark-util-character' + +/** @type {Construct} */ +export const htmlText = { + name: 'htmlText', + tokenize: tokenizeHtmlText +} +/** @type {Tokenizer} */ + +function tokenizeHtmlText(effects, ok, nok) { + const self = this + /** @type {NonNullable|undefined} */ + + let marker + /** @type {string} */ + + let buffer + /** @type {number} */ + + let index + /** @type {State} */ + + let returnState + return start + /** @type {State} */ + + function start(code) { + effects.enter('htmlText') + effects.enter('htmlTextData') + effects.consume(code) + return open + } + /** @type {State} */ + + function open(code) { + if (code === 33) { + effects.consume(code) + return declarationOpen + } + + if (code === 47) { + effects.consume(code) + return tagCloseStart + } + + if (code === 63) { + effects.consume(code) + return instruction + } + + if (asciiAlpha(code)) { + effects.consume(code) + return tagOpen + } + + return nok(code) + } + /** @type {State} */ + + function declarationOpen(code) { + if (code === 45) { + effects.consume(code) + return commentOpen + } + + if (code === 91) { + effects.consume(code) + buffer = 'CDATA[' + index = 0 + return cdataOpen + } + + if (asciiAlpha(code)) { + effects.consume(code) + return declaration + } + + return nok(code) + } + /** @type {State} */ + + function commentOpen(code) { + if (code === 45) { + effects.consume(code) + return commentStart + } + + return 
nok(code) + } + /** @type {State} */ + + function commentStart(code) { + if (code === null || code === 62) { + return nok(code) + } + + if (code === 45) { + effects.consume(code) + return commentStartDash + } + + return comment(code) + } + /** @type {State} */ + + function commentStartDash(code) { + if (code === null || code === 62) { + return nok(code) + } + + return comment(code) + } + /** @type {State} */ + + function comment(code) { + if (code === null) { + return nok(code) + } + + if (code === 45) { + effects.consume(code) + return commentClose + } + + if (markdownLineEnding(code)) { + returnState = comment + return atLineEnding(code) + } + + effects.consume(code) + return comment + } + /** @type {State} */ + + function commentClose(code) { + if (code === 45) { + effects.consume(code) + return end + } + + return comment(code) + } + /** @type {State} */ + + function cdataOpen(code) { + if (code === buffer.charCodeAt(index++)) { + effects.consume(code) + return index === buffer.length ? cdata : cdataOpen + } + + return nok(code) + } + /** @type {State} */ + + function cdata(code) { + if (code === null) { + return nok(code) + } + + if (code === 93) { + effects.consume(code) + return cdataClose + } + + if (markdownLineEnding(code)) { + returnState = cdata + return atLineEnding(code) + } + + effects.consume(code) + return cdata + } + /** @type {State} */ + + function cdataClose(code) { + if (code === 93) { + effects.consume(code) + return cdataEnd + } + + return cdata(code) + } + /** @type {State} */ + + function cdataEnd(code) { + if (code === 62) { + return end(code) + } + + if (code === 93) { + effects.consume(code) + return cdataEnd + } + + return cdata(code) + } + /** @type {State} */ + + function declaration(code) { + if (code === null || code === 62) { + return end(code) + } + + if (markdownLineEnding(code)) { + returnState = declaration + return atLineEnding(code) + } + + effects.consume(code) + return declaration + } + /** @type {State} */ + + function instruction(code) { + if (code === null) { + return nok(code) + } + + if (code === 63) { + effects.consume(code) + return instructionClose + } + + if (markdownLineEnding(code)) { + returnState = instruction + return atLineEnding(code) + } + + effects.consume(code) + return instruction + } + /** @type {State} */ + + function instructionClose(code) { + return code === 62 ? 
end(code) : instruction(code) + } + /** @type {State} */ + + function tagCloseStart(code) { + if (asciiAlpha(code)) { + effects.consume(code) + return tagClose + } + + return nok(code) + } + /** @type {State} */ + + function tagClose(code) { + if (code === 45 || asciiAlphanumeric(code)) { + effects.consume(code) + return tagClose + } + + return tagCloseBetween(code) + } + /** @type {State} */ + + function tagCloseBetween(code) { + if (markdownLineEnding(code)) { + returnState = tagCloseBetween + return atLineEnding(code) + } + + if (markdownSpace(code)) { + effects.consume(code) + return tagCloseBetween + } + + return end(code) + } + /** @type {State} */ + + function tagOpen(code) { + if (code === 45 || asciiAlphanumeric(code)) { + effects.consume(code) + return tagOpen + } + + if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) { + return tagOpenBetween(code) + } + + return nok(code) + } + /** @type {State} */ + + function tagOpenBetween(code) { + if (code === 47) { + effects.consume(code) + return end + } + + if (code === 58 || code === 95 || asciiAlpha(code)) { + effects.consume(code) + return tagOpenAttributeName + } + + if (markdownLineEnding(code)) { + returnState = tagOpenBetween + return atLineEnding(code) + } + + if (markdownSpace(code)) { + effects.consume(code) + return tagOpenBetween + } + + return end(code) + } + /** @type {State} */ + + function tagOpenAttributeName(code) { + if ( + code === 45 || + code === 46 || + code === 58 || + code === 95 || + asciiAlphanumeric(code) + ) { + effects.consume(code) + return tagOpenAttributeName + } + + return tagOpenAttributeNameAfter(code) + } + /** @type {State} */ + + function tagOpenAttributeNameAfter(code) { + if (code === 61) { + effects.consume(code) + return tagOpenAttributeValueBefore + } + + if (markdownLineEnding(code)) { + returnState = tagOpenAttributeNameAfter + return atLineEnding(code) + } + + if (markdownSpace(code)) { + effects.consume(code) + return tagOpenAttributeNameAfter + } + + return tagOpenBetween(code) + } + /** @type {State} */ + + function tagOpenAttributeValueBefore(code) { + if ( + code === null || + code === 60 || + code === 61 || + code === 62 || + code === 96 + ) { + return nok(code) + } + + if (code === 34 || code === 39) { + effects.consume(code) + marker = code + return tagOpenAttributeValueQuoted + } + + if (markdownLineEnding(code)) { + returnState = tagOpenAttributeValueBefore + return atLineEnding(code) + } + + if (markdownSpace(code)) { + effects.consume(code) + return tagOpenAttributeValueBefore + } + + effects.consume(code) + marker = undefined + return tagOpenAttributeValueUnquoted + } + /** @type {State} */ + + function tagOpenAttributeValueQuoted(code) { + if (code === marker) { + effects.consume(code) + return tagOpenAttributeValueQuotedAfter + } + + if (code === null) { + return nok(code) + } + + if (markdownLineEnding(code)) { + returnState = tagOpenAttributeValueQuoted + return atLineEnding(code) + } + + effects.consume(code) + return tagOpenAttributeValueQuoted + } + /** @type {State} */ + + function tagOpenAttributeValueQuotedAfter(code) { + if (code === 62 || code === 47 || markdownLineEndingOrSpace(code)) { + return tagOpenBetween(code) + } + + return nok(code) + } + /** @type {State} */ + + function tagOpenAttributeValueUnquoted(code) { + if ( + code === null || + code === 34 || + code === 39 || + code === 60 || + code === 61 || + code === 96 + ) { + return nok(code) + } + + if (code === 62 || markdownLineEndingOrSpace(code)) { + return tagOpenBetween(code) + } + + 
effects.consume(code) + return tagOpenAttributeValueUnquoted + } // We can’t have blank lines in content, so no need to worry about empty + // tokens. + + /** @type {State} */ + + function atLineEnding(code) { + effects.exit('htmlTextData') + effects.enter('lineEnding') + effects.consume(code) + effects.exit('lineEnding') + return factorySpace( + effects, + afterPrefix, + 'linePrefix', + self.parser.constructs.disable.null.includes('codeIndented') + ? undefined + : 4 + ) + } + /** @type {State} */ + + function afterPrefix(code) { + effects.enter('htmlTextData') + return returnState(code) + } + /** @type {State} */ + + function end(code) { + if (code === 62) { + effects.consume(code) + effects.exit('htmlTextData') + effects.exit('htmlText') + return ok + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-end.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-end.d.ts new file mode 100644 index 00000000..11cbcded --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-end.d.ts @@ -0,0 +1,9 @@ +/** @type {Construct} */ +export const labelEnd: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type Event = import('micromark-util-types').Event +export type Token = import('micromark-util-types').Token +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-end.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-end.js new file mode 100644 index 00000000..f1ef7ce2 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-end.js @@ -0,0 +1,367 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').Event} Event + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ +import {factoryDestination} from 'micromark-factory-destination' +import {factoryLabel} from 'micromark-factory-label' +import {factoryTitle} from 'micromark-factory-title' +import {factoryWhitespace} from 'micromark-factory-whitespace' +import {markdownLineEndingOrSpace} from 'micromark-util-character' +import {push, splice} from 'micromark-util-chunked' +import {normalizeIdentifier} from 'micromark-util-normalize-identifier' +import {resolveAll} from 'micromark-util-resolve-all' + +/** @type {Construct} */ +export const labelEnd = { + name: 'labelEnd', + tokenize: tokenizeLabelEnd, + resolveTo: resolveToLabelEnd, + resolveAll: resolveAllLabelEnd +} +/** @type {Construct} */ + +const resourceConstruct = { + tokenize: tokenizeResource +} +/** @type {Construct} */ + +const fullReferenceConstruct = { + tokenize: tokenizeFullReference +} +/** @type {Construct} */ + +const collapsedReferenceConstruct = { + tokenize: tokenizeCollapsedReference +} +/** @type {Resolver} */ + +function resolveAllLabelEnd(events) { + let index = -1 + /** @type {Token} */ + + let token + + while (++index < events.length) { + token = events[index][1] + + if ( + token.type === 'labelImage' || + token.type === 
'labelLink' || + token.type === 'labelEnd' + ) { + // Remove the marker. + events.splice(index + 1, token.type === 'labelImage' ? 4 : 2) + token.type = 'data' + index++ + } + } + + return events +} +/** @type {Resolver} */ + +function resolveToLabelEnd(events, context) { + let index = events.length + let offset = 0 + /** @type {Token} */ + + let token + /** @type {number|undefined} */ + + let open + /** @type {number|undefined} */ + + let close + /** @type {Event[]} */ + + let media // Find an opening. + + while (index--) { + token = events[index][1] + + if (open) { + // If we see another link, or inactive link label, we’ve been here before. + if ( + token.type === 'link' || + (token.type === 'labelLink' && token._inactive) + ) { + break + } // Mark other link openings as inactive, as we can’t have links in + // links. + + if (events[index][0] === 'enter' && token.type === 'labelLink') { + token._inactive = true + } + } else if (close) { + if ( + events[index][0] === 'enter' && + (token.type === 'labelImage' || token.type === 'labelLink') && + !token._balanced + ) { + open = index + + if (token.type !== 'labelLink') { + offset = 2 + break + } + } + } else if (token.type === 'labelEnd') { + close = index + } + } + + const group = { + type: events[open][1].type === 'labelLink' ? 'link' : 'image', + start: Object.assign({}, events[open][1].start), + end: Object.assign({}, events[events.length - 1][1].end) + } + const label = { + type: 'label', + start: Object.assign({}, events[open][1].start), + end: Object.assign({}, events[close][1].end) + } + const text = { + type: 'labelText', + start: Object.assign({}, events[open + offset + 2][1].end), + end: Object.assign({}, events[close - 2][1].start) + } + media = [ + ['enter', group, context], + ['enter', label, context] + ] // Opening marker. + + media = push(media, events.slice(open + 1, open + offset + 3)) // Text open. + + media = push(media, [['enter', text, context]]) // Between. + + media = push( + media, + resolveAll( + context.parser.constructs.insideSpan.null, + events.slice(open + offset + 4, close - 3), + context + ) + ) // Text close, marker close, label close. + + media = push(media, [ + ['exit', text, context], + events[close - 2], + events[close - 1], + ['exit', label, context] + ]) // Reference, resource, or so. + + media = push(media, events.slice(close + 1)) // Media close. + + media = push(media, [['exit', group, context]]) + splice(events, open, events.length, media) + return events +} +/** @type {Tokenizer} */ + +function tokenizeLabelEnd(effects, ok, nok) { + const self = this + let index = self.events.length + /** @type {Token} */ + + let labelStart + /** @type {boolean} */ + + let defined // Find an opening. + + while (index--) { + if ( + (self.events[index][1].type === 'labelImage' || + self.events[index][1].type === 'labelLink') && + !self.events[index][1]._balanced + ) { + labelStart = self.events[index][1] + break + } + } + + return start + /** @type {State} */ + + function start(code) { + if (!labelStart) { + return nok(code) + } // It’s a balanced bracket, but contains a link. 
+ + if (labelStart._inactive) return balanced(code) + defined = self.parser.defined.includes( + normalizeIdentifier( + self.sliceSerialize({ + start: labelStart.end, + end: self.now() + }) + ) + ) + effects.enter('labelEnd') + effects.enter('labelMarker') + effects.consume(code) + effects.exit('labelMarker') + effects.exit('labelEnd') + return afterLabelEnd + } + /** @type {State} */ + + function afterLabelEnd(code) { + // Resource: `[asd](fgh)`. + if (code === 40) { + return effects.attempt( + resourceConstruct, + ok, + defined ? ok : balanced + )(code) + } // Collapsed (`[asd][]`) or full (`[asd][fgh]`) reference? + + if (code === 91) { + return effects.attempt( + fullReferenceConstruct, + ok, + defined + ? effects.attempt(collapsedReferenceConstruct, ok, balanced) + : balanced + )(code) + } // Shortcut reference: `[asd]`? + + return defined ? ok(code) : balanced(code) + } + /** @type {State} */ + + function balanced(code) { + labelStart._balanced = true + return nok(code) + } +} +/** @type {Tokenizer} */ + +function tokenizeResource(effects, ok, nok) { + return start + /** @type {State} */ + + function start(code) { + effects.enter('resource') + effects.enter('resourceMarker') + effects.consume(code) + effects.exit('resourceMarker') + return factoryWhitespace(effects, open) + } + /** @type {State} */ + + function open(code) { + if (code === 41) { + return end(code) + } + + return factoryDestination( + effects, + destinationAfter, + nok, + 'resourceDestination', + 'resourceDestinationLiteral', + 'resourceDestinationLiteralMarker', + 'resourceDestinationRaw', + 'resourceDestinationString', + 32 + )(code) + } + /** @type {State} */ + + function destinationAfter(code) { + return markdownLineEndingOrSpace(code) + ? factoryWhitespace(effects, between)(code) + : end(code) + } + /** @type {State} */ + + function between(code) { + if (code === 34 || code === 39 || code === 40) { + return factoryTitle( + effects, + factoryWhitespace(effects, end), + nok, + 'resourceTitle', + 'resourceTitleMarker', + 'resourceTitleString' + )(code) + } + + return end(code) + } + /** @type {State} */ + + function end(code) { + if (code === 41) { + effects.enter('resourceMarker') + effects.consume(code) + effects.exit('resourceMarker') + effects.exit('resource') + return ok + } + + return nok(code) + } +} +/** @type {Tokenizer} */ + +function tokenizeFullReference(effects, ok, nok) { + const self = this + return start + /** @type {State} */ + + function start(code) { + return factoryLabel.call( + self, + effects, + afterLabel, + nok, + 'reference', + 'referenceMarker', + 'referenceString' + )(code) + } + /** @type {State} */ + + function afterLabel(code) { + return self.parser.defined.includes( + normalizeIdentifier( + self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1) + ) + ) + ? 
ok(code) + : nok(code) + } +} +/** @type {Tokenizer} */ + +function tokenizeCollapsedReference(effects, ok, nok) { + return start + /** @type {State} */ + + function start(code) { + effects.enter('reference') + effects.enter('referenceMarker') + effects.consume(code) + effects.exit('referenceMarker') + return open + } + /** @type {State} */ + + function open(code) { + if (code === 93) { + effects.enter('referenceMarker') + effects.consume(code) + effects.exit('referenceMarker') + effects.exit('reference') + return ok + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-image.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-image.d.ts new file mode 100644 index 00000000..302d8905 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-image.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const labelStartImage: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-image.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-image.js new file mode 100644 index 00000000..052ea3e4 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-image.js @@ -0,0 +1,55 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ +import {labelEnd} from './label-end.js' +/** @type {Construct} */ + +export const labelStartImage = { + name: 'labelStartImage', + tokenize: tokenizeLabelStartImage, + resolveAll: labelEnd.resolveAll +} +/** @type {Tokenizer} */ + +function tokenizeLabelStartImage(effects, ok, nok) { + const self = this + return start + /** @type {State} */ + + function start(code) { + effects.enter('labelImage') + effects.enter('labelImageMarker') + effects.consume(code) + effects.exit('labelImageMarker') + return open + } + /** @type {State} */ + + function open(code) { + if (code === 91) { + effects.enter('labelMarker') + effects.consume(code) + effects.exit('labelMarker') + effects.exit('labelImage') + return after + } + + return nok(code) + } + /** @type {State} */ + + function after(code) { + /* To do: remove in the future once we’ve switched from + * `micromark-extension-footnote` to `micromark-extension-gfm-footnote`, + * which doesn’t need this */ + + /* Hidden footnotes hook */ + + /* c8 ignore next 3 */ + return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs + ? 
nok(code) + : ok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-link.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-link.d.ts new file mode 100644 index 00000000..5a757d36 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-link.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const labelStartLink: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-link.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-link.js new file mode 100644 index 00000000..f080392e --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/label-start-link.js @@ -0,0 +1,43 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ +import {labelEnd} from './label-end.js' +/** @type {Construct} */ + +export const labelStartLink = { + name: 'labelStartLink', + tokenize: tokenizeLabelStartLink, + resolveAll: labelEnd.resolveAll +} +/** @type {Tokenizer} */ + +function tokenizeLabelStartLink(effects, ok, nok) { + const self = this + return start + /** @type {State} */ + + function start(code) { + effects.enter('labelLink') + effects.enter('labelMarker') + effects.consume(code) + effects.exit('labelMarker') + effects.exit('labelLink') + return after + } + /** @type {State} */ + + function after(code) { + /* To do: remove in the future once we’ve switched from + * `micromark-extension-footnote` to `micromark-extension-gfm-footnote`, + * which doesn’t need this */ + + /* Hidden footnotes hook. */ + + /* c8 ignore next 3 */ + return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs + ? 
nok(code) + : ok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/line-ending.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/line-ending.d.ts new file mode 100644 index 00000000..3d9fdf9e --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/line-ending.d.ts @@ -0,0 +1,5 @@ +/** @type {Construct} */ +export const lineEnding: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/line-ending.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/line-ending.js new file mode 100644 index 00000000..7ed94453 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/line-ending.js @@ -0,0 +1,26 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + */ +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' + +/** @type {Construct} */ +export const lineEnding = { + name: 'lineEnding', + tokenize: tokenizeLineEnding +} +/** @type {Tokenizer} */ + +function tokenizeLineEnding(effects, ok) { + return start + /** @type {State} */ + + function start(code) { + effects.enter('lineEnding') + effects.consume(code) + effects.exit('lineEnding') + return factorySpace(effects, ok, 'linePrefix') + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/list.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/list.d.ts new file mode 100644 index 00000000..6a138dbe --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/list.d.ts @@ -0,0 +1,16 @@ +/** @type {Construct} */ +export const list: Construct +export type Construct = import('micromark-util-types').Construct +export type TokenizeContext = import('micromark-util-types').TokenizeContext +export type Exiter = import('micromark-util-types').Exiter +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code +export type ListContainerState = Record & { + marker: Code + type: string + size: number +} +export type TokenizeContextWithState = TokenizeContext & { + containerState: ListContainerState +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/list.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/list.js new file mode 100644 index 00000000..37c13e30 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/list.js @@ -0,0 +1,269 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext + * @typedef {import('micromark-util-types').Exiter} Exiter + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ + +/** + * @typedef {Record & {marker: Code, type: string, size: number}} ListContainerState + * @typedef {TokenizeContext & {containerState: ListContainerState}} TokenizeContextWithState + */ +import {factorySpace} from 'micromark-factory-space' +import {asciiDigit, markdownSpace} from 
'micromark-util-character' +import {blankLine} from './blank-line.js' +import {thematicBreak} from './thematic-break.js' +/** @type {Construct} */ + +export const list = { + name: 'list', + tokenize: tokenizeListStart, + continuation: { + tokenize: tokenizeListContinuation + }, + exit: tokenizeListEnd +} +/** @type {Construct} */ + +const listItemPrefixWhitespaceConstruct = { + tokenize: tokenizeListItemPrefixWhitespace, + partial: true +} +/** @type {Construct} */ + +const indentConstruct = { + tokenize: tokenizeIndent, + partial: true +} +/** + * @type {Tokenizer} + * @this {TokenizeContextWithState} + */ + +function tokenizeListStart(effects, ok, nok) { + const self = this + const tail = self.events[self.events.length - 1] + let initialSize = + tail && tail[1].type === 'linePrefix' + ? tail[2].sliceSerialize(tail[1], true).length + : 0 + let size = 0 + return start + /** @type {State} */ + + function start(code) { + const kind = + self.containerState.type || + (code === 42 || code === 43 || code === 45 + ? 'listUnordered' + : 'listOrdered') + + if ( + kind === 'listUnordered' + ? !self.containerState.marker || code === self.containerState.marker + : asciiDigit(code) + ) { + if (!self.containerState.type) { + self.containerState.type = kind + effects.enter(kind, { + _container: true + }) + } + + if (kind === 'listUnordered') { + effects.enter('listItemPrefix') + return code === 42 || code === 45 + ? effects.check(thematicBreak, nok, atMarker)(code) + : atMarker(code) + } + + if (!self.interrupt || code === 49) { + effects.enter('listItemPrefix') + effects.enter('listItemValue') + return inside(code) + } + } + + return nok(code) + } + /** @type {State} */ + + function inside(code) { + if (asciiDigit(code) && ++size < 10) { + effects.consume(code) + return inside + } + + if ( + (!self.interrupt || size < 2) && + (self.containerState.marker + ? code === self.containerState.marker + : code === 41 || code === 46) + ) { + effects.exit('listItemValue') + return atMarker(code) + } + + return nok(code) + } + /** + * @type {State} + **/ + + function atMarker(code) { + effects.enter('listItemMarker') + effects.consume(code) + effects.exit('listItemMarker') + self.containerState.marker = self.containerState.marker || code + return effects.check( + blankLine, // Can’t be empty when interrupting. + self.interrupt ? nok : onBlank, + effects.attempt( + listItemPrefixWhitespaceConstruct, + endOfPrefix, + otherPrefix + ) + ) + } + /** @type {State} */ + + function onBlank(code) { + self.containerState.initialBlankLine = true + initialSize++ + return endOfPrefix(code) + } + /** @type {State} */ + + function otherPrefix(code) { + if (markdownSpace(code)) { + effects.enter('listItemPrefixWhitespace') + effects.consume(code) + effects.exit('listItemPrefixWhitespace') + return endOfPrefix + } + + return nok(code) + } + /** @type {State} */ + + function endOfPrefix(code) { + self.containerState.size = + initialSize + + self.sliceSerialize(effects.exit('listItemPrefix'), true).length + return ok(code) + } +} +/** + * @type {Tokenizer} + * @this {TokenizeContextWithState} + */ + +function tokenizeListContinuation(effects, ok, nok) { + const self = this + self.containerState._closeFlow = undefined + return effects.check(blankLine, onBlank, notBlank) + /** @type {State} */ + + function onBlank(code) { + self.containerState.furtherBlankLines = + self.containerState.furtherBlankLines || + self.containerState.initialBlankLine // We have a blank line. + // Still, try to consume at most the items size. 
+ + return factorySpace( + effects, + ok, + 'listItemIndent', + self.containerState.size + 1 + )(code) + } + /** @type {State} */ + + function notBlank(code) { + if (self.containerState.furtherBlankLines || !markdownSpace(code)) { + self.containerState.furtherBlankLines = undefined + self.containerState.initialBlankLine = undefined + return notInCurrentItem(code) + } + + self.containerState.furtherBlankLines = undefined + self.containerState.initialBlankLine = undefined + return effects.attempt(indentConstruct, ok, notInCurrentItem)(code) + } + /** @type {State} */ + + function notInCurrentItem(code) { + // While we do continue, we signal that the flow should be closed. + self.containerState._closeFlow = true // As we’re closing flow, we’re no longer interrupting. + + self.interrupt = undefined + return factorySpace( + effects, + effects.attempt(list, ok, nok), + 'linePrefix', + self.parser.constructs.disable.null.includes('codeIndented') + ? undefined + : 4 + )(code) + } +} +/** + * @type {Tokenizer} + * @this {TokenizeContextWithState} + */ + +function tokenizeIndent(effects, ok, nok) { + const self = this + return factorySpace( + effects, + afterPrefix, + 'listItemIndent', + self.containerState.size + 1 + ) + /** @type {State} */ + + function afterPrefix(code) { + const tail = self.events[self.events.length - 1] + return tail && + tail[1].type === 'listItemIndent' && + tail[2].sliceSerialize(tail[1], true).length === self.containerState.size + ? ok(code) + : nok(code) + } +} +/** + * @type {Exiter} + * @this {TokenizeContextWithState} + */ + +function tokenizeListEnd(effects) { + effects.exit(this.containerState.type) +} +/** + * @type {Tokenizer} + * @this {TokenizeContextWithState} + */ + +function tokenizeListItemPrefixWhitespace(effects, ok, nok) { + const self = this + return factorySpace( + effects, + afterPrefix, + 'listItemPrefixWhitespace', + self.parser.constructs.disable.null.includes('codeIndented') + ? undefined + : 4 + 1 + ) + /** @type {State} */ + + function afterPrefix(code) { + const tail = self.events[self.events.length - 1] + return !markdownSpace(code) && + tail && + tail[1].type === 'listItemPrefixWhitespace' + ? 
ok(code) + : nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/setext-underline.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/setext-underline.d.ts new file mode 100644 index 00000000..7dc65882 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/setext-underline.d.ts @@ -0,0 +1,7 @@ +/** @type {Construct} */ +export const setextUnderline: Construct +export type Construct = import('micromark-util-types').Construct +export type Resolver = import('micromark-util-types').Resolver +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/setext-underline.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/setext-underline.js new file mode 100644 index 00000000..09e1b98b --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/setext-underline.js @@ -0,0 +1,134 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Resolver} Resolver + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' + +/** @type {Construct} */ +export const setextUnderline = { + name: 'setextUnderline', + tokenize: tokenizeSetextUnderline, + resolveTo: resolveToSetextUnderline +} +/** @type {Resolver} */ + +function resolveToSetextUnderline(events, context) { + let index = events.length + /** @type {number|undefined} */ + + let content + /** @type {number|undefined} */ + + let text + /** @type {number|undefined} */ + + let definition // Find the opening of the content. + // It’ll always exist: we don’t tokenize if it isn’t there. + + while (index--) { + if (events[index][0] === 'enter') { + if (events[index][1].type === 'content') { + content = index + break + } + + if (events[index][1].type === 'paragraph') { + text = index + } + } // Exit + else { + if (events[index][1].type === 'content') { + // Remove the content end (if needed we’ll add it later) + events.splice(index, 1) + } + + if (!definition && events[index][1].type === 'definition') { + definition = index + } + } + } + + const heading = { + type: 'setextHeading', + start: Object.assign({}, events[text][1].start), + end: Object.assign({}, events[events.length - 1][1].end) + } // Change the paragraph to setext heading text. + + events[text][1].type = 'setextHeadingText' // If we have definitions in the content, we’ll keep on having content, + // but we need move it. + + if (definition) { + events.splice(text, 0, ['enter', heading, context]) + events.splice(definition + 1, 0, ['exit', events[content][1], context]) + events[content][1].end = Object.assign({}, events[definition][1].end) + } else { + events[content][1] = heading + } // Add the heading exit at the end. + + events.push(['exit', heading, context]) + return events +} +/** @type {Tokenizer} */ + +function tokenizeSetextUnderline(effects, ok, nok) { + const self = this + let index = self.events.length + /** @type {NonNullable} */ + + let marker + /** @type {boolean} */ + + let paragraph // Find an opening. + + while (index--) { + // Skip enter/exit of line ending, line prefix, and content. 
+ // We can now either have a definition or a paragraph. + if ( + self.events[index][1].type !== 'lineEnding' && + self.events[index][1].type !== 'linePrefix' && + self.events[index][1].type !== 'content' + ) { + paragraph = self.events[index][1].type === 'paragraph' + break + } + } + + return start + /** @type {State} */ + + function start(code) { + if (!self.parser.lazy[self.now().line] && (self.interrupt || paragraph)) { + effects.enter('setextHeadingLine') + effects.enter('setextHeadingLineSequence') + marker = code + return closingSequence(code) + } + + return nok(code) + } + /** @type {State} */ + + function closingSequence(code) { + if (code === marker) { + effects.consume(code) + return closingSequence + } + + effects.exit('setextHeadingLineSequence') + return factorySpace(effects, closingSequenceEnd, 'lineSuffix')(code) + } + /** @type {State} */ + + function closingSequenceEnd(code) { + if (code === null || markdownLineEnding(code)) { + effects.exit('setextHeadingLine') + return ok(code) + } + + return nok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/thematic-break.d.ts b/_extensions/d2/node_modules/micromark-core-commonmark/lib/thematic-break.d.ts new file mode 100644 index 00000000..c5992e04 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/thematic-break.d.ts @@ -0,0 +1,6 @@ +/** @type {Construct} */ +export const thematicBreak: Construct +export type Construct = import('micromark-util-types').Construct +export type Tokenizer = import('micromark-util-types').Tokenizer +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/lib/thematic-break.js b/_extensions/d2/node_modules/micromark-core-commonmark/lib/thematic-break.js new file mode 100644 index 00000000..5536930c --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/lib/thematic-break.js @@ -0,0 +1,61 @@ +/** + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').Tokenizer} Tokenizer + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding, markdownSpace} from 'micromark-util-character' + +/** @type {Construct} */ +export const thematicBreak = { + name: 'thematicBreak', + tokenize: tokenizeThematicBreak +} +/** @type {Tokenizer} */ + +function tokenizeThematicBreak(effects, ok, nok) { + let size = 0 + /** @type {NonNullable} */ + + let marker + return start + /** @type {State} */ + + function start(code) { + effects.enter('thematicBreak') + marker = code + return atBreak(code) + } + /** @type {State} */ + + function atBreak(code) { + if (code === marker) { + effects.enter('thematicBreakSequence') + return sequence(code) + } + + if (markdownSpace(code)) { + return factorySpace(effects, atBreak, 'whitespace')(code) + } + + if (size < 3 || (code !== null && !markdownLineEnding(code))) { + return nok(code) + } + + effects.exit('thematicBreak') + return ok(code) + } + /** @type {State} */ + + function sequence(code) { + if (code === marker) { + effects.consume(code) + size++ + return sequence + } + + effects.exit('thematicBreakSequence') + return atBreak(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/package.json b/_extensions/d2/node_modules/micromark-core-commonmark/package.json new file 
mode 100644 index 00000000..02418b5a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/package.json @@ -0,0 +1,69 @@ +{ + "name": "micromark-core-commonmark", + "version": "1.0.6", + "description": "The CommonMark markdown constructs", + "license": "MIT", + "keywords": [ + "micromark", + "core", + "commonmark" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-core-commonmark", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "lib/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-factory-destination": "^1.0.0", + "micromark-factory-label": "^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-factory-title": "^1.0.0", + "micromark-factory-whitespace": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-classify-character": "^1.0.0", + "micromark-util-html-tag-name": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "decode-named-character-reference": "^1.0.0", + "uvu": "^0.5.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-core-commonmark/readme.md b/_extensions/d2/node_modules/micromark-core-commonmark/readme.md new file mode 100644 index 00000000..fa62914b --- /dev/null +++ b/_extensions/d2/node_modules/micromark-core-commonmark/readme.md @@ -0,0 +1,117 @@ +# micromark-core-commonmark + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +The core CommonMark constructs needed to tokenize markdown. +Some of these can be [turned off][disable], but they are often essential to +markdown and weird things might happen. + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-core-commonmark +``` + +## Use + +```js +import {autolink} from 'micromark-core-commonmark' + +console.log(autolink) // Do things with `autolink`. +``` + +## API + +This module exports the following identifiers: `attention`, `autolink`, +`blankLine`, `blockQuote`, `characterEscape`, `characterReference`, +`codeFenced`, `codeIndented`, `codeText`, `content`, `definition`, +`hardBreakEscape`, `headingAtx`, `htmlFlow`, `htmlText`, `labelEnd`, +`labelStartImage`, `labelStartLink`, `lineEnding`, `list`, `setextUnderline`, +`thematicBreak`. +There is no default export. 
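+
+For instance, a construct from this list can be turned off by name through
+micromark's `disable` extension; a minimal sketch (the exact HTML output is an
+assumption):
+
+```js
+import {micromark} from 'micromark'
+
+// With the `attention` construct disabled, `*a*` stays literal text
+// instead of becoming emphasis.
+const html = micromark('*a*', {
+  extensions: [{disable: {null: ['attention']}}]
+})
+
+console.log(html) // assumed output: '<p>*a*</p>'
+```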
+ +Each identifier refers to a [construct](https://github.com/micromark/micromark#constructs). + +See the code for more on the exported constructs. + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-core-commonmark.svg + +[downloads]: https://www.npmjs.com/package/micromark-core-commonmark + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-core-commonmark.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-core-commonmark + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md + +[disable]: https://github.com/micromark/micromark#case-turn-off-constructs diff --git a/_extensions/d2/node_modules/micromark-factory-destination/dev/index.d.ts b/_extensions/d2/node_modules/micromark-factory-destination/dev/index.d.ts new file mode 100644 index 00000000..a99f0eab --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-destination/dev/index.d.ts @@ -0,0 +1,25 @@ +/** + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * @param {string} literalType + * @param {string} literalMarkerType + * @param {string} rawType + * @param {string} stringType + * @param {number} [max=Infinity] + * @returns {State} + */ +export function factoryDestination( + effects: Effects, + ok: State, + nok: State, + type: string, + literalType: string, + literalMarkerType: string, + rawType: string, + stringType: string, + max?: number | undefined +): State +export type Effects = import('micromark-util-types').Effects +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-factory-destination/dev/index.js b/_extensions/d2/node_modules/micromark-factory-destination/dev/index.js new file mode 100644 index 00000000..eb01deeb --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-destination/dev/index.js @@ -0,0 +1,170 @@ +/** + * @typedef {import('micromark-util-types').Effects} Effects + * @typedef 
{import('micromark-util-types').State} State + */ + +import { + asciiControl, + markdownLineEndingOrSpace, + markdownLineEnding +} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * @param {string} literalType + * @param {string} literalMarkerType + * @param {string} rawType + * @param {string} stringType + * @param {number} [max=Infinity] + * @returns {State} + */ +// eslint-disable-next-line max-params +export function factoryDestination( + effects, + ok, + nok, + type, + literalType, + literalMarkerType, + rawType, + stringType, + max +) { + const limit = max || Number.POSITIVE_INFINITY + let balance = 0 + + return start + + /** @type {State} */ + function start(code) { + if (code === codes.lessThan) { + effects.enter(type) + effects.enter(literalType) + effects.enter(literalMarkerType) + effects.consume(code) + effects.exit(literalMarkerType) + return destinationEnclosedBefore + } + + if ( + code === codes.eof || + code === codes.rightParenthesis || + asciiControl(code) + ) { + return nok(code) + } + + effects.enter(type) + effects.enter(rawType) + effects.enter(stringType) + effects.enter(types.chunkString, {contentType: constants.contentTypeString}) + return destinationRaw(code) + } + + /** @type {State} */ + function destinationEnclosedBefore(code) { + if (code === codes.greaterThan) { + effects.enter(literalMarkerType) + effects.consume(code) + effects.exit(literalMarkerType) + effects.exit(literalType) + effects.exit(type) + return ok + } + + effects.enter(stringType) + effects.enter(types.chunkString, {contentType: constants.contentTypeString}) + return destinationEnclosed(code) + } + + /** @type {State} */ + function destinationEnclosed(code) { + if (code === codes.greaterThan) { + effects.exit(types.chunkString) + effects.exit(stringType) + return destinationEnclosedBefore(code) + } + + if ( + code === codes.eof || + code === codes.lessThan || + markdownLineEnding(code) + ) { + return nok(code) + } + + effects.consume(code) + return code === codes.backslash + ? destinationEnclosedEscape + : destinationEnclosed + } + + /** @type {State} */ + function destinationEnclosedEscape(code) { + if ( + code === codes.lessThan || + code === codes.greaterThan || + code === codes.backslash + ) { + effects.consume(code) + return destinationEnclosed + } + + return destinationEnclosed(code) + } + + /** @type {State} */ + function destinationRaw(code) { + if (code === codes.leftParenthesis) { + if (++balance > limit) return nok(code) + effects.consume(code) + return destinationRaw + } + + if (code === codes.rightParenthesis) { + if (!balance--) { + effects.exit(types.chunkString) + effects.exit(stringType) + effects.exit(rawType) + effects.exit(type) + return ok(code) + } + + effects.consume(code) + return destinationRaw + } + + if (code === codes.eof || markdownLineEndingOrSpace(code)) { + if (balance) return nok(code) + effects.exit(types.chunkString) + effects.exit(stringType) + effects.exit(rawType) + effects.exit(type) + return ok(code) + } + + if (asciiControl(code)) return nok(code) + effects.consume(code) + return code === codes.backslash ? 
destinationRawEscape : destinationRaw + } + + /** @type {State} */ + function destinationRawEscape(code) { + if ( + code === codes.leftParenthesis || + code === codes.rightParenthesis || + code === codes.backslash + ) { + effects.consume(code) + return destinationRaw + } + + return destinationRaw(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-destination/index.d.ts b/_extensions/d2/node_modules/micromark-factory-destination/index.d.ts new file mode 100644 index 00000000..a99f0eab --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-destination/index.d.ts @@ -0,0 +1,25 @@ +/** + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * @param {string} literalType + * @param {string} literalMarkerType + * @param {string} rawType + * @param {string} stringType + * @param {number} [max=Infinity] + * @returns {State} + */ +export function factoryDestination( + effects: Effects, + ok: State, + nok: State, + type: string, + literalType: string, + literalMarkerType: string, + rawType: string, + stringType: string, + max?: number | undefined +): State +export type Effects = import('micromark-util-types').Effects +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-factory-destination/index.js b/_extensions/d2/node_modules/micromark-factory-destination/index.js new file mode 100644 index 00000000..0aef8b17 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-destination/index.js @@ -0,0 +1,151 @@ +/** + * @typedef {import('micromark-util-types').Effects} Effects + * @typedef {import('micromark-util-types').State} State + */ +import { + asciiControl, + markdownLineEndingOrSpace, + markdownLineEnding +} from 'micromark-util-character' + +/** + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * @param {string} literalType + * @param {string} literalMarkerType + * @param {string} rawType + * @param {string} stringType + * @param {number} [max=Infinity] + * @returns {State} + */ +// eslint-disable-next-line max-params +export function factoryDestination( + effects, + ok, + nok, + type, + literalType, + literalMarkerType, + rawType, + stringType, + max +) { + const limit = max || Number.POSITIVE_INFINITY + let balance = 0 + return start + /** @type {State} */ + + function start(code) { + if (code === 60) { + effects.enter(type) + effects.enter(literalType) + effects.enter(literalMarkerType) + effects.consume(code) + effects.exit(literalMarkerType) + return destinationEnclosedBefore + } + + if (code === null || code === 41 || asciiControl(code)) { + return nok(code) + } + + effects.enter(type) + effects.enter(rawType) + effects.enter(stringType) + effects.enter('chunkString', { + contentType: 'string' + }) + return destinationRaw(code) + } + /** @type {State} */ + + function destinationEnclosedBefore(code) { + if (code === 62) { + effects.enter(literalMarkerType) + effects.consume(code) + effects.exit(literalMarkerType) + effects.exit(literalType) + effects.exit(type) + return ok + } + + effects.enter(stringType) + effects.enter('chunkString', { + contentType: 'string' + }) + return destinationEnclosed(code) + } + /** @type {State} */ + + function destinationEnclosed(code) { + if (code === 62) { + effects.exit('chunkString') + effects.exit(stringType) + return destinationEnclosedBefore(code) + } + + if (code === null || code === 60 || markdownLineEnding(code)) { + return nok(code) + } + + effects.consume(code) 
+ return code === 92 ? destinationEnclosedEscape : destinationEnclosed + } + /** @type {State} */ + + function destinationEnclosedEscape(code) { + if (code === 60 || code === 62 || code === 92) { + effects.consume(code) + return destinationEnclosed + } + + return destinationEnclosed(code) + } + /** @type {State} */ + + function destinationRaw(code) { + if (code === 40) { + if (++balance > limit) return nok(code) + effects.consume(code) + return destinationRaw + } + + if (code === 41) { + if (!balance--) { + effects.exit('chunkString') + effects.exit(stringType) + effects.exit(rawType) + effects.exit(type) + return ok(code) + } + + effects.consume(code) + return destinationRaw + } + + if (code === null || markdownLineEndingOrSpace(code)) { + if (balance) return nok(code) + effects.exit('chunkString') + effects.exit(stringType) + effects.exit(rawType) + effects.exit(type) + return ok(code) + } + + if (asciiControl(code)) return nok(code) + effects.consume(code) + return code === 92 ? destinationRawEscape : destinationRaw + } + /** @type {State} */ + + function destinationRawEscape(code) { + if (code === 40 || code === 41 || code === 92) { + effects.consume(code) + return destinationRaw + } + + return destinationRaw(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-destination/package.json b/_extensions/d2/node_modules/micromark-factory-destination/package.json new file mode 100644 index 00000000..5d1bb325 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-destination/package.json @@ -0,0 +1,55 @@ +{ + "name": "micromark-factory-destination", + "version": "1.0.0", + "description": "micromark factory to parse destinations (found in resources, definitions)", + "license": "MIT", + "keywords": [ + "micromark", + "factory", + "destination" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-factory-destination", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-destination/readme.md b/_extensions/d2/node_modules/micromark-factory-destination/readme.md new file mode 100644 index 00000000..e42f3ec8 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-destination/readme.md @@ -0,0 +1,165 @@ +# micromark-factory-destination + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark factory to parse destinations (found in 
 resources, definitions). + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`factoryDestination(…)`](#factorydestination) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-factory-destination +``` + +## Use + +```js +import {factoryDestination} from 'micromark-factory-destination' +import {codes} from 'micromark-util-symbol/codes' +import {types} from 'micromark-util-symbol/types' + +// A micromark tokenizer that uses the factory: +/** @type {Tokenizer} */ +function tokenizeResource(effects, ok, nok) { + return start + + // … + + /** @type {State} */ + function open(code) { + if (code === codes.rightParenthesis) { + return end(code) + } + + return factoryDestination( + effects, + destinationAfter, + nok, + types.resourceDestination, + types.resourceDestinationLiteral, + types.resourceDestinationLiteralMarker, + types.resourceDestinationRaw, + types.resourceDestinationString, + constants.linkResourceDestinationBalanceMax + )(code) + } + + // … +} +``` + +## API + +This module exports the following identifiers: `factoryDestination`. +There is no default export. + +### `factoryDestination(…)` + +###### Parameters + +* `effects` (`Effects`) — Context +* `ok` (`State`) — State switched to when successful +* `nok` (`State`) — State switched to when not successful +* `type` (`string`) — Token type for whole (`<a>` or `b`) +* `literalType` (`string`) — Token type when enclosed (`<a>`) +* `literalMarkerType` (`string`) — Token type for enclosing (`<` and `>`) +* `rawType` (`string`) — Token type when not enclosed (`b`) +* `stringType` (`string`) — Token type for the URI (`a` or `b`) +* `max` (`number`, default: `Infinity`) — Max depth of nested parens + +###### Returns + +`State`. + +###### Examples + +```markdown +<a> +<a\>b> +<a b> +<a)b> +a +a\)b +a(b)c +a(b) +``` + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms.
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-factory-destination.svg + +[downloads]: https://www.npmjs.com/package/micromark-factory-destination + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-factory-destination.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-factory-destination + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-factory-label/dev/index.d.ts b/_extensions/d2/node_modules/micromark-factory-label/dev/index.d.ts new file mode 100644 index 00000000..afe4bed4 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-label/dev/index.d.ts @@ -0,0 +1,21 @@ +/** + * @this {TokenizeContext} + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * @param {string} markerType + * @param {string} stringType + * @returns {State} + */ +export function factoryLabel( + effects: Effects, + ok: State, + nok: State, + type: string, + markerType: string, + stringType: string +): State +export type Effects = import('micromark-util-types').Effects +export type TokenizeContext = import('micromark-util-types').TokenizeContext +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-factory-label/dev/index.js b/_extensions/d2/node_modules/micromark-factory-label/dev/index.js new file mode 100644 index 00000000..2e10e698 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-label/dev/index.js @@ -0,0 +1,114 @@ +/** + * @typedef {import('micromark-util-types').Effects} Effects + * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext + * @typedef {import('micromark-util-types').State} State + */ + +import {ok as assert} from 'uvu/assert' +import {markdownLineEnding, markdownSpace} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** + * @this {TokenizeContext} + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * @param {string} markerType + * @param {string} stringType + * @returns {State} + */ +// eslint-disable-next-line max-params +export function factoryLabel(effects, ok, nok, type, markerType, stringType) { + const self = this 
+ let size = 0 + /** @type {boolean} */ + let data + + return start + + /** @type {State} */ + function start(code) { + assert(code === codes.leftSquareBracket, 'expected `[`') + effects.enter(type) + effects.enter(markerType) + effects.consume(code) + effects.exit(markerType) + effects.enter(stringType) + return atBreak + } + + /** @type {State} */ + function atBreak(code) { + if ( + code === codes.eof || + code === codes.leftSquareBracket || + (code === codes.rightSquareBracket && !data) || + /* To do: remove in the future once we’ve switched from + * `micromark-extension-footnote` to `micromark-extension-gfm-footnote`, + * which doesn’t need this */ + /* Hidden footnotes hook */ + /* c8 ignore next 3 */ + (code === codes.caret && + !size && + '_hiddenFootnoteSupport' in self.parser.constructs) || + size > constants.linkReferenceSizeMax + ) { + return nok(code) + } + + if (code === codes.rightSquareBracket) { + effects.exit(stringType) + effects.enter(markerType) + effects.consume(code) + effects.exit(markerType) + effects.exit(type) + return ok + } + + if (markdownLineEnding(code)) { + effects.enter(types.lineEnding) + effects.consume(code) + effects.exit(types.lineEnding) + return atBreak + } + + effects.enter(types.chunkString, {contentType: constants.contentTypeString}) + return label(code) + } + + /** @type {State} */ + function label(code) { + if ( + code === codes.eof || + code === codes.leftSquareBracket || + code === codes.rightSquareBracket || + markdownLineEnding(code) || + size++ > constants.linkReferenceSizeMax + ) { + effects.exit(types.chunkString) + return atBreak(code) + } + + effects.consume(code) + data = data || !markdownSpace(code) + return code === codes.backslash ? labelEscape : label + } + + /** @type {State} */ + function labelEscape(code) { + if ( + code === codes.leftSquareBracket || + code === codes.backslash || + code === codes.rightSquareBracket + ) { + effects.consume(code) + size++ + return label + } + + return label(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-label/index.d.ts b/_extensions/d2/node_modules/micromark-factory-label/index.d.ts new file mode 100644 index 00000000..afe4bed4 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-label/index.d.ts @@ -0,0 +1,21 @@ +/** + * @this {TokenizeContext} + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * @param {string} markerType + * @param {string} stringType + * @returns {State} + */ +export function factoryLabel( + effects: Effects, + ok: State, + nok: State, + type: string, + markerType: string, + stringType: string +): State +export type Effects = import('micromark-util-types').Effects +export type TokenizeContext = import('micromark-util-types').TokenizeContext +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-factory-label/index.js b/_extensions/d2/node_modules/micromark-factory-label/index.js new file mode 100644 index 00000000..64c233c7 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-label/index.js @@ -0,0 +1,108 @@ +/** + * @typedef {import('micromark-util-types').Effects} Effects + * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext + * @typedef {import('micromark-util-types').State} State + */ +import {markdownLineEnding, markdownSpace} from 'micromark-util-character' + +/** + * @this {TokenizeContext} + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * 
@param {string} markerType + * @param {string} stringType + * @returns {State} + */ +// eslint-disable-next-line max-params +export function factoryLabel(effects, ok, nok, type, markerType, stringType) { + const self = this + let size = 0 + /** @type {boolean} */ + + let data + return start + /** @type {State} */ + + function start(code) { + effects.enter(type) + effects.enter(markerType) + effects.consume(code) + effects.exit(markerType) + effects.enter(stringType) + return atBreak + } + /** @type {State} */ + + function atBreak(code) { + if ( + code === null || + code === 91 || + (code === 93 && !data) || + /* To do: remove in the future once we’ve switched from + * `micromark-extension-footnote` to `micromark-extension-gfm-footnote`, + * which doesn’t need this */ + + /* Hidden footnotes hook */ + + /* c8 ignore next 3 */ + (code === 94 && + !size && + '_hiddenFootnoteSupport' in self.parser.constructs) || + size > 999 + ) { + return nok(code) + } + + if (code === 93) { + effects.exit(stringType) + effects.enter(markerType) + effects.consume(code) + effects.exit(markerType) + effects.exit(type) + return ok + } + + if (markdownLineEnding(code)) { + effects.enter('lineEnding') + effects.consume(code) + effects.exit('lineEnding') + return atBreak + } + + effects.enter('chunkString', { + contentType: 'string' + }) + return label(code) + } + /** @type {State} */ + + function label(code) { + if ( + code === null || + code === 91 || + code === 93 || + markdownLineEnding(code) || + size++ > 999 + ) { + effects.exit('chunkString') + return atBreak(code) + } + + effects.consume(code) + data = data || !markdownSpace(code) + return code === 92 ? labelEscape : label + } + /** @type {State} */ + + function labelEscape(code) { + if (code === 91 || code === 92 || code === 93) { + effects.consume(code) + size++ + return label + } + + return label(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-label/package.json b/_extensions/d2/node_modules/micromark-factory-label/package.json new file mode 100644 index 00000000..f880bc5f --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-label/package.json @@ -0,0 +1,56 @@ +{ + "name": "micromark-factory-label", + "version": "1.0.2", + "description": "micromark factory to parse labels (found in media, definitions)", + "license": "MIT", + "keywords": [ + "micromark", + "factory", + "label" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-factory-label", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git 
a/_extensions/d2/node_modules/micromark-factory-label/readme.md b/_extensions/d2/node_modules/micromark-factory-label/readme.md new file mode 100644 index 00000000..9ce8a90a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-label/readme.md @@ -0,0 +1,158 @@ +# micromark-factory-label + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark factory to parse labels (found in media, definitions). + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`factoryLabel(…)`](#factorylabel) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-factory-label +``` + +## Use + +```js +import {ok as assert} from 'uvu/assert' +import {factoryLabel} from 'micromark-factory-label' +import {codes} from 'micromark-util-symbol/codes' +import {types} from 'micromark-util-symbol/types' + +// A micromark tokenizer that uses the factory: +/** @type {Tokenizer} */ +function tokenizeDefinition(effects, ok, nok) { + return start + + // … + + /** @type {State} */ + function start(code) { + assert(code === codes.leftSquareBracket, 'expected `[`') + effects.enter(types.definition) + return factoryLabel.call( + self, + effects, + labelAfter, + nok, + types.definitionLabel, + types.definitionLabelMarker, + types.definitionLabelString + )(code) + } + + // … +} +``` + +## API + +This module exports the following identifiers: `factoryLabel`. +There is no default export. + +### `factoryLabel(…)` + +Note that labels in markdown are capped at 999 characters in the string. + +###### Parameters + +* `this` (`TokenizeContext`) — Tokenize context +* `effects` (`Effects`) — Context +* `ok` (`State`) — State switched to when successful +* `nok` (`State`) — State switched to when not successful +* `type` (`string`) — Token type for whole (`[a]`) +* `markerType` (`string`) — Token type for the markers (`[` and `]`) +* `stringType` (`string`) — Token type for the identifier (`a`) + +###### Returns + +`State`. + +###### Examples + +```markdown +[a] +[a +b] +[a\]b] +``` + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. 
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-factory-label.svg + +[downloads]: https://www.npmjs.com/package/micromark-factory-label + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-factory-label.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-factory-label + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-factory-space/dev/index.d.ts b/_extensions/d2/node_modules/micromark-factory-space/dev/index.d.ts new file mode 100644 index 00000000..7ab298d8 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-space/dev/index.d.ts @@ -0,0 +1,15 @@ +/** + * @param {Effects} effects + * @param {State} ok + * @param {string} type + * @param {number} [max=Infinity] + * @returns {State} + */ +export function factorySpace( + effects: Effects, + ok: State, + type: string, + max?: number | undefined +): State +export type Effects = import('micromark-util-types').Effects +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-factory-space/dev/index.js b/_extensions/d2/node_modules/micromark-factory-space/dev/index.js new file mode 100644 index 00000000..bbc87d70 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-space/dev/index.js @@ -0,0 +1,41 @@ +/** + * @typedef {import('micromark-util-types').Effects} Effects + * @typedef {import('micromark-util-types').State} State + */ + +import {markdownSpace} from 'micromark-util-character' + +/** + * @param {Effects} effects + * @param {State} ok + * @param {string} type + * @param {number} [max=Infinity] + * @returns {State} + */ +export function factorySpace(effects, ok, type, max) { + const limit = max ? 
max - 1 : Number.POSITIVE_INFINITY + let size = 0 + + return start + + /** @type {State} */ + function start(code) { + if (markdownSpace(code)) { + effects.enter(type) + return prefix(code) + } + + return ok(code) + } + + /** @type {State} */ + function prefix(code) { + if (markdownSpace(code) && size++ < limit) { + effects.consume(code) + return prefix + } + + effects.exit(type) + return ok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-space/index.d.ts b/_extensions/d2/node_modules/micromark-factory-space/index.d.ts new file mode 100644 index 00000000..7ab298d8 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-space/index.d.ts @@ -0,0 +1,15 @@ +/** + * @param {Effects} effects + * @param {State} ok + * @param {string} type + * @param {number} [max=Infinity] + * @returns {State} + */ +export function factorySpace( + effects: Effects, + ok: State, + type: string, + max?: number | undefined +): State +export type Effects = import('micromark-util-types').Effects +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-factory-space/index.js b/_extensions/d2/node_modules/micromark-factory-space/index.js new file mode 100644 index 00000000..e0a9d633 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-space/index.js @@ -0,0 +1,39 @@ +/** + * @typedef {import('micromark-util-types').Effects} Effects + * @typedef {import('micromark-util-types').State} State + */ +import {markdownSpace} from 'micromark-util-character' +/** + * @param {Effects} effects + * @param {State} ok + * @param {string} type + * @param {number} [max=Infinity] + * @returns {State} + */ + +export function factorySpace(effects, ok, type, max) { + const limit = max ? max - 1 : Number.POSITIVE_INFINITY + let size = 0 + return start + /** @type {State} */ + + function start(code) { + if (markdownSpace(code)) { + effects.enter(type) + return prefix(code) + } + + return ok(code) + } + /** @type {State} */ + + function prefix(code) { + if (markdownSpace(code) && size++ < limit) { + effects.consume(code) + return prefix + } + + effects.exit(type) + return ok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-space/package.json b/_extensions/d2/node_modules/micromark-factory-space/package.json new file mode 100644 index 00000000..0ee67c8c --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-space/package.json @@ -0,0 +1,54 @@ +{ + "name": "micromark-factory-space", + "version": "1.0.0", + "description": "micromark factory to parse markdown space (found in lots of places)", + "license": "MIT", + "keywords": [ + "micromark", + "factory", + "space" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-factory-space", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" 
&& tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-space/readme.md b/_extensions/d2/node_modules/micromark-factory-space/readme.md new file mode 100644 index 00000000..a27fcbfe --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-space/readme.md @@ -0,0 +1,162 @@ +# micromark-factory-space + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark factory to parse [markdown space][markdown-space] (found in lots of +places). + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`factorySpace(…)`](#factoryspace) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-factory-space +``` + +## Use + +```js +import {factorySpace} from 'micromark-factory-space' +import {codes} from 'micromark-util-symbol/codes' +import {types} from 'micromark-util-symbol/types' + +// A micromark tokenizer that uses the factory: +/** @type {Tokenizer} */ +function tokenizeCodeFenced(effects, ok, nok) { + return start + + // … + + /** @type {State} */ + function info(code) { + if (code === codes.eof || markdownLineEndingOrSpace(code)) { + effects.exit(types.chunkString) + effects.exit(types.codeFencedFenceInfo) + return factorySpace(effects, infoAfter, types.whitespace)(code) + } + + if (code === codes.graveAccent && code === marker) return nok(code) + effects.consume(code) + return info + } + + // … +} +``` + +## API + +This module exports the following identifiers: `factorySpace`. +There is no default export. + +### `factorySpace(…)` + +Note that there is no `nok` parameter: + +* spaces in markdown are often optional, in which case this factory can be + used and `ok` will be switched to whether spaces were found or not, +* One space character can be detected with + [markdownSpace(code)][markdown-space] right before using `factorySpace` + +###### Parameters + +* `effects` (`Effects`) — Context +* `ok` (`State`) — State switched to when successful +* `type` (`string`) — Token type for whole (`' \t'`) +* `max` (`number`, default: `Infinity`) — Max size of whitespace + +###### Returns + +`State`. + +###### Examples + +Where `␉` represents a tab (plus how much it expands) and `␠` represents a +single space. + +```markdown +␉ +␠␠␠␠ +␉␠ +``` + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. 
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-factory-space.svg + +[downloads]: https://www.npmjs.com/package/micromark-factory-space + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-factory-space.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-factory-space + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md + +[markdown-space]: https://github.com/micromark/micromark/tree/main/packages/micromark-util-character#markdownspacecode diff --git a/_extensions/d2/node_modules/micromark-factory-title/dev/index.d.ts b/_extensions/d2/node_modules/micromark-factory-title/dev/index.d.ts new file mode 100644 index 00000000..eb5fa121 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-title/dev/index.d.ts @@ -0,0 +1,20 @@ +/** + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * @param {string} markerType + * @param {string} stringType + * @returns {State} + */ +export function factoryTitle( + effects: Effects, + ok: State, + nok: State, + type: string, + markerType: string, + stringType: string +): State +export type Effects = import('micromark-util-types').Effects +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-factory-title/dev/index.js b/_extensions/d2/node_modules/micromark-factory-title/dev/index.js new file mode 100644 index 00000000..42fc203c --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-title/dev/index.js @@ -0,0 +1,103 @@ +/** + * @typedef {import('micromark-util-types').Effects} Effects + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ + +import {ok as assert} from 'uvu/assert' +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' +import {types} from 'micromark-util-symbol/types.js' + +/** + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * @param {string} markerType + * @param {string} stringType + * @returns {State} + */ +// eslint-disable-next-line max-params +export function factoryTitle(effects, ok, nok, type, 
markerType, stringType) { + /** @type {NonNullable} */ + let marker + + return start + + /** @type {State} */ + function start(code) { + assert( + code === codes.quotationMark || + code === codes.apostrophe || + code === codes.leftParenthesis, + 'expected `"`, `\'`, or `(`' + ) + effects.enter(type) + effects.enter(markerType) + effects.consume(code) + effects.exit(markerType) + marker = code === codes.leftParenthesis ? codes.rightParenthesis : code + return atFirstTitleBreak + } + + /** @type {State} */ + function atFirstTitleBreak(code) { + if (code === marker) { + effects.enter(markerType) + effects.consume(code) + effects.exit(markerType) + effects.exit(type) + return ok + } + + effects.enter(stringType) + return atTitleBreak(code) + } + + /** @type {State} */ + function atTitleBreak(code) { + if (code === marker) { + effects.exit(stringType) + return atFirstTitleBreak(marker) + } + + if (code === codes.eof) { + return nok(code) + } + + // Note: blank lines can’t exist in content. + if (markdownLineEnding(code)) { + effects.enter(types.lineEnding) + effects.consume(code) + effects.exit(types.lineEnding) + return factorySpace(effects, atTitleBreak, types.linePrefix) + } + + effects.enter(types.chunkString, {contentType: constants.contentTypeString}) + return title(code) + } + + /** @type {State} */ + function title(code) { + if (code === marker || code === codes.eof || markdownLineEnding(code)) { + effects.exit(types.chunkString) + return atTitleBreak(code) + } + + effects.consume(code) + return code === codes.backslash ? titleEscape : title + } + + /** @type {State} */ + function titleEscape(code) { + if (code === marker || code === codes.backslash) { + effects.consume(code) + return title + } + + return title(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-title/index.d.ts b/_extensions/d2/node_modules/micromark-factory-title/index.d.ts new file mode 100644 index 00000000..eb5fa121 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-title/index.d.ts @@ -0,0 +1,20 @@ +/** + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * @param {string} markerType + * @param {string} stringType + * @returns {State} + */ +export function factoryTitle( + effects: Effects, + ok: State, + nok: State, + type: string, + markerType: string, + stringType: string +): State +export type Effects = import('micromark-util-types').Effects +export type State = import('micromark-util-types').State +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-factory-title/index.js b/_extensions/d2/node_modules/micromark-factory-title/index.js new file mode 100644 index 00000000..9170df66 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-title/index.js @@ -0,0 +1,92 @@ +/** + * @typedef {import('micromark-util-types').Effects} Effects + * @typedef {import('micromark-util-types').State} State + * @typedef {import('micromark-util-types').Code} Code + */ +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding} from 'micromark-util-character' + +/** + * @param {Effects} effects + * @param {State} ok + * @param {State} nok + * @param {string} type + * @param {string} markerType + * @param {string} stringType + * @returns {State} + */ +// eslint-disable-next-line max-params +export function factoryTitle(effects, ok, nok, type, markerType, stringType) { + /** @type {NonNullable} */ + let marker + return start + /** @type {State} */ + + function 
start(code) { + effects.enter(type) + effects.enter(markerType) + effects.consume(code) + effects.exit(markerType) + marker = code === 40 ? 41 : code + return atFirstTitleBreak + } + /** @type {State} */ + + function atFirstTitleBreak(code) { + if (code === marker) { + effects.enter(markerType) + effects.consume(code) + effects.exit(markerType) + effects.exit(type) + return ok + } + + effects.enter(stringType) + return atTitleBreak(code) + } + /** @type {State} */ + + function atTitleBreak(code) { + if (code === marker) { + effects.exit(stringType) + return atFirstTitleBreak(marker) + } + + if (code === null) { + return nok(code) + } // Note: blank lines can’t exist in content. + + if (markdownLineEnding(code)) { + effects.enter('lineEnding') + effects.consume(code) + effects.exit('lineEnding') + return factorySpace(effects, atTitleBreak, 'linePrefix') + } + + effects.enter('chunkString', { + contentType: 'string' + }) + return title(code) + } + /** @type {State} */ + + function title(code) { + if (code === marker || code === null || markdownLineEnding(code)) { + effects.exit('chunkString') + return atTitleBreak(code) + } + + effects.consume(code) + return code === 92 ? titleEscape : title + } + /** @type {State} */ + + function titleEscape(code) { + if (code === marker || code === 92) { + effects.consume(code) + return title + } + + return title(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-title/package.json b/_extensions/d2/node_modules/micromark-factory-title/package.json new file mode 100644 index 00000000..e65d0582 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-title/package.json @@ -0,0 +1,57 @@ +{ + "name": "micromark-factory-title", + "version": "1.0.2", + "description": "micromark factory to parse markdown titles (found in resources, definitions)", + "license": "MIT", + "keywords": [ + "micromark", + "factory", + "title" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-factory-title", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-title/readme.md b/_extensions/d2/node_modules/micromark-factory-title/readme.md new file mode 100644 index 00000000..578968a1 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-title/readme.md @@ -0,0 +1,164 @@ +# micromark-factory-title + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] 
+[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark factory to parse markdown titles (found in resources, definitions). + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`factoryTitle(…)`](#factorytitle) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-factory-title +``` + +## Use + +```js +import {factoryTitle} from 'micromark-factory-title' +import {codes} from 'micromark-util-symbol/codes' +import {types} from 'micromark-util-symbol/types' + +// A micromark tokenizer that uses the factory: +/** @type {Tokenizer} */ +function tokenizeDefinition(effects, ok, nok) { + return start + + // … + + /** @type {State} */ + function before(code) { + if ( + code === codes.quotationMark || + code === codes.apostrophe || + code === codes.leftParenthesis + ) { + return factoryTitle( + effects, + factorySpace(effects, after, types.whitespace), + nok, + types.definitionTitle, + types.definitionTitleMarker, + types.definitionTitleString + )(code) + } + + return nok(code) + } + + // … +} +``` + +## API + +This module exports the following identifiers: `factoryTitle`. +There is no default export. + +### `factoryTitle(…)` + +###### Parameters + +* `effects` (`Effects`) — Context +* `ok` (`State`) — State switched to when successful +* `nok` (`State`) — State switched to when not successful +* `type` (`string`) — Token type for whole (`"a"`, `'b'`, `(c)`) +* `markerType` (`string`) — Token type for the markers (`"`, `'`, `(`, and + `)`) +* `stringType` (`string`) — Token type for the value (`a`) + +###### Returns + +`State`. + +###### Examples + +```markdown +"a" +'b' +(c) +"a +b" +'a + b' +(a\)b) +``` + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. 
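+
+Titles parsed by this factory end up on definitions and resources, and the
+compiler turns them into `title` attributes on links and images. A minimal
+end-to-end sketch, assuming the `micromark` package itself is available:
+
+```js
+import {micromark} from 'micromark'
+
+// The `"z"` in the definition is tokenized by `factoryTitle` and surfaces as
+// the link title once the reference is compiled (output shown approximately).
+console.log(micromark('[x]: y "z"\n\n[x]'))
+// => '<p><a href="y" title="z">x</a></p>'
+```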
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-factory-title.svg + +[downloads]: https://www.npmjs.com/package/micromark-factory-title + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-factory-title.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-factory-title + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-factory-whitespace/dev/index.d.ts b/_extensions/d2/node_modules/micromark-factory-whitespace/dev/index.d.ts new file mode 100644 index 00000000..25158ccf --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-whitespace/dev/index.d.ts @@ -0,0 +1,12 @@ +/** + * @param {Effects} effects + * @param {State} ok + */ +export function factoryWhitespace( + effects: Effects, + ok: State +): ( + code: import('micromark-util-types').Code +) => void | import('micromark-util-types').State +export type Effects = import('micromark-util-types').Effects +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-factory-whitespace/dev/index.js b/_extensions/d2/node_modules/micromark-factory-whitespace/dev/index.js new file mode 100644 index 00000000..dfb3c4fa --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-whitespace/dev/index.js @@ -0,0 +1,40 @@ +/** + * @typedef {import('micromark-util-types').Effects} Effects + * @typedef {import('micromark-util-types').State} State + */ + +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding, markdownSpace} from 'micromark-util-character' +import {types} from 'micromark-util-symbol/types.js' + +/** + * @param {Effects} effects + * @param {State} ok + */ +export function factoryWhitespace(effects, ok) { + /** @type {boolean} */ + let seen + + return start + + /** @type {State} */ + function start(code) { + if (markdownLineEnding(code)) { + effects.enter(types.lineEnding) + effects.consume(code) + effects.exit(types.lineEnding) + seen = true + return start + } + + if (markdownSpace(code)) { + return factorySpace( + effects, + start, + seen ? 
types.linePrefix : types.lineSuffix + )(code) + } + + return ok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-whitespace/index.d.ts b/_extensions/d2/node_modules/micromark-factory-whitespace/index.d.ts new file mode 100644 index 00000000..25158ccf --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-whitespace/index.d.ts @@ -0,0 +1,12 @@ +/** + * @param {Effects} effects + * @param {State} ok + */ +export function factoryWhitespace( + effects: Effects, + ok: State +): ( + code: import('micromark-util-types').Code +) => void | import('micromark-util-types').State +export type Effects = import('micromark-util-types').Effects +export type State = import('micromark-util-types').State diff --git a/_extensions/d2/node_modules/micromark-factory-whitespace/index.js b/_extensions/d2/node_modules/micromark-factory-whitespace/index.js new file mode 100644 index 00000000..efc762d3 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-whitespace/index.js @@ -0,0 +1,37 @@ +/** + * @typedef {import('micromark-util-types').Effects} Effects + * @typedef {import('micromark-util-types').State} State + */ +import {factorySpace} from 'micromark-factory-space' +import {markdownLineEnding, markdownSpace} from 'micromark-util-character' + +/** + * @param {Effects} effects + * @param {State} ok + */ +export function factoryWhitespace(effects, ok) { + /** @type {boolean} */ + let seen + return start + /** @type {State} */ + + function start(code) { + if (markdownLineEnding(code)) { + effects.enter('lineEnding') + effects.consume(code) + effects.exit('lineEnding') + seen = true + return start + } + + if (markdownSpace(code)) { + return factorySpace( + effects, + start, + seen ? 'linePrefix' : 'lineSuffix' + )(code) + } + + return ok(code) + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-whitespace/package.json b/_extensions/d2/node_modules/micromark-factory-whitespace/package.json new file mode 100644 index 00000000..28f74ff9 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-whitespace/package.json @@ -0,0 +1,56 @@ +{ + "name": "micromark-factory-whitespace", + "version": "1.0.0", + "description": "micromark factory to parse markdown whitespace (found in lots of places)", + "license": "MIT", + "keywords": [ + "micromark", + "factory", + "whitespace" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-factory-whitespace", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-factory-whitespace/readme.md 
b/_extensions/d2/node_modules/micromark-factory-whitespace/readme.md new file mode 100644 index 00000000..19a3d64f --- /dev/null +++ b/_extensions/d2/node_modules/micromark-factory-whitespace/readme.md @@ -0,0 +1,142 @@ +# micromark-factory-whitespace + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark factory to parse [markdown line endings or spaces][ws] (found in lots +of places). + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`factoryWhitespace(…)`](#factorywhitespace) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-factory-whitespace +``` + +## Use + +```js +import {factoryWhitespace} from 'micromark-factory-whitespace' +import {codes} from 'micromark-util-symbol/codes' +import {types} from 'micromark-util-symbol/types' + +// A micromark tokenizer that uses the factory: +/** @type {Tokenizer} */ +function tokenizeTitle(effects, ok, nok) { + return start + + /** @type {State} */ + function start(code) { + return markdownLineEndingOrSpace(code) + ? factoryWhitespace(effects, before)(code) + : nok(code) + } + + // … +} +``` + +## API + +This module exports the following identifiers: `factoryWhitespace`. +There is no default export. + +### `factoryWhitespace(…)` + +Note that there is no `nok` parameter: + +* line endings or spaces in markdown are often optional, in which case this + factory can be used and `ok` will be switched to whether spaces were found + or not, +* One line ending or space can be detected with + [markdownLineEndingOrSpace(code)][ws] right before using `factoryWhitespace` + +###### Parameters + +* `effects` (`Effects`) — Context +* `ok` (`State`) — State switched to when successful + +###### Returns + +`State`. + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. 
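+
+As a rough illustration, this factory is what consumes the optional whitespace
+(including a line ending) between a resource destination and its title. A
+sketch, assuming the `micromark` package itself is available:
+
+```js
+import {micromark} from 'micromark'
+
+// The line ending and indentation between `b` and `"c"` are eaten as
+// whitespace while tokenizing the resource; the compiled output is
+// unaffected (shown approximately).
+console.log(micromark('[a](b\n  "c")'))
+// => '<p><a href="b" title="c">a</a></p>'
+```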
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-factory-whitespace.svg + +[downloads]: https://www.npmjs.com/package/micromark-factory-whitespace + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-factory-whitespace.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-factory-whitespace + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md + +[ws]: https://github.com/micromark/micromark/tree/main/packages/micromark-util-character#markdownlineendingorspacecode diff --git a/_extensions/d2/node_modules/micromark-util-character/dev/index.d.ts b/_extensions/d2/node_modules/micromark-util-character/dev/index.d.ts new file mode 100644 index 00000000..d88ea54e --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-character/dev/index.d.ts @@ -0,0 +1,55 @@ +/** + * Check whether a character code is an ASCII control character. + * + * An **ASCII control** is a character in the inclusive range U+0000 NULL (NUL) + * to U+001F (US), or U+007F (DEL). + * + * @param {Code} code + * @returns {code is number} + */ +export function asciiControl(code: Code): code is number +/** + * Check whether a character code is a markdown line ending (see + * `markdownLineEnding`) or markdown space (see `markdownSpace`). + * + * @param {Code} code + * @returns {code is number} + */ +export function markdownLineEndingOrSpace(code: Code): code is number +/** + * Check whether a character code is a markdown line ending. + * + * A **markdown line ending** is the virtual characters M-0003 CARRIAGE RETURN + * LINE FEED (CRLF), M-0004 LINE FEED (LF) and M-0005 CARRIAGE RETURN (CR). + * + * In micromark, the actual character U+000A LINE FEED (LF) and U+000D CARRIAGE + * RETURN (CR) are replaced by these virtual characters depending on whether + * they occurred together. + * + * @param {Code} code + * @returns {code is number} + */ +export function markdownLineEnding(code: Code): code is number +/** + * Check whether a character code is a markdown space. + * + * A **markdown space** is the concrete character U+0020 SPACE (SP) and the + * virtual characters M-0001 VIRTUAL SPACE (VS) and M-0002 HORIZONTAL TAB (HT). + * + * In micromark, the actual character U+0009 CHARACTER TABULATION (HT) is + * replaced by one M-0002 HORIZONTAL TAB (HT) and between 0 and 3 M-0001 VIRTUAL + * SPACE (VS) characters, depending on the column at which the tab occurred. 
+ * + * @param {Code} code + * @returns {code is number} + */ +export function markdownSpace(code: Code): code is number +export function asciiAlpha(code: Code): code is number +export function asciiDigit(code: Code): code is number +export function asciiHexDigit(code: Code): code is number +export function asciiAlphanumeric(code: Code): code is number +export function asciiPunctuation(code: Code): code is number +export function asciiAtext(code: Code): code is number +export function unicodeWhitespace(code: Code): code is number +export function unicodePunctuation(code: Code): code is number +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-util-character/dev/index.js b/_extensions/d2/node_modules/micromark-util-character/dev/index.js new file mode 100644 index 00000000..2ce197dc --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-character/dev/index.js @@ -0,0 +1,201 @@ +/** + * @typedef {import('micromark-util-types').Code} Code + */ + +import {codes} from 'micromark-util-symbol/codes.js' +import {unicodePunctuationRegex} from './lib/unicode-punctuation-regex.js' + +/** + * Check whether the character code represents an ASCII alpha (`a` through `z`, + * case insensitive). + * + * An **ASCII alpha** is an ASCII upper alpha or ASCII lower alpha. + * + * An **ASCII upper alpha** is a character in the inclusive range U+0041 (`A`) + * to U+005A (`Z`). + * + * An **ASCII lower alpha** is a character in the inclusive range U+0061 (`a`) + * to U+007A (`z`). + */ +export const asciiAlpha = regexCheck(/[A-Za-z]/) + +/** + * Check whether the character code represents an ASCII digit (`0` through `9`). + * + * An **ASCII digit** is a character in the inclusive range U+0030 (`0`) to + * U+0039 (`9`). + */ +export const asciiDigit = regexCheck(/\d/) + +/** + * Check whether the character code represents an ASCII hex digit (`a` through + * `f`, case insensitive, or `0` through `9`). + * + * An **ASCII hex digit** is an ASCII digit (see `asciiDigit`), ASCII upper hex + * digit, or an ASCII lower hex digit. + * + * An **ASCII upper hex digit** is a character in the inclusive range U+0041 + * (`A`) to U+0046 (`F`). + * + * An **ASCII lower hex digit** is a character in the inclusive range U+0061 + * (`a`) to U+0066 (`f`). + */ +export const asciiHexDigit = regexCheck(/[\dA-Fa-f]/) + +/** + * Check whether the character code represents an ASCII alphanumeric (`a` + * through `z`, case insensitive, or `0` through `9`). + * + * An **ASCII alphanumeric** is an ASCII digit (see `asciiDigit`) or ASCII alpha + * (see `asciiAlpha`). + */ +export const asciiAlphanumeric = regexCheck(/[\dA-Za-z]/) + +/** + * Check whether the character code represents ASCII punctuation. + * + * An **ASCII punctuation** is a character in the inclusive ranges U+0021 + * EXCLAMATION MARK (`!`) to U+002F SLASH (`/`), U+003A COLON (`:`) to U+0040 AT + * SIGN (`@`), U+005B LEFT SQUARE BRACKET (`[`) to U+0060 GRAVE ACCENT + * (`` ` ``), or U+007B LEFT CURLY BRACE (`{`) to U+007E TILDE (`~`). + */ +export const asciiPunctuation = regexCheck(/[!-/:-@[-`{-~]/) + +/** + * Check whether the character code represents an ASCII atext. 
+ * + * atext is an ASCII alphanumeric (see `asciiAlphanumeric`), or a character in + * the inclusive ranges U+0023 NUMBER SIGN (`#`) to U+0027 APOSTROPHE (`'`), + * U+002A ASTERISK (`*`), U+002B PLUS SIGN (`+`), U+002D DASH (`-`), U+002F + * SLASH (`/`), U+003D EQUALS TO (`=`), U+003F QUESTION MARK (`?`), U+005E + * CARET (`^`) to U+0060 GRAVE ACCENT (`` ` ``), or U+007B LEFT CURLY BRACE + * (`{`) to U+007E TILDE (`~`). + * + * See: + * **\[RFC5322]**: + * [Internet Message Format](https://tools.ietf.org/html/rfc5322). + * P. Resnick. + * IETF. + */ +export const asciiAtext = regexCheck(/[#-'*+\--9=?A-Z^-~]/) + +/** + * Check whether a character code is an ASCII control character. + * + * An **ASCII control** is a character in the inclusive range U+0000 NULL (NUL) + * to U+001F (US), or U+007F (DEL). + * + * @param {Code} code + * @returns {code is number} + */ +export function asciiControl(code) { + return ( + // Special whitespace codes (which have negative values), C0 and Control + // character DEL + code !== null && (code < codes.space || code === codes.del) + ) +} + +/** + * Check whether a character code is a markdown line ending (see + * `markdownLineEnding`) or markdown space (see `markdownSpace`). + * + * @param {Code} code + * @returns {code is number} + */ +export function markdownLineEndingOrSpace(code) { + return code !== null && (code < codes.nul || code === codes.space) +} + +/** + * Check whether a character code is a markdown line ending. + * + * A **markdown line ending** is the virtual characters M-0003 CARRIAGE RETURN + * LINE FEED (CRLF), M-0004 LINE FEED (LF) and M-0005 CARRIAGE RETURN (CR). + * + * In micromark, the actual character U+000A LINE FEED (LF) and U+000D CARRIAGE + * RETURN (CR) are replaced by these virtual characters depending on whether + * they occurred together. + * + * @param {Code} code + * @returns {code is number} + */ +export function markdownLineEnding(code) { + return code !== null && code < codes.horizontalTab +} + +/** + * Check whether a character code is a markdown space. + * + * A **markdown space** is the concrete character U+0020 SPACE (SP) and the + * virtual characters M-0001 VIRTUAL SPACE (VS) and M-0002 HORIZONTAL TAB (HT). + * + * In micromark, the actual character U+0009 CHARACTER TABULATION (HT) is + * replaced by one M-0002 HORIZONTAL TAB (HT) and between 0 and 3 M-0001 VIRTUAL + * SPACE (VS) characters, depending on the column at which the tab occurred. + * + * @param {Code} code + * @returns {code is number} + */ +export function markdownSpace(code) { + return ( + code === codes.horizontalTab || + code === codes.virtualSpace || + code === codes.space + ) +} + +/** + * Check whether the character code represents Unicode whitespace. + * + * Note that this does handle micromark specific markdown whitespace characters. + * See `markdownLineEndingOrSpace` to check that. + * + * A **Unicode whitespace** is a character in the Unicode `Zs` (Separator, + * Space) category, or U+0009 CHARACTER TABULATION (HT), U+000A LINE FEED (LF), + * U+000C (FF), or U+000D CARRIAGE RETURN (CR) (**\[UNICODE]**). + * + * See: + * **\[UNICODE]**: + * [The Unicode Standard](https://www.unicode.org/versions/). + * Unicode Consortium. + */ +export const unicodeWhitespace = regexCheck(/\s/) + +/** + * Check whether the character code represents Unicode punctuation. 
+ * + * A **Unicode punctuation** is a character in the Unicode `Pc` (Punctuation, + * Connector), `Pd` (Punctuation, Dash), `Pe` (Punctuation, Close), `Pf` + * (Punctuation, Final quote), `Pi` (Punctuation, Initial quote), `Po` + * (Punctuation, Other), or `Ps` (Punctuation, Open) categories, or an ASCII + * punctuation (see `asciiPunctuation`). + * + * See: + * **\[UNICODE]**: + * [The Unicode Standard](https://www.unicode.org/versions/). + * Unicode Consortium. + */ +// Size note: removing ASCII from the regex and using `asciiPunctuation` here +// In fact adds to the bundle size. +export const unicodePunctuation = regexCheck(unicodePunctuationRegex) + +/** + * Create a code check from a regex. + * + * @param {RegExp} regex + * @returns {(code: Code) => code is number} + */ +function regexCheck(regex) { + return check + + /** + * Check whether a code matches the bound regex. + * + * @param {Code} code Character code + * @returns {code is number} Whether the character code matches the bound regex + */ + function check(code) { + return code !== null && regex.test(String.fromCharCode(code)) + } +} diff --git a/_extensions/d2/node_modules/micromark-util-character/dev/lib/unicode-punctuation-regex.d.ts b/_extensions/d2/node_modules/micromark-util-character/dev/lib/unicode-punctuation-regex.d.ts new file mode 100644 index 00000000..42362fc6 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-character/dev/lib/unicode-punctuation-regex.d.ts @@ -0,0 +1 @@ +export const unicodePunctuationRegex: RegExp diff --git a/_extensions/d2/node_modules/micromark-util-character/dev/lib/unicode-punctuation-regex.js b/_extensions/d2/node_modules/micromark-util-character/dev/lib/unicode-punctuation-regex.js new file mode 100644 index 00000000..817b236a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-character/dev/lib/unicode-punctuation-regex.js @@ -0,0 +1,8 @@ +// This module is generated by `script/`. +// +// CommonMark handles attention (emphasis, strong) markers based on what comes +// before or after them. +// One such difference is if those characters are Unicode punctuation. +// This script is generated from the Unicode data. 
+export const unicodePunctuationRegex = + /[!-/:-@[-`{-~\u00A1\u00A7\u00AB\u00B6\u00B7\u00BB\u00BF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061D-\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C77\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166E\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1B7D\u1B7E\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2010-\u2027\u2030-\u2043\u2045-\u2051\u2053-\u205E\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4F\u2E52-\u2E5D\u3001-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]/ diff --git a/_extensions/d2/node_modules/micromark-util-character/index.d.ts b/_extensions/d2/node_modules/micromark-util-character/index.d.ts new file mode 100644 index 00000000..d88ea54e --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-character/index.d.ts @@ -0,0 +1,55 @@ +/** + * Check whether a character code is an ASCII control character. + * + * An **ASCII control** is a character in the inclusive range U+0000 NULL (NUL) + * to U+001F (US), or U+007F (DEL). + * + * @param {Code} code + * @returns {code is number} + */ +export function asciiControl(code: Code): code is number +/** + * Check whether a character code is a markdown line ending (see + * `markdownLineEnding`) or markdown space (see `markdownSpace`). + * + * @param {Code} code + * @returns {code is number} + */ +export function markdownLineEndingOrSpace(code: Code): code is number +/** + * Check whether a character code is a markdown line ending. + * + * A **markdown line ending** is the virtual characters M-0003 CARRIAGE RETURN + * LINE FEED (CRLF), M-0004 LINE FEED (LF) and M-0005 CARRIAGE RETURN (CR). + * + * In micromark, the actual character U+000A LINE FEED (LF) and U+000D CARRIAGE + * RETURN (CR) are replaced by these virtual characters depending on whether + * they occurred together. + * + * @param {Code} code + * @returns {code is number} + */ +export function markdownLineEnding(code: Code): code is number +/** + * Check whether a character code is a markdown space. + * + * A **markdown space** is the concrete character U+0020 SPACE (SP) and the + * virtual characters M-0001 VIRTUAL SPACE (VS) and M-0002 HORIZONTAL TAB (HT). + * + * In micromark, the actual character U+0009 CHARACTER TABULATION (HT) is + * replaced by one M-0002 HORIZONTAL TAB (HT) and between 0 and 3 M-0001 VIRTUAL + * SPACE (VS) characters, depending on the column at which the tab occurred. 
+ * + * @param {Code} code + * @returns {code is number} + */ +export function markdownSpace(code: Code): code is number +export function asciiAlpha(code: Code): code is number +export function asciiDigit(code: Code): code is number +export function asciiHexDigit(code: Code): code is number +export function asciiAlphanumeric(code: Code): code is number +export function asciiPunctuation(code: Code): code is number +export function asciiAtext(code: Code): code is number +export function unicodeWhitespace(code: Code): code is number +export function unicodePunctuation(code: Code): code is number +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-util-character/index.js b/_extensions/d2/node_modules/micromark-util-character/index.js new file mode 100644 index 00000000..e734a942 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-character/index.js @@ -0,0 +1,195 @@ +/** + * @typedef {import('micromark-util-types').Code} Code + */ +import {unicodePunctuationRegex} from './lib/unicode-punctuation-regex.js' +/** + * Check whether the character code represents an ASCII alpha (`a` through `z`, + * case insensitive). + * + * An **ASCII alpha** is an ASCII upper alpha or ASCII lower alpha. + * + * An **ASCII upper alpha** is a character in the inclusive range U+0041 (`A`) + * to U+005A (`Z`). + * + * An **ASCII lower alpha** is a character in the inclusive range U+0061 (`a`) + * to U+007A (`z`). + */ + +export const asciiAlpha = regexCheck(/[A-Za-z]/) +/** + * Check whether the character code represents an ASCII digit (`0` through `9`). + * + * An **ASCII digit** is a character in the inclusive range U+0030 (`0`) to + * U+0039 (`9`). + */ + +export const asciiDigit = regexCheck(/\d/) +/** + * Check whether the character code represents an ASCII hex digit (`a` through + * `f`, case insensitive, or `0` through `9`). + * + * An **ASCII hex digit** is an ASCII digit (see `asciiDigit`), ASCII upper hex + * digit, or an ASCII lower hex digit. + * + * An **ASCII upper hex digit** is a character in the inclusive range U+0041 + * (`A`) to U+0046 (`F`). + * + * An **ASCII lower hex digit** is a character in the inclusive range U+0061 + * (`a`) to U+0066 (`f`). + */ + +export const asciiHexDigit = regexCheck(/[\dA-Fa-f]/) +/** + * Check whether the character code represents an ASCII alphanumeric (`a` + * through `z`, case insensitive, or `0` through `9`). + * + * An **ASCII alphanumeric** is an ASCII digit (see `asciiDigit`) or ASCII alpha + * (see `asciiAlpha`). + */ + +export const asciiAlphanumeric = regexCheck(/[\dA-Za-z]/) +/** + * Check whether the character code represents ASCII punctuation. + * + * An **ASCII punctuation** is a character in the inclusive ranges U+0021 + * EXCLAMATION MARK (`!`) to U+002F SLASH (`/`), U+003A COLON (`:`) to U+0040 AT + * SIGN (`@`), U+005B LEFT SQUARE BRACKET (`[`) to U+0060 GRAVE ACCENT + * (`` ` ``), or U+007B LEFT CURLY BRACE (`{`) to U+007E TILDE (`~`). + */ + +export const asciiPunctuation = regexCheck(/[!-/:-@[-`{-~]/) +/** + * Check whether the character code represents an ASCII atext. 
+ * + * atext is an ASCII alphanumeric (see `asciiAlphanumeric`), or a character in + * the inclusive ranges U+0023 NUMBER SIGN (`#`) to U+0027 APOSTROPHE (`'`), + * U+002A ASTERISK (`*`), U+002B PLUS SIGN (`+`), U+002D DASH (`-`), U+002F + * SLASH (`/`), U+003D EQUALS TO (`=`), U+003F QUESTION MARK (`?`), U+005E + * CARET (`^`) to U+0060 GRAVE ACCENT (`` ` ``), or U+007B LEFT CURLY BRACE + * (`{`) to U+007E TILDE (`~`). + * + * See: + * **\[RFC5322]**: + * [Internet Message Format](https://tools.ietf.org/html/rfc5322). + * P. Resnick. + * IETF. + */ + +export const asciiAtext = regexCheck(/[#-'*+\--9=?A-Z^-~]/) +/** + * Check whether a character code is an ASCII control character. + * + * An **ASCII control** is a character in the inclusive range U+0000 NULL (NUL) + * to U+001F (US), or U+007F (DEL). + * + * @param {Code} code + * @returns {code is number} + */ + +export function asciiControl(code) { + return ( + // Special whitespace codes (which have negative values), C0 and Control + // character DEL + code !== null && (code < 32 || code === 127) + ) +} +/** + * Check whether a character code is a markdown line ending (see + * `markdownLineEnding`) or markdown space (see `markdownSpace`). + * + * @param {Code} code + * @returns {code is number} + */ + +export function markdownLineEndingOrSpace(code) { + return code !== null && (code < 0 || code === 32) +} +/** + * Check whether a character code is a markdown line ending. + * + * A **markdown line ending** is the virtual characters M-0003 CARRIAGE RETURN + * LINE FEED (CRLF), M-0004 LINE FEED (LF) and M-0005 CARRIAGE RETURN (CR). + * + * In micromark, the actual character U+000A LINE FEED (LF) and U+000D CARRIAGE + * RETURN (CR) are replaced by these virtual characters depending on whether + * they occurred together. + * + * @param {Code} code + * @returns {code is number} + */ + +export function markdownLineEnding(code) { + return code !== null && code < -2 +} +/** + * Check whether a character code is a markdown space. + * + * A **markdown space** is the concrete character U+0020 SPACE (SP) and the + * virtual characters M-0001 VIRTUAL SPACE (VS) and M-0002 HORIZONTAL TAB (HT). + * + * In micromark, the actual character U+0009 CHARACTER TABULATION (HT) is + * replaced by one M-0002 HORIZONTAL TAB (HT) and between 0 and 3 M-0001 VIRTUAL + * SPACE (VS) characters, depending on the column at which the tab occurred. + * + * @param {Code} code + * @returns {code is number} + */ + +export function markdownSpace(code) { + return code === -2 || code === -1 || code === 32 +} +/** + * Check whether the character code represents Unicode whitespace. + * + * Note that this does handle micromark specific markdown whitespace characters. + * See `markdownLineEndingOrSpace` to check that. + * + * A **Unicode whitespace** is a character in the Unicode `Zs` (Separator, + * Space) category, or U+0009 CHARACTER TABULATION (HT), U+000A LINE FEED (LF), + * U+000C (FF), or U+000D CARRIAGE RETURN (CR) (**\[UNICODE]**). + * + * See: + * **\[UNICODE]**: + * [The Unicode Standard](https://www.unicode.org/versions/). + * Unicode Consortium. + */ + +export const unicodeWhitespace = regexCheck(/\s/) +/** + * Check whether the character code represents Unicode punctuation. 
+ * + * A **Unicode punctuation** is a character in the Unicode `Pc` (Punctuation, + * Connector), `Pd` (Punctuation, Dash), `Pe` (Punctuation, Close), `Pf` + * (Punctuation, Final quote), `Pi` (Punctuation, Initial quote), `Po` + * (Punctuation, Other), or `Ps` (Punctuation, Open) categories, or an ASCII + * punctuation (see `asciiPunctuation`). + * + * See: + * **\[UNICODE]**: + * [The Unicode Standard](https://www.unicode.org/versions/). + * Unicode Consortium. + */ +// Size note: removing ASCII from the regex and using `asciiPunctuation` here +// In fact adds to the bundle size. + +export const unicodePunctuation = regexCheck(unicodePunctuationRegex) +/** + * Create a code check from a regex. + * + * @param {RegExp} regex + * @returns {(code: Code) => code is number} + */ + +function regexCheck(regex) { + return check + /** + * Check whether a code matches the bound regex. + * + * @param {Code} code Character code + * @returns {code is number} Whether the character code matches the bound regex + */ + + function check(code) { + return code !== null && regex.test(String.fromCharCode(code)) + } +} diff --git a/_extensions/d2/node_modules/micromark-util-character/lib/unicode-punctuation-regex.d.ts b/_extensions/d2/node_modules/micromark-util-character/lib/unicode-punctuation-regex.d.ts new file mode 100644 index 00000000..42362fc6 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-character/lib/unicode-punctuation-regex.d.ts @@ -0,0 +1 @@ +export const unicodePunctuationRegex: RegExp diff --git a/_extensions/d2/node_modules/micromark-util-character/lib/unicode-punctuation-regex.js b/_extensions/d2/node_modules/micromark-util-character/lib/unicode-punctuation-regex.js new file mode 100644 index 00000000..32d84d4d --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-character/lib/unicode-punctuation-regex.js @@ -0,0 +1,8 @@ +// This module is generated by `script/`. +// +// CommonMark handles attention (emphasis, strong) markers based on what comes +// before or after them. +// One such difference is if those characters are Unicode punctuation. +// This script is generated from the Unicode data. 
+export const unicodePunctuationRegex = + /[!-/:-@[-`{-~\u00A1\u00A7\u00AB\u00B6\u00B7\u00BB\u00BF\u037E\u0387\u055A-\u055F\u0589\u058A\u05BE\u05C0\u05C3\u05C6\u05F3\u05F4\u0609\u060A\u060C\u060D\u061B\u061E\u061F\u066A-\u066D\u06D4\u0700-\u070D\u07F7-\u07F9\u0830-\u083E\u085E\u0964\u0965\u0970\u09FD\u0A76\u0AF0\u0C77\u0C84\u0DF4\u0E4F\u0E5A\u0E5B\u0F04-\u0F12\u0F14\u0F3A-\u0F3D\u0F85\u0FD0-\u0FD4\u0FD9\u0FDA\u104A-\u104F\u10FB\u1360-\u1368\u1400\u166E\u169B\u169C\u16EB-\u16ED\u1735\u1736\u17D4-\u17D6\u17D8-\u17DA\u1800-\u180A\u1944\u1945\u1A1E\u1A1F\u1AA0-\u1AA6\u1AA8-\u1AAD\u1B5A-\u1B60\u1BFC-\u1BFF\u1C3B-\u1C3F\u1C7E\u1C7F\u1CC0-\u1CC7\u1CD3\u2010-\u2027\u2030-\u2043\u2045-\u2051\u2053-\u205E\u207D\u207E\u208D\u208E\u2308-\u230B\u2329\u232A\u2768-\u2775\u27C5\u27C6\u27E6-\u27EF\u2983-\u2998\u29D8-\u29DB\u29FC\u29FD\u2CF9-\u2CFC\u2CFE\u2CFF\u2D70\u2E00-\u2E2E\u2E30-\u2E4F\u2E52\u3001-\u3003\u3008-\u3011\u3014-\u301F\u3030\u303D\u30A0\u30FB\uA4FE\uA4FF\uA60D-\uA60F\uA673\uA67E\uA6F2-\uA6F7\uA874-\uA877\uA8CE\uA8CF\uA8F8-\uA8FA\uA8FC\uA92E\uA92F\uA95F\uA9C1-\uA9CD\uA9DE\uA9DF\uAA5C-\uAA5F\uAADE\uAADF\uAAF0\uAAF1\uABEB\uFD3E\uFD3F\uFE10-\uFE19\uFE30-\uFE52\uFE54-\uFE61\uFE63\uFE68\uFE6A\uFE6B\uFF01-\uFF03\uFF05-\uFF0A\uFF0C-\uFF0F\uFF1A\uFF1B\uFF1F\uFF20\uFF3B-\uFF3D\uFF3F\uFF5B\uFF5D\uFF5F-\uFF65]/ diff --git a/_extensions/d2/node_modules/micromark-util-character/package.json b/_extensions/d2/node_modules/micromark-util-character/package.json new file mode 100644 index 00000000..c1baa125 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-character/package.json @@ -0,0 +1,56 @@ +{ + "name": "micromark-util-character", + "version": "1.1.0", + "description": "micromark utility to handle character codes", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "character" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-character", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "lib/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-character/readme.md b/_extensions/d2/node_modules/micromark-util-character/readme.md new file mode 100644 index 00000000..798a49e8 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-character/readme.md @@ -0,0 +1,275 @@ +# micromark-util-character + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility to handle [character 
codes](https://github.com/micromark/micromark#preprocess). + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`asciiAlpha(code)`](#asciialphacode) + * [`asciiDigit(code)`](#asciidigitcode) + * [`asciiHexDigit(code)`](#asciihexdigitcode) + * [`asciiAlphanumeric(code)`](#asciialphanumericcode) + * [`asciiPunctuation(code)`](#asciipunctuationcode) + * [`asciiAtext(code)`](#asciiatextcode) + * [`asciiControl(code)`](#asciicontrolcode) + * [`markdownLineEndingOrSpace(code)`](#markdownlineendingorspacecode) + * [`markdownLineEnding(code)`](#markdownlineendingcode) + * [`markdownSpace(code)`](#markdownspacecode) + * [`unicodeWhitespace(code)`](#unicodewhitespacecode) + * [`unicodePunctuation(code)`](#unicodepunctuationcode) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-util-character +``` + +## Use + +```js +import {asciiAlpha} from 'micromark-util-character' + +console.log(asciiAlpha(64)) // false +console.log(asciiAlpha(65)) // true +``` + +## API + +This module exports the following identifiers: `asciiAlpha`, +`asciiAlphanumeric`, `asciiAtext`, `asciiControl`, `asciiDigit`, +`asciiHexDigit`, `asciiPunctuation`, `markdownLineEnding`, +`markdownLineEndingOrSpace`, `markdownSpace`, `unicodePunctuation`, +`unicodeWhitespace`. +There is no default export. + +### `asciiAlpha(code)` + +Check whether the +[character code](https://github.com/micromark/micromark#preprocess) +represents an ASCII alpha (`a` through `z`, +case insensitive). + +An **ASCII alpha** is an ASCII upper alpha or ASCII lower alpha. + +An **ASCII upper alpha** is a character in the inclusive range U+0041 (`A`) +to U+005A (`Z`). + +An **ASCII lower alpha** is a character in the inclusive range U+0061 (`a`) +to U+007A (`z`). + +### `asciiDigit(code)` + +Check whether the +[character code](https://github.com/micromark/micromark#preprocess) +represents an ASCII digit (`0` through `9`). + +An **ASCII digit** is a character in the inclusive range U+0030 (`0`) to +U+0039 (`9`). + +### `asciiHexDigit(code)` + +Check whether the +[character code](https://github.com/micromark/micromark#preprocess) +represents an ASCII hex digit (`a` through `f`, case insensitive, or `0` through +`9`). + +An **ASCII hex digit** is an ASCII digit (see `asciiDigit`), ASCII upper hex +digit, or an ASCII lower hex digit. + +An **ASCII upper hex digit** is a character in the inclusive range U+0041 +(`A`) to U+0046 (`F`). + +An **ASCII lower hex digit** is a character in the inclusive range U+0061 +(`a`) to U+0066 (`f`). + +### `asciiAlphanumeric(code)` + +Check whether the +[character code](https://github.com/micromark/micromark#preprocess) +represents an ASCII alphanumeric (`a` through `z`, case insensitive, or `0` +through `9`). + +An **ASCII alphanumeric** is an ASCII digit (see `asciiDigit`) or ASCII alpha +(see `asciiAlpha`). + +### `asciiPunctuation(code)` + +Check whether the +[character code](https://github.com/micromark/micromark#preprocess) +represents ASCII punctuation. + +An **ASCII punctuation** is a character in the inclusive ranges U+0021 +EXCLAMATION MARK (`!`) to U+002F SLASH (`/`), U+003A COLON (`:`) to U+0040 AT +SIGN (`@`), U+005B LEFT SQUARE BRACKET (`[`) to U+0060 GRAVE ACCENT +(`` ` ``), or U+007B LEFT CURLY BRACE (`{`) to U+007E TILDE (`~`). + +### `asciiAtext(code)` + +Check whether the +[character code](https://github.com/micromark/micromark#preprocess) +represents an ASCII atext. 
+ +atext is an ASCII alphanumeric (see `asciiAlphanumeric`), or a character in +the inclusive ranges U+0023 NUMBER SIGN (`#`) to U+0027 APOSTROPHE (`'`), +U+002A ASTERISK (`*`), U+002B PLUS SIGN (`+`), U+002D DASH (`-`), U+002F +SLASH (`/`), U+003D EQUALS TO (`=`), U+003F QUESTION MARK (`?`), U+005E +CARET (`^`) to U+0060 GRAVE ACCENT (`` ` ``), or U+007B LEFT CURLY BRACE +(`{`) to U+007E TILDE (`~`) (**\[RFC5322]**). + +See **\[RFC5322]**:\ +[Internet Message Format](https://tools.ietf.org/html/rfc5322).\ +P. Resnick.\ +IETF. + +### `asciiControl(code)` + +Check whether a +[character code](https://github.com/micromark/micromark#preprocess) +is an ASCII control character. + +An **ASCII control** is a character in the inclusive range U+0000 NULL (NUL) +to U+001F (US), or U+007F (DEL). + +### `markdownLineEndingOrSpace(code)` + +Check whether a +[character code](https://github.com/micromark/micromark#preprocess) +is a markdown line ending (see `markdownLineEnding`) or markdown space (see +`markdownSpace`). + +### `markdownLineEnding(code)` + +Check whether a +[character code](https://github.com/micromark/micromark#preprocess) +is a markdown line ending. + +A **markdown line ending** is the virtual characters M-0003 CARRIAGE RETURN +LINE FEED (CRLF), M-0004 LINE FEED (LF) and M-0005 CARRIAGE RETURN (CR). + +In micromark, the actual character U+000A LINE FEED (LF) and U+000D CARRIAGE +RETURN (CR) are replaced by these virtual characters depending on whether +they occurred together. + +### `markdownSpace(code)` + +Check whether a +[character code](https://github.com/micromark/micromark#preprocess) +is a markdown space. + +A **markdown space** is the concrete character U+0020 SPACE (SP) and the +virtual characters M-0001 VIRTUAL SPACE (VS) and M-0002 HORIZONTAL TAB (HT). + +In micromark, the actual character U+0009 CHARACTER TABULATION (HT) is +replaced by one M-0002 HORIZONTAL TAB (HT) and between 0 and 3 M-0001 VIRTUAL +SPACE (VS) characters, depending on the column at which the tab occurred. + +### `unicodeWhitespace(code)` + +Check whether the +[character code](https://github.com/micromark/micromark#preprocess) +represents Unicode whitespace. + +Note that this does handle micromark specific markdown whitespace characters. +See `markdownLineEndingOrSpace` to check that. + +A **Unicode whitespace** is a character in the Unicode `Zs` (Separator, +Space) category, or U+0009 CHARACTER TABULATION (HT), U+000A LINE FEED (LF), +U+000C (FF), or U+000D CARRIAGE RETURN (CR) (**\[UNICODE]**). + +See **\[UNICODE]**:\ +[The Unicode Standard](https://www.unicode.org/versions/).\ +Unicode Consortium. + +### `unicodePunctuation(code)` + +Check whether the +[character code](https://github.com/micromark/micromark#preprocess) +represents Unicode punctuation. + +A **Unicode punctuation** is a character in the Unicode `Pc` (Punctuation, +Connector), `Pd` (Punctuation, Dash), `Pe` (Punctuation, Close), `Pf` +(Punctuation, Final quote), `Pi` (Punctuation, Initial quote), `Po` +(Punctuation, Other), or `Ps` (Punctuation, Open) categories, or an ASCII +punctuation (see `asciiPunctuation`) (**\[UNICODE]**). + +See **\[UNICODE]**:\ +[The Unicode Standard](https://www.unicode.org/versions/).\ +Unicode Consortium. + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. 
+ +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-character.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-character + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-character.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-character + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-util-chunked/dev/index.d.ts b/_extensions/d2/node_modules/micromark-util-chunked/dev/index.d.ts new file mode 100644 index 00000000..40c08363 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-chunked/dev/index.d.ts @@ -0,0 +1,35 @@ +/** + * Like `Array#splice`, but smarter for giant arrays. + * + * `Array#splice` takes all items to be inserted as individual argument which + * causes a stack overflow in V8 when trying to insert 100k items for instance. + * + * Otherwise, this does not return the removed items, and takes `items` as an + * array instead of rest parameters. + * + * @template {unknown} T + * @param {T[]} list + * @param {number} start + * @param {number} remove + * @param {T[]} items + * @returns {void} + */ +export function splice( + list: T[], + start: number, + remove: number, + items: T[] +): void +/** + * Append `items` (an array) at the end of `list` (another array). + * When `list` was empty, returns `items` instead. + * + * This prevents a potentially expensive operation when `list` is empty, + * and adds items in batches to prevent V8 from hanging. + * + * @template {unknown} T + * @param {T[]} list + * @param {T[]} items + * @returns {T[]} + */ +export function push(list: T[], items: T[]): T[] diff --git a/_extensions/d2/node_modules/micromark-util-chunked/dev/index.js b/_extensions/d2/node_modules/micromark-util-chunked/dev/index.js new file mode 100644 index 00000000..b32ae1e9 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-chunked/dev/index.js @@ -0,0 +1,79 @@ +import {constants} from 'micromark-util-symbol/constants.js' + +/** + * Like `Array#splice`, but smarter for giant arrays. + * + * `Array#splice` takes all items to be inserted as individual argument which + * causes a stack overflow in V8 when trying to insert 100k items for instance. 
+ * + * Otherwise, this does not return the removed items, and takes `items` as an + * array instead of rest parameters. + * + * @template {unknown} T + * @param {T[]} list + * @param {number} start + * @param {number} remove + * @param {T[]} items + * @returns {void} + */ +export function splice(list, start, remove, items) { + const end = list.length + let chunkStart = 0 + /** @type {unknown[]} */ + let parameters + + // Make start between zero and `end` (included). + if (start < 0) { + start = -start > end ? 0 : end + start + } else { + start = start > end ? end : start + } + + remove = remove > 0 ? remove : 0 + + // No need to chunk the items if there’s only a couple (10k) items. + if (items.length < constants.v8MaxSafeChunkSize) { + parameters = Array.from(items) + parameters.unshift(start, remove) + // @ts-expect-error Hush, it’s fine. + ;[].splice.apply(list, parameters) + } else { + // Delete `remove` items starting from `start` + if (remove) [].splice.apply(list, [start, remove]) + + // Insert the items in chunks to not cause stack overflows. + while (chunkStart < items.length) { + parameters = items.slice( + chunkStart, + chunkStart + constants.v8MaxSafeChunkSize + ) + parameters.unshift(start, 0) + // @ts-expect-error Hush, it’s fine. + ;[].splice.apply(list, parameters) + + chunkStart += constants.v8MaxSafeChunkSize + start += constants.v8MaxSafeChunkSize + } + } +} + +/** + * Append `items` (an array) at the end of `list` (another array). + * When `list` was empty, returns `items` instead. + * + * This prevents a potentially expensive operation when `list` is empty, + * and adds items in batches to prevent V8 from hanging. + * + * @template {unknown} T + * @param {T[]} list + * @param {T[]} items + * @returns {T[]} + */ +export function push(list, items) { + if (list.length > 0) { + splice(list, list.length, 0, items) + return list + } + + return items +} diff --git a/_extensions/d2/node_modules/micromark-util-chunked/index.d.ts b/_extensions/d2/node_modules/micromark-util-chunked/index.d.ts new file mode 100644 index 00000000..40c08363 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-chunked/index.d.ts @@ -0,0 +1,35 @@ +/** + * Like `Array#splice`, but smarter for giant arrays. + * + * `Array#splice` takes all items to be inserted as individual argument which + * causes a stack overflow in V8 when trying to insert 100k items for instance. + * + * Otherwise, this does not return the removed items, and takes `items` as an + * array instead of rest parameters. + * + * @template {unknown} T + * @param {T[]} list + * @param {number} start + * @param {number} remove + * @param {T[]} items + * @returns {void} + */ +export function splice( + list: T[], + start: number, + remove: number, + items: T[] +): void +/** + * Append `items` (an array) at the end of `list` (another array). + * When `list` was empty, returns `items` instead. + * + * This prevents a potentially expensive operation when `list` is empty, + * and adds items in batches to prevent V8 from hanging. + * + * @template {unknown} T + * @param {T[]} list + * @param {T[]} items + * @returns {T[]} + */ +export function push(list: T[], items: T[]): T[] diff --git a/_extensions/d2/node_modules/micromark-util-chunked/index.js b/_extensions/d2/node_modules/micromark-util-chunked/index.js new file mode 100644 index 00000000..cedcf971 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-chunked/index.js @@ -0,0 +1,69 @@ +/** + * Like `Array#splice`, but smarter for giant arrays. 
+ * + * `Array#splice` takes all items to be inserted as individual argument which + * causes a stack overflow in V8 when trying to insert 100k items for instance. + * + * Otherwise, this does not return the removed items, and takes `items` as an + * array instead of rest parameters. + * + * @template {unknown} T + * @param {T[]} list + * @param {number} start + * @param {number} remove + * @param {T[]} items + * @returns {void} + */ +export function splice(list, start, remove, items) { + const end = list.length + let chunkStart = 0 + /** @type {unknown[]} */ + + let parameters // Make start between zero and `end` (included). + + if (start < 0) { + start = -start > end ? 0 : end + start + } else { + start = start > end ? end : start + } + + remove = remove > 0 ? remove : 0 // No need to chunk the items if there’s only a couple (10k) items. + + if (items.length < 10000) { + parameters = Array.from(items) + parameters.unshift(start, remove) // @ts-expect-error Hush, it’s fine. + ;[].splice.apply(list, parameters) + } else { + // Delete `remove` items starting from `start` + if (remove) [].splice.apply(list, [start, remove]) // Insert the items in chunks to not cause stack overflows. + + while (chunkStart < items.length) { + parameters = items.slice(chunkStart, chunkStart + 10000) + parameters.unshift(start, 0) // @ts-expect-error Hush, it’s fine. + ;[].splice.apply(list, parameters) + chunkStart += 10000 + start += 10000 + } + } +} +/** + * Append `items` (an array) at the end of `list` (another array). + * When `list` was empty, returns `items` instead. + * + * This prevents a potentially expensive operation when `list` is empty, + * and adds items in batches to prevent V8 from hanging. + * + * @template {unknown} T + * @param {T[]} list + * @param {T[]} items + * @returns {T[]} + */ + +export function push(list, items) { + if (list.length > 0) { + splice(list, list.length, 0, items) + return list + } + + return items +} diff --git a/_extensions/d2/node_modules/micromark-util-chunked/package.json b/_extensions/d2/node_modules/micromark-util-chunked/package.json new file mode 100644 index 00000000..e7aee471 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-chunked/package.json @@ -0,0 +1,56 @@ +{ + "name": "micromark-util-chunked", + "version": "1.0.0", + "description": "micromark utility to splice and push with giant arrays", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "chunk", + "splice", + "push" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-chunked", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-util-symbol": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git 
a/_extensions/d2/node_modules/micromark-util-chunked/readme.md b/_extensions/d2/node_modules/micromark-util-chunked/readme.md new file mode 100644 index 00000000..73f5c090 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-chunked/readme.md @@ -0,0 +1,155 @@ +# micromark-util-chunked + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility to splice and push with giant arrays. + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`push(list, items)`](#pushlist-items) + * [`splice(list, start, remove, items)`](#splicelist-start-remove-items) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-util-chunked +``` + +## Use + +```js +import {push, splice} from 'micromark-util-chunked' + +// … + +nextEvents = push(nextEvents, [ + ['enter', events[open][1], context], + ['exit', events[open][1], context] +]) + +// … + +splice(events, open - 1, index - open + 3, nextEvents) + +// … +``` + +## API + +This module exports the following identifiers: `push`, `splice`. +There is no default export. + +### `push(list, items)` + +Append `items` (an array) at the end of `list` (another array). +When `list` was empty, returns `items` instead. + +This prevents a potentially expensive operation when `list` is empty, +and adds items in batches to prevent V8 from hanging. + +###### Parameters + +* `list` (`unknown[]`) — List to operate on +* `items` (`unknown[]`) — Items to add to `list` + +###### Returns + +`list` or `items` + +### `splice(list, start, remove, items)` + +Like `Array#splice`, but smarter for giant arrays. + +`Array#splice` takes all items to be inserted as individual argument which +causes a stack overflow in V8 when trying to insert 100k items for instance. + +Otherwise, this does not return the removed items, and takes `items` as an +array instead of rest parameters. + +###### Parameters + +* `list` (`unknown[]`) — List to operate on +* `start` (`number`) — Index to remove/insert at (can be negative) +* `remove` (`number`) — Number of items to remove +* `items` (`unknown[]`) — Items to inject into `list` + +###### Returns + +`void` + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. 
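
As an illustration of the two helpers documented above, here is a minimal sketch based on the readme's own API description (illustrative only, not part of the vendored package files):

```js
import {push, splice} from 'micromark-util-chunked'

const list = [1, 2, 3]

// `push` mutates and returns `list` when it already has items…
console.log(push(list, [4, 5])) // [1, 2, 3, 4, 5]
// …but returns `items` as-is when `list` is empty.
console.log(push([], [4, 5])) // [4, 5]

// `splice` behaves like Array#splice, but takes the insertions as an array
// and applies them in chunks, so very large inserts cannot overflow the V8
// argument limit; it does not return the removed items.
splice(list, 1, 2, [9, 9])
console.log(list) // [1, 9, 9, 4, 5]
```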
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-chunked.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-chunked + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-chunked.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-chunked + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-util-classify-character/dev/index.d.ts b/_extensions/d2/node_modules/micromark-util-classify-character/dev/index.d.ts new file mode 100644 index 00000000..f1950166 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-classify-character/dev/index.d.ts @@ -0,0 +1,14 @@ +/** + * Classify whether a character code represents whitespace, punctuation, or + * something else. + * + * Used for attention (emphasis, strong), whose sequences can open or close + * based on the class of surrounding characters. + * + * Note that eof (`null`) is seen as whitespace. + * + * @param {Code} code + * @returns {number|undefined} + */ +export function classifyCharacter(code: Code): number | undefined +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-util-classify-character/dev/index.js b/_extensions/d2/node_modules/micromark-util-classify-character/dev/index.js new file mode 100644 index 00000000..502bb4c0 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-classify-character/dev/index.js @@ -0,0 +1,37 @@ +/** + * @typedef {import('micromark-util-types').Code} Code + */ + +import { + markdownLineEndingOrSpace, + unicodePunctuation, + unicodeWhitespace +} from 'micromark-util-character' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' + +/** + * Classify whether a character code represents whitespace, punctuation, or + * something else. + * + * Used for attention (emphasis, strong), whose sequences can open or close + * based on the class of surrounding characters. + * + * Note that eof (`null`) is seen as whitespace. 
+ * + * @param {Code} code + * @returns {number|undefined} + */ +export function classifyCharacter(code) { + if ( + code === codes.eof || + markdownLineEndingOrSpace(code) || + unicodeWhitespace(code) + ) { + return constants.characterGroupWhitespace + } + + if (unicodePunctuation(code)) { + return constants.characterGroupPunctuation + } +} diff --git a/_extensions/d2/node_modules/micromark-util-classify-character/index.d.ts b/_extensions/d2/node_modules/micromark-util-classify-character/index.d.ts new file mode 100644 index 00000000..f1950166 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-classify-character/index.d.ts @@ -0,0 +1,14 @@ +/** + * Classify whether a character code represents whitespace, punctuation, or + * something else. + * + * Used for attention (emphasis, strong), whose sequences can open or close + * based on the class of surrounding characters. + * + * Note that eof (`null`) is seen as whitespace. + * + * @param {Code} code + * @returns {number|undefined} + */ +export function classifyCharacter(code: Code): number | undefined +export type Code = import('micromark-util-types').Code diff --git a/_extensions/d2/node_modules/micromark-util-classify-character/index.js b/_extensions/d2/node_modules/micromark-util-classify-character/index.js new file mode 100644 index 00000000..f924beb2 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-classify-character/index.js @@ -0,0 +1,34 @@ +/** + * @typedef {import('micromark-util-types').Code} Code + */ +import { + markdownLineEndingOrSpace, + unicodePunctuation, + unicodeWhitespace +} from 'micromark-util-character' + +/** + * Classify whether a character code represents whitespace, punctuation, or + * something else. + * + * Used for attention (emphasis, strong), whose sequences can open or close + * based on the class of surrounding characters. + * + * Note that eof (`null`) is seen as whitespace. 
+ * + * @param {Code} code + * @returns {number|undefined} + */ +export function classifyCharacter(code) { + if ( + code === null || + markdownLineEndingOrSpace(code) || + unicodeWhitespace(code) + ) { + return 1 + } + + if (unicodePunctuation(code)) { + return 2 + } +} diff --git a/_extensions/d2/node_modules/micromark-util-classify-character/package.json b/_extensions/d2/node_modules/micromark-util-classify-character/package.json new file mode 100644 index 00000000..24716e25 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-classify-character/package.json @@ -0,0 +1,58 @@ +{ + "name": "micromark-util-classify-character", + "version": "1.0.0", + "description": "micromark utility to classify whether a character is whitespace or punctuation", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "attention", + "classify", + "character" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-classify-character", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-classify-character/readme.md b/_extensions/d2/node_modules/micromark-util-classify-character/readme.md new file mode 100644 index 00000000..881ea13a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-classify-character/readme.md @@ -0,0 +1,141 @@ +# micromark-util-classify-character + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility to classify whether a character is whitespace or punctuation. + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`classifyCharacter(code)`](#classifycharactercode) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-util-classify-character +``` + +## Use + +```js +/** @type {Tokenizer} */ +function tokenizeAttention(effects, ok) { + return start + + // … + + /** @type {State} */ + function sequence(code) { + if (code === marker) { + // … + } + + const token = effects.exit('attentionSequence') + const after = classifyCharacter(code) + const open = + !after || (after === constants.characterGroupPunctuation && before) + const close = + !before || (before === constants.characterGroupPunctuation && after) + // … + } + + // … +} +``` + +## API + +This module exports the following identifiers: `classifyCharacter`. 
+There is no default export. + +### `classifyCharacter(code)` + +Classify whether a +[character code](https://github.com/micromark/micromark#preprocess) +represents whitespace, punctuation, or +something else. +Used for attention (emphasis, strong), whose sequences can open or close based +on the class of surrounding characters. + +Note that eof (`null`) is seen as whitespace. + +###### Returns + +`constants.characterGroupWhitespace`, `constants.characterGroupPunctuation`, +or `undefined.` + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-classify-character.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-classify-character + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-classify-character.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-classify-character + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-util-combine-extensions/index.d.ts b/_extensions/d2/node_modules/micromark-util-combine-extensions/index.d.ts new file mode 100644 index 00000000..cbd40ba5 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-combine-extensions/index.d.ts @@ -0,0 +1,21 @@ +/** + * Combine several syntax extensions into one. + * + * @param {Extension[]} extensions List of syntax extensions. + * @returns {NormalizedExtension} A single combined extension. + */ +export function combineExtensions(extensions: Extension[]): NormalizedExtension +/** + * Combine several HTML extensions into one. + * + * @param {HtmlExtension[]} htmlExtensions List of HTML extensions. + * @returns {HtmlExtension} A single combined extension. 
+ */ +export function combineHtmlExtensions( + htmlExtensions: HtmlExtension[] +): HtmlExtension +export type NormalizedExtension = + import('micromark-util-types').NormalizedExtension +export type Extension = import('micromark-util-types').Extension +export type Construct = import('micromark-util-types').Construct +export type HtmlExtension = import('micromark-util-types').HtmlExtension diff --git a/_extensions/d2/node_modules/micromark-util-combine-extensions/index.js b/_extensions/d2/node_modules/micromark-util-combine-extensions/index.js new file mode 100644 index 00000000..006f0e06 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-combine-extensions/index.js @@ -0,0 +1,123 @@ +/** + * @typedef {import('micromark-util-types').NormalizedExtension} NormalizedExtension + * @typedef {import('micromark-util-types').Extension} Extension + * @typedef {import('micromark-util-types').Construct} Construct + * @typedef {import('micromark-util-types').HtmlExtension} HtmlExtension + */ + +import {splice} from 'micromark-util-chunked' + +const hasOwnProperty = {}.hasOwnProperty + +/** + * Combine several syntax extensions into one. + * + * @param {Extension[]} extensions List of syntax extensions. + * @returns {NormalizedExtension} A single combined extension. + */ +export function combineExtensions(extensions) { + /** @type {NormalizedExtension} */ + const all = {} + let index = -1 + + while (++index < extensions.length) { + syntaxExtension(all, extensions[index]) + } + + return all +} + +/** + * Merge `extension` into `all`. + * + * @param {NormalizedExtension} all Extension to merge into. + * @param {Extension} extension Extension to merge. + * @returns {void} + */ +function syntaxExtension(all, extension) { + /** @type {string} */ + let hook + + for (hook in extension) { + const maybe = hasOwnProperty.call(all, hook) ? all[hook] : undefined + const left = maybe || (all[hook] = {}) + const right = extension[hook] + /** @type {string} */ + let code + + for (code in right) { + if (!hasOwnProperty.call(left, code)) left[code] = [] + const value = right[code] + constructs( + // @ts-expect-error Looks like a list. + left[code], + Array.isArray(value) ? value : value ? [value] : [] + ) + } + } +} + +/** + * Merge `list` into `existing` (both lists of constructs). + * Mutates `existing`. + * + * @param {unknown[]} existing + * @param {unknown[]} list + * @returns {void} + */ +function constructs(existing, list) { + let index = -1 + /** @type {unknown[]} */ + const before = [] + + while (++index < list.length) { + // @ts-expect-error Looks like an object. + ;(list[index].add === 'after' ? existing : before).push(list[index]) + } + + splice(existing, 0, 0, before) +} + +/** + * Combine several HTML extensions into one. + * + * @param {HtmlExtension[]} htmlExtensions List of HTML extensions. + * @returns {HtmlExtension} A single combined extension. + */ +export function combineHtmlExtensions(htmlExtensions) { + /** @type {HtmlExtension} */ + const handlers = {} + let index = -1 + + while (++index < htmlExtensions.length) { + htmlExtension(handlers, htmlExtensions[index]) + } + + return handlers +} + +/** + * Merge `extension` into `all`. + * + * @param {HtmlExtension} all Extension to merge into. + * @param {HtmlExtension} extension Extension to merge. + * @returns {void} + */ +function htmlExtension(all, extension) { + /** @type {string} */ + let hook + + for (hook in extension) { + const maybe = hasOwnProperty.call(all, hook) ? 
all[hook] : undefined + const left = maybe || (all[hook] = {}) + const right = extension[hook] + /** @type {string} */ + let type + + if (right) { + for (type in right) { + left[type] = right[type] + } + } + } +} diff --git a/_extensions/d2/node_modules/micromark-util-combine-extensions/package.json b/_extensions/d2/node_modules/micromark-util-combine-extensions/package.json new file mode 100644 index 00000000..86934b49 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-combine-extensions/package.json @@ -0,0 +1,53 @@ +{ + "name": "micromark-util-combine-extensions", + "version": "1.0.0", + "description": "micromark utility to combine syntax or html extensions", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "extension", + "combine", + "merge" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-combine-extensions", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "index.d.ts", + "index.js" + ], + "exports": "./index.js", + "dependencies": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-types": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" && tsc && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-combine-extensions/readme.md b/_extensions/d2/node_modules/micromark-util-combine-extensions/readme.md new file mode 100644 index 00000000..2a82be59 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-combine-extensions/readme.md @@ -0,0 +1,139 @@ +# micromark-util-combine-extensions + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility to combine [syntax][] or [html][] extensions. + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`combineExtensions(extensions)`](#combineextensionsextensions) + * [`combineHtmlExtensions(htmlExtensions)`](#combinehtmlextensionshtmlextensions) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-util-combine-extensions +``` + +## Use + +```js +import {gfmAutolinkLiteral} from 'micromark-extension-gfm-autolink-literal' +import {gfmStrikethrough} from 'micromark-extension-gfm-strikethrough' +import {gfmTable} from 'micromark-extension-gfm-table' +import {gfmTaskListItem} from 'micromark-extension-gfm-task-list-item' +import {combineExtensions} from 'micromark-util-combine-extensions' + +const gfm = combineExtensions([gfmAutolinkLiteral, gfmStrikethrough(), gfmTable, gfmTaskListItem]) +``` + +## API + +This module exports the following identifiers: `combineExtensions`, +`combineHtmlExtensions`. +There is no default export. + +### `combineExtensions(extensions)` + +Combine several syntax extensions into one. 
+ +###### Parameters + +* `extensions` (`Extension[]`) — List of syntax extensions + +###### Returns + +A single combined extension (`Extension`). + +### `combineHtmlExtensions(htmlExtensions)` + +Combine several html extensions into one. + +###### Parameters + +* `htmlExtensions` (`HtmlExtension[]`) — List of html extensions + +###### Returns + +A single combined html extension (`HtmlExtension`). + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-combine-extensions.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-combine-extensions + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-combine-extensions.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-combine-extensions + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md + +[syntax]: https://github.com/micromark/micromark#syntaxextension + +[html]: https://github.com/micromark/micromark#htmlextension diff --git a/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/dev/index.d.ts b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/dev/index.d.ts new file mode 100644 index 00000000..b0a37e59 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/dev/index.d.ts @@ -0,0 +1,14 @@ +/** + * Turn the number (in string form as either hexa- or plain decimal) coming from + * a numeric character reference into a character. + * + * @param {string} value + * Value to decode. + * @param {number} base + * Numeric base. 
+ * @returns {string} + */ +export function decodeNumericCharacterReference( + value: string, + base: number +): string diff --git a/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/dev/index.js b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/dev/index.js new file mode 100644 index 00000000..ea4c5c30 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/dev/index.js @@ -0,0 +1,37 @@ +import {codes} from 'micromark-util-symbol/codes.js' +import {values} from 'micromark-util-symbol/values.js' + +/** + * Turn the number (in string form as either hexa- or plain decimal) coming from + * a numeric character reference into a character. + * + * @param {string} value + * Value to decode. + * @param {number} base + * Numeric base. + * @returns {string} + */ +export function decodeNumericCharacterReference(value, base) { + const code = Number.parseInt(value, base) + + if ( + // C0 except for HT, LF, FF, CR, space + code < codes.ht || + code === codes.vt || + (code > codes.cr && code < codes.space) || + // Control character (DEL) of the basic block and C1 controls. + (code > codes.tilde && code < 160) || + // Lone high surrogates and low surrogates. + (code > 55295 && code < 57344) || + // Noncharacters. + (code > 64975 && code < 65008) || + (code & 65535) === 65535 || + (code & 65535) === 65534 || + // Out of range + code > 1114111 + ) { + return values.replacementCharacter + } + + return String.fromCharCode(code) +} diff --git a/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/index.d.ts b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/index.d.ts new file mode 100644 index 00000000..b0a37e59 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/index.d.ts @@ -0,0 +1,14 @@ +/** + * Turn the number (in string form as either hexa- or plain decimal) coming from + * a numeric character reference into a character. + * + * @param {string} value + * Value to decode. + * @param {number} base + * Numeric base. + * @returns {string} + */ +export function decodeNumericCharacterReference( + value: string, + base: number +): string diff --git a/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/index.js b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/index.js new file mode 100644 index 00000000..110e3947 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/index.js @@ -0,0 +1,30 @@ +/** + * Turn the number (in string form as either hexa- or plain decimal) coming from + * a numeric character reference into a character. + * + * @param {string} value + * Value to decode. + * @param {number} base + * Numeric base. + * @returns {string} + */ +export function decodeNumericCharacterReference(value, base) { + const code = Number.parseInt(value, base) + + if ( + // C0 except for HT, LF, FF, CR, space + code < 9 || + code === 11 || + (code > 13 && code < 32) || // Control character (DEL) of the basic block and C1 controls. + (code > 126 && code < 160) || // Lone high surrogates and low surrogates. + (code > 55295 && code < 57344) || // Noncharacters. 
+ (code > 64975 && code < 65008) || + (code & 65535) === 65535 || + (code & 65535) === 65534 || // Out of range + code > 1114111 + ) { + return '\uFFFD' + } + + return String.fromCharCode(code) +} diff --git a/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/package.json b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/package.json new file mode 100644 index 00000000..8831cd4e --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/package.json @@ -0,0 +1,58 @@ +{ + "name": "micromark-util-decode-numeric-character-reference", + "version": "1.0.0", + "description": "micromark utility to decode numeric character references", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "decode", + "numeric", + "number", + "character", + "reference" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-decode-numeric-character-reference", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-util-symbol": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/readme.md b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/readme.md new file mode 100644 index 00000000..86c8a6b7 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-numeric-character-reference/readme.md @@ -0,0 +1,124 @@ +# micromark-util-decode-numeric-character-reference + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility to decode numeric character references. + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`decodeNumericCharacterReference(value)`](#decodenumericcharacterreferencevalue) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-util-decode-numeric-character-reference +``` + +## Use + +```js +import {decodeNumericCharacterReference} from 'micromark-util-decode-numeric-character-reference' + +decodeNumericCharacterReference('41', 16) // 'A' +decodeNumericCharacterReference('65', 10) // 'A' +decodeNumericCharacterReference('A', 16) // '\n' +decodeNumericCharacterReference('7F', 16) // '�' - Control +decodeNumericCharacterReference('110000', 16) // '�' - Out of range +``` + +## API + +This module exports the following identifiers: +`decodeNumericCharacterReference`. +There is no default export. 
+ +### `decodeNumericCharacterReference(value)` + +Sort of like `String.fromCharCode(Number.parseInt(value, base))`, +but makes non-characters and control characters safe. + +###### Parameters + +* `value` (`string`) — Value to decode. +* `base` (`number`, probably `10` or `16`) — Numeric base. + +###### Returns + +`string` — Character code. + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-normalize-identifier.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-normalize-identifier + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-normalize-identifier.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-normalize-identifier + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-util-decode-string/dev/index.d.ts b/_extensions/d2/node_modules/micromark-util-decode-string/dev/index.d.ts new file mode 100644 index 00000000..6f0f4889 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-string/dev/index.d.ts @@ -0,0 +1,10 @@ +/** + * Utility to decode markdown strings (which occur in places such as fenced + * code info strings, destinations, labels, and titles). + * The “string” content type allows character escapes and -references. + * This decodes those. 
+ * + * @param {string} value + * @returns {string} + */ +export function decodeString(value: string): string diff --git a/_extensions/d2/node_modules/micromark-util-decode-string/dev/index.js b/_extensions/d2/node_modules/micromark-util-decode-string/dev/index.js new file mode 100644 index 00000000..eb20ca4c --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-string/dev/index.js @@ -0,0 +1,47 @@ +import {decodeNamedCharacterReference} from 'decode-named-character-reference' +import {decodeNumericCharacterReference} from 'micromark-util-decode-numeric-character-reference' +import {codes} from 'micromark-util-symbol/codes.js' +import {constants} from 'micromark-util-symbol/constants.js' + +const characterEscapeOrReference = + /\\([!-/:-@[-`{-~])|&(#(?:\d{1,7}|x[\da-f]{1,6})|[\da-z]{1,31});/gi + +/** + * Utility to decode markdown strings (which occur in places such as fenced + * code info strings, destinations, labels, and titles). + * The “string” content type allows character escapes and -references. + * This decodes those. + * + * @param {string} value + * @returns {string} + */ +export function decodeString(value) { + return value.replace(characterEscapeOrReference, decode) +} + +/** + * @param {string} $0 + * @param {string} $1 + * @param {string} $2 + * @returns {string} + */ +function decode($0, $1, $2) { + if ($1) { + // Escape. + return $1 + } + + // Reference. + const head = $2.charCodeAt(0) + + if (head === codes.numberSign) { + const head = $2.charCodeAt(1) + const hex = head === codes.lowercaseX || head === codes.uppercaseX + return decodeNumericCharacterReference( + $2.slice(hex ? 2 : 1), + hex ? constants.numericBaseHexadecimal : constants.numericBaseDecimal + ) + } + + return decodeNamedCharacterReference($2) || $0 +} diff --git a/_extensions/d2/node_modules/micromark-util-decode-string/index.d.ts b/_extensions/d2/node_modules/micromark-util-decode-string/index.d.ts new file mode 100644 index 00000000..6f0f4889 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-string/index.d.ts @@ -0,0 +1,10 @@ +/** + * Utility to decode markdown strings (which occur in places such as fenced + * code info strings, destinations, labels, and titles). + * The “string” content type allows character escapes and -references. + * This decodes those. + * + * @param {string} value + * @returns {string} + */ +export function decodeString(value: string): string diff --git a/_extensions/d2/node_modules/micromark-util-decode-string/index.js b/_extensions/d2/node_modules/micromark-util-decode-string/index.js new file mode 100644 index 00000000..c70e6bbe --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-string/index.js @@ -0,0 +1,40 @@ +import {decodeNamedCharacterReference} from 'decode-named-character-reference' +import {decodeNumericCharacterReference} from 'micromark-util-decode-numeric-character-reference' +const characterEscapeOrReference = + /\\([!-/:-@[-`{-~])|&(#(?:\d{1,7}|x[\da-f]{1,6})|[\da-z]{1,31});/gi +/** + * Utility to decode markdown strings (which occur in places such as fenced + * code info strings, destinations, labels, and titles). + * The “string” content type allows character escapes and -references. + * This decodes those. 
+ * + * @param {string} value + * @returns {string} + */ + +export function decodeString(value) { + return value.replace(characterEscapeOrReference, decode) +} +/** + * @param {string} $0 + * @param {string} $1 + * @param {string} $2 + * @returns {string} + */ + +function decode($0, $1, $2) { + if ($1) { + // Escape. + return $1 + } // Reference. + + const head = $2.charCodeAt(0) + + if (head === 35) { + const head = $2.charCodeAt(1) + const hex = head === 120 || head === 88 + return decodeNumericCharacterReference($2.slice(hex ? 2 : 1), hex ? 16 : 10) + } + + return decodeNamedCharacterReference($2) || $0 +} diff --git a/_extensions/d2/node_modules/micromark-util-decode-string/package.json b/_extensions/d2/node_modules/micromark-util-decode-string/package.json new file mode 100644 index 00000000..048564b7 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-string/package.json @@ -0,0 +1,61 @@ +{ + "name": "micromark-util-decode-string", + "version": "1.0.2", + "description": "micromark utility to decode markdown strings", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "decode", + "character", + "reference", + "escape", + "string" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-decode-string", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "decode-named-character-reference": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-decode-string/readme.md b/_extensions/d2/node_modules/micromark-util-decode-string/readme.md new file mode 100644 index 00000000..20e14027 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-decode-string/readme.md @@ -0,0 +1,122 @@ +# micromark-util-decode-string + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility to decode markdown strings. 
+ +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`decodeString(value)`](#decodestringvalue) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-util-decode-string +``` + +## Use + +```js +import {decodeString} from 'micromark-util-decode-string' + +decodeString('a ; b') // 'a ; b' +decodeString('a \\; b') // 'a ; b' +decodeString('a ; b') // 'a ; b' +``` + +## API + +This module exports the following identifiers: `decodeString`. +There is no default export. + +### `decodeString(value)` + +micromark utility to decode markdown strings (which occur in places such as +fenced code info strings, destinations, labels, and titles). +The “string” content type allows character escapes and -references. +This decodes those. + +###### Parameters + +* `value` (`string`) — Value to decode. + +###### Returns + +`string` — Decoded value. + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-normalize-identifier.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-normalize-identifier + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-normalize-identifier.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-normalize-identifier + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-util-encode/index.d.ts b/_extensions/d2/node_modules/micromark-util-encode/index.d.ts new file mode 100644 index 00000000..a22db903 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-encode/index.d.ts @@ -0,0 +1,11 @@ +/** + * Encode only the dangerous HTML characters. + * + * This ensures that certain characters which have special meaning in HTML are + * dealt with. + * Technically, we can skip `>` and `"` in many cases, but CM includes them. 
+ * + * @param {string} value + * @returns {string} + */ +export function encode(value: string): string diff --git a/_extensions/d2/node_modules/micromark-util-encode/index.js b/_extensions/d2/node_modules/micromark-util-encode/index.js new file mode 100644 index 00000000..259f4b33 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-encode/index.js @@ -0,0 +1,24 @@ +const characterReferences = {'"': 'quot', '&': 'amp', '<': 'lt', '>': 'gt'} + +/** + * Encode only the dangerous HTML characters. + * + * This ensures that certain characters which have special meaning in HTML are + * dealt with. + * Technically, we can skip `>` and `"` in many cases, but CM includes them. + * + * @param {string} value + * @returns {string} + */ +export function encode(value) { + return value.replace(/["&<>]/g, replace) + + /** + * @param {string} value + * @returns {string} + */ + function replace(value) { + // @ts-expect-error Hush, it’s fine. + return '&' + characterReferences[value] + ';' + } +} diff --git a/_extensions/d2/node_modules/micromark-util-encode/package.json b/_extensions/d2/node_modules/micromark-util-encode/package.json new file mode 100644 index 00000000..cb996b5a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-encode/package.json @@ -0,0 +1,48 @@ +{ + "name": "micromark-util-encode", + "version": "1.0.1", + "description": "micromark utility to encode dangerous html characters", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "html", + "encode" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-encode", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "index.d.ts", + "index.js" + ], + "exports": "./index.js", + "scripts": { + "build": "rimraf \"*.d.ts\" && tsc && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-encode/readme.md b/_extensions/d2/node_modules/micromark-util-encode/readme.md new file mode 100644 index 00000000..0d20dcb2 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-encode/readme.md @@ -0,0 +1,121 @@ +# micromark-util-encode + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility to encode dangerous html characters. + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`encode(value)`](#encodevalue) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-util-encode +``` + +## Use + +```js +import {encode} from 'micromark-util-encode' + +encode('<3') // '<3' +``` + +## API + +This module exports the following identifiers: `encode`. +There is no default export. + +### `encode(value)` + +Encode only the dangerous HTML characters. 
+ +This ensures that certain characters which have special meaning in HTML are +dealt with. +Technically, we can skip `>` and `"` in many cases, but CM includes them. + +###### Parameters + +* `value` (`string`) — Value to encode. + +###### Returns + +`string` — Encoded value. + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-encode.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-encode + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-encode.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-encode + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-util-html-tag-name/index.d.ts b/_extensions/d2/node_modules/micromark-util-html-tag-name/index.d.ts new file mode 100644 index 00000000..467e0291 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-html-tag-name/index.d.ts @@ -0,0 +1,22 @@ +/** + * List of lowercase HTML tag names which when parsing HTML (flow), result + * in more relaxed rules (condition 6): because they are known blocks, the + * HTML-like syntax doesn’t have to be strictly parsed. + * For tag names not in this list, a more strict algorithm (condition 7) is used + * to detect whether the HTML-like syntax is seen as HTML (flow) or not. + * + * This is copied from: + * . + */ +export const htmlBlockNames: string[] +/** + * List of lowercase HTML tag names which when parsing HTML (flow), result in + * HTML that can include lines w/o exiting, until a closing tag also in this + * list is found (condition 1). + * + * This module is copied from: + * . + * + * Note that `textarea` was added in `CommonMark@0.30`. 
+ */ +export const htmlRawNames: string[] diff --git a/_extensions/d2/node_modules/micromark-util-html-tag-name/index.js b/_extensions/d2/node_modules/micromark-util-html-tag-name/index.js new file mode 100644 index 00000000..3bb5cd1c --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-html-tag-name/index.js @@ -0,0 +1,85 @@ +/** + * List of lowercase HTML tag names which when parsing HTML (flow), result + * in more relaxed rules (condition 6): because they are known blocks, the + * HTML-like syntax doesn’t have to be strictly parsed. + * For tag names not in this list, a more strict algorithm (condition 7) is used + * to detect whether the HTML-like syntax is seen as HTML (flow) or not. + * + * This is copied from: + * . + */ +export const htmlBlockNames = [ + 'address', + 'article', + 'aside', + 'base', + 'basefont', + 'blockquote', + 'body', + 'caption', + 'center', + 'col', + 'colgroup', + 'dd', + 'details', + 'dialog', + 'dir', + 'div', + 'dl', + 'dt', + 'fieldset', + 'figcaption', + 'figure', + 'footer', + 'form', + 'frame', + 'frameset', + 'h1', + 'h2', + 'h3', + 'h4', + 'h5', + 'h6', + 'head', + 'header', + 'hr', + 'html', + 'iframe', + 'legend', + 'li', + 'link', + 'main', + 'menu', + 'menuitem', + 'nav', + 'noframes', + 'ol', + 'optgroup', + 'option', + 'p', + 'param', + 'section', + 'summary', + 'table', + 'tbody', + 'td', + 'tfoot', + 'th', + 'thead', + 'title', + 'tr', + 'track', + 'ul' +] + +/** + * List of lowercase HTML tag names which when parsing HTML (flow), result in + * HTML that can include lines w/o exiting, until a closing tag also in this + * list is found (condition 1). + * + * This module is copied from: + * . + * + * Note that `textarea` was added in `CommonMark@0.30`. + */ +export const htmlRawNames = ['pre', 'script', 'style', 'textarea'] diff --git a/_extensions/d2/node_modules/micromark-util-html-tag-name/package.json b/_extensions/d2/node_modules/micromark-util-html-tag-name/package.json new file mode 100644 index 00000000..70bcc270 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-html-tag-name/package.json @@ -0,0 +1,49 @@ +{ + "name": "micromark-util-html-tag-name", + "version": "1.1.0", + "description": "micromark utility with list of html tag names", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "html", + "tag", + "name" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-html-tag-name", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "index.d.ts", + "index.js" + ], + "exports": "./index.js", + "scripts": { + "build": "rimraf \"*.d.ts\" && tsc && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-html-tag-name/readme.md b/_extensions/d2/node_modules/micromark-util-html-tag-name/readme.md new file mode 100644 index 00000000..fdfef05c --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-html-tag-name/readme.md @@ -0,0 +1,148 @@ +# micromark-util-html-tag-name + +[![Build][build-badge]][build] 
+[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility with list of html tag names. + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`htmlBlockNames`](#htmlblocknames) + * [`htmlRawNames`](#htmlrawnames) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +This package is [ESM only][esm]. +In Node.js (version 12.20+, 14.14+, 16.0+, 18.0+), install with [npm][]: + +```sh +npm install micromark-util-html-tag-name +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {htmlBlockNames, htmlRawNames} from 'https://esm.sh/micromark-util-html-tag-name@1' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {htmlBlockNames, htmlRawNames} from 'micromark-util-html-tag-name' + +console.log(htmlBlockNames) // ['address', 'article', …] +console.log(htmlRawNames) // ['pre', 'script', …] +``` + +## API + +This module exports the following identifiers: `htmlBlockNames`, +`htmlRawNames`. +There is no default export. + +### `htmlBlockNames` + +List of lowercase HTML tag names (`Array`) which when parsing HTML +(flow), result in more relaxed rules (condition 6): because they are known +blocks, the HTML-like syntax doesn’t have to be strictly parsed. +For tag names not in this list, a more strict algorithm (condition 7) is used +to detect whether the HTML-like syntax is seen as HTML (flow) or not. + +This is copied from: . + +### `htmlRawNames` + +List of lowercase HTML tag names (`Array`) which when parsing HTML +(flow), result in HTML that can include lines w/o exiting, until a closing tag +also in this list is found (condition 1). + +This is copied from: +. + +Note that `textarea` was added in `CommonMark@0.30`. + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. 
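
A small usage sketch of the two exported lists described above (illustrative only, not part of the vendored package files):

```js
import {htmlBlockNames, htmlRawNames} from 'micromark-util-html-tag-name'

// A tag name in `htmlBlockNames` triggers the relaxed "known block" rules
// (condition 6) when micromark parses HTML (flow).
console.log(htmlBlockNames.includes('blockquote')) // true

// A tag name in `htmlRawNames` starts raw HTML that runs until a closing
// tag from the same list is found (condition 1).
console.log(htmlRawNames.includes('script')) // true
console.log(htmlRawNames.includes('span')) // false
```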
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-html-tag-name.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-html-tag-name + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-html-tag-name.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-html-tag-name + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-util-normalize-identifier/dev/index.d.ts b/_extensions/d2/node_modules/micromark-util-normalize-identifier/dev/index.d.ts new file mode 100644 index 00000000..677a11a7 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-normalize-identifier/dev/index.d.ts @@ -0,0 +1,7 @@ +/** + * Normalize an identifier (such as used in definitions). + * + * @param {string} value + * @returns {string} + */ +export function normalizeIdentifier(value: string): string diff --git a/_extensions/d2/node_modules/micromark-util-normalize-identifier/dev/index.js b/_extensions/d2/node_modules/micromark-util-normalize-identifier/dev/index.js new file mode 100644 index 00000000..185582ce --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-normalize-identifier/dev/index.js @@ -0,0 +1,25 @@ +import {values} from 'micromark-util-symbol/values.js' + +/** + * Normalize an identifier (such as used in definitions). + * + * @param {string} value + * @returns {string} + */ +export function normalizeIdentifier(value) { + return ( + value + // Collapse Markdown whitespace. + .replace(/[\t\n\r ]+/g, values.space) + // Trim. + .replace(/^ | $/g, '') + // Some characters are considered “uppercase”, but if their lowercase + // counterpart is uppercased will result in a different uppercase + // character. + // Hence, to get that form, we perform both lower- and uppercase. + // Upper case makes sure keys will not interact with default prototypal + // methods: no method is uppercase. 
+ .toLowerCase() + .toUpperCase() + ) +} diff --git a/_extensions/d2/node_modules/micromark-util-normalize-identifier/index.d.ts b/_extensions/d2/node_modules/micromark-util-normalize-identifier/index.d.ts new file mode 100644 index 00000000..677a11a7 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-normalize-identifier/index.d.ts @@ -0,0 +1,7 @@ +/** + * Normalize an identifier (such as used in definitions). + * + * @param {string} value + * @returns {string} + */ +export function normalizeIdentifier(value: string): string diff --git a/_extensions/d2/node_modules/micromark-util-normalize-identifier/index.js b/_extensions/d2/node_modules/micromark-util-normalize-identifier/index.js new file mode 100644 index 00000000..a5dbd0cc --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-normalize-identifier/index.js @@ -0,0 +1,20 @@ +/** + * Normalize an identifier (such as used in definitions). + * + * @param {string} value + * @returns {string} + */ +export function normalizeIdentifier(value) { + return ( + value // Collapse Markdown whitespace. + .replace(/[\t\n\r ]+/g, ' ') // Trim. + .replace(/^ | $/g, '') // Some characters are considered “uppercase”, but if their lowercase + // counterpart is uppercased will result in a different uppercase + // character. + // Hence, to get that form, we perform both lower- and uppercase. + // Upper case makes sure keys will not interact with default prototypal + // methods: no method is uppercase. + .toLowerCase() + .toUpperCase() + ) +} diff --git a/_extensions/d2/node_modules/micromark-util-normalize-identifier/package.json b/_extensions/d2/node_modules/micromark-util-normalize-identifier/package.json new file mode 100644 index 00000000..bf6aee47 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-normalize-identifier/package.json @@ -0,0 +1,56 @@ +{ + "name": "micromark-util-normalize-identifier", + "version": "1.0.0", + "description": "micromark utility normalize identifiers (as found in references, definitions)", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "normalize", + "id", + "identifier" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-normalize-identifier", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-util-symbol": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-normalize-identifier/readme.md b/_extensions/d2/node_modules/micromark-util-normalize-identifier/readme.md new file mode 100644 index 00000000..5791c187 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-normalize-identifier/readme.md @@ -0,0 +1,129 @@ +# micromark-util-normalize-identifier + +[![Build][build-badge]][build] 
+[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility normalize identifiers (as found in references, definitions). + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`normalizeIdentifier(value)`](#normalizeidentifiervalue) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-util-normalize-identifier +``` + +## Use + +```js +import {normalizeIdentifier} from 'micromark-util-normalize-identifier' + +normalizeIdentifier(' a ') // 'A' +normalizeIdentifier('a\t\r\nb') // 'A B' +normalizeIdentifier('ТОЛПОЙ') // 'ТОЛПОЙ' +normalizeIdentifier('Толпой') // 'ТОЛПОЙ' +``` + +## API + +This module exports the following identifiers: `normalizeIdentifier`. +There is no default export. + +### `normalizeIdentifier(value)` + +Normalize an identifier (such as used in definitions). +Collapse Markdown whitespace, trim, and then lower- and uppercase. + +Some characters are considered “uppercase”, such as U+03F4 (`ϴ`), but if their +lowercase counterpart (U+03B8 (`θ`)) is uppercased will result in a different +uppercase character (U+0398 (`Θ`)). +Hence, to get that form, we perform both lower- and uppercase. + +Using uppercase last makes sure keys will not interact with default prototypal +methods: no method is uppercase. + +###### Parameters + +* `value` (`string`) — Identifier to normalize. + +###### Returns + +`string` — Normalized value. + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. 
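+
+## Example
+
+A short, self-contained sketch of the typical use: keying a definition store
+by normalized labels. The `definitions` object below is illustrative only and
+not part of this package:
+
+```js
+import {normalizeIdentifier} from 'micromark-util-normalize-identifier'
+
+// Store definitions under their normalized identifier.
+const definitions = {}
+
+definitions[normalizeIdentifier('Толпой')] = 'https://example.com/a'
+definitions[normalizeIdentifier('  my   label ')] = 'https://example.com/b'
+
+// References normalize the same way, so case and whitespace differences
+// between a definition and a reference do not matter.
+console.log(definitions[normalizeIdentifier('ТОЛПОЙ')]) // => 'https://example.com/a'
+console.log(definitions[normalizeIdentifier('My Label')]) // => 'https://example.com/b'
+
+// Because identifiers are uppercased last, a key can never equal a default
+// prototype method name (no `Object.prototype` method is uppercase), so a
+// reference such as `[constructor]` misses instead of hitting the prototype.
+console.log(definitions[normalizeIdentifier('constructor')]) // => undefined
+```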
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-normalize-identifier.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-normalize-identifier + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-normalize-identifier.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-normalize-identifier + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-util-resolve-all/index.d.ts b/_extensions/d2/node_modules/micromark-util-resolve-all/index.d.ts new file mode 100644 index 00000000..2f44355d --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-resolve-all/index.d.ts @@ -0,0 +1,23 @@ +/** + * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext + * @typedef {import('micromark-util-types').Event} Event + * @typedef {import('micromark-util-types').Resolver} Resolver + */ +/** + * Call all `resolveAll`s. + * + * @param {{resolveAll?: Resolver}[]} constructs + * @param {Event[]} events + * @param {TokenizeContext} context + * @returns {Event[]} + */ +export function resolveAll( + constructs: { + resolveAll?: Resolver + }[], + events: import('micromark-util-types').Event[], + context: TokenizeContext +): import('micromark-util-types').Event[] +export type TokenizeContext = import('micromark-util-types').TokenizeContext +export type Event = import('micromark-util-types').Event +export type Resolver = import('micromark-util-types').Resolver diff --git a/_extensions/d2/node_modules/micromark-util-resolve-all/index.js b/_extensions/d2/node_modules/micromark-util-resolve-all/index.js new file mode 100644 index 00000000..cf6ac6e5 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-resolve-all/index.js @@ -0,0 +1,30 @@ +/** + * @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext + * @typedef {import('micromark-util-types').Event} Event + * @typedef {import('micromark-util-types').Resolver} Resolver + */ + +/** + * Call all `resolveAll`s. 
+ * + * @param {{resolveAll?: Resolver}[]} constructs + * @param {Event[]} events + * @param {TokenizeContext} context + * @returns {Event[]} + */ +export function resolveAll(constructs, events, context) { + /** @type {Resolver[]} */ + const called = [] + let index = -1 + + while (++index < constructs.length) { + const resolve = constructs[index].resolveAll + + if (resolve && !called.includes(resolve)) { + events = resolve(events, context) + called.push(resolve) + } + } + + return events +} diff --git a/_extensions/d2/node_modules/micromark-util-resolve-all/package.json b/_extensions/d2/node_modules/micromark-util-resolve-all/package.json new file mode 100644 index 00000000..e2226dc2 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-resolve-all/package.json @@ -0,0 +1,50 @@ +{ + "name": "micromark-util-resolve-all", + "version": "1.0.0", + "description": "micromark utility to resolve subtokens", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "resolve" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-resolve-all", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "index.d.ts", + "index.js" + ], + "exports": "./index.js", + "dependencies": { + "micromark-util-types": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" && tsc && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-resolve-all/readme.md b/_extensions/d2/node_modules/micromark-util-resolve-all/readme.md new file mode 100644 index 00000000..b437c92e --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-resolve-all/readme.md @@ -0,0 +1,182 @@ +# micromark-util-resolve-all + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility to resolve subtokens. + +[Resolvers][resolver] are functions that take events and manipulate them. +This is needed for example because media (links, images) and attention (strong, +italic) aren’t parsed left-to-right. +Instead, their openings and closings are parsed, and when done, their openings +and closings are matched, and left overs are turned into plain text. +Because media and attention can’t overlap, we need to perform that operation +when one closing matches an opening, too. 
+ +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`resolveAll(constructs, events, context)`](#resolveallconstructs-events-context) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-util-resolve-all +``` + +## Use + +```js +import {push} from 'micromark-util-chunked' +import {resolveAll} from 'micromark-util-resolve-all' + +/** + * @type {Resolver} + */ +function resolveAllAttention(events, context) { + // … + + // Walk through all events. + while (++index < events.length) { + // Find a token that can close. + if ( + events[index][0] === 'enter' && + events[index][1].type === 'attentionSequence' && + events[index][1]._close + ) { + open = index + + // Now walk back to find an opener. + while (open--) { + // Find a token that can open the closer. + if ( + // … + ) { + // … + + // Opening. + nextEvents = push(nextEvents, [ + // … + ]) + + // Between. + nextEvents = push( + nextEvents, + resolveAll( + context.parser.constructs.insideSpan.null, + events.slice(open + 1, index), + context + ) + ) + + // Closing. + nextEvents = push(nextEvents, [ + // … + ]) + + // … + } + } + } + } + + // … +} +``` + +## API + +This module exports the following identifiers: `resolveAll`. +There is no default export. + +### `resolveAll(constructs, events, context)` + +Call all `resolveAll`s in `constructs`. + +###### Parameters + +* `constructs` (`Construct[]`) — List of constructs, optionally with + `resolveAll`s +* `events` (`Event[]`) — List of events +* `context` (`TokenizeContext`) — Context used by `tokenize` + +###### Returns + +`Events[]` — Changed events. + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. 
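+
+## Example
+
+The fragment under “Use” elides the interesting parts, so here is a complete,
+runnable toy. The construct and its resolver are contrived for illustration
+(they are not micromark’s own attention resolver); they only show that each
+unique `resolveAll` is called once over the events:
+
+```js
+import {resolveAll} from 'micromark-util-resolve-all'
+
+// Contrived resolver: rewrite leftover `attentionSequence` tokens to `data`.
+function resolveAllAttention(events) {
+  let index = -1
+  while (++index < events.length) {
+    if (events[index][1].type === 'attentionSequence') {
+      events[index][1].type = 'data'
+    }
+  }
+  return events
+}
+
+const constructs = [
+  {resolveAll: resolveAllAttention},
+  {resolveAll: resolveAllAttention}, // Same function: only called once.
+  {} // Constructs without `resolveAll` are skipped.
+]
+
+// Hand-built events; in micromark these come from the tokenizer.
+const events = [
+  ['enter', {type: 'attentionSequence'}, {}],
+  ['exit', {type: 'attentionSequence'}, {}]
+]
+
+// The third argument is normally the tokenize context; `{}` suffices here.
+console.log(resolveAll(constructs, events, {})[0][1].type) // => 'data'
+```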
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-resolve-all.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-resolve-all + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-resolve-all.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-resolve-all + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md + +[resolver]: https://github.com/micromark/micromark/blob/a571c09/packages/micromark-util-types/index.js#L219 diff --git a/_extensions/d2/node_modules/micromark-util-sanitize-uri/dev/index.d.ts b/_extensions/d2/node_modules/micromark-util-sanitize-uri/dev/index.d.ts new file mode 100644 index 00000000..99cd8ecf --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-sanitize-uri/dev/index.d.ts @@ -0,0 +1,33 @@ +/** + * Make a value safe for injection as a URL. + * + * This encodes unsafe characters with percent-encoding and skips already + * encoded sequences (see `normalizeUri` below). + * Further unsafe characters are encoded as character references (see + * `micromark-util-encode`). + * + * Then, a regex of allowed protocols can be given, in which case the URL is + * sanitized. + * For example, `/^(https?|ircs?|mailto|xmpp)$/i` can be used for `a[href]`, + * or `/^https?$/i` for `img[src]`. + * If the URL includes an unknown protocol (one not matched by `protocol`, such + * as a dangerous example, `javascript:`), the value is ignored. + * + * @param {string|undefined} url + * @param {RegExp} [protocol] + * @returns {string} + */ +export function sanitizeUri( + url: string | undefined, + protocol?: RegExp | undefined +): string +/** + * Normalize a URL (such as used in definitions). + * + * Encode unsafe characters with percent-encoding, skipping already encoded + * sequences. 
+ * + * @param {string} value + * @returns {string} + */ +export function normalizeUri(value: string): string diff --git a/_extensions/d2/node_modules/micromark-util-sanitize-uri/dev/index.js b/_extensions/d2/node_modules/micromark-util-sanitize-uri/dev/index.js new file mode 100644 index 00000000..58370110 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-sanitize-uri/dev/index.js @@ -0,0 +1,120 @@ +import {asciiAlphanumeric} from 'micromark-util-character' +import {encode} from 'micromark-util-encode' +import {codes} from 'micromark-util-symbol/codes.js' +import {values} from 'micromark-util-symbol/values.js' + +/** + * Make a value safe for injection as a URL. + * + * This encodes unsafe characters with percent-encoding and skips already + * encoded sequences (see `normalizeUri` below). + * Further unsafe characters are encoded as character references (see + * `micromark-util-encode`). + * + * Then, a regex of allowed protocols can be given, in which case the URL is + * sanitized. + * For example, `/^(https?|ircs?|mailto|xmpp)$/i` can be used for `a[href]`, + * or `/^https?$/i` for `img[src]`. + * If the URL includes an unknown protocol (one not matched by `protocol`, such + * as a dangerous example, `javascript:`), the value is ignored. + * + * @param {string|undefined} url + * @param {RegExp} [protocol] + * @returns {string} + */ +export function sanitizeUri(url, protocol) { + const value = encode(normalizeUri(url || '')) + + if (!protocol) { + return value + } + + const colon = value.indexOf(':') + const questionMark = value.indexOf('?') + const numberSign = value.indexOf('#') + const slash = value.indexOf('/') + + if ( + // If there is no protocol, it’s relative. + colon < 0 || + // If the first colon is after a `?`, `#`, or `/`, it’s not a protocol. + (slash > -1 && colon > slash) || + (questionMark > -1 && colon > questionMark) || + (numberSign > -1 && colon > numberSign) || + // It is a protocol, it should be allowed. + protocol.test(value.slice(0, colon)) + ) { + return value + } + + return '' +} + +/** + * Normalize a URL (such as used in definitions). + * + * Encode unsafe characters with percent-encoding, skipping already encoded + * sequences. + * + * @param {string} value + * @returns {string} + */ +export function normalizeUri(value) { + /** @type {Array} */ + const result = [] + let index = -1 + let start = 0 + let skip = 0 + + while (++index < value.length) { + const code = value.charCodeAt(index) + /** @type {string} */ + let replace = '' + + // A correct percent encoded value. + if ( + code === codes.percentSign && + asciiAlphanumeric(value.charCodeAt(index + 1)) && + asciiAlphanumeric(value.charCodeAt(index + 2)) + ) { + skip = 2 + } + // ASCII. + else if (code < 128) { + if (!/[!#$&-;=?-Z_a-z~]/.test(String.fromCharCode(code))) { + replace = String.fromCharCode(code) + } + } + // Astral. + else if (code > 55295 && code < 57344) { + const next = value.charCodeAt(index + 1) + + // A correct surrogate pair. + if (code < 56320 && next > 56319 && next < 57344) { + replace = String.fromCharCode(code, next) + skip = 1 + } + // Lone surrogate. + else { + replace = values.replacementCharacter + } + } + // Unicode. 
+ else { + replace = String.fromCharCode(code) + } + + if (replace) { + result.push(value.slice(start, index), encodeURIComponent(replace)) + start = index + skip + 1 + replace = '' + } + + if (skip) { + index += skip + skip = 0 + } + } + + return result.join('') + value.slice(start) +} diff --git a/_extensions/d2/node_modules/micromark-util-sanitize-uri/index.d.ts b/_extensions/d2/node_modules/micromark-util-sanitize-uri/index.d.ts new file mode 100644 index 00000000..99cd8ecf --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-sanitize-uri/index.d.ts @@ -0,0 +1,33 @@ +/** + * Make a value safe for injection as a URL. + * + * This encodes unsafe characters with percent-encoding and skips already + * encoded sequences (see `normalizeUri` below). + * Further unsafe characters are encoded as character references (see + * `micromark-util-encode`). + * + * Then, a regex of allowed protocols can be given, in which case the URL is + * sanitized. + * For example, `/^(https?|ircs?|mailto|xmpp)$/i` can be used for `a[href]`, + * or `/^https?$/i` for `img[src]`. + * If the URL includes an unknown protocol (one not matched by `protocol`, such + * as a dangerous example, `javascript:`), the value is ignored. + * + * @param {string|undefined} url + * @param {RegExp} [protocol] + * @returns {string} + */ +export function sanitizeUri( + url: string | undefined, + protocol?: RegExp | undefined +): string +/** + * Normalize a URL (such as used in definitions). + * + * Encode unsafe characters with percent-encoding, skipping already encoded + * sequences. + * + * @param {string} value + * @returns {string} + */ +export function normalizeUri(value: string): string diff --git a/_extensions/d2/node_modules/micromark-util-sanitize-uri/index.js b/_extensions/d2/node_modules/micromark-util-sanitize-uri/index.js new file mode 100644 index 00000000..758970bf --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-sanitize-uri/index.js @@ -0,0 +1,111 @@ +import {asciiAlphanumeric} from 'micromark-util-character' +import {encode} from 'micromark-util-encode' + +/** + * Make a value safe for injection as a URL. + * + * This encodes unsafe characters with percent-encoding and skips already + * encoded sequences (see `normalizeUri` below). + * Further unsafe characters are encoded as character references (see + * `micromark-util-encode`). + * + * Then, a regex of allowed protocols can be given, in which case the URL is + * sanitized. + * For example, `/^(https?|ircs?|mailto|xmpp)$/i` can be used for `a[href]`, + * or `/^https?$/i` for `img[src]`. + * If the URL includes an unknown protocol (one not matched by `protocol`, such + * as a dangerous example, `javascript:`), the value is ignored. + * + * @param {string|undefined} url + * @param {RegExp} [protocol] + * @returns {string} + */ +export function sanitizeUri(url, protocol) { + const value = encode(normalizeUri(url || '')) + + if (!protocol) { + return value + } + + const colon = value.indexOf(':') + const questionMark = value.indexOf('?') + const numberSign = value.indexOf('#') + const slash = value.indexOf('/') + + if ( + // If there is no protocol, it’s relative. + colon < 0 || // If the first colon is after a `?`, `#`, or `/`, it’s not a protocol. + (slash > -1 && colon > slash) || + (questionMark > -1 && colon > questionMark) || + (numberSign > -1 && colon > numberSign) || // It is a protocol, it should be allowed. 
+ protocol.test(value.slice(0, colon)) + ) { + return value + } + + return '' +} +/** + * Normalize a URL (such as used in definitions). + * + * Encode unsafe characters with percent-encoding, skipping already encoded + * sequences. + * + * @param {string} value + * @returns {string} + */ + +export function normalizeUri(value) { + /** @type {Array} */ + const result = [] + let index = -1 + let start = 0 + let skip = 0 + + while (++index < value.length) { + const code = value.charCodeAt(index) + /** @type {string} */ + + let replace = '' // A correct percent encoded value. + + if ( + code === 37 && + asciiAlphanumeric(value.charCodeAt(index + 1)) && + asciiAlphanumeric(value.charCodeAt(index + 2)) + ) { + skip = 2 + } // ASCII. + else if (code < 128) { + if (!/[!#$&-;=?-Z_a-z~]/.test(String.fromCharCode(code))) { + replace = String.fromCharCode(code) + } + } // Astral. + else if (code > 55295 && code < 57344) { + const next = value.charCodeAt(index + 1) // A correct surrogate pair. + + if (code < 56320 && next > 56319 && next < 57344) { + replace = String.fromCharCode(code, next) + skip = 1 + } // Lone surrogate. + else { + replace = '\uFFFD' + } + } // Unicode. + else { + replace = String.fromCharCode(code) + } + + if (replace) { + result.push(value.slice(start, index), encodeURIComponent(replace)) + start = index + skip + 1 + replace = '' + } + + if (skip) { + index += skip + skip = 0 + } + } + + return result.join('') + value.slice(start) +} diff --git a/_extensions/d2/node_modules/micromark-util-sanitize-uri/package.json b/_extensions/d2/node_modules/micromark-util-sanitize-uri/package.json new file mode 100644 index 00000000..721b93a1 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-sanitize-uri/package.json @@ -0,0 +1,58 @@ +{ + "name": "micromark-util-sanitize-uri", + "version": "1.1.0", + "description": "micromark utility to sanitize urls", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "sanitize", + "clear", + "url" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-sanitize-uri", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-util-character": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-symbol": "^1.0.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-sanitize-uri/readme.md b/_extensions/d2/node_modules/micromark-util-sanitize-uri/readme.md new file mode 100644 index 00000000..ac1c826c --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-sanitize-uri/readme.md @@ -0,0 +1,171 @@ +# micromark-util-sanitize-uri + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] 
+[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility to sanitize urls. + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`sanitizeUri(url[, pattern])`](#sanitizeuriurl-pattern) + * [`normalizeUri(url[, pattern])`](#normalizeuriurl-pattern) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +This package is [ESM only][esm]. +In Node.js (version 12.20+, 14.14+, 16.0+, 18.0+), install with [npm][]: + +```sh +npm install micromark-util-sanitize-uri +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {sanitizeUri} from 'https://esm.sh/micromark-util-sanitize-uri@1' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {sanitizeUri} from 'micromark-util-sanitize-uri' + +sanitizeUri('https://example.com/a&b') // 'https://example.com/a&amp;b' +sanitizeUri('https://example.com/a%b') // 'https://example.com/a%25b' +sanitizeUri('https://example.com/a%20b') // 'https://example.com/a%20b' +sanitizeUri('https://example.com/👍') // 'https://example.com/%F0%9F%91%8D' +sanitizeUri('https://example.com/', /^https?$/i) // 'https://example.com/' +sanitizeUri('javascript:alert(1)', /^https?$/i) // '' +sanitizeUri('./example.jpg', /^https?$/i) // './example.jpg' +sanitizeUri('#a', /^https?$/i) // '#a' +``` + +## API + +This module exports the following identifiers: `sanitizeUri`. +There is no default export. + +### `sanitizeUri(url[, pattern])` + +Make a value safe for injection as a URL. + +This encodes unsafe characters with percent-encoding and skips already +encoded sequences (see `normalizeUri` internally). +Further unsafe characters are encoded as character references (see +`micromark-util-encode`). + +A regex of allowed protocols can be given, in which case the URL is sanitized. +For example, `/^(https?|ircs?|mailto|xmpp)$/i` can be used for `a[href]`, or +`/^https?$/i` for `img[src]` (this is what `github.com` allows). +If the URL includes an unknown protocol (one not matched by `protocol`, such +as a dangerous example, `javascript:`), the value is ignored. + +###### Parameters + +* `url` (`string`) — URI to sanitize. +* `pattern` (`RegExp`, optional) — Allowed protocols. + +###### Returns + +`string` — Sanitized URI. + +### `normalizeUri(url[, pattern])` + +Normalize a URL (such as used in definitions). + +Encode unsafe characters with percent-encoding, skipping already encoded +sequences. + +###### Parameters + +* `url` (`string`) — URI to normalize. + +###### Returns + +`string` — Normalized URI. + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. 
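+
+## Example
+
+A sketch of how an HTML compiler might guard attributes. The `link` helper is
+hypothetical and not part of this package; the protocol pattern is the
+`a[href]` one suggested above:
+
+```js
+import {sanitizeUri} from 'micromark-util-sanitize-uri'
+
+const hrefProtocol = /^(https?|ircs?|mailto|xmpp)$/i
+
+// Hypothetical compiler helper: destinations are sanitized before injection.
+function link(destination, label) {
+  return '<a href="' + sanitizeUri(destination, hrefProtocol) + '">' + label + '</a>'
+}
+
+console.log(link('https://example.com/a&b', 'ok'))
+// => '<a href="https://example.com/a&amp;b">ok</a>'
+console.log(link('javascript:alert(1)', 'blocked'))
+// => '<a href="">blocked</a>'
+console.log(link('./example.jpg', 'relative'))
+// => '<a href="./example.jpg">relative</a>' (no protocol, so it passes through)
+```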
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-sanitize-uri.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-sanitize-uri + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-sanitize-uri.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-sanitize-uri + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-util-subtokenize/dev/index.d.ts b/_extensions/d2/node_modules/micromark-util-subtokenize/dev/index.d.ts new file mode 100644 index 00000000..dd3f9c2a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-subtokenize/dev/index.d.ts @@ -0,0 +1,12 @@ +/** + * Tokenize subcontent. + * + * @param {Event[]} events + * @returns {boolean} + */ +export function subtokenize( + events: import('micromark-util-types').Event[] +): boolean +export type Token = import('micromark-util-types').Token +export type Chunk = import('micromark-util-types').Chunk +export type Event = import('micromark-util-types').Event diff --git a/_extensions/d2/node_modules/micromark-util-subtokenize/dev/index.js b/_extensions/d2/node_modules/micromark-util-subtokenize/dev/index.js new file mode 100644 index 00000000..9573a886 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-subtokenize/dev/index.js @@ -0,0 +1,259 @@ +/** + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').Chunk} Chunk + * @typedef {import('micromark-util-types').Event} Event + */ + +import {ok as assert} from 'uvu/assert' +import {splice} from 'micromark-util-chunked' +import {codes} from 'micromark-util-symbol/codes.js' +import {types} from 'micromark-util-symbol/types.js' + +/** + * Tokenize subcontent. 
+ * + * @param {Event[]} events + * @returns {boolean} + */ +export function subtokenize(events) { + /** @type {Record} */ + const jumps = {} + let index = -1 + /** @type {Event} */ + let event + /** @type {number|undefined} */ + let lineIndex + /** @type {number} */ + let otherIndex + /** @type {Event} */ + let otherEvent + /** @type {Event[]} */ + let parameters + /** @type {Event[]} */ + let subevents + /** @type {boolean|undefined} */ + let more + + while (++index < events.length) { + while (index in jumps) { + index = jumps[index] + } + + event = events[index] + + // Add a hook for the GFM tasklist extension, which needs to know if text + // is in the first content of a list item. + if ( + index && + event[1].type === types.chunkFlow && + events[index - 1][1].type === types.listItemPrefix + ) { + assert(event[1]._tokenizer, 'expected `_tokenizer` on subtokens') + subevents = event[1]._tokenizer.events + otherIndex = 0 + + if ( + otherIndex < subevents.length && + subevents[otherIndex][1].type === types.lineEndingBlank + ) { + otherIndex += 2 + } + + if ( + otherIndex < subevents.length && + subevents[otherIndex][1].type === types.content + ) { + while (++otherIndex < subevents.length) { + if (subevents[otherIndex][1].type === types.content) { + break + } + + if (subevents[otherIndex][1].type === types.chunkText) { + subevents[otherIndex][1]._isInFirstContentOfListItem = true + otherIndex++ + } + } + } + } + + // Enter. + if (event[0] === 'enter') { + if (event[1].contentType) { + Object.assign(jumps, subcontent(events, index)) + index = jumps[index] + more = true + } + } + // Exit. + else if (event[1]._container) { + otherIndex = index + lineIndex = undefined + + while (otherIndex--) { + otherEvent = events[otherIndex] + + if ( + otherEvent[1].type === types.lineEnding || + otherEvent[1].type === types.lineEndingBlank + ) { + if (otherEvent[0] === 'enter') { + if (lineIndex) { + events[lineIndex][1].type = types.lineEndingBlank + } + + otherEvent[1].type = types.lineEnding + lineIndex = otherIndex + } + } else { + break + } + } + + if (lineIndex) { + // Fix position. + event[1].end = Object.assign({}, events[lineIndex][1].start) + + // Switch container exit w/ line endings. + parameters = events.slice(lineIndex, index) + parameters.unshift(event) + splice(events, lineIndex, index - lineIndex + 1, parameters) + } + } + } + + return !more +} + +/** + * Tokenize embedded tokens. + * + * @param {Event[]} events + * @param {number} eventIndex + * @returns {Record} + */ +function subcontent(events, eventIndex) { + const token = events[eventIndex][1] + const context = events[eventIndex][2] + let startPosition = eventIndex - 1 + /** @type {number[]} */ + const startPositions = [] + assert(token.contentType, 'expected `contentType` on subtokens') + const tokenizer = + token._tokenizer || context.parser[token.contentType](token.start) + const childEvents = tokenizer.events + /** @type {[number, number][]} */ + const jumps = [] + /** @type {Record} */ + const gaps = {} + /** @type {Chunk[]} */ + let stream + /** @type {Token|undefined} */ + let previous + let index = -1 + /** @type {Token|undefined} */ + let current = token + let adjust = 0 + let start = 0 + const breaks = [start] + + // Loop forward through the linked tokens to pass them in order to the + // subtokenizer. + while (current) { + // Find the position of the event for this token. + while (events[++startPosition][1] !== current) { + // Empty. 
+ } + + assert( + !previous || current.previous === previous, + 'expected previous to match' + ) + assert(!previous || previous.next === current, 'expected next to match') + + startPositions.push(startPosition) + + if (!current._tokenizer) { + stream = context.sliceStream(current) + + if (!current.next) { + stream.push(codes.eof) + } + + if (previous) { + tokenizer.defineSkip(current.start) + } + + if (current._isInFirstContentOfListItem) { + tokenizer._gfmTasklistFirstContentOfListItem = true + } + + tokenizer.write(stream) + + if (current._isInFirstContentOfListItem) { + tokenizer._gfmTasklistFirstContentOfListItem = undefined + } + } + + // Unravel the next token. + previous = current + current = current.next + } + + // Now, loop back through all events (and linked tokens), to figure out which + // parts belong where. + current = token + + while (++index < childEvents.length) { + if ( + // Find a void token that includes a break. + childEvents[index][0] === 'exit' && + childEvents[index - 1][0] === 'enter' && + childEvents[index][1].type === childEvents[index - 1][1].type && + childEvents[index][1].start.line !== childEvents[index][1].end.line + ) { + assert(current, 'expected a current token') + start = index + 1 + breaks.push(start) + // Help GC. + current._tokenizer = undefined + current.previous = undefined + current = current.next + } + } + + // Help GC. + tokenizer.events = [] + + // If there’s one more token (which is the cases for lines that end in an + // EOF), that’s perfect: the last point we found starts it. + // If there isn’t then make sure any remaining content is added to it. + if (current) { + // Help GC. + current._tokenizer = undefined + current.previous = undefined + assert(!current.next, 'expected no next token') + } else { + breaks.pop() + } + + // Now splice the events from the subtokenizer into the current events, + // moving back to front so that splice indices aren’t affected. + index = breaks.length + + while (index--) { + const slice = childEvents.slice(breaks[index], breaks[index + 1]) + const start = startPositions.pop() + assert(start !== undefined, 'expected a start position when splicing') + jumps.unshift([start, start + slice.length - 1]) + splice(events, start, 2, slice) + } + + index = -1 + + while (++index < jumps.length) { + gaps[adjust + jumps[index][0]] = adjust + jumps[index][1] + adjust += jumps[index][1] - jumps[index][0] - 1 + } + + return gaps +} diff --git a/_extensions/d2/node_modules/micromark-util-subtokenize/index.d.ts b/_extensions/d2/node_modules/micromark-util-subtokenize/index.d.ts new file mode 100644 index 00000000..dd3f9c2a --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-subtokenize/index.d.ts @@ -0,0 +1,12 @@ +/** + * Tokenize subcontent. 
+ * + * @param {Event[]} events + * @returns {boolean} + */ +export function subtokenize( + events: import('micromark-util-types').Event[] +): boolean +export type Token = import('micromark-util-types').Token +export type Chunk = import('micromark-util-types').Chunk +export type Event = import('micromark-util-types').Event diff --git a/_extensions/d2/node_modules/micromark-util-subtokenize/index.js b/_extensions/d2/node_modules/micromark-util-subtokenize/index.js new file mode 100644 index 00000000..c52963b7 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-subtokenize/index.js @@ -0,0 +1,247 @@ +/** + * @typedef {import('micromark-util-types').Token} Token + * @typedef {import('micromark-util-types').Chunk} Chunk + * @typedef {import('micromark-util-types').Event} Event + */ +import {splice} from 'micromark-util-chunked' + +/** + * Tokenize subcontent. + * + * @param {Event[]} events + * @returns {boolean} + */ +export function subtokenize(events) { + /** @type {Record} */ + const jumps = {} + let index = -1 + /** @type {Event} */ + + let event + /** @type {number|undefined} */ + + let lineIndex + /** @type {number} */ + + let otherIndex + /** @type {Event} */ + + let otherEvent + /** @type {Event[]} */ + + let parameters + /** @type {Event[]} */ + + let subevents + /** @type {boolean|undefined} */ + + let more + + while (++index < events.length) { + while (index in jumps) { + index = jumps[index] + } + + event = events[index] // Add a hook for the GFM tasklist extension, which needs to know if text + // is in the first content of a list item. + + if ( + index && + event[1].type === 'chunkFlow' && + events[index - 1][1].type === 'listItemPrefix' + ) { + subevents = event[1]._tokenizer.events + otherIndex = 0 + + if ( + otherIndex < subevents.length && + subevents[otherIndex][1].type === 'lineEndingBlank' + ) { + otherIndex += 2 + } + + if ( + otherIndex < subevents.length && + subevents[otherIndex][1].type === 'content' + ) { + while (++otherIndex < subevents.length) { + if (subevents[otherIndex][1].type === 'content') { + break + } + + if (subevents[otherIndex][1].type === 'chunkText') { + subevents[otherIndex][1]._isInFirstContentOfListItem = true + otherIndex++ + } + } + } + } // Enter. + + if (event[0] === 'enter') { + if (event[1].contentType) { + Object.assign(jumps, subcontent(events, index)) + index = jumps[index] + more = true + } + } // Exit. + else if (event[1]._container) { + otherIndex = index + lineIndex = undefined + + while (otherIndex--) { + otherEvent = events[otherIndex] + + if ( + otherEvent[1].type === 'lineEnding' || + otherEvent[1].type === 'lineEndingBlank' + ) { + if (otherEvent[0] === 'enter') { + if (lineIndex) { + events[lineIndex][1].type = 'lineEndingBlank' + } + + otherEvent[1].type = 'lineEnding' + lineIndex = otherIndex + } + } else { + break + } + } + + if (lineIndex) { + // Fix position. + event[1].end = Object.assign({}, events[lineIndex][1].start) // Switch container exit w/ line endings. + + parameters = events.slice(lineIndex, index) + parameters.unshift(event) + splice(events, lineIndex, index - lineIndex + 1, parameters) + } + } + } + + return !more +} +/** + * Tokenize embedded tokens. 
+ * + * @param {Event[]} events + * @param {number} eventIndex + * @returns {Record} + */ + +function subcontent(events, eventIndex) { + const token = events[eventIndex][1] + const context = events[eventIndex][2] + let startPosition = eventIndex - 1 + /** @type {number[]} */ + + const startPositions = [] + const tokenizer = + token._tokenizer || context.parser[token.contentType](token.start) + const childEvents = tokenizer.events + /** @type {[number, number][]} */ + + const jumps = [] + /** @type {Record} */ + + const gaps = {} + /** @type {Chunk[]} */ + + let stream + /** @type {Token|undefined} */ + + let previous + let index = -1 + /** @type {Token|undefined} */ + + let current = token + let adjust = 0 + let start = 0 + const breaks = [start] // Loop forward through the linked tokens to pass them in order to the + // subtokenizer. + + while (current) { + // Find the position of the event for this token. + while (events[++startPosition][1] !== current) { + // Empty. + } + + startPositions.push(startPosition) + + if (!current._tokenizer) { + stream = context.sliceStream(current) + + if (!current.next) { + stream.push(null) + } + + if (previous) { + tokenizer.defineSkip(current.start) + } + + if (current._isInFirstContentOfListItem) { + tokenizer._gfmTasklistFirstContentOfListItem = true + } + + tokenizer.write(stream) + + if (current._isInFirstContentOfListItem) { + tokenizer._gfmTasklistFirstContentOfListItem = undefined + } + } // Unravel the next token. + + previous = current + current = current.next + } // Now, loop back through all events (and linked tokens), to figure out which + // parts belong where. + + current = token + + while (++index < childEvents.length) { + if ( + // Find a void token that includes a break. + childEvents[index][0] === 'exit' && + childEvents[index - 1][0] === 'enter' && + childEvents[index][1].type === childEvents[index - 1][1].type && + childEvents[index][1].start.line !== childEvents[index][1].end.line + ) { + start = index + 1 + breaks.push(start) // Help GC. + + current._tokenizer = undefined + current.previous = undefined + current = current.next + } + } // Help GC. + + tokenizer.events = [] // If there’s one more token (which is the cases for lines that end in an + // EOF), that’s perfect: the last point we found starts it. + // If there isn’t then make sure any remaining content is added to it. + + if (current) { + // Help GC. + current._tokenizer = undefined + current.previous = undefined + } else { + breaks.pop() + } // Now splice the events from the subtokenizer into the current events, + // moving back to front so that splice indices aren’t affected. 
+ + index = breaks.length + + while (index--) { + const slice = childEvents.slice(breaks[index], breaks[index + 1]) + const start = startPositions.pop() + jumps.unshift([start, start + slice.length - 1]) + splice(events, start, 2, slice) + } + + index = -1 + + while (++index < jumps.length) { + gaps[adjust + jumps[index][0]] = adjust + jumps[index][1] + adjust += jumps[index][1] - jumps[index][0] - 1 + } + + return gaps +} diff --git a/_extensions/d2/node_modules/micromark-util-subtokenize/package.json b/_extensions/d2/node_modules/micromark-util-subtokenize/package.json new file mode 100644 index 00000000..cf0e009f --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-subtokenize/package.json @@ -0,0 +1,57 @@ +{ + "name": "micromark-util-subtokenize", + "version": "1.0.2", + "description": "micromark utility to tokenize subtokens", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "tokenize" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-subtokenize", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "dev/index.d.ts", + "files": [ + "dev/", + "index.d.ts", + "index.js" + ], + "exports": { + "development": "./dev/index.js", + "default": "./index.js" + }, + "dependencies": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" \"{dev/,lib/}**/*.d.ts\" && tsc && micromark-build && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-subtokenize/readme.md b/_extensions/d2/node_modules/micromark-util-subtokenize/readme.md new file mode 100644 index 00000000..1a660a88 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-subtokenize/readme.md @@ -0,0 +1,126 @@ +# micromark-util-subtokenize + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility to tokenize subtokens. + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`subtokenize(events)`](#subtokenizeevents) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-util-subtokenize +``` + +## Use + +```js +import {subtokenize} from 'micromark-util-subtokenize' + +/** + * Content is transparent: it’s parsed right now. That way, definitions are also + * parsed right now: before text in paragraphs (specifically, media) are parsed. + * + * @type {Resolver} + */ +function resolveContent(events) { + subtokenize(events) + return events +} +``` + +## API + +This module exports the following identifiers: `subtokenize`. +There is no default export. + +### `subtokenize(events)` + +Tokenize subcontent. 
+ +###### Parameters + +* `events` (`Event[]`) — List of events + +###### Returns + +`boolean` — Whether subtokens were found. + +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-subtokenize.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-subtokenize + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-subtokenize.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-subtokenize + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md diff --git a/_extensions/d2/node_modules/micromark-util-symbol/codes.d.ts b/_extensions/d2/node_modules/micromark-util-symbol/codes.d.ts new file mode 100644 index 00000000..87540014 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-symbol/codes.d.ts @@ -0,0 +1,138 @@ +export namespace codes { + const carriageReturn: number + const lineFeed: number + const carriageReturnLineFeed: number + const horizontalTab: number + const virtualSpace: number + const eof: null + const nul: number + const soh: number + const stx: number + const etx: number + const eot: number + const enq: number + const ack: number + const bel: number + const bs: number + const ht: number + const lf: number + const vt: number + const ff: number + const cr: number + const so: number + const si: number + const dle: number + const dc1: number + const dc2: number + const dc3: number + const dc4: number + const nak: number + const syn: number + const etb: number + const can: number + const em: number + const sub: number + const esc: number + const fs: number + const gs: number + const rs: number + const us: number + const space: number + const exclamationMark: number + const quotationMark: number + const numberSign: number + const dollarSign: number + const percentSign: number + const ampersand: number + const apostrophe: number + const leftParenthesis: number + const rightParenthesis: number + const asterisk: number + const plusSign: number + const comma: number + 
const dash: number + const dot: number + const slash: number + const digit0: number + const digit1: number + const digit2: number + const digit3: number + const digit4: number + const digit5: number + const digit6: number + const digit7: number + const digit8: number + const digit9: number + const colon: number + const semicolon: number + const lessThan: number + const equalsTo: number + const greaterThan: number + const questionMark: number + const atSign: number + const uppercaseA: number + const uppercaseB: number + const uppercaseC: number + const uppercaseD: number + const uppercaseE: number + const uppercaseF: number + const uppercaseG: number + const uppercaseH: number + const uppercaseI: number + const uppercaseJ: number + const uppercaseK: number + const uppercaseL: number + const uppercaseM: number + const uppercaseN: number + const uppercaseO: number + const uppercaseP: number + const uppercaseQ: number + const uppercaseR: number + const uppercaseS: number + const uppercaseT: number + const uppercaseU: number + const uppercaseV: number + const uppercaseW: number + const uppercaseX: number + const uppercaseY: number + const uppercaseZ: number + const leftSquareBracket: number + const backslash: number + const rightSquareBracket: number + const caret: number + const underscore: number + const graveAccent: number + const lowercaseA: number + const lowercaseB: number + const lowercaseC: number + const lowercaseD: number + const lowercaseE: number + const lowercaseF: number + const lowercaseG: number + const lowercaseH: number + const lowercaseI: number + const lowercaseJ: number + const lowercaseK: number + const lowercaseL: number + const lowercaseM: number + const lowercaseN: number + const lowercaseO: number + const lowercaseP: number + const lowercaseQ: number + const lowercaseR: number + const lowercaseS: number + const lowercaseT: number + const lowercaseU: number + const lowercaseV: number + const lowercaseW: number + const lowercaseX: number + const lowercaseY: number + const lowercaseZ: number + const leftCurlyBrace: number + const verticalBar: number + const rightCurlyBrace: number + const tilde: number + const del: number + const byteOrderMarker: number + const replacementCharacter: number +} diff --git a/_extensions/d2/node_modules/micromark-util-symbol/codes.js b/_extensions/d2/node_modules/micromark-util-symbol/codes.js new file mode 100644 index 00000000..e6db3443 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-symbol/codes.js @@ -0,0 +1,158 @@ +/** + * Character codes. + * + * This module is compiled away! + * + * micromark works based on character codes. + * This module contains constants for the ASCII block and the replacement + * character. + * A couple of them are handled in a special way, such as the line endings + * (CR, LF, and CR+LF, commonly known as end-of-line: EOLs), the tab (horizontal + * tab) and its expansion based on what column it’s at (virtual space), + * and the end-of-file (eof) character. + * As values are preprocessed before handling them, the actual characters LF, + * CR, HT, and NUL (which is present as the replacement character), are + * guaranteed to not exist. + * + * Unicode basic latin block. 
+ */ +export const codes = { + carriageReturn: -5, + lineFeed: -4, + carriageReturnLineFeed: -3, + horizontalTab: -2, + virtualSpace: -1, + eof: null, + nul: 0, + soh: 1, + stx: 2, + etx: 3, + eot: 4, + enq: 5, + ack: 6, + bel: 7, + bs: 8, + ht: 9, // `\t` + lf: 10, // `\n` + vt: 11, // `\v` + ff: 12, // `\f` + cr: 13, // `\r` + so: 14, + si: 15, + dle: 16, + dc1: 17, + dc2: 18, + dc3: 19, + dc4: 20, + nak: 21, + syn: 22, + etb: 23, + can: 24, + em: 25, + sub: 26, + esc: 27, + fs: 28, + gs: 29, + rs: 30, + us: 31, + space: 32, + exclamationMark: 33, // `!` + quotationMark: 34, // `"` + numberSign: 35, // `#` + dollarSign: 36, // `$` + percentSign: 37, // `%` + ampersand: 38, // `&` + apostrophe: 39, // `'` + leftParenthesis: 40, // `(` + rightParenthesis: 41, // `)` + asterisk: 42, // `*` + plusSign: 43, // `+` + comma: 44, // `,` + dash: 45, // `-` + dot: 46, // `.` + slash: 47, // `/` + digit0: 48, // `0` + digit1: 49, // `1` + digit2: 50, // `2` + digit3: 51, // `3` + digit4: 52, // `4` + digit5: 53, // `5` + digit6: 54, // `6` + digit7: 55, // `7` + digit8: 56, // `8` + digit9: 57, // `9` + colon: 58, // `:` + semicolon: 59, // `;` + lessThan: 60, // `<` + equalsTo: 61, // `=` + greaterThan: 62, // `>` + questionMark: 63, // `?` + atSign: 64, // `@` + uppercaseA: 65, // `A` + uppercaseB: 66, // `B` + uppercaseC: 67, // `C` + uppercaseD: 68, // `D` + uppercaseE: 69, // `E` + uppercaseF: 70, // `F` + uppercaseG: 71, // `G` + uppercaseH: 72, // `H` + uppercaseI: 73, // `I` + uppercaseJ: 74, // `J` + uppercaseK: 75, // `K` + uppercaseL: 76, // `L` + uppercaseM: 77, // `M` + uppercaseN: 78, // `N` + uppercaseO: 79, // `O` + uppercaseP: 80, // `P` + uppercaseQ: 81, // `Q` + uppercaseR: 82, // `R` + uppercaseS: 83, // `S` + uppercaseT: 84, // `T` + uppercaseU: 85, // `U` + uppercaseV: 86, // `V` + uppercaseW: 87, // `W` + uppercaseX: 88, // `X` + uppercaseY: 89, // `Y` + uppercaseZ: 90, // `Z` + leftSquareBracket: 91, // `[` + backslash: 92, // `\` + rightSquareBracket: 93, // `]` + caret: 94, // `^` + underscore: 95, // `_` + graveAccent: 96, // `` ` `` + lowercaseA: 97, // `a` + lowercaseB: 98, // `b` + lowercaseC: 99, // `c` + lowercaseD: 100, // `d` + lowercaseE: 101, // `e` + lowercaseF: 102, // `f` + lowercaseG: 103, // `g` + lowercaseH: 104, // `h` + lowercaseI: 105, // `i` + lowercaseJ: 106, // `j` + lowercaseK: 107, // `k` + lowercaseL: 108, // `l` + lowercaseM: 109, // `m` + lowercaseN: 110, // `n` + lowercaseO: 111, // `o` + lowercaseP: 112, // `p` + lowercaseQ: 113, // `q` + lowercaseR: 114, // `r` + lowercaseS: 115, // `s` + lowercaseT: 116, // `t` + lowercaseU: 117, // `u` + lowercaseV: 118, // `v` + lowercaseW: 119, // `w` + lowercaseX: 120, // `x` + lowercaseY: 121, // `y` + lowercaseZ: 122, // `z` + leftCurlyBrace: 123, // `{` + verticalBar: 124, // `|` + rightCurlyBrace: 125, // `}` + tilde: 126, // `~` + del: 127, + // Unicode Specials block. + byteOrderMarker: 65279, + // Unicode Specials block. 
+ replacementCharacter: 65533 // `�` +} diff --git a/_extensions/d2/node_modules/micromark-util-symbol/constants.d.ts b/_extensions/d2/node_modules/micromark-util-symbol/constants.d.ts new file mode 100644 index 00000000..7147496b --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-symbol/constants.d.ts @@ -0,0 +1,36 @@ +export namespace constants { + const attentionSideBefore: number + const attentionSideAfter: number + const atxHeadingOpeningFenceSizeMax: number + const autolinkDomainSizeMax: number + const autolinkSchemeSizeMax: number + const cdataOpeningString: string + const characterGroupWhitespace: number + const characterGroupPunctuation: number + const characterReferenceDecimalSizeMax: number + const characterReferenceHexadecimalSizeMax: number + const characterReferenceNamedSizeMax: number + const codeFencedSequenceSizeMin: number + const contentTypeDocument: string + const contentTypeFlow: string + const contentTypeContent: string + const contentTypeString: string + const contentTypeText: string + const hardBreakPrefixSizeMin: number + const htmlRaw: number + const htmlComment: number + const htmlInstruction: number + const htmlDeclaration: number + const htmlCdata: number + const htmlBasic: number + const htmlComplete: number + const htmlRawSizeMax: number + const linkResourceDestinationBalanceMax: number + const linkReferenceSizeMax: number + const listItemValueSizeMax: number + const numericBaseDecimal: number + const numericBaseHexadecimal: number + const tabSize: number + const thematicBreakMarkerCountMin: number + const v8MaxSafeChunkSize: number +} diff --git a/_extensions/d2/node_modules/micromark-util-symbol/constants.js b/_extensions/d2/node_modules/micromark-util-symbol/constants.js new file mode 100644 index 00000000..e1cd0ced --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-symbol/constants.js @@ -0,0 +1,44 @@ +/** + * This module is compiled away! + * + * Parsing markdown comes with a couple of constants, such as minimum or maximum + * sizes of certain sequences. + * Additionally, there are a couple symbols used inside micromark. + * These are all defined here, but compiled away by scripts. + */ +export const constants = { + attentionSideBefore: 1, // Symbol to mark an attention sequence as before content: `*a` + attentionSideAfter: 2, // Symbol to mark an attention sequence as after content: `a*` + atxHeadingOpeningFenceSizeMax: 6, // 6 number signs is fine, 7 isn’t. + autolinkDomainSizeMax: 63, // 63 characters is fine, 64 is too many. + autolinkSchemeSizeMax: 32, // 32 characters is fine, 33 is too many. + cdataOpeningString: 'CDATA[', // And preceded by `` + htmlComment: 2, // Symbol for `` + htmlInstruction: 3, // Symbol for `` + htmlDeclaration: 4, // Symbol for `` + htmlCdata: 5, // Symbol for `` + htmlBasic: 6, // Symbol for `` + htmlRawSizeMax: 8, // Length of `textarea`. + linkResourceDestinationBalanceMax: 32, // See: , + linkReferenceSizeMax: 999, // See: + listItemValueSizeMax: 10, // See: + numericBaseDecimal: 10, + numericBaseHexadecimal: 0x10, + tabSize: 4, // Tabs have a hard-coded size of 4, per CommonMark. + thematicBreakMarkerCountMin: 3, // At least 3 asterisks, dashes, or underscores are needed. + v8MaxSafeChunkSize: 10000 // V8 (and potentially others) have problems injecting giant arrays into other arrays, hence we operate in chunks. 
+} diff --git a/_extensions/d2/node_modules/micromark-util-symbol/package.json b/_extensions/d2/node_modules/micromark-util-symbol/package.json new file mode 100644 index 00000000..3baa2b50 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-symbol/package.json @@ -0,0 +1,60 @@ +{ + "name": "micromark-util-symbol", + "version": "1.0.1", + "description": "micromark utility with symbols", + "license": "MIT", + "keywords": [ + "micromark", + "util", + "utility", + "symbol" + ], + "repository": "https://github.com/micromark/micromark/tree/main/packages/micromark-util-symbol", + "bugs": "https://github.com/micromark/micromark/issues", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "files": [ + "codes.d.ts", + "codes.js", + "constants.d.ts", + "constants.js", + "types.d.ts", + "types.js", + "values.d.ts", + "values.js" + ], + "exports": { + "./codes": "./codes.js", + "./codes.js": "./codes.js", + "./constants": "./constants.js", + "./constants.js": "./constants.js", + "./types": "./types.js", + "./types.js": "./types.js", + "./values": "./values.js", + "./values.js": "./values.js" + }, + "scripts": { + "build": "rimraf \"*.d.ts\" && tsc && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/micromark-util-symbol/readme.md b/_extensions/d2/node_modules/micromark-util-symbol/readme.md new file mode 100644 index 00000000..5c89ebbc --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-symbol/readme.md @@ -0,0 +1,122 @@ +# micromark-util-symbol + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][bundle-size-badge]][bundle-size] +[![Sponsors][sponsors-badge]][opencollective] +[![Backers][backers-badge]][opencollective] +[![Chat][chat-badge]][chat] + +micromark utility with symbols. + +It’s useful to reference these by name instead of value while developing. +[`micromark-build`][micromark-build] compiles them away for production code. + +## Contents + +* [Install](#install) +* [Use](#use) +* [API](#api) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## Install + +[npm][]: + +```sh +npm install micromark-util-symbol +``` + +## Use + +```js +import {codes} from 'micromark-util-symbol/codes' +import {constants} from 'micromark-util-symbol/constants' +import {types} from 'micromark-util-symbol/types' +import {values} from 'micromark-util-symbol/values' + +console.log(codes.atSign) // 64 +console.log(constants.characterReferenceNamedSizeMax) // 31 +console.log(types.definitionDestinationRaw) // 'definitionDestinationRaw' +console.log(values.atSign) // '@' +``` + +## API + +This package has four entries in its export map: `micromark-util-symbol/codes`, +`micromark-util-symbol/constants`, `micromark-util-symbol/types`, +`micromark-util-symbol/values`. + +Each module exports an identifier with the same name (for example, +`micromark-util-symbol/codes` has `codes`), which is an object mapping strings +to other values. + +Take a peek at the code to learn more! 
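+
+For a feel of how these names read in parser code, here is a small
+illustrative sketch (not part of this package) that checks a character code
+against `codes` instead of a magic number:
+
+```js
+import {codes} from 'micromark-util-symbol/codes'
+import {values} from 'micromark-util-symbol/values'
+
+// Hypothetical helper: is this character code a left curly brace?
+function isLeftCurlyBrace(code) {
+  // Reads better than `code === 123`; `micromark-build` inlines the constant
+  // in production builds, so the named lookup costs nothing there.
+  return code === codes.leftCurlyBrace
+}
+
+console.log(isLeftCurlyBrace(values.leftCurlyBrace.charCodeAt(0))) // => true
+```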
+ +## Security + +See [`security.md`][securitymd] in [`micromark/.github`][health] for how to +submit a security report. + +## Contribute + +See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organisation, or community you agree to +abide by its terms. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg + +[build]: https://github.com/micromark/micromark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg + +[coverage]: https://codecov.io/github/micromark/micromark + +[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-symbol.svg + +[downloads]: https://www.npmjs.com/package/micromark-util-symbol + +[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-symbol.svg + +[bundle-size]: https://bundlephobia.com/result?p=micromark-util-symbol + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[opencollective]: https://opencollective.com/unified + +[npm]: https://docs.npmjs.com/cli/install + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/micromark/micromark/discussions + +[license]: https://github.com/micromark/micromark/blob/main/license + +[author]: https://wooorm.com + +[health]: https://github.com/micromark/.github + +[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md + +[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md + +[support]: https://github.com/micromark/.github/blob/HEAD/support.md + +[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md + +[micromark-build]: https://github.com/micromark/micromark/tree/main/packages/micromark-build diff --git a/_extensions/d2/node_modules/micromark-util-symbol/types.d.ts b/_extensions/d2/node_modules/micromark-util-symbol/types.d.ts new file mode 100644 index 00000000..1dfc5a3d --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-symbol/types.d.ts @@ -0,0 +1,105 @@ +export namespace types { + const data: string + const whitespace: string + const lineEnding: string + const lineEndingBlank: string + const linePrefix: string + const lineSuffix: string + const atxHeading: string + const atxHeadingSequence: string + const atxHeadingText: string + const autolink: string + const autolinkEmail: string + const autolinkMarker: string + const autolinkProtocol: string + const characterEscape: string + const characterEscapeValue: string + const characterReference: string + const characterReferenceMarker: string + const characterReferenceMarkerNumeric: string + const characterReferenceMarkerHexadecimal: string + const characterReferenceValue: string + const codeFenced: string + const codeFencedFence: string + const codeFencedFenceSequence: string + const codeFencedFenceInfo: string + const codeFencedFenceMeta: string + const codeFlowValue: string + const codeIndented: string + const codeText: string + const codeTextData: string + const codeTextPadding: string + const codeTextSequence: string + const content: string + const definition: string + const definitionDestination: string + const definitionDestinationLiteral: string + const definitionDestinationLiteralMarker: string + const 
definitionDestinationRaw: string + const definitionDestinationString: string + const definitionLabel: string + const definitionLabelMarker: string + const definitionLabelString: string + const definitionMarker: string + const definitionTitle: string + const definitionTitleMarker: string + const definitionTitleString: string + const emphasis: string + const emphasisSequence: string + const emphasisText: string + const escapeMarker: string + const hardBreakEscape: string + const hardBreakTrailing: string + const htmlFlow: string + const htmlFlowData: string + const htmlText: string + const htmlTextData: string + const image: string + const label: string + const labelText: string + const labelLink: string + const labelImage: string + const labelMarker: string + const labelImageMarker: string + const labelEnd: string + const link: string + const paragraph: string + const reference: string + const referenceMarker: string + const referenceString: string + const resource: string + const resourceDestination: string + const resourceDestinationLiteral: string + const resourceDestinationLiteralMarker: string + const resourceDestinationRaw: string + const resourceDestinationString: string + const resourceMarker: string + const resourceTitle: string + const resourceTitleMarker: string + const resourceTitleString: string + const setextHeading: string + const setextHeadingText: string + const setextHeadingLine: string + const setextHeadingLineSequence: string + const strong: string + const strongSequence: string + const strongText: string + const thematicBreak: string + const thematicBreakSequence: string + const blockQuote: string + const blockQuotePrefix: string + const blockQuoteMarker: string + const blockQuotePrefixWhitespace: string + const listOrdered: string + const listUnordered: string + const listItemIndent: string + const listItemMarker: string + const listItemPrefix: string + const listItemPrefixWhitespace: string + const listItemValue: string + const chunkDocument: string + const chunkContent: string + const chunkFlow: string + const chunkText: string + const chunkString: string +} diff --git a/_extensions/d2/node_modules/micromark-util-symbol/types.js b/_extensions/d2/node_modules/micromark-util-symbol/types.js new file mode 100644 index 00000000..db72be19 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-symbol/types.js @@ -0,0 +1,451 @@ +/** + * This module is compiled away! + * + * Here is the list of all types of tokens exposed by micromark, with a short + * explanation of what they include and where they are found. + * In picking names, generally, the rule is to be as explicit as possible + * instead of reusing names. + * For example, there is a `definitionDestination` and a `resourceDestination`, + * instead of one shared name. + */ + +export const types = { + // Generic type for data, such as in a title, a destination, etc. + data: 'data', + + // Generic type for syntactic whitespace (tabs, virtual spaces, spaces). + // Such as, between a fenced code fence and an info string. + whitespace: 'whitespace', + + // Generic type for line endings (line feed, carriage return, carriage return + + // line feed). + lineEnding: 'lineEnding', + + // A line ending, but ending a blank line. + lineEndingBlank: 'lineEndingBlank', + + // Generic type for whitespace (tabs, virtual spaces, spaces) at the start of a + // line. + linePrefix: 'linePrefix', + + // Generic type for whitespace (tabs, virtual spaces, spaces) at the end of a + // line. 
+ lineSuffix: 'lineSuffix', + + // Whole ATX heading: + // + // ```markdown + // # + // ## Alpha + // ### Bravo ### + // ``` + // + // Includes `atxHeadingSequence`, `whitespace`, `atxHeadingText`. + atxHeading: 'atxHeading', + + // Sequence of number signs in an ATX heading (`###`). + atxHeadingSequence: 'atxHeadingSequence', + + // Content in an ATX heading (`alpha`). + // Includes text. + atxHeadingText: 'atxHeadingText', + + // Whole autolink (`` or ``) + // Includes `autolinkMarker` and `autolinkProtocol` or `autolinkEmail`. + autolink: 'autolink', + + // Email autolink w/o markers (`admin@example.com`) + autolinkEmail: 'autolinkEmail', + + // Marker around an `autolinkProtocol` or `autolinkEmail` (`<` or `>`). + autolinkMarker: 'autolinkMarker', + + // Protocol autolink w/o markers (`https://example.com`) + autolinkProtocol: 'autolinkProtocol', + + // A whole character escape (`\-`). + // Includes `escapeMarker` and `characterEscapeValue`. + characterEscape: 'characterEscape', + + // The escaped character (`-`). + characterEscapeValue: 'characterEscapeValue', + + // A whole character reference (`&`, `≠`, or `𝌆`). + // Includes `characterReferenceMarker`, an optional + // `characterReferenceMarkerNumeric`, in which case an optional + // `characterReferenceMarkerHexadecimal`, and a `characterReferenceValue`. + characterReference: 'characterReference', + + // The start or end marker (`&` or `;`). + characterReferenceMarker: 'characterReferenceMarker', + + // Mark reference as numeric (`#`). + characterReferenceMarkerNumeric: 'characterReferenceMarkerNumeric', + + // Mark reference as numeric (`x` or `X`). + characterReferenceMarkerHexadecimal: 'characterReferenceMarkerHexadecimal', + + // Value of character reference w/o markers (`amp`, `8800`, or `1D306`). + characterReferenceValue: 'characterReferenceValue', + + // Whole fenced code: + // + // ````markdown + // ```js + // alert(1) + // ``` + // ```` + codeFenced: 'codeFenced', + + // A fenced code fence, including whitespace, sequence, info, and meta + // (` ```js `). + codeFencedFence: 'codeFencedFence', + + // Sequence of grave accent or tilde characters (` ``` `) in a fence. + codeFencedFenceSequence: 'codeFencedFenceSequence', + + // Info word (`js`) in a fence. + // Includes string. + codeFencedFenceInfo: 'codeFencedFenceInfo', + + // Meta words (`highlight="1"`) in a fence. + // Includes string. + codeFencedFenceMeta: 'codeFencedFenceMeta', + + // A line of code. + codeFlowValue: 'codeFlowValue', + + // Whole indented code: + // + // ```markdown + // alert(1) + // ``` + // + // Includes `lineEnding`, `linePrefix`, and `codeFlowValue`. + codeIndented: 'codeIndented', + + // A text code (``` `alpha` ```). + // Includes `codeTextSequence`, `codeTextData`, `lineEnding`, and can include + // `codeTextPadding`. + codeText: 'codeText', + + codeTextData: 'codeTextData', + + // A space or line ending right after or before a tick. + codeTextPadding: 'codeTextPadding', + + // A text code fence (` `` `). + codeTextSequence: 'codeTextSequence', + + // Whole content: + // + // ```markdown + // [a]: b + // c + // = + // d + // ``` + // + // Includes `paragraph` and `definition`. + content: 'content', + // Whole definition: + // + // ```markdown + // [micromark]: https://github.com/micromark/micromark + // ``` + // + // Includes `definitionLabel`, `definitionMarker`, `whitespace`, + // `definitionDestination`, and optionally `lineEnding` and `definitionTitle`. 
+ definition: 'definition', + + // Destination of a definition (`https://github.com/micromark/micromark` or + // ``). + // Includes `definitionDestinationLiteral` or `definitionDestinationRaw`. + definitionDestination: 'definitionDestination', + + // Enclosed destination of a definition + // (``). + // Includes `definitionDestinationLiteralMarker` and optionally + // `definitionDestinationString`. + definitionDestinationLiteral: 'definitionDestinationLiteral', + + // Markers of an enclosed definition destination (`<` or `>`). + definitionDestinationLiteralMarker: 'definitionDestinationLiteralMarker', + + // Unenclosed destination of a definition + // (`https://github.com/micromark/micromark`). + // Includes `definitionDestinationString`. + definitionDestinationRaw: 'definitionDestinationRaw', + + // Text in an destination (`https://github.com/micromark/micromark`). + // Includes string. + definitionDestinationString: 'definitionDestinationString', + + // Label of a definition (`[micromark]`). + // Includes `definitionLabelMarker` and `definitionLabelString`. + definitionLabel: 'definitionLabel', + + // Markers of a definition label (`[` or `]`). + definitionLabelMarker: 'definitionLabelMarker', + + // Value of a definition label (`micromark`). + // Includes string. + definitionLabelString: 'definitionLabelString', + + // Marker between a label and a destination (`:`). + definitionMarker: 'definitionMarker', + + // Title of a definition (`"x"`, `'y'`, or `(z)`). + // Includes `definitionTitleMarker` and optionally `definitionTitleString`. + definitionTitle: 'definitionTitle', + + // Marker around a title of a definition (`"`, `'`, `(`, or `)`). + definitionTitleMarker: 'definitionTitleMarker', + + // Data without markers in a title (`z`). + // Includes string. + definitionTitleString: 'definitionTitleString', + + // Emphasis (`*alpha*`). + // Includes `emphasisSequence` and `emphasisText`. + emphasis: 'emphasis', + + // Sequence of emphasis markers (`*` or `_`). + emphasisSequence: 'emphasisSequence', + + // Emphasis text (`alpha`). + // Includes text. + emphasisText: 'emphasisText', + + // The character escape marker (`\`). + escapeMarker: 'escapeMarker', + + // A hard break created with a backslash (`\\n`). + // Includes `escapeMarker` (does not include the line ending) + hardBreakEscape: 'hardBreakEscape', + + // A hard break created with trailing spaces (` \n`). + // Does not include the line ending. + hardBreakTrailing: 'hardBreakTrailing', + + // Flow HTML: + // + // ```markdown + //
b`). + // Includes `lineEnding`, `htmlTextData`. + htmlText: 'htmlText', + + htmlTextData: 'htmlTextData', + + // Whole image (`![alpha](bravo)`, `![alpha][bravo]`, `![alpha][]`, or + // `![alpha]`). + // Includes `label` and an optional `resource` or `reference`. + image: 'image', + + // Whole link label (`[*alpha*]`). + // Includes `labelLink` or `labelImage`, `labelText`, and `labelEnd`. + label: 'label', + + // Text in an label (`*alpha*`). + // Includes text. + labelText: 'labelText', + + // Start a link label (`[`). + // Includes a `labelMarker`. + labelLink: 'labelLink', + + // Start an image label (`![`). + // Includes `labelImageMarker` and `labelMarker`. + labelImage: 'labelImage', + + // Marker of a label (`[` or `]`). + labelMarker: 'labelMarker', + + // Marker to start an image (`!`). + labelImageMarker: 'labelImageMarker', + + // End a label (`]`). + // Includes `labelMarker`. + labelEnd: 'labelEnd', + + // Whole link (`[alpha](bravo)`, `[alpha][bravo]`, `[alpha][]`, or `[alpha]`). + // Includes `label` and an optional `resource` or `reference`. + link: 'link', + + // Whole paragraph: + // + // ```markdown + // alpha + // bravo. + // ``` + // + // Includes text. + paragraph: 'paragraph', + + // A reference (`[alpha]` or `[]`). + // Includes `referenceMarker` and an optional `referenceString`. + reference: 'reference', + + // A reference marker (`[` or `]`). + referenceMarker: 'referenceMarker', + + // Reference text (`alpha`). + // Includes string. + referenceString: 'referenceString', + + // A resource (`(https://example.com "alpha")`). + // Includes `resourceMarker`, an optional `resourceDestination` with an optional + // `whitespace` and `resourceTitle`. + resource: 'resource', + + // A resource destination (`https://example.com`). + // Includes `resourceDestinationLiteral` or `resourceDestinationRaw`. + resourceDestination: 'resourceDestination', + + // A literal resource destination (``). + // Includes `resourceDestinationLiteralMarker` and optionally + // `resourceDestinationString`. + resourceDestinationLiteral: 'resourceDestinationLiteral', + + // A resource destination marker (`<` or `>`). + resourceDestinationLiteralMarker: 'resourceDestinationLiteralMarker', + + // A raw resource destination (`https://example.com`). + // Includes `resourceDestinationString`. + resourceDestinationRaw: 'resourceDestinationRaw', + + // Resource destination text (`https://example.com`). + // Includes string. + resourceDestinationString: 'resourceDestinationString', + + // A resource marker (`(` or `)`). + resourceMarker: 'resourceMarker', + + // A resource title (`"alpha"`, `'alpha'`, or `(alpha)`). + // Includes `resourceTitleMarker` and optionally `resourceTitleString`. + resourceTitle: 'resourceTitle', + + // A resource title marker (`"`, `'`, `(`, or `)`). + resourceTitleMarker: 'resourceTitleMarker', + + // Resource destination title (`alpha`). + // Includes string. + resourceTitleString: 'resourceTitleString', + + // Whole setext heading: + // + // ```markdown + // alpha + // bravo + // ===== + // ``` + // + // Includes `setextHeadingText`, `lineEnding`, `linePrefix`, and + // `setextHeadingLine`. + setextHeading: 'setextHeading', + + // Content in a setext heading (`alpha\nbravo`). + // Includes text. + setextHeadingText: 'setextHeadingText', + + // Underline in a setext heading, including whitespace suffix (`==`). + // Includes `setextHeadingLineSequence`. 
+ setextHeadingLine: 'setextHeadingLine', + + // Sequence of equals or dash characters in underline in a setext heading (`-`). + setextHeadingLineSequence: 'setextHeadingLineSequence', + + // Strong (`**alpha**`). + // Includes `strongSequence` and `strongText`. + strong: 'strong', + + // Sequence of strong markers (`**` or `__`). + strongSequence: 'strongSequence', + + // Strong text (`alpha`). + // Includes text. + strongText: 'strongText', + + // Whole thematic break: + // + // ```markdown + // * * * + // ``` + // + // Includes `thematicBreakSequence` and `whitespace`. + thematicBreak: 'thematicBreak', + + // A sequence of one or more thematic break markers (`***`). + thematicBreakSequence: 'thematicBreakSequence', + + // Whole block quote: + // + // ```markdown + // > a + // > + // > b + // ``` + // + // Includes `blockQuotePrefix` and flow. + blockQuote: 'blockQuote', + // The `>` or `> ` of a block quote. + blockQuotePrefix: 'blockQuotePrefix', + // The `>` of a block quote prefix. + blockQuoteMarker: 'blockQuoteMarker', + // The optional ` ` of a block quote prefix. + blockQuotePrefixWhitespace: 'blockQuotePrefixWhitespace', + + // Whole unordered list: + // + // ```markdown + // - a + // b + // ``` + // + // Includes `listItemPrefix`, flow, and optionally `listItemIndent` on further + // lines. + listOrdered: 'listOrdered', + + // Whole ordered list: + // + // ```markdown + // 1. a + // b + // ``` + // + // Includes `listItemPrefix`, flow, and optionally `listItemIndent` on further + // lines. + listUnordered: 'listUnordered', + + // The indent of further list item lines. + listItemIndent: 'listItemIndent', + + // A marker, as in, `*`, `+`, `-`, `.`, or `)`. + listItemMarker: 'listItemMarker', + + // The thing that starts a list item, such as `1. `. + // Includes `listItemValue` if ordered, `listItemMarker`, and + // `listItemPrefixWhitespace` (unless followed by a line ending). + listItemPrefix: 'listItemPrefix', + + // The whitespace after a marker. + listItemPrefixWhitespace: 'listItemPrefixWhitespace', + + // The numerical value of an ordered item. 
+ listItemValue: 'listItemValue', + + // Internal types used for subtokenizers, compiled away + chunkDocument: 'chunkDocument', + chunkContent: 'chunkContent', + chunkFlow: 'chunkFlow', + chunkText: 'chunkText', + chunkString: 'chunkString' +} diff --git a/_extensions/d2/node_modules/micromark-util-symbol/values.d.ts b/_extensions/d2/node_modules/micromark-util-symbol/values.d.ts new file mode 100644 index 00000000..25afda5f --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-symbol/values.d.ts @@ -0,0 +1,101 @@ +export namespace values { + const ht: string + const lf: string + const cr: string + const space: string + const exclamationMark: string + const quotationMark: string + const numberSign: string + const dollarSign: string + const percentSign: string + const ampersand: string + const apostrophe: string + const leftParenthesis: string + const rightParenthesis: string + const asterisk: string + const plusSign: string + const comma: string + const dash: string + const dot: string + const slash: string + const digit0: string + const digit1: string + const digit2: string + const digit3: string + const digit4: string + const digit5: string + const digit6: string + const digit7: string + const digit8: string + const digit9: string + const colon: string + const semicolon: string + const lessThan: string + const equalsTo: string + const greaterThan: string + const questionMark: string + const atSign: string + const uppercaseA: string + const uppercaseB: string + const uppercaseC: string + const uppercaseD: string + const uppercaseE: string + const uppercaseF: string + const uppercaseG: string + const uppercaseH: string + const uppercaseI: string + const uppercaseJ: string + const uppercaseK: string + const uppercaseL: string + const uppercaseM: string + const uppercaseN: string + const uppercaseO: string + const uppercaseP: string + const uppercaseQ: string + const uppercaseR: string + const uppercaseS: string + const uppercaseT: string + const uppercaseU: string + const uppercaseV: string + const uppercaseW: string + const uppercaseX: string + const uppercaseY: string + const uppercaseZ: string + const leftSquareBracket: string + const backslash: string + const rightSquareBracket: string + const caret: string + const underscore: string + const graveAccent: string + const lowercaseA: string + const lowercaseB: string + const lowercaseC: string + const lowercaseD: string + const lowercaseE: string + const lowercaseF: string + const lowercaseG: string + const lowercaseH: string + const lowercaseI: string + const lowercaseJ: string + const lowercaseK: string + const lowercaseL: string + const lowercaseM: string + const lowercaseN: string + const lowercaseO: string + const lowercaseP: string + const lowercaseQ: string + const lowercaseR: string + const lowercaseS: string + const lowercaseT: string + const lowercaseU: string + const lowercaseV: string + const lowercaseW: string + const lowercaseX: string + const lowercaseY: string + const lowercaseZ: string + const leftCurlyBrace: string + const verticalBar: string + const rightCurlyBrace: string + const tilde: string + const replacementCharacter: string +} diff --git a/_extensions/d2/node_modules/micromark-util-symbol/values.js b/_extensions/d2/node_modules/micromark-util-symbol/values.js new file mode 100644 index 00000000..4f1d1000 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-symbol/values.js @@ -0,0 +1,107 @@ +// This module is compiled away! 
+// +// While micromark works based on character codes, this module includes the +// string versions of ’em. +// The C0 block, except for LF, CR, HT, and w/ the replacement character added, +// are available here. +export const values = { + ht: '\t', + lf: '\n', + cr: '\r', + space: ' ', + exclamationMark: '!', + quotationMark: '"', + numberSign: '#', + dollarSign: '$', + percentSign: '%', + ampersand: '&', + apostrophe: "'", + leftParenthesis: '(', + rightParenthesis: ')', + asterisk: '*', + plusSign: '+', + comma: ',', + dash: '-', + dot: '.', + slash: '/', + digit0: '0', + digit1: '1', + digit2: '2', + digit3: '3', + digit4: '4', + digit5: '5', + digit6: '6', + digit7: '7', + digit8: '8', + digit9: '9', + colon: ':', + semicolon: ';', + lessThan: '<', + equalsTo: '=', + greaterThan: '>', + questionMark: '?', + atSign: '@', + uppercaseA: 'A', + uppercaseB: 'B', + uppercaseC: 'C', + uppercaseD: 'D', + uppercaseE: 'E', + uppercaseF: 'F', + uppercaseG: 'G', + uppercaseH: 'H', + uppercaseI: 'I', + uppercaseJ: 'J', + uppercaseK: 'K', + uppercaseL: 'L', + uppercaseM: 'M', + uppercaseN: 'N', + uppercaseO: 'O', + uppercaseP: 'P', + uppercaseQ: 'Q', + uppercaseR: 'R', + uppercaseS: 'S', + uppercaseT: 'T', + uppercaseU: 'U', + uppercaseV: 'V', + uppercaseW: 'W', + uppercaseX: 'X', + uppercaseY: 'Y', + uppercaseZ: 'Z', + leftSquareBracket: '[', + backslash: '\\', + rightSquareBracket: ']', + caret: '^', + underscore: '_', + graveAccent: '`', + lowercaseA: 'a', + lowercaseB: 'b', + lowercaseC: 'c', + lowercaseD: 'd', + lowercaseE: 'e', + lowercaseF: 'f', + lowercaseG: 'g', + lowercaseH: 'h', + lowercaseI: 'i', + lowercaseJ: 'j', + lowercaseK: 'k', + lowercaseL: 'l', + lowercaseM: 'm', + lowercaseN: 'n', + lowercaseO: 'o', + lowercaseP: 'p', + lowercaseQ: 'q', + lowercaseR: 'r', + lowercaseS: 's', + lowercaseT: 't', + lowercaseU: 'u', + lowercaseV: 'v', + lowercaseW: 'w', + lowercaseX: 'x', + lowercaseY: 'y', + lowercaseZ: 'z', + leftCurlyBrace: '{', + verticalBar: '|', + rightCurlyBrace: '}', + tilde: '~', + replacementCharacter: '�' +} diff --git a/_extensions/d2/node_modules/micromark-util-types/index.d.ts b/_extensions/d2/node_modules/micromark-util-types/index.d.ts new file mode 100644 index 00000000..77bbfb83 --- /dev/null +++ b/_extensions/d2/node_modules/micromark-util-types/index.d.ts @@ -0,0 +1,738 @@ +/** + * A character code. + * + * This is often the same as what `String#charCodeAt()` yields but micromark + * adds meaning to certain other values. + * + * `null` represents the end of the input stream (called eof). + * Negative integers are used instead of certain sequences of characters (such + * as line endings and tabs). + */ +export type Code = number | null +/** + * A chunk is either a character code or a slice of a buffer in the form of a + * string. + * + * Chunks are used because strings are more efficient storage that character + * codes, but limited in what they can represent. + */ +export type Chunk = Code | string +/** + * Enumeration of the content types. + * + * Technically `document` is also a content type, which includes containers + * (lists, block quotes) and flow. + * As `ContentType` is used on tokens to define the type of subcontent but + * `document` is the highest level of content, so it’s not listed here. + * + * Containers in markdown come from the margin and include more constructs + * on the lines that define them. + * Take for example a block quote with a paragraph inside it (such as + * `> asd`). 
+ * + * `flow` represents the sections, such as headings, code, and content, which + * is also parsed per line + * An example is HTML, which has a certain starting condition (such as + * ` +``` + +## Use + +Typical use (buffering): + +```js +import {micromark} from 'micromark' + +console.log(micromark('## Hello, *world*!')) +``` + +Yields: + +```html +
+<h2>Hello, <em>world</em>!</h2>
+``` + +You can pass extensions (in this case [`micromark-extension-gfm`][gfm]): + +```js +import {micromark} from 'micromark' +import {gfm, gfmHtml} from 'micromark-extension-gfm' + +const value = '* [x] contact@example.com ~~strikethrough~~' + +const result = micromark(value, { + extensions: [gfm()], + htmlExtensions: [gfmHtml()] +}) + +console.log(result) +``` + +Yields: + +```html +
+``` + +Streaming interface: + +```js +import fs from 'fs' +import {stream} from 'micromark/stream' + +fs.createReadStream('example.md') + .on('error', handleError) + .pipe(stream()) + .pipe(process.stdout) + +function handleError(error) { + // Handle your error here! + throw error +} +``` + +## API + +`micromark` core has two entries in its export map: `micromark` and +`micromark/stream`. + +`micromark` exports the following identifier: `micromark`. +`micromark/stream` exports the following identifier: `stream`. +There are no default exports. + +The export map supports the endorsed +[`development` condition](https://nodejs.org/api/packages.html#packages_resolving_user_conditions). +Run `node --conditions development module.js` to get instrumented dev code. +Without this condition, production code is loaded. +See [§ Size & debug][size-debug] for more info. + +### `micromark(value[, encoding][, options])` + +Compile markdown to HTML. + +##### Parameters + +###### `value` + +Markdown to parse (`string` or `Buffer`). + +###### `encoding` + +[Character encoding][encoding] to understand `value` as when it’s a +[`Buffer`][buffer] (`string`, default: `'utf8'`). + +###### `options.defaultLineEnding` + +Value to use for line endings not in `value` (`string`, default: first line +ending or `'\n'`). + +Generally, micromark copies line endings (`'\r'`, `'\n'`, `'\r\n'`) in the +markdown document over to the compiled HTML. +In some cases, such as `> a`, CommonMark requires that extra line endings are +added: `
<blockquote>\n<p>a</p>\n</blockquote>
`. + +###### `options.allowDangerousHtml` + +Whether to allow embedded HTML (`boolean`, default: `false`). +See [§ Security][security]. + +###### `options.allowDangerousProtocol` + +Whether to allow potentially dangerous protocols in links and images (`boolean`, +default: `false`). +URLs relative to the current protocol are always allowed (such as, `image.jpg`). +For links, the allowed protocols are `http`, `https`, `irc`, `ircs`, `mailto`, +and `xmpp`. +For images, the allowed protocols are `http` and `https`. +See [§ Security][security]. + +###### `options.extensions` + +Array of syntax extensions ([`Array`][syntax-extension], +default: `[]`). +See [§ Extensions][extensions]. + +###### `options.htmlExtensions` + +Array of HTML extensions ([`Array`][html-extension], default: +`[]`). +See [§ Extensions][extensions]. + +##### Returns + +`string` — Compiled HTML. + +### `stream(options?)` + +Streaming interface of micromark. +Compiles markdown to HTML. +`options` are the same as the buffering API above. +Note that some of the work to parse markdown can be done streaming, but in the +end buffering is required. + +micromark does not handle errors for you, so you must handle errors on whatever +streams you pipe into it. +As markdown does not know errors, `micromark` itself does not emit errors. + +## Extensions + +micromark supports extensions. +There are two types of extensions for micromark: +[`SyntaxExtension`][syntax-extension], +which change how markdown is parsed, and [`HtmlExtension`][html-extension], +which change how it compiles. +They can be passed in [`options.extensions`][option-extensions] or +[`options.htmlExtensions`][option-htmlextensions], respectively. + +As a user of extensions, refer to each extension’s readme for more on how to use +them. +As a (potential) author of extensions, refer to +[§ Extending markdown][extending-markdown] and +[§ Creating a micromark extension][create-extension]. 
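+
+As a quick, concrete illustration of the buffering options documented above
+(a minimal sketch, not from the upstream readme), `allowDangerousHtml`
+switches embedded HTML from being encoded to being passed through:
+
+```js
+import {micromark} from 'micromark'
+
+const doc = 'Hi, <b>world</b>!'
+
+// Default: embedded HTML is encoded.
+console.log(micromark(doc))
+// roughly: '<p>Hi, &lt;b&gt;world&lt;/b&gt;!</p>'
+
+// Opt in to keep it as-is (see § Security for the risks).
+console.log(micromark(doc, {allowDangerousHtml: true}))
+// roughly: '<p>Hi, <b>world</b>!</p>'
+```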
+ +### List of extensions + +* [`micromark/micromark-extension-directive`][directives] + — support directives (generic extensions) +* [`micromark/micromark-extension-frontmatter`][frontmatter] + — support frontmatter (YAML, TOML, etc) +* [`micromark/micromark-extension-gfm`][gfm] + — support GFM (GitHub Flavored Markdown) +* [`micromark/micromark-extension-gfm-autolink-literal`](https://github.com/micromark/micromark-extension-gfm-autolink-literal) + — support GFM autolink literals +* [`micromark/micromark-extension-gfm-footnote`](https://github.com/micromark/micromark-extension-gfm-footnote) + — support GFM footnotes +* [`micromark/micromark-extension-gfm-strikethrough`](https://github.com/micromark/micromark-extension-gfm-strikethrough) + — support GFM strikethrough +* [`micromark/micromark-extension-gfm-table`](https://github.com/micromark/micromark-extension-gfm-table) + — support GFM tables +* [`micromark/micromark-extension-gfm-tagfilter`](https://github.com/micromark/micromark-extension-gfm-tagfilter) + — support GFM tagfilter +* [`micromark/micromark-extension-gfm-task-list-item`](https://github.com/micromark/micromark-extension-gfm-task-list-item) + — support GFM tasklists +* [`micromark/micromark-extension-math`][math] + — support math +* [`micromark/micromark-extension-mdx`](https://github.com/micromark/micromark-extension-mdx) + — support MDX +* [`micromark/micromark-extension-mdxjs`][mdxjs] + — support MDX.js +* [`micromark/micromark-extension-mdx-expression`](https://github.com/micromark/micromark-extension-mdx-expression) + — support MDX (or MDX.js) expressions +* [`micromark/micromark-extension-mdx-jsx`](https://github.com/micromark/micromark-extension-mdx-jsx) + — support MDX (or MDX.js) JSX +* [`micromark/micromark-extension-mdx-md`](https://github.com/micromark/micromark-extension-mdx-md) + — support misc MDX changes +* [`micromark/micromark-extension-mdxjs-esm`](https://github.com/micromark/micromark-extension-mdxjs-esm) + — support MDX.js import/exports + +#### Community extensions + +* [`wataru-chocola/micromark-extension-definition-list`](https://github.com/wataru-chocola/micromark-extension-definition-list) + — support definition lists + +### `SyntaxExtension` + +A syntax extension is an object whose fields are typically the names of hooks, +referring to where constructs “hook” into. +The fields at such objects are character codes, mapping to constructs as values. + +The built in [constructs][] are an example. +See it and [existing extensions][extensions] for inspiration. + +### `HtmlExtension` + +An HTML extension is an object whose fields are typically `enter` or `exit` +(reflecting whether a token is entered or exited). +The values at such objects are names of tokens mapping to handlers. + +See [existing extensions][extensions] for inspiration. + +### Extending markdown + +micromark lets you change markdown syntax, yes, but there are alternatives. +The alternatives are often better. + +Over the years, many micromark and remark users have asked about their unique +goals for markdown. +Some exemplary goals are: + +1. I want to add `rel="nofollow"` to external links +2. I want to add links from headings to themselves +3. I want line breaks in paragraphs to become hard breaks +4. I want to support embedded music sheets +5. I want authors to add arbitrary attributes +6. I want authors to mark certain blocks with meaning, such as tip, warning, + etc +7. I want to combine markdown with JS(X) +8. 
I want to support our legacy flavor of markdown-like syntax + +These can be solved in different ways and which solution is best is both +subjective and dependant on unique needs. +Often, there is already a solution in the form of an existing remark or rehype +plugin. +Respectively, their solutions are: + +1. [`remark-external-links`](https://github.com/remarkjs/remark-external-links) +2. [`rehype-autolink-headings`](https://github.com/rehypejs/rehype-autolink-headings) +3. [`remark-breaks`](https://github.com/remarkjs/remark-breaks) +4. custom plugin similar to + [`rehype-katex`](https://github.com/remarkjs/remark-math/tree/main/packages/rehype-katex) + but integrating [`abcjs`](https://www.abcjs.net) +5. either [`remark-directive`](https://github.com/remarkjs/remark-directive) + and a custom plugin or with + [`rehype-attr`](https://github.com/jaywcjlove/rehype-attr) +6. [`remark-directive`](https://github.com/remarkjs/remark-directive) + combined with a custom plugin +7. combining the existing micromark MDX extensions however you please, such as + done by [`mdx-js/mdx`](https://github.com/mdx-js/mdx) or + [`xdm`](https://github.com/wooorm/xdm) +8. Writing a micromark extension + +Looking at these from a higher level, they can be categorized: + +* **Changing the output by transforming syntax trees** + (1 and 2) + + This category is nice as the format remains plain markdown that authors are + already familiar with and which will work with existing tools and platforms. + + Implementations will deal with the syntax tree + ([`mdast`][mdast]) and the ecosystems + **[remark][]** and **[rehype][]**. + There are many existing + [utilities for working with that tree][utilities]. + Many [remark plugins][] and [rehype plugins][] also exist. +* **Using and abusing markdown to add new meaning** + (3, 4, potentially 5) + + This category is similar to *Changing the output by transforming syntax + trees*, but adds a new meaning to certain things which already have + semantics in markdown. + + Some examples in pseudo code: + + ````markdown + * **A list item with the first paragraph bold** + + And then more content, is turned into `
<dl>` / `<dt>` / `<dd>
` elements + + Or, the title attributes on links or images is [overloaded](/url 'rel:nofollow') + with a new meaning. + + ```csv + fenced,code,can,include,data + which,is,turned,into,a,graph + ``` + + ```js data can="be" passed=true + // after the code language name + ``` + + HTML, especially comments, could be used as **markers** + ```` +* **Arbitrary extension mechanism** + (potentially 5; 6) + + This category is nice when content should contain embedded “components”. + Often this means it’s required for authors to have some programming + experience. + There are three good ways to solve arbitrary extensions. + + **HTML**: Markdown already has an arbitrary extension syntax. + It works in most places and authors are already familiar with the syntax, + but it’s reasonably hard to implement securely. + Certain platforms will remove HTML completely, others sanitize it to varying + degrees. + HTML also supports custom elements. + These could be used and enhanced by client side JavaScript or enhanced when + transforming the syntax tree. + + **Generic directives**: although + [a proposal][directive-proposal] + and not supported on most platforms, directives do work with many tools + already. + They’re not the easiest to author compared to, say, a heading, but sometimes + that’s okay. + They do have potential: they nicely solve the need for an infinite number of + potential extensions to markdown in a single markdown-esque way. + + **MDX** also adds support for components by swapping HTML out for JS(X). + JSX is an extension to JavaScript, so MDX is something along the lines of + literate programming. + This does require knowledge of React (or Vue) and JavaScript, excluding some + authors. +* **Extending markdown syntax** + (7 and 8) + + Extend the syntax of markdown means: + + * Authors won’t be familiar with the syntax + * Content won’t work in other places (such as on GitHub) + * Defeating the purpose of markdown: being simple to author and looking + like what it means + + …and it’s hard to do as it requires some in-depth knowledge of JavaScript + and parsing. + But it’s possible and in certain cases very powerful. + +### Creating a micromark extension + +This section shows how to create an extension for micromark that parses +“variables” (a way to render some data) and one to turn a default construct off. + +> Stuck? +> See [`support.md`][support]. + +#### Prerequisites + +* You should possess an intermediate to high understanding of JavaScript: + it’s going to get a bit complex +* Read the readme of [unified][] (until you hit the API section) to better + understand where micromark fits +* Read the [§ Architecture][architecture] section to understand how micromark + works +* Read the [§ Extending markdown][extending-markdown] section to understand + whether it’s a good idea to extend the syntax of markdown + +#### Extension basics + +micromark supports two types of extensions. +Syntax extensions change how markdown is parsed. +HTML extensions change how it compiles. + +HTML extensions are not always needed, as micromark is often used through +[`mdast-util-from-markdown`][from-markdown] to parse to a markdown syntax tree +So instead of an HTML extension a `from-markdown` utility is needed. +Then, a [`mdast-util-to-markdown`][to-markdown] utility, which is responsible +for serializing syntax trees to markdown, is also needed. + +When developing something for internal use only, you can pick and choose which +parts you need. 
+When open sourcing your extensions, it should probably contain four parts: +syntax extension, HTML extension, `from-markdown` utility, and a `to-markdown` +utility. + +On to our first case! + +#### Case: variables + +Let’s first outline what we want to make: render some data, similar to how +[Liquid](https://github.com/Shopify/liquid/wiki/Liquid-for-Designers) and the +like work, in our markdown. +It could look like this: + +```markdown +Hello, {planet}! +``` + +Turned into: + +```html +
+<p>Hello, Venus!</p>
+``` + +An opening curly brace, followed by one or more characters, and then a closing +brace. +We’ll then look up `planet` in some object and replace the variable with its +corresponding value, to get something like `Venus` out. + +It looks simple enough, but with markdown there are often a couple more things +to think about. +For this case, I can see the following: + +* Is there a “block” version too? +* Are spaces allowed? + Line endings? + Should initial and final white space be ignored? +* Balanced nested braces? + Superfluous ones such as `{{planet}}` or meaningful ones such as + `{a {pla} net}`? +* Character escapes (`{pla\}net}`) and character references + (`{pla}net}`)? + +To keep things as simple as possible, let’s not support a block syntax, see +spaces as special, support line endings, or support nested braces. +But to learn interesting things, we *will* support character escapes and +\-references. + +Note that this particular case is already solved quite nicely by +[`micromark-extension-mdx-expression`][mdx-expression]. +It’s a bit more powerful and does more things, but it can be used to solve this +case and otherwise serve as inspiration. + +##### Setup + +Create a new folder, enter it, and set up a new package: + +```sh +mkdir example +cd example +npm init -y +``` + +In this example we’ll use ESM, so add `type: 'module'` to `package.json`: + +```diff +@@ -2,6 +2,7 @@ + "name": "example", + "version": "1.0.0", + "description": "", ++ "type": "module", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" +``` + +Add a markdown file, `example.md`, with the following text: + +```markdown +Hello, {planet}! + +{pla\}net} and {pla}net}. +``` + +To check if our extension works, add an `example.js` module, with the following +code: + +```js +import {promises as fs} from 'node:fs' +import {micromark} from 'micromark' +import {variables} from './index.js' + +main() + +async function main() { + const buf = await fs.readFile('example.md') + const out = micromark(buf, {extensions: [variables]}) + console.log(out) +} +``` + +While working on the extension, run `node example` to see whether things work. +Feel free to add more examples of the variables syntax in `example.md` if +needed. + +Our extension doesn’t work yet, for one because `micromark` is not installed: + +```sh +npm install micromark --save-dev +``` + +…and we need to write our extension. +Let’s do that in `index.js`: + +```js +export const variables = {} +``` + +Although our extension doesn’t do anything, running `node example` now somewhat +works! + +##### Syntax extension + +Much in micromark is based on character codes (see [§ Preprocess][preprocess]). +For this extension, the relevant codes are: + +* `-5` + — M-0005 CARRIAGE RETURN (CR) +* `-4` + — M-0004 LINE FEED (LF) +* `-3` + — M-0003 CARRIAGE RETURN LINE FEED (CRLF) +* `null` + — EOF (end of the stream) +* `92` + — U+005C BACKSLASH (`\`) +* `123` + — U+007B LEFT CURLY BRACE (`{`) +* `125` + — U+007D RIGHT CURLY BRACE (`}`) + +Also relevant are the content types (see [§ Content types][content-types]). +This extension is a *text* construct, as it’s parsed alongsides links and such. +The content inside it (between the braces) is *string*, to support character +escapes and -references. + +Let’s write our extension. 
+Add the following code to `index.js`: + +```js +const variableConstruct = {name: 'variable', tokenize: variableTokenize} + +export const variables = {text: {123: variableConstruct}} + +function variableTokenize(effects, ok, nok) { + return start + + function start(code) { + console.log('start:', effects, code); + return nok(code) + } +} +``` + +The above code exports an extension with the identifier `variables`. +The extension defines a *text* construct for the character code `123`. +The construct has a `name`, so that it can be turned off (optional, see next +case), and it has a `tokenize` function that sets up a state machine, which +receives `effects` and the `ok` and `nok` states. +`ok` can be used when successful, `nok` when not, and so constructs are a bit +similar to how promises can *resolve* or *reject*. +`tokenize` returns the initial state, `start`, which itself receives the current +character code, prints some debugging information, and then returns a call +to `nok`. + +Ensure that things work by running `node example` and see what it prints. + +Now we need to define our states and figure out how variables work. +Some people prefer sketching a diagram of the flow. +I often prefer writing it down in pseudo-code prose. +I’ve also found that test driven development works well, where I write unit +tests for how it should work, then write the state machine, and finally use a +code coverage tool to ensure I’ve thought of everything. + +In prose, what we have to code looks like this: + +* **start**: + Receive `123` as `code`, enter a token for the whole (let’s call it + `variable`), enter a token for the marker (`variableMarker`), consume + `code`, exit the marker token, enter a token for the contents + (`variableString`), switch to *begin* +* **begin**: + If `code` is `125`, reconsume in *nok*. + Else, reconsume in *inside* +* **inside**: + If `code` is `-5`, `-4`, `-3`, or `null`, reconsume in `nok`. + Else, if `code` is `125`, exit the string token, enter a `variableMarker`, + consume `code`, exit the marker token, exit the variable token, and switch + to *ok*. + Else, consume, and remain in *inside*. + +That should be it! +Replace `variableTokenize` with the following to include the needed states: + +```js +function variableTokenize(effects, ok, nok) { + return start + + function start(code) { + effects.enter('variable') + effects.enter('variableMarker') + effects.consume(code) + effects.exit('variableMarker') + effects.enter('variableString') + return begin + } + + function begin(code) { + return code === 125 ? nok(code) : inside(code) + } + + function inside(code) { + if (code === -5 || code === -4 || code === -3 || code === null) { + return nok(code) + } + + if (code === 125) { + effects.exit('variableString') + effects.enter('variableMarker') + effects.consume(code) + effects.exit('variableMarker') + effects.exit('variable') + return ok + } + + effects.consume(code) + return inside + } +} +``` + +Run `node example` again and see what it prints! +The HTML compiler ignores things it doesn’t know, so variables are now removed. + +We have our first syntax extension, and it sort of works, but we don’t handle +character escapes and -references yet. +We need to do two things to make that work: +a) skip over `\\` and `\}` in our algorithm, +b) tell micromark to parse them. 
+ +Change the code in `index.js` to support escapes like so: + +```diff +@@ -23,6 +23,11 @@ function variableTokenize(effects, ok, nok) { + return nok(code) + } + ++ if (code === 92) { ++ effects.consume(code) ++ return insideEscape ++ } ++ + if (code === 125) { + effects.exit('variableString') + effects.enter('variableMarker') +@@ -35,4 +40,13 @@ function variableTokenize(effects, ok, nok) { + effects.consume(code) + return inside + } ++ ++ function insideEscape(code) { ++ if (code === 92 || code === 125) { ++ effects.consume(code) ++ return inside ++ } ++ ++ return inside(code) ++ } + } +``` + +Finally add support for character references and character escapes between +braces by adding a special token that defines a content type: + +```diff +@@ -11,6 +11,7 @@ function variableTokenize(effects, ok, nok) { + effects.consume(code) + effects.exit('variableMarker') + effects.enter('variableString') ++ effects.enter('chunkString', {contentType: 'string'}) + return begin + } + +@@ -29,6 +30,7 @@ function variableTokenize(effects, ok, nok) { + } + + if (code === 125) { ++ effects.exit('chunkString') + effects.exit('variableString') + effects.enter('variableMarker') + effects.consume(code) +``` + +Tokens with a `contentType` will be replaced by *postprocess* (see +[§ Postprocess][postprocess]) by the tokens belonging to that content type. + +##### HTML extension + +Up next is an HTML extension to replace variables with data. +Change `example.js` to use one like so: + +```diff +@@ -1,11 +1,12 @@ + import {promises as fs} from 'node:fs' + import {micromark} from 'micromark' +-import {variables} from './index.js' ++import {variables, variablesHtml} from './index.js' + + main() + + async function main() { + const buf = await fs.readFile('example.md') +- const out = micromark(buf, {extensions: [variables]}) ++ const html = variablesHtml({planet: '1', 'pla}net': '2'}) ++ const out = micromark(buf, {extensions: [variables], htmlExtensions: [html]}) + console.log(out) + } +``` + +And add the HTML extension, `variablesHtml`, to `index.js` like so: + +```diff +@@ -52,3 +52,19 @@ function variableTokenize(effects, ok, nok) { + return inside(code) + } + } ++ ++export function variablesHtml(data = {}) { ++ return { ++ enter: {variableString: enterVariableString}, ++ exit: {variableString: exitVariableString}, ++ } ++ ++ function enterVariableString() { ++ this.buffer() ++ } ++ ++ function exitVariableString() { ++ var id = this.resume() ++ if (id in data) { ++ this.raw(this.encode(data[id])) ++ } ++ } ++} +``` + +`variablesHtml` is a function that receives an object mapping “variables” to +strings and returns an HTML extension. +The extension hooks two functions to `variableString`, one when it starts, +the other when it ends. +We don’t need to do anything to handle the other tokens as they’re already +ignored by default. +`enterVariableString` calls `buffer`, which is a function that “stashes” what +would otherwise be emitted. +`exitVariableString` calls `resume`, which is the inverse of `buffer` and +returns the stashed value. +If the variable is defined, we ensure it’s made safe (with `this.encode`) and +finally output that (with `this.raw`). + +##### Further exercises + +It works! +We’re done! 
+Of course, it can be better, such as with the following potential features: + +* Add support for empty variables +* Add support for spaces between markers and string +* Add support for line endings in variables +* Add support for nested braces +* Add support for blocks +* Add warnings on undefined variables +* Use `micromark-build`, and use `uvu/assert`, `debug`, and + `micromark-util-symbol` (see [§ Size & debug][size-debug]) +* Add [`mdast-util-from-markdown`][from-markdown] and + [`mdast-util-to-markdown`][to-markdown] utilities to parse and serialize the + AST + +#### Case: turn off constructs + +Sometimes it’s needed to turn a default construct off. +That’s possible through a syntax extension. +Note that not everything can be turned off (such as paragraphs) and even if it’s +possible to turn something off, it could break micromark (such as character +escapes). + +To disable constructs, refer to them by name in an array at the `disable.null` +field of an extension: + +```js +import {micromark} from 'micromark' + +const extension = {disable: {null: ['codeIndented']}} + +console.log(micromark('\ta', {extensions: [extension]})) +``` + +Yields: + +```html +

+<p>a</p>

+``` + +## Architecture + +micromark is maintained as a monorepo. +Many of its internals, which are used in `micromark` (core) but also useful for +developers of extensions or integrations, are available as separate modules. +Each module maintained here is available in [`packages/`][packages]. + +### Overview + +The naming scheme in [`packages/`][packages] is as follows: + +* `micromark-build` + — Small CLI to build dev code into production code +* `micromark-core-commonmark` + — CommonMark constructs used in micromark +* `micromark-factory-*` + — Reusable subroutines used to parse parts of constructs +* `micromark-util-*` + — Reusable helpers often needed when parsing markdown +* `micromark` + — Core module + +micromark has two interfaces: buffering (maintained in +[`micromark/dev/index.js`](https://github.com/micromark/micromark/blob/main/packages/micromark/dev/index.js)) +and streaming (maintained in +[`micromark/dev/stream.js`](https://github.com/micromark/micromark/blob/main/packages/micromark/dev/stream.js)). +The first takes all input at once whereas the last uses a Node.js stream to take +input separately. +They thinly wrap how data flows through micromark: + +```txt + micromark ++-----------------------------------------------------------------------------------------------+ +| +------------+ +-------+ +-------------+ +---------+ | +| -markdown->+ preprocess +-chunks->+ parse +-events->+ postprocess +-events->+ compile +-html- | +| +------------+ +-------+ +-------------+ +---------+ | ++-----------------------------------------------------------------------------------------------+ +``` + +### Preprocess + +The **preprocessor** +([`micromark/dev/lib/preprocess.js`](https://github.com/micromark/micromark/blob/main/packages/micromark/dev/lib/preprocess.js)) +takes markdown and turns it into chunks. + +A **chunk** is either a character code or a slice of a buffer in the form of a +string. +Chunks are used because strings are more efficient storage than character codes, +but limited in what they can represent. +For example, the input `ab\ncd` is represented as `['ab', -4, 'cd']` in chunks. + +A character **code** is often the same as what `String#charCodeAt()` yields but +micromark adds meaning to certain other values. + +In micromark, the actual character U+0009 CHARACTER TABULATION (HT) is replaced +by one M-0002 HORIZONTAL TAB (HT) and between 0 and 3 M-0001 VIRTUAL SPACE (VS) +characters, depending on the column at which the tab occurred. +For example, the input `\ta` is represented as `[-2, -1, -1, -1, 97]` and `a\tb` +as `[97, -2, -1, -1, 98]` in character codes. + +The characters U+000A LINE FEED (LF) and U+000D CARRIAGE RETURN (CR) are +replaced by virtual characters depending on whether they occur together: M-0003 +CARRIAGE RETURN LINE FEED (CRLF), M-0004 LINE FEED (LF), and M-0005 CARRIAGE +RETURN (CR). +For example, the input `a\r\nb\nc\rd` is represented as +`[97, -5, 98, -4, 99, -3, 100]` in character codes. + +The `0` (U+0000 NUL) character code is replaced by U+FFFD REPLACEMENT CHARACTER +(`�`). + +The `null` code represents the end of the input stream (called *eof* for end of +file). + +### Parse + +The **parser** +([`micromark/dev/lib/parse.js`](https://github.com/micromark/micromark/blob/main/packages/micromark/dev/lib/parse.js)) +takes chunks and turns them into events. + +An **event** is the start or end of a token amongst other events. +Tokens can “contain” other tokens, even though they are stored in a flat list, +by entering before and exiting after them. 
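+
+For example, with the `variables` extension developed above, the input
+`{planet}` yields an event sequence shaped roughly like this (a sketch: real
+events also carry positions and the tokenizer context):
+
+```js
+// Rough shape only; the token types are the ones defined by the `variables`
+// construct earlier in this readme.
+const events = [
+  ['enter', {type: 'variable'}],
+  ['enter', {type: 'variableMarker'}], // `{`
+  ['exit', {type: 'variableMarker'}],
+  ['enter', {type: 'variableString'}],
+  ['enter', {type: 'chunkString', contentType: 'string'}],
+  ['exit', {type: 'chunkString'}],
+  ['exit', {type: 'variableString'}],
+  ['enter', {type: 'variableMarker'}], // `}`
+  ['exit', {type: 'variableMarker'}],
+  ['exit', {type: 'variable'}]
+]
+```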
+ +A **token** is a span of one or more codes. +Tokens are most of what micromark produces: the built in HTML compiler or other +tools can turn them into different things. +Tokens are essentially names attached to a slice, such as `lineEndingBlank` for +certain line endings, or `codeFenced` for a whole fenced code. + +Sometimes, more info is attached to tokens, such as `_open` and `_close` by +`attention` (strong, emphasis) to signal whether the sequence can open or close +an attention run. +These fields have to do with how the parser works, which is complex and not +always pretty. + +Certain fields (`previous`, `next`, and `contentType`) are used in many cases: +linked tokens for subcontent. +Linked tokens are used because outer constructs are parsed first. +Take for example: + +```markdown +- *a + b*. +``` + +1. The list marker and the space after it is parsed first +2. The rest of the line is a `chunkFlow` token +3. The two spaces on the second line are a `linePrefix` of the list +4. The rest of the line is another `chunkFlow` token + +The two `chunkFlow` tokens are linked together and the chunks they span are +passed through the flow tokenizer. +There the chunks are seen as `chunkContent` and passed through the content +tokenizer. +There the chunks are seen as a paragraph and seen as `chunkText` and passed +through the text tokenizer. +Finally, the attention (emphasis) and data (“raw” characters) is parsed there, +and we’re done! + +#### Content types + +The parser starts out with a document tokenizer. +*Document* is the top-most content type, which includes containers such as block +quotes and lists. +Containers in markdown come from the margin and include more constructs +on the lines that define them. + +*Flow* represents the sections (block constructs such as ATX and setext +headings, HTML, indented and fenced code, thematic breaks), which like +*document* are also parsed per line. +An example is HTML, which has a certain starting condition (such as ` +``` + +## Use + +Say we have the following module `example.js`: + +```js +import {unified} from 'unified' +import remarkParse from 'remark-parse' +import remarkGfm from 'remark-gfm' +import remarkRehype from 'remark-rehype' +import rehypeStringify from 'rehype-stringify' + +main() + +async function main() { + const file = await unified() + .use(remarkParse) + .use(remarkGfm) + .use(remarkRehype) + .use(rehypeStringify) + .process('# Hi\n\n*Hello*, world!') + + console.log(String(file)) +} +``` + +Running that with `node example.js` yields: + +```html +

+<h1>Hi</h1>
+<p><em>Hello</em>, world!</p>

+``` + +## API + +This package exports no identifiers. +The default export is `remarkParse`. + +### `unified().use(remarkParse)` + +Add support for parsing markdown input. +There are no options. + +## Examples + +### Example: support GFM and frontmatter + +We support CommonMark by default. +Non-standard markdown extensions can be enabled with plugins. +The following example adds support for GFM features (autolink literals, +footnotes, strikethrough, tables, tasklists) and frontmatter (YAML): + +```js +import {unified} from 'unified' +import remarkParse from 'remark-parse' +import remarkFrontmatter from 'remark-frontmatter' +import remarkGfm from 'remark-gfm' +import remarkRehype from 'remark-rehype' +import rehypeStringify from 'rehype-stringify' + +main() + +async function main() { + const file = await unified() + .use(remarkParse) + .use(remarkFrontmatter) + .use(remarkGfm) + .use(remarkRehype) + .use(rehypeStringify) + .process('---\nlayout: home\n---\n\n# Hi ~~Mars~~Venus!') + + console.log(String(file)) +} +``` + +Yields: + +```html +

+<h1>Hi <del>Mars</del>Venus!</h1>

+``` + +### Example: turning markdown into a man page + +Man pages (short for manual pages) are a way to document CLIs (example: type +`man git-log` in your terminal). +They use an old markup format called roff. +There’s a remark plugin, [`remark-man`][remark-man], that can serialize as roff. +The following example turns markdown into man pages by using unified with +`remark-parse` and `remark-man`: + +```js +import {unified} from 'unified' +import remarkParse from 'remark-parse' +import remarkMan from 'remark-man' + +main() + +async function main() { + const file = await unified() + .use(remarkParse) + .use(remarkMan) + .process('# titan(7) -- largest moon of saturn\n\nTitan is the largest moon…') + + console.log(String(file)) +} +``` + +Yields: + +```roff +.TH "TITAN" "7" "November 2021" "" "" +.SH "NAME" +\fBtitan\fR - largest moon of saturn +.P +Titan is the largest moon… +``` + +## Syntax + +Markdown is parsed according to CommonMark. +Other plugins can add support for syntax extensions. +If you’re interested in extending markdown, +[more information is available in micromark’s readme][micromark-extend]. + +## Syntax tree + +The syntax tree format used in remark is [mdast][]. + +## Types + +This package is fully typed with [TypeScript][]. +There are no extra exported types. + +## Compatibility + +Projects maintained by the unified collective are compatible with all maintained +versions of Node.js. +As of now, that is Node.js 12.20+, 14.14+, and 16.0+. +Our projects sometimes work with older versions, but this is not guaranteed. + +## Security + +As markdown can be turned into HTML and improper use of HTML can open you up to +[cross-site scripting (XSS)][xss] attacks, use of remark can be unsafe. +When going to HTML, you will likely combine remark with **[rehype][]**, in which +case you should use [`rehype-sanitize`][rehype-sanitize]. + +Use of remark plugins could also open you up to other attacks. +Carefully assess each plugin and the risks involved in using them. + +For info on how to submit a report, see our [security policy][security]. + +## Contribute + +See [`contributing.md`][contributing] in [`remarkjs/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. +Join us in [Discussions][chat] to chat with the community and contributors. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. + +## Sponsor + +Support this effort and give back by sponsoring on [OpenCollective][collective]! + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Current sponsors include Vercel, Motif, HashiCorp, Gatsby, Netlify, Coinbase,
+ThemeIsle, Expo, Boost Hub, and Holloway.
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/remarkjs/remark/workflows/main/badge.svg + +[build]: https://github.com/remarkjs/remark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/remarkjs/remark.svg + +[coverage]: https://codecov.io/github/remarkjs/remark + +[downloads-badge]: https://img.shields.io/npm/dm/remark-parse.svg + +[downloads]: https://www.npmjs.com/package/remark-parse + +[size-badge]: https://img.shields.io/bundlephobia/minzip/remark-parse.svg + +[size]: https://bundlephobia.com/result?p=remark-parse + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/remarkjs/remark/discussions + +[security]: https://github.com/remarkjs/.github/blob/main/security.md + +[health]: https://github.com/remarkjs/.github + +[contributing]: https://github.com/remarkjs/.github/blob/main/contributing.md + +[support]: https://github.com/remarkjs/.github/blob/main/support.md + +[coc]: https://github.com/remarkjs/.github/blob/main/code-of-conduct.md + +[license]: https://github.com/remarkjs/remark/blob/main/license + +[author]: https://wooorm.com + +[npm]: https://docs.npmjs.com/cli/install + +[skypack]: https://www.skypack.dev + +[unified]: https://github.com/unifiedjs/unified + +[remark]: https://github.com/remarkjs/remark + +[mdast]: https://github.com/syntax-tree/mdast + +[xss]: https://en.wikipedia.org/wiki/Cross-site_scripting + +[typescript]: https://www.typescriptlang.org + +[rehype]: https://github.com/rehypejs/rehype + +[rehype-sanitize]: https://github.com/rehypejs/rehype-sanitize + +[mdast-util-from-markdown]: https://github.com/syntax-tree/mdast-util-from-markdown + +[micromark]: https://github.com/micromark/micromark + +[micromark-extend]: https://github.com/micromark/micromark#extensions + +[remark-gfm]: https://github.com/remarkjs/remark-gfm + +[remark-mdx]: https://github.com/mdx-js/mdx/tree/main/packages/remark-mdx + +[remark-frontmatter]: https://github.com/remarkjs/remark-frontmatter + +[remark-math]: https://github.com/remarkjs/remark-math + +[remark-man]: https://github.com/remarkjs/remark-man + +[remark-directive]: https://github.com/remarkjs/remark-directive + +[remark-stringify]: ../remark-stringify/ + +[remark-core]: ../remark/ + +[plugin]: https://github.com/remarkjs/remark#plugin diff --git a/_extensions/d2/node_modules/remark-stringify/index.d.ts b/_extensions/d2/node_modules/remark-stringify/index.d.ts new file mode 100644 index 00000000..e22538ca --- /dev/null +++ b/_extensions/d2/node_modules/remark-stringify/index.d.ts @@ -0,0 +1,8 @@ +// This wrapper exists because JS in TS can’t export a `@type` of a function. +import type {Root} from 'mdast' +import type {Plugin} from 'unified' +import type {Options} from './lib/index.js' + +declare const remarkStringify: Plugin<[Options?] 
| void[], Root, string> +export default remarkStringify +export type {Options} diff --git a/_extensions/d2/node_modules/remark-stringify/index.js b/_extensions/d2/node_modules/remark-stringify/index.js new file mode 100644 index 00000000..514a8afe --- /dev/null +++ b/_extensions/d2/node_modules/remark-stringify/index.js @@ -0,0 +1,3 @@ +import remarkStringify from './lib/index.js' + +export default remarkStringify diff --git a/_extensions/d2/node_modules/remark-stringify/lib/index.d.ts b/_extensions/d2/node_modules/remark-stringify/lib/index.d.ts new file mode 100644 index 00000000..c30e5033 --- /dev/null +++ b/_extensions/d2/node_modules/remark-stringify/lib/index.d.ts @@ -0,0 +1,5 @@ +/** @type {import('unified').Plugin<[Options]|void[], Node, string>} */ +export default function remarkStringify(options: void | Options): void +export type Node = import('mdast').Root | import('mdast').Content +export type ToMarkdownOptions = import('mdast-util-to-markdown').Options +export type Options = Omit diff --git a/_extensions/d2/node_modules/remark-stringify/lib/index.js b/_extensions/d2/node_modules/remark-stringify/lib/index.js new file mode 100644 index 00000000..867f2ef2 --- /dev/null +++ b/_extensions/d2/node_modules/remark-stringify/lib/index.js @@ -0,0 +1,31 @@ +/** + * @typedef {import('mdast').Root|import('mdast').Content} Node + * @typedef {import('mdast-util-to-markdown').Options} ToMarkdownOptions + * @typedef {Omit} Options + */ + +import {toMarkdown} from 'mdast-util-to-markdown' + +/** @type {import('unified').Plugin<[Options]|void[], Node, string>} */ +export default function remarkStringify(options) { + /** @type {import('unified').CompilerFunction} */ + const compiler = (tree) => { + // Assume options. + const settings = /** @type {Options} */ (this.data('settings')) + + return toMarkdown( + tree, + Object.assign({}, settings, options, { + // Note: this option is not in the readme. + // The goal is for it to be set by plugins on `data` instead of being + // passed by users. + extensions: + /** @type {ToMarkdownOptions['extensions']} */ ( + this.data('toMarkdownExtensions') + ) || [] + }) + ) + } + + Object.assign(this, {Compiler: compiler}) +} diff --git a/_extensions/d2/node_modules/remark-stringify/license b/_extensions/d2/node_modules/remark-stringify/license new file mode 100644 index 00000000..d39f9fa6 --- /dev/null +++ b/_extensions/d2/node_modules/remark-stringify/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2014-2020 Titus Wormer +Copyright (c) 2011-2014, Christopher Jeffrey (https://github.com/chjj/) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/_extensions/d2/node_modules/remark-stringify/package.json b/_extensions/d2/node_modules/remark-stringify/package.json new file mode 100644 index 00000000..d5ecd72b --- /dev/null +++ b/_extensions/d2/node_modules/remark-stringify/package.json @@ -0,0 +1,59 @@ +{ + "name": "remark-stringify", + "version": "10.0.2", + "description": "remark plugin to add support for serializing markdown", + "license": "MIT", + "keywords": [ + "unified", + "remark", + "remark-plugin", + "plugin", + "markdown", + "mdast", + "markdown", + "abstract", + "syntax", + "tree", + "ast", + "stringify", + "serialize", + "compile" + ], + "homepage": "https://remark.js.org", + "repository": "https://github.com/remarkjs/remark/tree/main/packages/remark-stringify", + "bugs": "https://github.com/remarkjs/remark/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)", + "Eugene Sharygin " + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "lib/", + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/mdast": "^3.0.0", + "mdast-util-to-markdown": "^1.0.0", + "unified": "^10.0.0" + }, + "scripts": { + "test": "node --conditions development test.js", + "build": "rimraf \"test.d.ts\" \"lib/**/*.d.ts\" && tsc && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/remark-stringify/readme.md b/_extensions/d2/node_modules/remark-stringify/readme.md new file mode 100644 index 00000000..641854cb --- /dev/null +++ b/_extensions/d2/node_modules/remark-stringify/readme.md @@ -0,0 +1,434 @@ +# remark-stringify + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +**[remark][]** plugin to add support for serializing markdown. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`unified().use(remarkStringify[, options])`](#unifieduseremarkstringify-options) +* [Syntax](#syntax) +* [Syntax tree](#syntax-tree) +* [Types](#types) +* [Security](#security) +* [Contribute](#contribute) +* [Sponsor](#sponsor) +* [License](#license) + +## What is this? + +This package is a [unified][] ([remark][]) plugin that defines how to take a +syntax tree as input and turn it into serialized markdown. + +This plugin is built on [`mdast-util-to-markdown`][mdast-util-to-markdown], +which turns [mdast][] syntax trees into a string. +remark focusses on making it easier to transform content by abstracting such +internals away. + +**unified** is a project that transforms content with abstract syntax trees +(ASTs). +**remark** adds support for markdown to unified. +**mdast** is the markdown AST that remark uses. +This is a remark plugin that defines how mdast is turned into markdown. + +## When should I use this? 
+ +This plugin adds support to unified for serializing markdown. +You can alternatively use [`remark`][remark-core] instead, which combines +unified, [`remark-parse`][remark-parse], and this plugin. + +You can combine this plugin with other plugins to add syntax extensions. +Notable examples that deeply integrate with it are +[`remark-gfm`][remark-gfm], +[`remark-mdx`][remark-mdx], +[`remark-frontmatter`][remark-frontmatter], +[`remark-math`][remark-math], and +[`remark-directive`][remark-directive]. +You can also use any other [remark plugin][plugin] before `remark-stringify`. + +If you want to handle syntax trees manually, you can use +[`mdast-util-to-markdown`][mdast-util-to-markdown]. + +## Install + +This package is [ESM only](https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c). +In Node.js (version 12.20+, 14.14+, or 16.0+), install with [npm][]: + +```sh +npm install remark-stringify +``` + +In Deno with [Skypack][]: + +```js +import remarkStringify from 'https://cdn.skypack.dev/remark-stringify@10?dts' +``` + +In browsers with [Skypack][]: + +```html + +``` + +## Use + +Say we have the following module `example.js`: + +```js +import {unified} from 'unified' +import rehypeParse from 'rehype-parse' +import rehypeRemark from 'rehype-remark' +import remarkStringify from 'remark-stringify' + +main() + +async function main() { + const file = await unified() + .use(rehypeParse) + .use(rehypeRemark) + .use(remarkStringify, { + bullet: '*', + fence: '~', + fences: true, + incrementListMarker: false + }) + .process('

<h1>Hello, world!</h1>

') + + console.log(String(file)) +} +``` + +Running that with `node example.js` yields: + +```markdown +# Hello, world! +``` + +## API + +This package exports no identifiers. +The default export is `remarkStringify`. + +### `unified().use(remarkStringify[, options])` + +Add support for serializing markdown. +Options are passed to [`mdast-util-to-markdown`][mdast-util-to-markdown]: +all formatting options are supported. + +##### `options` + +Configuration (optional). + +###### `options.bullet` + +Marker to use for bullets of items in unordered lists (`'*'`, `'+'`, or `'-'`, +default: `'*'`). + +###### `options.bulletOther` + +Marker to use in certain cases where the primary bullet doesn’t work (`'*'`, +`'+'`, or `'-'`, default: depends). +See [`mdast-util-to-markdown`][mdast-util-to-markdown] for more information. + +###### `options.bulletOrdered` + +Marker to use for bullets of items in ordered lists (`'.'` or `')'`, default: +`'.'`). + +###### `options.bulletOrderedOther` + +Marker to use in certain cases where the primary bullet for ordered items +doesn’t work (`'.'` or `')'`, default: none). +See [`mdast-util-to-markdown`][mdast-util-to-markdown] for more information. + +###### `options.closeAtx` + +Whether to add the same number of number signs (`#`) at the end of an ATX +heading as the opening sequence (`boolean`, default: `false`). + +###### `options.emphasis` + +Marker to use for emphasis (`'*'` or `'_'`, default: `'*'`). + +###### `options.fence` + +Marker to use for fenced code (``'`'`` or `'~'`, default: ``'`'``). + +###### `options.fences` + +Whether to use fenced code always (`boolean`, default: `false`). +The default is to use fenced code if there is a language defined, if the code is +empty, or if it starts or ends in blank lines. + +###### `options.incrementListMarker` + +Whether to increment the counter of ordered lists items (`boolean`, default: +`true`). + +###### `options.listItemIndent` + +How to indent the content of list items (`'one'`, `'tab'`, or `'mixed'`, +default: `'tab'`). +Either with the size of the bullet plus one space (when `'one'`), a tab stop +(`'tab'`), or depending on the item and its parent list (`'mixed'`, uses `'one'` +if the item and list are tight and `'tab'` otherwise). + +###### `options.quote` + +Marker to use for titles (`'"'` or `"'"`, default: `'"'`). + +###### `options.resourceLink` + +Whether to always use resource links (`boolean`, default: `false`). +The default is to use autolinks (``) when possible +and resource links (`[text](url)`) otherwise. + +###### `options.rule` + +Marker to use for thematic breaks (`'*'`, `'-'`, or `'_'`, default: `'*'`). + +###### `options.ruleRepetition` + +Number of markers to use for thematic breaks (`number`, default: +`3`, min: `3`). + +###### `options.ruleSpaces` + +Whether to add spaces between markers in thematic breaks (`boolean`, default: +`false`). + +###### `options.setext` + +Whether to use setext headings when possible (`boolean`, default: `false`). +The default is to always use ATX headings (`# heading`) instead of setext +headings (`heading\n=======`). +Setext headings can’t be used for empty headings or headings with a rank of +three or more. + +###### `options.strong` + +Marker to use for strong (`'*'` or `'_'`, default: `'*'`). + +###### `options.tightDefinitions` + +Whether to join definitions without a blank line (`boolean`, default: `false`). +The default is to add blank lines between any flow (“block”) construct. 
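+
+To see how several of the formatting options above combine, here is a minimal
+sketch (the option values are chosen purely for illustration):
+
+```js
+import {unified} from 'unified'
+import remarkParse from 'remark-parse'
+import remarkStringify from 'remark-stringify'
+
+main()
+
+async function main() {
+  const file = await unified()
+    .use(remarkParse)
+    .use(remarkStringify, {bullet: '-', emphasis: '_', rule: '-', setext: true})
+    .process('# Pluto\n\n*Small* but beloved:\n\n* cold\n* far\n\n***')
+
+  console.log(String(file))
+}
+```
+
+With these settings the heading is serialized as a setext heading, emphasis
+uses `_`, list bullets use `-`, and the thematic break is made of `-` markers.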
+ +###### `options.handlers` + +This option is a bit advanced as it requires knowledge of ASTs, so we defer +to the documentation available in +[`mdast-util-to-markdown`][mdast-util-to-markdown]. + +###### `options.join` + +This option is a bit advanced as it requires knowledge of ASTs, so we defer +to the documentation available in +[`mdast-util-to-markdown`][mdast-util-to-markdown]. + +###### `options.unsafe` + +This option is a bit advanced as it requires deep knowledge of markdown, so we +defer to the documentation available in +[`mdast-util-to-markdown`][mdast-util-to-markdown]. + +## Syntax + +Markdown is serialized according to CommonMark but care is taken to format in +such a way that the resulting markdown should work with most markdown parsers. +Other plugins can add support for syntax extensions. + +## Syntax tree + +The syntax tree format used in remark is [mdast][]. + +## Types + +This package is fully typed with [TypeScript][]. +An `Options` type is exported, which models the interface of accepted options. + +## Security + +As markdown can be turned into HTML and improper use of HTML can open you up to +[cross-site scripting (XSS)][xss] attacks, use of remark can be unsafe. +When going to HTML, you will likely combine remark with **[rehype][]**, in which +case you should use [`rehype-sanitize`][rehype-sanitize]. + +Use of remark plugins could also open you up to other attacks. +Carefully assess each plugin and the risks involved in using them. + +For info on how to submit a report, see our [security policy][security]. + +## Contribute + +See [`contributing.md`][contributing] in [`remarkjs/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. +Join us in [Discussions][chat] to chat with the community and contributors. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. + +## Sponsor + +Support this effort and give back by sponsoring on [OpenCollective][collective]! + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Current sponsors include Vercel, Motif, HashiCorp, Gatsby, Netlify, Coinbase,
+ThemeIsle, Expo, Boost Hub, and Holloway.
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/remarkjs/remark/workflows/main/badge.svg + +[build]: https://github.com/remarkjs/remark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/remarkjs/remark.svg + +[coverage]: https://codecov.io/github/remarkjs/remark + +[downloads-badge]: https://img.shields.io/npm/dm/remark-stringify.svg + +[downloads]: https://www.npmjs.com/package/remark-stringify + +[size-badge]: https://img.shields.io/bundlephobia/minzip/remark-stringify.svg + +[size]: https://bundlephobia.com/result?p=remark-stringify + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/remarkjs/remark/discussions + +[security]: https://github.com/remarkjs/.github/blob/main/security.md + +[health]: https://github.com/remarkjs/.github + +[contributing]: https://github.com/remarkjs/.github/blob/main/contributing.md + +[support]: https://github.com/remarkjs/.github/blob/main/support.md + +[coc]: https://github.com/remarkjs/.github/blob/main/code-of-conduct.md + +[license]: https://github.com/remarkjs/remark/blob/main/license + +[author]: https://wooorm.com + +[npm]: https://docs.npmjs.com/cli/install + +[skypack]: https://www.skypack.dev + +[unified]: https://github.com/unifiedjs/unified + +[remark]: https://github.com/remarkjs/remark + +[mdast]: https://github.com/syntax-tree/mdast + +[xss]: https://en.wikipedia.org/wiki/Cross-site_scripting + +[typescript]: https://www.typescriptlang.org + +[rehype]: https://github.com/rehypejs/rehype + +[rehype-sanitize]: https://github.com/rehypejs/rehype-sanitize + +[mdast-util-to-markdown]: https://github.com/syntax-tree/mdast-util-to-markdown + +[remark-gfm]: https://github.com/remarkjs/remark-gfm + +[remark-mdx]: https://github.com/mdx-js/mdx/tree/main/packages/remark-mdx + +[remark-frontmatter]: https://github.com/remarkjs/remark-frontmatter + +[remark-math]: https://github.com/remarkjs/remark-math + +[remark-directive]: https://github.com/remarkjs/remark-directive + +[remark-parse]: ../remark-parse/ + +[remark-core]: ../remark/ + +[plugin]: https://github.com/remarkjs/remark#plugin diff --git a/_extensions/d2/node_modules/remark/index.d.ts b/_extensions/d2/node_modules/remark/index.d.ts new file mode 100644 index 00000000..debe0e57 --- /dev/null +++ b/_extensions/d2/node_modules/remark/index.d.ts @@ -0,0 +1,6 @@ +export const remark: import('unified').FrozenProcessor< + import('mdast').Root, + import('mdast').Root, + import('mdast').Root, + string +> diff --git a/_extensions/d2/node_modules/remark/index.js b/_extensions/d2/node_modules/remark/index.js new file mode 100644 index 00000000..1cb80ce7 --- /dev/null +++ b/_extensions/d2/node_modules/remark/index.js @@ -0,0 +1,5 @@ +import {unified} from 'unified' +import remarkParse from 'remark-parse' +import remarkStringify from 'remark-stringify' + +export const remark = unified().use(remarkParse).use(remarkStringify).freeze() diff --git a/_extensions/d2/node_modules/remark/license b/_extensions/d2/node_modules/remark/license new file mode 100644 index 00000000..d39f9fa6 --- /dev/null +++ b/_extensions/d2/node_modules/remark/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2014-2020 Titus Wormer +Copyright (c) 2011-2014, Christopher Jeffrey (https://github.com/chjj/) + 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/_extensions/d2/node_modules/remark/package.json b/_extensions/d2/node_modules/remark/package.json new file mode 100644 index 00000000..6a75f082 --- /dev/null +++ b/_extensions/d2/node_modules/remark/package.json @@ -0,0 +1,57 @@ +{ + "name": "remark", + "version": "14.0.2", + "description": "unified processor with support for parsing markdown input and serializing markdown as output", + "license": "MIT", + "keywords": [ + "unified", + "remark", + "markdown", + "mdast", + "abstract", + "syntax", + "tree", + "ast", + "parse", + "stringify", + "serialize", + "compile", + "process" + ], + "homepage": "https://remark.js.org", + "repository": "https://github.com/remarkjs/remark/tree/main/packages/remark", + "bugs": "https://github.com/remarkjs/remark/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/mdast": "^3.0.0", + "remark-parse": "^10.0.0", + "remark-stringify": "^10.0.0", + "unified": "^10.0.0" + }, + "scripts": { + "test": "node --conditions development test.js", + "build": "rimraf \"*.d.ts\" && tsc && type-coverage" + }, + "xo": false, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/remark/readme.md b/_extensions/d2/node_modules/remark/readme.md new file mode 100644 index 00000000..fd9a05ab --- /dev/null +++ b/_extensions/d2/node_modules/remark/readme.md @@ -0,0 +1,377 @@ +# remark + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +**[unified][]** processor with support for parsing markdown input and +serializing markdown as output. 
+ +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`remark()`](#remark-1) +* [Examples](#examples) + * [Example: checking markdown](#example-checking-markdown) + * [Example: passing options to `remark-stringify`](#example-passing-options-to-remark-stringify) +* [Syntax](#syntax) +* [Syntax tree](#syntax-tree) +* [Types](#types) +* [Compatibility](#compatibility) +* [Security](#security) +* [Contribute](#contribute) +* [Sponsor](#sponsor) +* [License](#license) + +## What is this? + +This package is a [unified][] processor with support for parsing markdown input +and serializing markdown as output by using unified with +[`remark-parse`][remark-parse] and [`remark-stringify`][remark-stringify]. + +**unified** is a project that transforms content with abstract syntax trees +(ASTs). +**remark** adds support for markdown to unified. +**mdast** is the markdown AST that remark uses. +Please see [the monorepo readme][remark] for what the remark ecosystem is. + +## When should I use this? + +You can use this package when you want to use unified, have markdown as input, +and want markdown as output. +This package is a shortcut for +`unified().use(remarkParse).use(remarkStringify)`. +When the input isn’t markdown (meaning you don’t need `remark-parse`) or the +output is not markdown (you don’t need `remark-stringify`), it’s recommended to +use unified directly. + +When you want to inspect and format markdown files in a project on the command +line, you can use [`remark-cli`][remark-cli]. + +## Install + +This package is [ESM only](https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c). +In Node.js (version 12.20+, 14.14+, or 16.0+), install with [npm][]: + +```sh +npm install remark +``` + +In Deno with [Skypack][]: + +```js +import {remark} from 'https://cdn.skypack.dev/remark@14?dts' +``` + +In browsers with [Skypack][]: + +```html + +``` + +## Use + +Say we have the following module `example.js`: + +```js +import {remark} from 'remark' +import remarkGfm from 'remark-gfm' +import remarkToc from 'remark-toc' + +main() + +async function main() { + const file = await remark() + .use(remarkGfm) + .use(remarkToc) + .process('# Hi\n\n## Table of contents\n\n## Hello\n\n*Some* ~more~ _things_.') + + console.error(String(file)) +} +``` + +Running that with `node example.js` yields: + +```markdown +# Hi + +## Table of contents + +* [Hello](#hello) + +## Hello + +*Some* ~~more~~ *things*. +``` + +## API + +This package exports the following identifier: `remark`. +There is no default export. + +### `remark()` + +Create a new (unfrozen) unified processor that already uses `remark-parse` and +`remark-stringify` and you can add more plugins to. +See [`unified`][unified] for more information. 
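+
+As a minimal illustration (the examples below show more realistic uses), a
+fresh processor can be created, optionally configured further, and used
+synchronously:
+
+```js
+import {remark} from 'remark'
+
+// `remark()` returns a new, unfrozen processor each time it is called, so
+// plugins and settings added to it do not leak into other processors.
+const file = remark().processSync('*Hi*, Mercury!')
+
+console.log(String(file)) // prints `*Hi*, Mercury!` followed by a line feed
+```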
+ +## Examples + +### Example: checking markdown + +The following example checks that markdown code style is consistent and follows +some best practices: + +```js +import {reporter} from 'vfile-reporter' +import {remark} from 'remark' +import remarkPresetLintConsistent from 'remark-preset-lint-consistent' +import remarkPresetLintRecommended from 'remark-preset-lint-recommended' + +main() + +async function main() { + const file = await remark() + .use(remarkPresetLintConsistent) + .use(remarkPresetLintRecommended) + .process('1) Hello, _Jupiter_ and *Neptune*!') + + console.error(reporter(file)) +} +``` + +Yields: + +```txt + 1:1 warning Missing newline character at end of file final-newline remark-lint + 1:1-1:35 warning Marker style should be `.` ordered-list-marker-style remark-lint + 1:4 warning Incorrect list-item indent: add 1 space list-item-indent remark-lint + 1:25-1:34 warning Emphasis should use `_` as a marker emphasis-marker remark-lint + +⚠ 4 warnings +``` + +### Example: passing options to `remark-stringify` + +When you use `remark-stringify` manually you can pass options to `use`. +Because `remark-stringify` is already used in `remark`, that’s not possible. +To define options for `remark-stringify`, you can instead pass options to +`data`: + +```js +import {remark} from 'remark' + +main() + +async function main() { + const file = await remark() + .data('settings', {bullet: '*', setext: true, listItemIndent: 'one'}) + .process('# Moons of Neptune\n\n- Naiad\n- Thalassa\n- Despine\n- …') + + console.log(String(file)) +} +``` + +Yields: + +```markdown +Moons of Neptune +================ + +* Naiad +* Thalassa +* Despine +* … +``` + +## Syntax + +Markdown is parsed and serialized according to CommonMark. +Other plugins can add support for syntax extensions. + +## Syntax tree + +The syntax tree format used in remark is [mdast][]. + +## Types + +This package is fully typed with [TypeScript][]. +There are no extra exported types. + +## Compatibility + +Projects maintained by the unified collective are compatible with all maintained +versions of Node.js. +As of now, that is Node.js 12.20+, 14.14+, and 16.0+. +Our projects sometimes work with older versions, but this is not guaranteed. + +## Security + +As markdown can be turned into HTML and improper use of HTML can open you up to +[cross-site scripting (XSS)][xss] attacks, use of remark can be unsafe. +When going to HTML, you will likely combine remark with **[rehype][]**, in which +case you should use [`rehype-sanitize`][rehype-sanitize]. + +Use of remark plugins could also open you up to other attacks. +Carefully assess each plugin and the risks involved in using them. + +For info on how to submit a report, see our [security policy][security]. + +## Contribute + +See [`contributing.md`][contributing] in [`remarkjs/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. +Join us in [Discussions][chat] to chat with the community and contributors. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. + +## Sponsor + +Support this effort and give back by sponsoring on [OpenCollective][collective]! + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Current sponsors include Vercel, Motif, HashiCorp, Gatsby, Netlify, Coinbase,
+ThemeIsle, Expo, Boost Hub, and Holloway.
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/remarkjs/remark/workflows/main/badge.svg + +[build]: https://github.com/remarkjs/remark/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/remarkjs/remark.svg + +[coverage]: https://codecov.io/github/remarkjs/remark + +[downloads-badge]: https://img.shields.io/npm/dm/remark.svg + +[downloads]: https://www.npmjs.com/package/remark + +[size-badge]: https://img.shields.io/bundlephobia/minzip/remark.svg + +[size]: https://bundlephobia.com/result?p=remark + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/remarkjs/remark/discussions + +[security]: https://github.com/remarkjs/.github/blob/main/security.md + +[health]: https://github.com/remarkjs/.github + +[contributing]: https://github.com/remarkjs/.github/blob/main/contributing.md + +[support]: https://github.com/remarkjs/.github/blob/main/support.md + +[coc]: https://github.com/remarkjs/.github/blob/main/code-of-conduct.md + +[license]: https://github.com/remarkjs/remark/blob/main/license + +[author]: https://wooorm.com + +[npm]: https://docs.npmjs.com/cli/install + +[skypack]: https://www.skypack.dev + +[unified]: https://github.com/unifiedjs/unified + +[mdast]: https://github.com/syntax-tree/mdast + +[xss]: https://en.wikipedia.org/wiki/Cross-site_scripting + +[typescript]: https://www.typescriptlang.org + +[rehype]: https://github.com/rehypejs/rehype + +[remark]: https://github.com/remarkjs/remark + +[rehype-sanitize]: https://github.com/rehypejs/rehype-sanitize + +[remark-parse]: ../remark-parse + +[remark-stringify]: ../remark-stringify + +[remark-cli]: ../remark-cli diff --git a/_extensions/d2/node_modules/rimraf/CHANGELOG.md b/_extensions/d2/node_modules/rimraf/CHANGELOG.md new file mode 100644 index 00000000..f116f141 --- /dev/null +++ b/_extensions/d2/node_modules/rimraf/CHANGELOG.md @@ -0,0 +1,65 @@ +# v3.0 + +- Add `--preserve-root` option to executable (default true) +- Drop support for Node.js below version 6 + +# v2.7 + +- Make `glob` an optional dependency + +# 2.6 + +- Retry on EBUSY on non-windows platforms as well +- Make `rimraf.sync` 10000% more reliable on Windows + +# 2.5 + +- Handle Windows EPERM when lstat-ing read-only dirs +- Add glob option to pass options to glob + +# 2.4 + +- Add EPERM to delay/retry loop +- Add `disableGlob` option + +# 2.3 + +- Make maxBusyTries and emfileWait configurable +- Handle weird SunOS unlink-dir issue +- Glob the CLI arg for better Windows support + +# 2.2 + +- Handle ENOENT properly on Windows +- Allow overriding fs methods +- Treat EPERM as indicative of non-empty dir +- Remove optional graceful-fs dep +- Consistently return null error instead of undefined on success +- win32: Treat ENOTEMPTY the same as EBUSY +- Add `rimraf` binary + +# 2.1 + +- Fix SunOS error code for a non-empty directory +- Try rmdir before readdir +- Treat EISDIR like EPERM +- Remove chmod +- Remove lstat polyfill, node 0.7 is not supported + +# 2.0 + +- Fix myGid call to check process.getgid +- Simplify the EBUSY backoff logic. 
+- Use fs.lstat in node >= 0.7.9 +- Remove gently option +- remove fiber implementation +- Delete files that are marked read-only + +# 1.0 + +- Allow ENOENT in sync method +- Throw when no callback is provided +- Make opts.gently an absolute path +- use 'stat' if 'lstat' is not available +- Consistent error naming, and rethrow non-ENOENT stat errors +- add fiber implementation diff --git a/_extensions/d2/node_modules/rimraf/LICENSE b/_extensions/d2/node_modules/rimraf/LICENSE new file mode 100644 index 00000000..19129e31 --- /dev/null +++ b/_extensions/d2/node_modules/rimraf/LICENSE @@ -0,0 +1,15 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/_extensions/d2/node_modules/rimraf/README.md b/_extensions/d2/node_modules/rimraf/README.md new file mode 100644 index 00000000..423b8cf8 --- /dev/null +++ b/_extensions/d2/node_modules/rimraf/README.md @@ -0,0 +1,101 @@ +[![Build Status](https://travis-ci.org/isaacs/rimraf.svg?branch=master)](https://travis-ci.org/isaacs/rimraf) [![Dependency Status](https://david-dm.org/isaacs/rimraf.svg)](https://david-dm.org/isaacs/rimraf) [![devDependency Status](https://david-dm.org/isaacs/rimraf/dev-status.svg)](https://david-dm.org/isaacs/rimraf#info=devDependencies) + +The [UNIX command](http://en.wikipedia.org/wiki/Rm_(Unix)) `rm -rf` for node. + +Install with `npm install rimraf`, or just drop rimraf.js somewhere. + +## API + +`rimraf(f, [opts], callback)` + +The first parameter will be interpreted as a globbing pattern for files. If you +want to disable globbing you can do so with `opts.disableGlob` (defaults to +`false`). This might be handy, for instance, if you have filenames that contain +globbing wildcard characters. + +The callback will be called with an error if there is one. Certain +errors are handled for you: + +* Windows: `EBUSY` and `ENOTEMPTY` - rimraf will back off a maximum of + `opts.maxBusyTries` times before giving up, adding 100ms of wait + between each attempt. The default `maxBusyTries` is 3. +* `ENOENT` - If the file doesn't exist, rimraf will return + successfully, since your desired outcome is already the case. +* `EMFILE` - Since `readdir` requires opening a file descriptor, it's + possible to hit `EMFILE` if too many file descriptors are in use. + In the sync case, there's nothing to be done for this. But in the + async case, rimraf will gradually back off with timeouts up to + `opts.emfileWait` ms, which defaults to 1000. + +## options + +* unlink, chmod, stat, lstat, rmdir, readdir, + unlinkSync, chmodSync, statSync, lstatSync, rmdirSync, readdirSync + + In order to use a custom file system library, you can override + specific fs functions on the options object. 
+ + If any of these functions are present on the options object, then + the supplied function will be used instead of the default fs + method. + + Sync methods are only relevant for `rimraf.sync()`, of course. + + For example: + + ```javascript + var myCustomFS = require('some-custom-fs') + + rimraf('some-thing', myCustomFS, callback) + ``` + +* maxBusyTries + + If an `EBUSY`, `ENOTEMPTY`, or `EPERM` error code is encountered + on Windows systems, then rimraf will retry with a linear backoff + wait of 100ms longer on each try. The default maxBusyTries is 3. + + Only relevant for async usage. + +* emfileWait + + If an `EMFILE` error is encountered, then rimraf will retry + repeatedly with a linear backoff of 1ms longer on each try, until + the timeout counter hits this max. The default limit is 1000. + + If you repeatedly encounter `EMFILE` errors, then consider using + [graceful-fs](http://npm.im/graceful-fs) in your program. + + Only relevant for async usage. + +* glob + + Set to `false` to disable [glob](http://npm.im/glob) pattern + matching. + + Set to an object to pass options to the glob module. The default + glob options are `{ nosort: true, silent: true }`. + + Glob version 6 is used in this module. + + Relevant for both sync and async usage. + +* disableGlob + + Set to any non-falsey value to disable globbing entirely. + (Equivalent to setting `glob: false`.) + +## rimraf.sync + +It can remove stuff synchronously, too. But that's not so good. Use +the async API. It's better. + +## CLI + +If installed with `npm install rimraf -g` it can be used as a global +command `rimraf [ ...]` which is useful for cross platform support. + +## mkdirp + +If you need to create a directory recursively, check out +[mkdirp](https://github.com/substack/node-mkdirp). diff --git a/_extensions/d2/node_modules/rimraf/bin.js b/_extensions/d2/node_modules/rimraf/bin.js new file mode 100644 index 00000000..023814cc --- /dev/null +++ b/_extensions/d2/node_modules/rimraf/bin.js @@ -0,0 +1,68 @@ +#!/usr/bin/env node + +const rimraf = require('./') + +const path = require('path') + +const isRoot = arg => /^(\/|[a-zA-Z]:\\)$/.test(path.resolve(arg)) +const filterOutRoot = arg => { + const ok = preserveRoot === false || !isRoot(arg) + if (!ok) { + console.error(`refusing to remove ${arg}`) + console.error('Set --no-preserve-root to allow this') + } + return ok +} + +let help = false +let dashdash = false +let noglob = false +let preserveRoot = true +const args = process.argv.slice(2).filter(arg => { + if (dashdash) + return !!arg + else if (arg === '--') + dashdash = true + else if (arg === '--no-glob' || arg === '-G') + noglob = true + else if (arg === '--glob' || arg === '-g') + noglob = false + else if (arg.match(/^(-+|\/)(h(elp)?|\?)$/)) + help = true + else if (arg === '--preserve-root') + preserveRoot = true + else if (arg === '--no-preserve-root') + preserveRoot = false + else + return !!arg +}).filter(arg => !preserveRoot || filterOutRoot(arg)) + +const go = n => { + if (n >= args.length) + return + const options = noglob ? { glob: false } : {} + rimraf(args[n], options, er => { + if (er) + throw er + go(n+1) + }) +} + +if (help || args.length === 0) { + // If they didn't ask for help, then this is not a "success" + const log = help ? 
console.log : console.error + log('Usage: rimraf [ ...]') + log('') + log(' Deletes all files and folders at "path" recursively.') + log('') + log('Options:') + log('') + log(' -h, --help Display this usage info') + log(' -G, --no-glob Do not expand glob patterns in arguments') + log(' -g, --glob Expand glob patterns in arguments (default)') + log(' --preserve-root Do not remove \'/\' (default)') + log(' --no-preserve-root Do not treat \'/\' specially') + log(' -- Stop parsing flags') + process.exit(help ? 0 : 1) +} else + go(0) diff --git a/_extensions/d2/node_modules/rimraf/package.json b/_extensions/d2/node_modules/rimraf/package.json new file mode 100644 index 00000000..1bf8d5e3 --- /dev/null +++ b/_extensions/d2/node_modules/rimraf/package.json @@ -0,0 +1,32 @@ +{ + "name": "rimraf", + "version": "3.0.2", + "main": "rimraf.js", + "description": "A deep deletion module for node (like `rm -rf`)", + "author": "Isaac Z. Schlueter (http://blog.izs.me/)", + "license": "ISC", + "repository": "git://github.com/isaacs/rimraf.git", + "scripts": { + "preversion": "npm test", + "postversion": "npm publish", + "postpublish": "git push origin --follow-tags", + "test": "tap test/*.js" + }, + "bin": "./bin.js", + "dependencies": { + "glob": "^7.1.3" + }, + "files": [ + "LICENSE", + "README.md", + "bin.js", + "rimraf.js" + ], + "devDependencies": { + "mkdirp": "^0.5.1", + "tap": "^12.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } +} diff --git a/_extensions/d2/node_modules/rimraf/rimraf.js b/_extensions/d2/node_modules/rimraf/rimraf.js new file mode 100644 index 00000000..34da4171 --- /dev/null +++ b/_extensions/d2/node_modules/rimraf/rimraf.js @@ -0,0 +1,360 @@ +const assert = require("assert") +const path = require("path") +const fs = require("fs") +let glob = undefined +try { + glob = require("glob") +} catch (_err) { + // treat glob as optional. 
+} + +const defaultGlobOpts = { + nosort: true, + silent: true +} + +// for EMFILE handling +let timeout = 0 + +const isWindows = (process.platform === "win32") + +const defaults = options => { + const methods = [ + 'unlink', + 'chmod', + 'stat', + 'lstat', + 'rmdir', + 'readdir' + ] + methods.forEach(m => { + options[m] = options[m] || fs[m] + m = m + 'Sync' + options[m] = options[m] || fs[m] + }) + + options.maxBusyTries = options.maxBusyTries || 3 + options.emfileWait = options.emfileWait || 1000 + if (options.glob === false) { + options.disableGlob = true + } + if (options.disableGlob !== true && glob === undefined) { + throw Error('glob dependency not found, set `options.disableGlob = true` if intentional') + } + options.disableGlob = options.disableGlob || false + options.glob = options.glob || defaultGlobOpts +} + +const rimraf = (p, options, cb) => { + if (typeof options === 'function') { + cb = options + options = {} + } + + assert(p, 'rimraf: missing path') + assert.equal(typeof p, 'string', 'rimraf: path should be a string') + assert.equal(typeof cb, 'function', 'rimraf: callback function required') + assert(options, 'rimraf: invalid options argument provided') + assert.equal(typeof options, 'object', 'rimraf: options should be object') + + defaults(options) + + let busyTries = 0 + let errState = null + let n = 0 + + const next = (er) => { + errState = errState || er + if (--n === 0) + cb(errState) + } + + const afterGlob = (er, results) => { + if (er) + return cb(er) + + n = results.length + if (n === 0) + return cb() + + results.forEach(p => { + const CB = (er) => { + if (er) { + if ((er.code === "EBUSY" || er.code === "ENOTEMPTY" || er.code === "EPERM") && + busyTries < options.maxBusyTries) { + busyTries ++ + // try again, with the same exact callback as this one. + return setTimeout(() => rimraf_(p, options, CB), busyTries * 100) + } + + // this one won't happen if graceful-fs is used. + if (er.code === "EMFILE" && timeout < options.emfileWait) { + return setTimeout(() => rimraf_(p, options, CB), timeout ++) + } + + // already gone + if (er.code === "ENOENT") er = null + } + + timeout = 0 + next(er) + } + rimraf_(p, options, CB) + }) + } + + if (options.disableGlob || !glob.hasMagic(p)) + return afterGlob(null, [p]) + + options.lstat(p, (er, stat) => { + if (!er) + return afterGlob(null, [p]) + + glob(p, options.glob, afterGlob) + }) + +} + +// Two possible strategies. +// 1. Assume it's a file. unlink it, then do the dir stuff on EPERM or EISDIR +// 2. Assume it's a directory. readdir, then do the file stuff on ENOTDIR +// +// Both result in an extra syscall when you guess wrong. However, there +// are likely far more normal files in the world than directories. This +// is based on the assumption that a the average number of files per +// directory is >= 1. +// +// If anyone ever complains about this, then I guess the strategy could +// be made configurable somehow. But until then, YAGNI. +const rimraf_ = (p, options, cb) => { + assert(p) + assert(options) + assert(typeof cb === 'function') + + // sunos lets the root user unlink directories, which is... weird. + // so we have to lstat here and make sure it's not a dir. + options.lstat(p, (er, st) => { + if (er && er.code === "ENOENT") + return cb(null) + + // Windows can EPERM on stat. Life is suffering. 
+ if (er && er.code === "EPERM" && isWindows) + fixWinEPERM(p, options, er, cb) + + if (st && st.isDirectory()) + return rmdir(p, options, er, cb) + + options.unlink(p, er => { + if (er) { + if (er.code === "ENOENT") + return cb(null) + if (er.code === "EPERM") + return (isWindows) + ? fixWinEPERM(p, options, er, cb) + : rmdir(p, options, er, cb) + if (er.code === "EISDIR") + return rmdir(p, options, er, cb) + } + return cb(er) + }) + }) +} + +const fixWinEPERM = (p, options, er, cb) => { + assert(p) + assert(options) + assert(typeof cb === 'function') + + options.chmod(p, 0o666, er2 => { + if (er2) + cb(er2.code === "ENOENT" ? null : er) + else + options.stat(p, (er3, stats) => { + if (er3) + cb(er3.code === "ENOENT" ? null : er) + else if (stats.isDirectory()) + rmdir(p, options, er, cb) + else + options.unlink(p, cb) + }) + }) +} + +const fixWinEPERMSync = (p, options, er) => { + assert(p) + assert(options) + + try { + options.chmodSync(p, 0o666) + } catch (er2) { + if (er2.code === "ENOENT") + return + else + throw er + } + + let stats + try { + stats = options.statSync(p) + } catch (er3) { + if (er3.code === "ENOENT") + return + else + throw er + } + + if (stats.isDirectory()) + rmdirSync(p, options, er) + else + options.unlinkSync(p) +} + +const rmdir = (p, options, originalEr, cb) => { + assert(p) + assert(options) + assert(typeof cb === 'function') + + // try to rmdir first, and only readdir on ENOTEMPTY or EEXIST (SunOS) + // if we guessed wrong, and it's not a directory, then + // raise the original error. + options.rmdir(p, er => { + if (er && (er.code === "ENOTEMPTY" || er.code === "EEXIST" || er.code === "EPERM")) + rmkids(p, options, cb) + else if (er && er.code === "ENOTDIR") + cb(originalEr) + else + cb(er) + }) +} + +const rmkids = (p, options, cb) => { + assert(p) + assert(options) + assert(typeof cb === 'function') + + options.readdir(p, (er, files) => { + if (er) + return cb(er) + let n = files.length + if (n === 0) + return options.rmdir(p, cb) + let errState + files.forEach(f => { + rimraf(path.join(p, f), options, er => { + if (errState) + return + if (er) + return cb(errState = er) + if (--n === 0) + options.rmdir(p, cb) + }) + }) + }) +} + +// this looks simpler, and is strictly *faster*, but will +// tie up the JavaScript thread and fail on excessively +// deep directory trees. +const rimrafSync = (p, options) => { + options = options || {} + defaults(options) + + assert(p, 'rimraf: missing path') + assert.equal(typeof p, 'string', 'rimraf: path should be a string') + assert(options, 'rimraf: missing options') + assert.equal(typeof options, 'object', 'rimraf: options should be object') + + let results + + if (options.disableGlob || !glob.hasMagic(p)) { + results = [p] + } else { + try { + options.lstatSync(p) + results = [p] + } catch (er) { + results = glob.sync(p, options.glob) + } + } + + if (!results.length) + return + + for (let i = 0; i < results.length; i++) { + const p = results[i] + + let st + try { + st = options.lstatSync(p) + } catch (er) { + if (er.code === "ENOENT") + return + + // Windows can EPERM on stat. Life is suffering. + if (er.code === "EPERM" && isWindows) + fixWinEPERMSync(p, options, er) + } + + try { + // sunos lets the root user unlink directories, which is... weird. + if (st && st.isDirectory()) + rmdirSync(p, options, null) + else + options.unlinkSync(p) + } catch (er) { + if (er.code === "ENOENT") + return + if (er.code === "EPERM") + return isWindows ? 
fixWinEPERMSync(p, options, er) : rmdirSync(p, options, er) + if (er.code !== "EISDIR") + throw er + + rmdirSync(p, options, er) + } + } +} + +const rmdirSync = (p, options, originalEr) => { + assert(p) + assert(options) + + try { + options.rmdirSync(p) + } catch (er) { + if (er.code === "ENOENT") + return + if (er.code === "ENOTDIR") + throw originalEr + if (er.code === "ENOTEMPTY" || er.code === "EEXIST" || er.code === "EPERM") + rmkidsSync(p, options) + } +} + +const rmkidsSync = (p, options) => { + assert(p) + assert(options) + options.readdirSync(p).forEach(f => rimrafSync(path.join(p, f), options)) + + // We only end up here once we got ENOTEMPTY at least once, and + // at this point, we are guaranteed to have removed all the kids. + // So, we know that it won't be ENOENT or ENOTDIR or anything else. + // try really hard to delete stuff on windows, because it has a + // PROFOUNDLY annoying habit of not closing handles promptly when + // files are deleted, resulting in spurious ENOTEMPTY errors. + const retries = isWindows ? 100 : 1 + let i = 0 + do { + let threw = true + try { + const ret = options.rmdirSync(p, options) + threw = false + return ret + } finally { + if (++i < retries && threw) + continue + } + } while (true) +} + +module.exports = rimraf +rimraf.sync = rimrafSync diff --git a/_extensions/d2/node_modules/sade/index.d.ts b/_extensions/d2/node_modules/sade/index.d.ts new file mode 100644 index 00000000..4a269ecc --- /dev/null +++ b/_extensions/d2/node_modules/sade/index.d.ts @@ -0,0 +1,37 @@ +import type * as mri from 'mri'; + +type Arrayable = T | T[]; + +declare function sade(usage: string, isSingle?: boolean): sade.Sade; + +declare namespace sade { + export type Handler = (...args: any[]) => any; + export type Value = number | string | boolean | null; + + export interface LazyOutput { + name: string; + handler: Handler; + args: string[]; + } + + export interface Sade { + command(usage: string, description?: string, options?: { + alias?: Arrayable; + default?: boolean; + }): Sade; + + option(flag: string, description?: string, value?: Value): Sade; + action(handler: Handler): Sade; + describe(text: Arrayable): Sade; + alias(...names: string[]): Sade; + example(usage: string): Sade; + + parse(arr: string[], opts: { lazy: true } & mri.Options): LazyOutput; + parse(arr: string[], opts?: { lazy?: boolean } & mri.Options): void; + + version(value: string): Sade; + help(cmd?: string): void; + } +} + +export = sade; diff --git a/_extensions/d2/node_modules/sade/lib/index.js b/_extensions/d2/node_modules/sade/lib/index.js new file mode 100644 index 00000000..146fb979 --- /dev/null +++ b/_extensions/d2/node_modules/sade/lib/index.js @@ -0,0 +1 @@ +const e=require("mri"),t="__all__",i="__default__",s="\n";function r(e){if(!e.length)return"";let t=function(e){let t=0,i=0,s=0,r=e.length;if(r)for(;r--;)i=e[r].length,i>t&&(s=r,t=i);return e[s].length}(e.map(e=>e[0]))+4;return e.map(e=>e[0]+" ".repeat(t-e[0].length)+e[1]+(null==e[2]?"":` (default ${e[2]})`))}function n(e){return e}function l(e,t,i){if(!t||!t.length)return"";let r=0,n="";for(n+="\n "+e;r0,this.bin=r,this.ver="0.0.0",this.default="",this.tree={},this.command(t),this.command([i].concat(s?n:"").join(" ")),this.single=s,this.curr=""}command(e,t,i={}){if(this.single)throw new Error('Disable "single" mode to add commands');let s=[],r=[],n=/(\[|<)/;if(e.split(/\s+/).forEach(e=>{(n.test(e.charAt(0))?r:s).push(e)}),s=s.join(" "),s in this.tree)throw new Error("Command already exists: "+s);return 
s.includes("__")||r.unshift(s),r=r.join(" "),this.curr=s,i.default&&(this.default=s),this.tree[s]={usage:r,alibi:[],options:[],alias:{},default:{},examples:[]},i.alias&&this.alias(i.alias),t&&this.describe(t),this}describe(e){return this.tree[this.curr||i].describe=Array.isArray(e)?e:function(e){return(e||"").replace(/([.?!])\s*(?=[A-Z])/g,"$1|").split("|")}(e),this}alias(...e){if(this.single)throw new Error('Cannot call `alias()` in "single" mode');if(!this.curr)throw new Error("Cannot call `alias()` before defining a command");return(this.tree[this.curr].alibi=this.tree[this.curr].alibi.concat(...e)).forEach(e=>this.tree[e]=this.curr),this}option(e,i,s){let r=this.tree[this.curr||t],[n,l]=function(e){return(e||"").split(/^-{1,2}|,|\s+-{1,2}|\s+/).filter(Boolean)}(e);if(l&&l.length>1&&([n,l]=[l,n]),e="--"+n,l&&l.length>0){e=`-${l}, ${e}`;let t=r.alias[l];r.alias[l]=(t||[]).concat(n)}let a=[e,i||""];return void 0!==s?(a.push(s),r.default[n]=s):l||(r.default[n]=void 0),r.options.push(a),this}action(e){return this.tree[this.curr||i].handler=e,this}example(e){return this.tree[this.curr||i].examples.push(e),this}version(e){return this.ver=e,this}parse(s,r={}){s=s.slice();let n,l,o,h,u=2,c=e(s.slice(u),{alias:{h:"help",v:"version"}}),f=this.single,p=this.bin,d="";if(f)h=this.tree[i];else{let e,t=1,i=c._.length+1;for(;t"<"===e.charAt(0)),v=m._.splice(0,_.length);if(v.length<_.length)return d&&(p+=" "+d),a(p,"Insufficient arguments!");b.filter(e=>"["===e.charAt(0)).forEach(e=>{v.push(m._.shift())}),v.push(m);let $=h.handler;return r.lazy?{args:v,name:d,handler:$}:$.apply(null,v)}help(e){console.log(function(e,a,o,h){let u="",c=a[o],f="$ "+e,p=a[t],d=e=>`${f} ${e}`.replace(/\s+/g," "),g=[["-h, --help","Displays this message"]];if(o===i&&g.unshift(["-v, --version","Displays current version"]),c.options=(c.options||[]).concat(p.options,g),c.options.length>0&&(c.usage+=" [options]"),u+=l("Description",c.describe,n),u+=l("Usage",[c.usage],d),h||o!==i)h||o===i||(u+=l("Aliases",c.alibi,d));else{let e,t=/^__/,i="",o=[];for(e in a)"string"==typeof a[e]||t.test(e)||o.push([e,(a[e].describe||[""])[0]])<3&&(i+=`\n ${f} ${e} --help`);u+=l("Available Commands",r(o),n),u+="\n For more info, run any command with the `--help` flag"+i+s}return u+=l("Options",r(c.options),n),u+=l("Examples",c.examples.map(d),n),u}(this.bin,this.tree,e||i,this.single))}_version(){console.log(`${this.bin}, ${this.ver}`)}}module.exports=(e,t)=>new o(e,t); diff --git a/_extensions/d2/node_modules/sade/lib/index.mjs b/_extensions/d2/node_modules/sade/lib/index.mjs new file mode 100644 index 00000000..76d35dc1 --- /dev/null +++ b/_extensions/d2/node_modules/sade/lib/index.mjs @@ -0,0 +1 @@ +import e from"mri";const t="__all__",i="__default__",s="\n";function r(e){if(!e.length)return"";let t=function(e){let t=0,i=0,s=0,r=e.length;if(r)for(;r--;)i=e[r].length,i>t&&(s=r,t=i);return e[s].length}(e.map(e=>e[0]))+4;return e.map(e=>e[0]+" ".repeat(t-e[0].length)+e[1]+(null==e[2]?"":` (default ${e[2]})`))}function n(e){return e}function l(e,t,i){if(!t||!t.length)return"";let r=0,n="";for(n+="\n "+e;r0,this.bin=r,this.ver="0.0.0",this.default="",this.tree={},this.command(t),this.command([i].concat(s?n:"").join(" ")),this.single=s,this.curr=""}command(e,t,i={}){if(this.single)throw new Error('Disable "single" mode to add commands');let s=[],r=[],n=/(\[|<)/;if(e.split(/\s+/).forEach(e=>{(n.test(e.charAt(0))?r:s).push(e)}),s=s.join(" "),s in this.tree)throw new Error("Command already exists: "+s);return s.includes("__")||r.unshift(s),r=r.join(" 
"),this.curr=s,i.default&&(this.default=s),this.tree[s]={usage:r,alibi:[],options:[],alias:{},default:{},examples:[]},i.alias&&this.alias(i.alias),t&&this.describe(t),this}describe(e){return this.tree[this.curr||i].describe=Array.isArray(e)?e:function(e){return(e||"").replace(/([.?!])\s*(?=[A-Z])/g,"$1|").split("|")}(e),this}alias(...e){if(this.single)throw new Error('Cannot call `alias()` in "single" mode');if(!this.curr)throw new Error("Cannot call `alias()` before defining a command");return(this.tree[this.curr].alibi=this.tree[this.curr].alibi.concat(...e)).forEach(e=>this.tree[e]=this.curr),this}option(e,i,s){let r=this.tree[this.curr||t],[n,l]=function(e){return(e||"").split(/^-{1,2}|,|\s+-{1,2}|\s+/).filter(Boolean)}(e);if(l&&l.length>1&&([n,l]=[l,n]),e="--"+n,l&&l.length>0){e=`-${l}, ${e}`;let t=r.alias[l];r.alias[l]=(t||[]).concat(n)}let a=[e,i||""];return void 0!==s?(a.push(s),r.default[n]=s):l||(r.default[n]=void 0),r.options.push(a),this}action(e){return this.tree[this.curr||i].handler=e,this}example(e){return this.tree[this.curr||i].examples.push(e),this}version(e){return this.ver=e,this}parse(s,r={}){s=s.slice();let n,l,o,h,u=2,f=e(s.slice(u),{alias:{h:"help",v:"version"}}),c=this.single,p=this.bin,d="";if(c)h=this.tree[i];else{let e,t=1,i=f._.length+1;for(;t"<"===e.charAt(0)),v=m._.splice(0,_.length);if(v.length<_.length)return d&&(p+=" "+d),a(p,"Insufficient arguments!");b.filter(e=>"["===e.charAt(0)).forEach(e=>{v.push(m._.shift())}),v.push(m);let $=h.handler;return r.lazy?{args:v,name:d,handler:$}:$.apply(null,v)}help(e){console.log(function(e,a,o,h){let u="",f=a[o],c="$ "+e,p=a[t],d=e=>`${c} ${e}`.replace(/\s+/g," "),g=[["-h, --help","Displays this message"]];if(o===i&&g.unshift(["-v, --version","Displays current version"]),f.options=(f.options||[]).concat(p.options,g),f.options.length>0&&(f.usage+=" [options]"),u+=l("Description",f.describe,n),u+=l("Usage",[f.usage],d),h||o!==i)h||o===i||(u+=l("Aliases",f.alibi,d));else{let e,t=/^__/,i="",o=[];for(e in a)"string"==typeof a[e]||t.test(e)||o.push([e,(a[e].describe||[""])[0]])<3&&(i+=`\n ${c} ${e} --help`);u+=l("Available Commands",r(o),n),u+="\n For more info, run any command with the `--help` flag"+i+s}return u+=l("Options",r(f.options),n),u+=l("Examples",f.examples.map(d),n),u}(this.bin,this.tree,e||i,this.single))}_version(){console.log(`${this.bin}, ${this.ver}`)}}export default(e,t)=>new o(e,t); diff --git a/_extensions/d2/node_modules/sade/license b/_extensions/d2/node_modules/sade/license new file mode 100644 index 00000000..d46889ae --- /dev/null +++ b/_extensions/d2/node_modules/sade/license @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Luke Edwards (https://lukeed.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/_extensions/d2/node_modules/sade/package.json b/_extensions/d2/node_modules/sade/package.json new file mode 100644 index 00000000..bb40f10d --- /dev/null +++ b/_extensions/d2/node_modules/sade/package.json @@ -0,0 +1,45 @@ +{ + "name": "sade", + "version": "1.8.1", + "description": "Smooth (CLI) operator 🎶", + "repository": "lukeed/sade", + "module": "lib/index.mjs", + "main": "lib/index.js", + "types": "index.d.ts", + "license": "MIT", + "files": [ + "*.d.ts", + "lib" + ], + "author": { + "name": "Luke Edwards", + "email": "luke.edwards05@gmail.com", + "url": "https://lukeed.com" + }, + "scripts": { + "build": "rollup -c", + "test": "tape -r esm test/*.js | tap-spec" + }, + "dependencies": { + "mri": "^1.1.0" + }, + "engines": { + "node": ">=6" + }, + "keywords": [ + "cli", + "cli-app", + "commander", + "arguments", + "parser", + "yargs", + "argv" + ], + "devDependencies": { + "esm": "3.2.25", + "rollup": "1.32.1", + "tap-spec": "4.1.2", + "tape": "4.14.0", + "terser": "4.8.0" + } +} diff --git a/_extensions/d2/node_modules/sade/readme.md b/_extensions/d2/node_modules/sade/readme.md new file mode 100644 index 00000000..fb8f70c5 --- /dev/null +++ b/_extensions/d2/node_modules/sade/readme.md @@ -0,0 +1,672 @@ +# sade [![Build Status](https://travis-ci.org/lukeed/sade.svg?branch=master)](https://travis-ci.org/lukeed/sade) + +> Smooth (CLI) Operator 🎶 + +Sade is a small but powerful tool for building command-line interface (CLI) applications for Node.js that are fast, responsive, and helpful! + +It enables default commands, git-like subcommands, option flags with aliases, default option values with type-casting, required-vs-optional argument handling, command validation, and automated help text generation! + +Your app's UX will be as smooth as butter... just like [Sade's voice](https://www.youtube.com/watch?v=4TYv2PhG89A). 😉 + + +## Install + +``` +$ npm install --save sade +``` + + +## Usage + +***Input:*** + +```js +#!/usr/bin/env node + +const sade = require('sade'); + +const prog = sade('my-cli'); + +prog + .version('1.0.5') + .option('--global, -g', 'An example global flag') + .option('-c, --config', 'Provide path to custom config', 'foo.config.js'); + +prog + .command('build ') + .describe('Build the source directory. Expects an `index.js` entry file.') + .option('-o, --output', 'Change the name of the output file', 'bundle.js') + .example('build src build --global --config my-conf.js') + .example('build app public -o main.js') + .action((src, dest, opts) => { + console.log(`> building from ${src} to ${dest}`); + console.log('> these are extra opts', opts); + }); + +prog.parse(process.argv); +``` + +***Output:*** + +```a +$ my-cli --help + + Usage + $ my-cli [options] + + Available Commands + build Build the source directory. + + For more info, run any command with the `--help` flag + $ my-cli build --help + + Options + -v, --version Displays current version + -g, --global An example global flag + -c, --config Provide path to custom config (default foo.config.js) + -h, --help Displays this message + + +$ my-cli build --help + + Description + Build the source directory. + Expects an `index.js` entry file. 
+ + Usage + $ my-cli build <src> <dest> [options] + + Options + -o, --output Change the name of the output file (default bundle.js) + -g, --global An example global flag + -c, --config Provide path to custom config (default foo.config.js) + -h, --help Displays this message + + Examples + $ my-cli build src build --global --config my-conf.js + $ my-cli build app public -o main.js +``` + +## Tips + +- **Define your global/program-wide version, options, description, and/or examples first.**
+ _Once you define a Command, you can't access the global-scope again._ + +- **Define all commands & options in the order that you want them to appear.**
+ _Sade will not mutate or sort your CLI for you. Global options print before local options._ + +- **Required arguments without values will error & exit**
+ _An `Insufficient arguments!` error will be displayed along with a help prompt._ + +- **Don't worry about manually displaying help~!**
+ _Your help text is displayed automatically... including command-specific help text!_ + +- **Automatic default/basic patterns**
+ _Usage text will always append `[options]` & `--help` and `--version` are done for you._ + +- **Only define what you want to display!**
+ _Help text sections (example, options, etc) will only display if you provide values._ + + +## Subcommands + +Subcommands are defined & parsed like any other command! When defining their [`usage`](#usage-1), everything up until the first argument (`[foo]` or `<bar>`) is interpreted as the command string. + +They should be defined in the order that you want them to appear in your general `--help` output. + +Lastly, it is _not_ necessary to define the subcommand's "base" as an additional command. However, if you choose to do so, it's recommended that you define it first for better visibility. + +```js +const prog = sade('git'); + +// Not necessary for subcommands to work, but it's here anyway! +prog + .command('remote') + .describe('Manage set of tracked repositories') + .action(opts => { + console.log('~> Print current remotes...'); + }); + +prog + .command('remote add <name> <url>', 'Demo...') + .action((name, url, opts) => { + console.log(`~> Adding a new remote (${name}) to ${url}`); + }); + +prog + .command('remote rename <old> <new>', 'Demo...') + .action((old, nxt, opts) => { + console.log(`~> Renaming from ${old} to ${nxt}~!`); + }); +``` + + +## Single Command Mode + +In certain circumstances, you may only need `sade` for a single-command CLI application. + +> **Note:** Until `v1.6.0`, this made for an awkward pairing. + +To enable this, you may make use of the [`isSingle`](#issingle) argument. Doing so allows you to pass the program's entire [`usage` text](#usage-1) into the `name` argument. + +With "Single Command Mode" enabled, your entire binary operates as one command. This means that any [`prog.command`](#progcommandusage-desc-opts) calls are disallowed & will instead throw an Error. Of course, you may still define a program version, a description, an example or two, and declare options. You are customizing the program's attributes as a whole.* + +> * This is true for multi-command applications, too, up until your first `prog.command()` call! + +***Example*** + +Let's reconstruct [`sirv-cli`](https://github.com/lukeed/sirv), which is a single-command application that (optionally) accepts a directory from which to serve files. It also offers a slew of option flags: + +```js +sade('sirv [dir]', true) + .version('1.0.0') + .describe('Run a static file server') + .example('public -qeim 31536000') + .example('--port 8080 --etag') + .example('my-app --dev') + .option('-D, --dev', 'Enable "dev" mode') + .option('-e, --etag', 'Enable "Etag" header') + // There are a lot... + .option('-H, --host', 'Hostname to bind', 'localhost') + .option('-p, --port', 'Port to bind', 5000) + .action((dir, opts) => { + // Program handler + }) + .parse(process.argv); +``` + +When `sirv --help` is run, the generated help text is trimmed, fully aware that there's only one command in this program: + +``` + Description + Run a static file server + + Usage + $ sirv [dir] [options] + + Options + -D, --dev Enable "dev" mode + -e, --etag Enable "Etag" header + -H, --host Hostname to bind (default localhost) + -p, --port Port to bind (default 5000) + -v, --version Displays current version + -h, --help Displays this message + + Examples + $ sirv public -qeim 31536000 + $ sirv --port 8080 --etag + $ sirv my-app --dev +``` + +## Command Aliases + +Command aliases are alternative names (aliases) for a command. They are often used as shortcuts or as typo relief! + +The aliased names do not appear in the general help text.
+Instead, they only appear within the Command-specific help text under an "Aliases" section. + +***Limitations*** + +* You cannot assign aliases while in [Single Command Mode](#single-command-mode) +* You cannot call [`prog.alias()`](#progaliasnames) before defining any Commands (via `prog.command()`) +* You, the developer, must keep track of which aliases have already been used and/or exist as Command names + +***Example*** + +Let's reconstruct the `npm install` command as a Sade program: + +```js +sade('npm') + // ... + .command('install [package]', 'Install a package', { + alias: ['i', 'add', 'isntall'] + }) + .option('-P, --save-prod', 'Package will appear in your dependencies.') + .option('-D, --save-dev', 'Package will appear in your devDependencies.') + .option('-O, --save-optional', 'Package will appear in your optionalDependencies') + .option('-E, --save-exact', 'Save exact versions instead of using a semver range operator') + // ... +``` + +When we run `npm --help` we'll see this general help text: + +``` + Usage + $ npm <command> [options] + + Available Commands + install Install a package + + For more info, run any command with the `--help` flag + $ npm install --help + + Options + -v, --version Displays current version + -h, --help Displays this message +``` + +When we run `npm install --help` — ***or*** the help flag with any of `install`'s aliases — we'll see this command-specific help text: + +``` + Description + Install a package + + Usage + $ npm install [package] [options] + + Aliases + $ npm i + $ npm add + $ npm isntall + + Options + -P, --save-prod Package will appear in your dependencies. + -D, --save-dev Package will appear in your devDependencies. + -O, --save-optional Package will appear in your optionalDependencies + -E, --save-exact Save exact versions instead of using a semver range operator + -h, --help Displays this message +``` + + + +## API + +### sade(name, isSingle) +Returns: `Program` + +Returns your chainable Sade instance, aka your `Program`. + +#### name +Type: `String`
+Required: `true` + +The name of your `Program` / binary application. + +#### isSingle +Type: `Boolean`
+Default: `name.includes(' ');` + +If your `Program` is meant to have ***only one command***.
+When `true`, this simplifies your generated `--help` output such that: + +* the "root-level help" is your _only_ help text +* the "root-level help" does not display an `Available Commands` section +* the "root-level help" does not inject `$ name <command>` into the `Usage` section +* the "root-level help" does not display `For more info, run any command with the `--help` flag` text + +You may customize the `Usage` of your command by modifying the `name` argument directly.
+Please read [Single Command Mode](#single-command-mode) for an example and more information. + +> **Important:** Whenever `name` includes a custom usage, then `isSingle` is automatically assumed and enforced! + +### prog.command(usage, desc, opts) + +Create a new Command for your Program. This changes the current state of your Program. + +All configuration methods (`prog.describe`, `prog.action`, etc) will apply to this Command until another Command has been created! + +#### usage + +Type: `String` + +The usage pattern for your current Command. This will be included in the general or command-specific `--help` output. + +_Required_ arguments are wrapped with `<` and `>` characters; for example, `<foo>` and `<bar>`. + +_Optional_ arguments are wrapped with `[` and `]` characters; for example, `[foo]` and `[bar]`. + +All arguments are ***positionally important***, which means they are passed to your current Command's [`handler`](#handler) function in the order that they were defined. + +When optional arguments are defined but don't receive a value, their positionally-equivalent function parameter will be `undefined`. + +> **Important:** You **must** define & expect required arguments _before_ optional arguments! + +```js +sade('foo') + + .command('greet <adjective> <noun>') + .action((adjective, noun, opts) => { + console.log(`Hello, ${adjective} ${noun}!`); + }) + + .command('drive <vehicle> [color] [speed]') + .action((vehicle, color, speed, opts) => { + let arr = ['Driving my']; + arr.push(color ? `${color} ${vehicle}` : vehicle); + speed && arr.push(`at ${speed}`); + opts.yolo && arr.push('...YOLO!!'); + let str = arr.join(' '); + console.log(str); + }); +``` + +```sh +$ foo greet beautiful person +# //=> Hello, beautiful person! + +$ foo drive car +# //=> Driving my car + +$ foo drive car red +# //=> Driving my red car + +$ foo drive car blue 100mph --yolo +# //=> Driving my blue car at 100mph ...YOLO!! +``` + + +#### desc + +Type: `String`
+Default: `''` + +The Command's description. The value is passed directly to [`prog.describe`](#progdescribetext). + +#### opts + +Type: `Object`
+Default: `{}` + +##### opts.alias +Type: `String|Array` + +Optionally define one or more aliases for the current Command.
+When declared, the `opts.alias` value is passed _directly_ to the [`prog.alias`](#progaliasnames) method. + +```js +// Program A is equivalent to Program B +// --- + +const A = sade('bin') + .command('build', 'My build command', { alias: 'b' }) + .command('watch', 'My watch command', { alias: ['w', 'dev'] }); + +const B = sade('bin') + .command('build', 'My build command').alias('b') + .command('watch', 'My watch command').alias('w', 'dev'); +``` + + +##### opts.default + +Type: `Boolean` + +Manually set/force the current Command to be the Program's default command. This ensures that the current Command will run if no command was specified. + +> **Important:** If you run your Program without a Command _and_ without specifying a default command, your Program will exit with a `No command specified` error. + +```js +const prog = sade('greet'); + +prog.command('hello'); +//=> only runs if :: `$ greet hello` + +// $ greet +//=> error: No command specified. + +prog.command('howdy', '', { default:true }); +//=> runs as `$ greet` OR `$ greet howdy` + +// $ greet +//=> runs 'howdy' handler + +// $ greet foobar +//=> error: Invalid command +``` + + +### prog.describe(text) + +Add a description to the current Command. + +#### text + +Type: `String|Array` + +The description text for the current Command. This will be included in the general or command-specific `--help` output. + +Internally, your description will be separated into an `Array` of sentences. + +For general `--help` output, ***only*** the first sentence will be displayed. However, **all sentences** will be printed for command-specific `--help` text. + +> **Note:** Pass an `Array` if you don't want internal assumptions. However, the first item is _always_ displayed in general help, so it's recommended to keep it short. + + +### prog.alias(...names) + +Define one or more aliases for the current Command. + +> **Important:** An error will be thrown if:
1) the program is in [Single Command Mode](#single-command-mode); or
2) `prog.alias` is called before any `prog.command`. + +#### names + +Type: `String` + +The list of alternative names (aliases) for the current Command.
+For example, you may want to define shortcuts and/or common typos for the Command's full name. + +> **Important:** Sade _does not_ check if the incoming `names` are already in use by other Commands or their aliases.
During conflicts, the Command with the same `name` is given priority, otherwise the first Command (according to Program order) with `name` as an alias is chosen. + +The `prog.alias()` is append-only, so calling it multiple times within a Command context will _keep_ all aliases, including those initially passed via [`opts.alias`](#optsdefault). + +```js +sade('bin') + .command('hello <name>', 'Greet someone by their name', { + alias: ['hey', 'yo'] + }) + .alias('hi', 'howdy') + .alias('hola', 'oi'); +//=> hello aliases: hey, yo, hi, howdy, hola, oi +``` + + +### prog.action(handler) + +Attach a callback to the current Command. + +#### handler + +Type: `Function` + +The function to run when the current Command is executed. + +Its parameters are based (positionally) on your Command's [`usage`](#usage-1) definition. + +All options, flags, and extra/unknown values are included as the last parameter. + +> **Note:** Optional arguments are also passed as parameters & may be `undefined`! + +```js +sade('foo') + .command('cp <src> <dest>') + .option('-f, --force', 'Overwrite without confirmation') + .option('-c, --clone-dir', 'Copy files to additional directory') + .option('-v, --verbose', 'Enable verbose output') + .action((src, dest, opts) => { + console.log(`Copying files from ${src} --> ${dest}`); + opts.c && console.log(`ALSO copying files from ${src} --> ${opts['clone-dir']}`); + console.log('My options:', opts); + }) + +// $ foo cp original my-copy -v +//=> Copying files from original --> my-copy +//=> My options: { _:[], v:true, verbose:true } + +// $ foo cp original my-copy --clone-dir my-backup +//=> Copying files from original --> my-copy +//=> ALSO copying files from original --> my-backup +//=> My options: { _:[], c:'my-backup', 'clone-dir':'my-backup' } +``` + + +### prog.example(str) + +Add an example for the current Command. + +#### str + +Type: `String` + +The example string to add. This will be included in the general or command-specific `--help` output. + +> **Note:** Your example's `str` will be prefixed with your Program's [`name`](#sadename). + + +### prog.option(flags, desc, value) + +Add an Option to the current Command. + +#### flags + +Type: `String` + +The Option's flags, which may optionally include an alias. + +You may use a comma (`,`) or a space (` `) to separate the flags. + +> **Note:** The short & long flags can be declared in any order. However, the alias will always be displayed first. + +> **Important:** If using hyphenated flag names, they will be accessible **as declared** within your [`action()`](#progactionhandler) handler! + +```js +prog.option('--global'); // no alias +prog.option('-g, --global'); // alias first, comma +prog.option('--global -g'); // alias last, space +// etc... +``` + +#### desc + +Type: `String` + +The description for the Option. + +#### value + +Type: `String` + +The **default** value for the Option. + +Flags and aliases, if parsed, are `true` by default. See [`mri`](https://github.com/lukeed/mri#minimist) for more info. + +> **Note:** You probably only want to define a default `value` if you're expecting a `String` or `Number` value type. + +If you _do_ pass a `String` or `Number` value type, your flag value will be casted to the same type. See [`mri#options.default`](https://github.com/lukeed/mri#optionsdefault) for info~! + + +### prog.version(str) + +The `--version` and `-v` flags will automatically output the Program version. + +#### str + +Type: `String`
+Default: `0.0.0` + +The new version number for your Program. + +> **Note:** Your Program `version` is `0.0.0` until you change it. + +### prog.parse(arr, opts) + +Parse a set of CLI arguments. + +#### arr + +Type: `Array` + +Your Program's `process.argv` input. + +> **Important:** Do not `.slice(2)`! Doing so will break parsing~! + +#### opts + +Type: `Object`
+Default: `{}` + +Additional `process.argv` parsing config. See [`mri`'s options](https://github.com/lukeed/mri#mriargs-options) for details. + +> **Important:** These values _override_ any internal values! + +```js +prog + .command('hello') + .option('-f, --force', 'My flag'); +//=> currently has alias pair: f <--> force + +prog.parse(process.argv, { + alias: { + f: ['foo', 'fizz'] + }, + default: { + abc: 123 + } +}); +//=> ADDS alias pair: f <--> foo +//=> REMOVES alias pair: f <--> force +//=> ADDS alias pair: f <--> fizz +//=> ADDS default: abc -> 123 (number) +``` + +#### opts.unknown + +Type: `Function`
+Default: `undefined` + +Callback to run when an unspecified option flag has been found. This is [passed directly to `mri`](https://github.com/lukeed/mri#optionsunknown). + +Your handler will receive the unknown flag (string) as its only argument.
+You may return a string, which will be used as a custom error message. Otherwise, a default message is displayed. + +```js +sade('sirv') + .command('start [dir]') + .parse(process.argv, { + unknown: arg => `Custom error message: ${arg}` + }); + +/* +$ sirv start --foobar + + ERROR + Custom error message: --foobar + + Run `$ sirv --help` for more info. +*/ +``` + +#### opts.lazy + +Type: `Boolean`
+Default: `false` + +If true, Sade will not immediately execute the `action` handler. Instead, `parse()` will return an object of `{ name, args, handler }` shape, wherein the `name` is the command name, `args` is all arguments that _would be_ passed to the action handler, and `handler` is the function itself. + +From this, you may choose when to run the `handler` function. You also have the option to further modify the `args` for any reason, if needed. + +```js +let { name, args, handler } = prog.parse(process.argv, { lazy:true }); +console.log('> Received command: ', name); + +// later on... +handler.apply(null, args); +``` + +### prog.help(cmd) + +Manually display the help text for a given command. If no command name is provided, the general/global help is printed. + +Your general and command-specific help text is automatically attached to the `--help` and `-h` flags. + +> **Note:** You don't have to call this directly! It's automatically run when you `bin --help` + +#### cmd +Type: `String`
+Default: `null` + +The name of the command for which to display help. Otherwise displays the general help. + + +## License + +MIT © [Luke Edwards](https://lukeed.com) diff --git a/_extensions/d2/node_modules/tmp/CHANGELOG.md b/_extensions/d2/node_modules/tmp/CHANGELOG.md new file mode 100644 index 00000000..0aa54882 --- /dev/null +++ b/_extensions/d2/node_modules/tmp/CHANGELOG.md @@ -0,0 +1,288 @@ + + +## v0.2.1 (2020-04-28) + +#### :rocket: Enhancement +* [#252](https://github.com/raszi/node-tmp/pull/252) Closes [#250](https://github.com/raszi/node-tmp/issues/250): introduce tmpdir option for overriding the system tmp dir ([@silkentrance](https://github.com/silkentrance)) + +#### :house: Internal +* [#253](https://github.com/raszi/node-tmp/pull/253) Closes [#191](https://github.com/raszi/node-tmp/issues/191): generate changelog from pull requests using lerna-changelog ([@silkentrance](https://github.com/silkentrance)) + +#### Committers: 1 +- Carsten Klein ([@silkentrance](https://github.com/silkentrance)) + + +## v0.2.0 (2020-04-25) + +#### :rocket: Enhancement +* [#234](https://github.com/raszi/node-tmp/pull/234) feat: stabilize tmp for v0.2.0 release ([@silkentrance](https://github.com/silkentrance)) + +#### :bug: Bug Fix +* [#231](https://github.com/raszi/node-tmp/pull/231) Closes [#230](https://github.com/raszi/node-tmp/issues/230): regression after fix for #197 ([@silkentrance](https://github.com/silkentrance)) +* [#220](https://github.com/raszi/node-tmp/pull/220) Closes [#197](https://github.com/raszi/node-tmp/issues/197): return sync callback when using the sync interface, otherwise return the async callback ([@silkentrance](https://github.com/silkentrance)) +* [#193](https://github.com/raszi/node-tmp/pull/193) Closes [#192](https://github.com/raszi/node-tmp/issues/192): tmp must not exit the process on its own ([@silkentrance](https://github.com/silkentrance)) + +#### :memo: Documentation +* [#221](https://github.com/raszi/node-tmp/pull/221) Gh 206 document name option ([@silkentrance](https://github.com/silkentrance)) + +#### :house: Internal +* [#226](https://github.com/raszi/node-tmp/pull/226) Closes [#212](https://github.com/raszi/node-tmp/issues/212): enable direct name option test ([@silkentrance](https://github.com/silkentrance)) +* [#225](https://github.com/raszi/node-tmp/pull/225) Closes [#211](https://github.com/raszi/node-tmp/issues/211): existing tests must clean up after themselves ([@silkentrance](https://github.com/silkentrance)) +* [#224](https://github.com/raszi/node-tmp/pull/224) Closes [#217](https://github.com/raszi/node-tmp/issues/217): name tests must use tmpName ([@silkentrance](https://github.com/silkentrance)) +* [#223](https://github.com/raszi/node-tmp/pull/223) Closes [#214](https://github.com/raszi/node-tmp/issues/214): refactor tests and lib ([@silkentrance](https://github.com/silkentrance)) +* [#198](https://github.com/raszi/node-tmp/pull/198) Update dependencies to latest versions ([@matsev](https://github.com/matsev)) + +#### Committers: 2 +- Carsten Klein ([@silkentrance](https://github.com/silkentrance)) +- Mattias Severson ([@matsev](https://github.com/matsev)) + + +## v0.1.0 (2019-03-20) + +#### :rocket: Enhancement +* [#177](https://github.com/raszi/node-tmp/pull/177) fix: fail early if there is no tmp dir specified ([@silkentrance](https://github.com/silkentrance)) +* [#159](https://github.com/raszi/node-tmp/pull/159) Closes [#121](https://github.com/raszi/node-tmp/issues/121) ([@silkentrance](https://github.com/silkentrance)) +* 
[#161](https://github.com/raszi/node-tmp/pull/161) Closes [#155](https://github.com/raszi/node-tmp/issues/155) ([@silkentrance](https://github.com/silkentrance)) +* [#166](https://github.com/raszi/node-tmp/pull/166) fix: avoid relying on Node’s internals ([@addaleax](https://github.com/addaleax)) +* [#144](https://github.com/raszi/node-tmp/pull/144) prepend opts.dir || tmpDir to template if no path is given ([@silkentrance](https://github.com/silkentrance)) + +#### :bug: Bug Fix +* [#183](https://github.com/raszi/node-tmp/pull/183) Closes [#182](https://github.com/raszi/node-tmp/issues/182) fileSync takes empty string postfix option ([@gutte](https://github.com/gutte)) +* [#130](https://github.com/raszi/node-tmp/pull/130) Closes [#129](https://github.com/raszi/node-tmp/issues/129) install process listeners safely ([@silkentrance](https://github.com/silkentrance)) + +#### :memo: Documentation +* [#188](https://github.com/raszi/node-tmp/pull/188) HOTCloses [#187](https://github.com/raszi/node-tmp/issues/187): restore behaviour for #182 ([@silkentrance](https://github.com/silkentrance)) +* [#180](https://github.com/raszi/node-tmp/pull/180) fix gh-179: template no longer accepts arbitrary paths ([@silkentrance](https://github.com/silkentrance)) +* [#175](https://github.com/raszi/node-tmp/pull/175) docs: add `unsafeCleanup` option to jsdoc ([@kerimdzhanov](https://github.com/kerimdzhanov)) +* [#151](https://github.com/raszi/node-tmp/pull/151) docs: fix link to tmp-promise ([@silkentrance](https://github.com/silkentrance)) + +#### :house: Internal +* [#184](https://github.com/raszi/node-tmp/pull/184) test: add missing tests for #182 ([@silkentrance](https://github.com/silkentrance)) +* [#171](https://github.com/raszi/node-tmp/pull/171) chore: drop old NodeJS support ([@poppinlp](https://github.com/poppinlp)) +* [#170](https://github.com/raszi/node-tmp/pull/170) chore: update dependencies ([@raszi](https://github.com/raszi)) +* [#165](https://github.com/raszi/node-tmp/pull/165) test: add missing tests ([@raszi](https://github.com/raszi)) +* [#163](https://github.com/raszi/node-tmp/pull/163) chore: add lint npm task ([@raszi](https://github.com/raszi)) +* [#107](https://github.com/raszi/node-tmp/pull/107) chore: add coverage report ([@raszi](https://github.com/raszi)) +* [#141](https://github.com/raszi/node-tmp/pull/141) test: refactor tests for mocha ([@silkentrance](https://github.com/silkentrance)) +* [#154](https://github.com/raszi/node-tmp/pull/154) chore: change Travis configuration ([@raszi](https://github.com/raszi)) +* [#152](https://github.com/raszi/node-tmp/pull/152) fix: drop Node v0.6.0 ([@raszi](https://github.com/raszi)) + +#### Committers: 6 +- Anna Henningsen ([@addaleax](https://github.com/addaleax)) +- Carsten Klein ([@silkentrance](https://github.com/silkentrance)) +- Dan Kerimdzhanov ([@kerimdzhanov](https://github.com/kerimdzhanov)) +- Gustav Klingstedt ([@gutte](https://github.com/gutte)) +- KARASZI István ([@raszi](https://github.com/raszi)) +- PoppinL ([@poppinlp](https://github.com/poppinlp)) + + +## v0.0.33 (2017-08-12) + +#### :rocket: Enhancement +* [#147](https://github.com/raszi/node-tmp/pull/147) fix: with name option try at most once to get a unique tmp name ([@silkentrance](https://github.com/silkentrance)) + +#### :bug: Bug Fix +* [#149](https://github.com/raszi/node-tmp/pull/149) fix(fileSync): must honor detachDescriptor and discardDescriptor options ([@silkentrance](https://github.com/silkentrance)) +* [#119](https://github.com/raszi/node-tmp/pull/119) Closes 
[#115](https://github.com/raszi/node-tmp/issues/115) ([@silkentrance](https://github.com/silkentrance)) + +#### :memo: Documentation +* [#128](https://github.com/raszi/node-tmp/pull/128) Closes [#127](https://github.com/raszi/node-tmp/issues/127) add reference to tmp-promise ([@silkentrance](https://github.com/silkentrance)) + +#### :house: Internal +* [#135](https://github.com/raszi/node-tmp/pull/135) Closes [#133](https://github.com/raszi/node-tmp/issues/133), #134 ([@silkentrance](https://github.com/silkentrance)) +* [#123](https://github.com/raszi/node-tmp/pull/123) docs: update tmp.js MIT license header to 2017 ([@madnight](https://github.com/madnight)) +* [#122](https://github.com/raszi/node-tmp/pull/122) chore: add issue template ([@silkentrance](https://github.com/silkentrance)) + +#### Committers: 2 +- Carsten Klein ([@silkentrance](https://github.com/silkentrance)) +- Fabian Beuke ([@madnight](https://github.com/madnight)) + + +## v0.0.32 (2017-03-24) + +#### :memo: Documentation +* [#106](https://github.com/raszi/node-tmp/pull/106) doc: add proper JSDoc documentation ([@raszi](https://github.com/raszi)) + +#### :house: Internal +* [#111](https://github.com/raszi/node-tmp/pull/111) test: add Windows tests ([@binki](https://github.com/binki)) +* [#110](https://github.com/raszi/node-tmp/pull/110) chore: add AppVeyor ([@binki](https://github.com/binki)) +* [#105](https://github.com/raszi/node-tmp/pull/105) chore: use const where possible ([@raszi](https://github.com/raszi)) +* [#104](https://github.com/raszi/node-tmp/pull/104) style: fix various style issues ([@raszi](https://github.com/raszi)) + +#### Committers: 2 +- KARASZI István ([@raszi](https://github.com/raszi)) +- Nathan Phillip Brink ([@binki](https://github.com/binki)) + + +## v0.0.31 (2016-11-21) + +#### :rocket: Enhancement +* [#99](https://github.com/raszi/node-tmp/pull/99) feat: add next callback functionality ([@silkentrance](https://github.com/silkentrance)) +* [#94](https://github.com/raszi/node-tmp/pull/94) feat: add options to control descriptor management ([@pabigot](https://github.com/pabigot)) + +#### :house: Internal +* [#101](https://github.com/raszi/node-tmp/pull/101) fix: Include files in the package.json ([@raszi](https://github.com/raszi)) + +#### Committers: 3 +- Carsten Klein ([@silkentrance](https://github.com/silkentrance)) +- KARASZI István ([@raszi](https://github.com/raszi)) +- Peter A. 
Bigot ([@pabigot](https://github.com/pabigot)) + + +## v0.0.30 (2016-11-01) + +#### :bug: Bug Fix +* [#96](https://github.com/raszi/node-tmp/pull/96) fix: constants for Node 6 ([@jnj16180340](https://github.com/jnj16180340)) +* [#98](https://github.com/raszi/node-tmp/pull/98) fix: garbage collector ([@Ari-H](https://github.com/Ari-H)) + +#### Committers: 2 +- Nate Johnson ([@jnj16180340](https://github.com/jnj16180340)) +- [@Ari-H](https://github.com/Ari-H) + + +## v0.0.29 (2016-09-18) + +#### :rocket: Enhancement +* [#87](https://github.com/raszi/node-tmp/pull/87) fix: replace calls to deprecated fs API functions ([@OlliV](https://github.com/OlliV)) + +#### :bug: Bug Fix +* [#70](https://github.com/raszi/node-tmp/pull/70) fix: prune `_removeObjects` correctly ([@joliss](https://github.com/joliss)) +* [#71](https://github.com/raszi/node-tmp/pull/71) Fix typo ([@gcampax](https://github.com/gcampax)) + +#### :memo: Documentation +* [#77](https://github.com/raszi/node-tmp/pull/77) docs: change mkstemps to mkstemp ([@thefourtheye](https://github.com/thefourtheye)) + +#### :house: Internal +* [#92](https://github.com/raszi/node-tmp/pull/92) chore: add Travis CI support for Node 6 ([@amilajack](https://github.com/amilajack)) +* [#79](https://github.com/raszi/node-tmp/pull/79) fix: remove unneeded require statement ([@whmountains](https://github.com/whmountains)) + +#### Committers: 6 +- Amila Welihinda ([@amilajack](https://github.com/amilajack)) +- Caleb Whiting ([@whmountains](https://github.com/whmountains)) +- Giovanni Campagna ([@gcampax](https://github.com/gcampax)) +- Jo Liss ([@joliss](https://github.com/joliss)) +- Olli Vanhoja ([@OlliV](https://github.com/OlliV)) +- Sakthipriyan Vairamani ([@thefourtheye](https://github.com/thefourtheye)) + + +## v0.0.28 (2015-09-27) + +#### :bug: Bug Fix +* [#63](https://github.com/raszi/node-tmp/pull/63) fix: delete for _rmdirRecursiveSync ([@voltrevo](https://github.com/voltrevo)) + +#### :memo: Documentation +* [#64](https://github.com/raszi/node-tmp/pull/64) docs: fix typo in the README ([@JTKnox91](https://github.com/JTKnox91)) + +#### :house: Internal +* [#67](https://github.com/raszi/node-tmp/pull/67) test: add node v4.0 v4.1 to travis config ([@raszi](https://github.com/raszi)) +* [#66](https://github.com/raszi/node-tmp/pull/66) chore(deps): update deps ([@raszi](https://github.com/raszi)) + +#### Committers: 3 +- Andrew Morris ([@voltrevo](https://github.com/voltrevo)) +- John T. 
Knox ([@JTKnox91](https://github.com/JTKnox91)) +- KARASZI István ([@raszi](https://github.com/raszi)) + + +## v0.0.27 (2015-08-15) + +#### :bug: Bug Fix +* [#60](https://github.com/raszi/node-tmp/pull/60) fix: unlinking when the file has been already removed ([@silkentrance](https://github.com/silkentrance)) + +#### :memo: Documentation +* [#55](https://github.com/raszi/node-tmp/pull/55) docs(README): update README ([@raszi](https://github.com/raszi)) + +#### :house: Internal +* [#56](https://github.com/raszi/node-tmp/pull/56) style(jshint): fix JSHint error ([@raszi](https://github.com/raszi)) +* [#53](https://github.com/raszi/node-tmp/pull/53) chore: update license attribute ([@pdehaan](https://github.com/pdehaan)) + +#### Committers: 3 +- Carsten Klein ([@silkentrance](https://github.com/silkentrance)) +- KARASZI István ([@raszi](https://github.com/raszi)) +- Peter deHaan ([@pdehaan](https://github.com/pdehaan)) + + +## v0.0.26 (2015-05-12) + +#### :rocket: Enhancement +* [#40](https://github.com/raszi/node-tmp/pull/40) Fix for #39 ([@silkentrance](https://github.com/silkentrance)) +* [#42](https://github.com/raszi/node-tmp/pull/42) Fix for #17 ([@silkentrance](https://github.com/silkentrance)) +* [#41](https://github.com/raszi/node-tmp/pull/41) Fix for #37 ([@silkentrance](https://github.com/silkentrance)) +* [#32](https://github.com/raszi/node-tmp/pull/32) add ability to customize file/dir names ([@shime](https://github.com/shime)) +* [#29](https://github.com/raszi/node-tmp/pull/29) tmp.file have responsibility to close file, not only unlink file ([@vhain](https://github.com/vhain)) + +#### :bug: Bug Fix +* [#51](https://github.com/raszi/node-tmp/pull/51) fix(windows): fix tempDir on windows ([@raszi](https://github.com/raszi)) +* [#49](https://github.com/raszi/node-tmp/pull/49) remove object from _removeObjects if cleanup fn is called Closes [#48](https://github.com/raszi/node-tmp/issues/48) ([@bmeck](https://github.com/bmeck)) + +#### :memo: Documentation +* [#45](https://github.com/raszi/node-tmp/pull/45) Fix for #44 ([@silkentrance](https://github.com/silkentrance)) + +#### :house: Internal +* [#34](https://github.com/raszi/node-tmp/pull/34) Create LICENSE ([@ScottWeinstein](https://github.com/ScottWeinstein)) + +#### Committers: 6 +- Bradley Farias ([@bmeck](https://github.com/bmeck)) +- Carsten Klein ([@silkentrance](https://github.com/silkentrance)) +- Hrvoje Šimić ([@shime](https://github.com/shime)) +- Juwan Yoo ([@vhain](https://github.com/vhain)) +- KARASZI István ([@raszi](https://github.com/raszi)) +- Scott Weinstein ([@ScottWeinstein](https://github.com/ScottWeinstein)) + + +## v0.0.24 (2014-07-11) + +#### :rocket: Enhancement +* [#25](https://github.com/raszi/node-tmp/pull/25) Added removeCallback passing ([@foxel](https://github.com/foxel)) + +#### Committers: 1 +- Andrey Kupreychik ([@foxel](https://github.com/foxel)) + + +## v0.0.23 (2013-12-03) + +#### :rocket: Enhancement +* [#21](https://github.com/raszi/node-tmp/pull/21) If we are not on node 0.8, don't register an uncaughtException handler ([@wibblymat](https://github.com/wibblymat)) + +#### Committers: 1 +- Mat Scales ([@wibblymat](https://github.com/wibblymat)) + + +## v0.0.22 (2013-11-29) + +#### :rocket: Enhancement +* [#19](https://github.com/raszi/node-tmp/pull/19) Rethrow only on node v0.8. 
([@mcollina](https://github.com/mcollina)) + +#### Committers: 1 +- Matteo Collina ([@mcollina](https://github.com/mcollina)) + + +## v0.0.21 (2013-08-07) + +#### :bug: Bug Fix +* [#16](https://github.com/raszi/node-tmp/pull/16) Fix bug where we delete contents of symlinks ([@lightsofapollo](https://github.com/lightsofapollo)) + +#### Committers: 1 +- James Lal ([@lightsofapollo](https://github.com/lightsofapollo)) + + +## v0.0.17 (2013-04-09) + +#### :rocket: Enhancement +* [#9](https://github.com/raszi/node-tmp/pull/9) add recursive remove option ([@oscar-broman](https://github.com/oscar-broman)) + +#### Committers: 1 +- [@oscar-broman](https://github.com/oscar-broman) + + +## v0.0.14 (2012-08-26) + +#### :rocket: Enhancement +* [#5](https://github.com/raszi/node-tmp/pull/5) Export _getTmpName for temporary file name creation ([@joscha](https://github.com/joscha)) + +#### Committers: 1 +- Joscha Feth ([@joscha](https://github.com/joscha)) + + +## Previous Releases < v0.0.14 + +- no information available diff --git a/_extensions/d2/node_modules/tmp/LICENSE b/_extensions/d2/node_modules/tmp/LICENSE new file mode 100644 index 00000000..72418bd9 --- /dev/null +++ b/_extensions/d2/node_modules/tmp/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 KARASZI István + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/_extensions/d2/node_modules/tmp/README.md b/_extensions/d2/node_modules/tmp/README.md new file mode 100644 index 00000000..bb20fb7b --- /dev/null +++ b/_extensions/d2/node_modules/tmp/README.md @@ -0,0 +1,365 @@ +# Tmp + +A simple temporary file and directory creator for [node.js.][1] + +[![Build Status](https://travis-ci.org/raszi/node-tmp.svg?branch=master)](https://travis-ci.org/raszi/node-tmp) +[![Dependencies](https://david-dm.org/raszi/node-tmp.svg)](https://david-dm.org/raszi/node-tmp) +[![npm version](https://badge.fury.io/js/tmp.svg)](https://badge.fury.io/js/tmp) +[![API documented](https://img.shields.io/badge/API-documented-brightgreen.svg)](https://raszi.github.io/node-tmp/) +[![Known Vulnerabilities](https://snyk.io/test/npm/tmp/badge.svg)](https://snyk.io/test/npm/tmp) + +## About + +This is a [widely used library][2] to create temporary files and directories +in a [node.js][1] environment. + +Tmp offers both an asynchronous and a synchronous API. For all API calls, all +the parameters are optional. There also exists a promisified version of the +API, see [tmp-promise][5]. 
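+
+As a quick, illustrative sketch only, the asynchronous callback API described above can be bridged to Promises by hand; the `tmpFile` wrapper below is a hypothetical helper, not part of tmp's API, and the [tmp-promise][5] package provides a maintained version of the same idea:
+
+```javascript
+const tmp = require('tmp');
+
+// Hypothetical helper for illustration -- not exported by tmp itself.
+function tmpFile(options = {}) {
+  return new Promise((resolve, reject) => {
+    // tmp.file(options, cb) is the documented callback API.
+    tmp.file(options, (err, path, fd, cleanupCallback) => {
+      if (err) return reject(err);
+      resolve({ path, fd, cleanupCallback });
+    });
+  });
+}
+
+tmpFile({ postfix: '.txt' }).then(({ path, cleanupCallback }) => {
+  console.log('File: ', path);
+  cleanupCallback();
+});
+```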
+ +Tmp uses crypto for determining random file names, or, when using templates, +a six letter random identifier. And just in case that you do not have that much +entropy left on your system, Tmp will fall back to pseudo random numbers. + +You can set whether you want to remove the temporary file on process exit or +not. + +If you do not want to store your temporary directories and files in the +standard OS temporary directory, then you are free to override that as well. + +## An Important Note on Compatibility + +See the [CHANGELOG](./CHANGELOG.md) for more information. + +### Version 0.1.0 + +Since version 0.1.0, all support for node versions < 0.10.0 has been dropped. + +Most importantly, any support for earlier versions of node-tmp was also dropped. + +If you still require node versions < 0.10.0, then you must limit your node-tmp +dependency to versions below 0.1.0. + +### Version 0.0.33 + +Since version 0.0.33, all support for node versions < 0.8 has been dropped. + +If you still require node version 0.8, then you must limit your node-tmp +dependency to version 0.0.33. + +For node versions < 0.8 you must limit your node-tmp dependency to +versions < 0.0.33. + +## How to install + +```bash +npm install tmp +``` + +## Usage + +Please also check [API docs][4]. + +### Asynchronous file creation + +Simple temporary file creation, the file will be closed and unlinked on process exit. + +```javascript +const tmp = require('tmp'); + +tmp.file(function _tempFileCreated(err, path, fd, cleanupCallback) { + if (err) throw err; + + console.log('File: ', path); + console.log('Filedescriptor: ', fd); + + // If we don't need the file anymore we could manually call the cleanupCallback + // But that is not necessary if we didn't pass the keep option because the library + // will clean after itself. + cleanupCallback(); +}); +``` + +### Synchronous file creation + +A synchronous version of the above. + +```javascript +const tmp = require('tmp'); + +const tmpobj = tmp.fileSync(); +console.log('File: ', tmpobj.name); +console.log('Filedescriptor: ', tmpobj.fd); + +// If we don't need the file anymore we could manually call the removeCallback +// But that is not necessary if we didn't pass the keep option because the library +// will clean after itself. +tmpobj.removeCallback(); +``` + +Note that this might throw an exception if either the maximum limit of retries +for creating a temporary name fails, or, in case that you do not have the permission +to write to the directory where the temporary file should be created in. + +### Asynchronous directory creation + +Simple temporary directory creation, it will be removed on process exit. + +If the directory still contains items on process exit, then it won't be removed. + +```javascript +const tmp = require('tmp'); + +tmp.dir(function _tempDirCreated(err, path, cleanupCallback) { + if (err) throw err; + + console.log('Dir: ', path); + + // Manual cleanup + cleanupCallback(); +}); +``` + +If you want to cleanup the directory even when there are entries in it, then +you can pass the `unsafeCleanup` option when creating it. + +### Synchronous directory creation + +A synchronous version of the above. 
+ +```javascript +const tmp = require('tmp'); + +const tmpobj = tmp.dirSync(); +console.log('Dir: ', tmpobj.name); +// Manual cleanup +tmpobj.removeCallback(); +``` + +Note that this might throw an exception if either the maximum limit of retries +for creating a temporary name fails, or, in case that you do not have the permission +to write to the directory where the temporary directory should be created in. + +### Asynchronous filename generation + +It is possible with this library to generate a unique filename in the specified +directory. + +```javascript +const tmp = require('tmp'); + +tmp.tmpName(function _tempNameGenerated(err, path) { + if (err) throw err; + + console.log('Created temporary filename: ', path); +}); +``` + +### Synchronous filename generation + +A synchronous version of the above. + +```javascript +const tmp = require('tmp'); + +const name = tmp.tmpNameSync(); +console.log('Created temporary filename: ', name); +``` + +## Advanced usage + +### Asynchronous file creation + +Creates a file with mode `0644`, prefix will be `prefix-` and postfix will be `.txt`. + +```javascript +const tmp = require('tmp'); + +tmp.file({ mode: 0o644, prefix: 'prefix-', postfix: '.txt' }, function _tempFileCreated(err, path, fd) { + if (err) throw err; + + console.log('File: ', path); + console.log('Filedescriptor: ', fd); +}); +``` + +### Synchronous file creation + +A synchronous version of the above. + +```javascript +const tmp = require('tmp'); + +const tmpobj = tmp.fileSync({ mode: 0o644, prefix: 'prefix-', postfix: '.txt' }); +console.log('File: ', tmpobj.name); +console.log('Filedescriptor: ', tmpobj.fd); +``` + +### Controlling the Descriptor + +As a side effect of creating a unique file `tmp` gets a file descriptor that is +returned to the user as the `fd` parameter. The descriptor may be used by the +application and is closed when the `removeCallback` is invoked. + +In some use cases the application does not need the descriptor, needs to close it +without removing the file, or needs to remove the file without closing the +descriptor. Two options control how the descriptor is managed: + +* `discardDescriptor` - if `true` causes `tmp` to close the descriptor after the file + is created. In this case the `fd` parameter is undefined. +* `detachDescriptor` - if `true` causes `tmp` to return the descriptor in the `fd` + parameter, but it is the application's responsibility to close it when it is no + longer needed. + +```javascript +const tmp = require('tmp'); + +tmp.file({ discardDescriptor: true }, function _tempFileCreated(err, path, fd, cleanupCallback) { + if (err) throw err; + // fd will be undefined, allowing application to use fs.createReadStream(path) + // without holding an unused descriptor open. +}); +``` + +```javascript +const tmp = require('tmp'); + +tmp.file({ detachDescriptor: true }, function _tempFileCreated(err, path, fd, cleanupCallback) { + if (err) throw err; + + cleanupCallback(); + // Application can store data through fd here; the space used will automatically + // be reclaimed by the operating system when the descriptor is closed or program + // terminates. +}); +``` + +### Asynchronous directory creation + +Creates a directory with mode `0755`, prefix will be `myTmpDir_`. + +```javascript +const tmp = require('tmp'); + +tmp.dir({ mode: 0o750, prefix: 'myTmpDir_' }, function _tempDirCreated(err, path) { + if (err) throw err; + + console.log('Dir: ', path); +}); +``` + +### Synchronous directory creation + +Again, a synchronous version of the above. 
+
+```javascript
+const tmp = require('tmp');
+
+const tmpobj = tmp.dirSync({ mode: 0o750, prefix: 'myTmpDir_' });
+console.log('Dir: ', tmpobj.name);
+```
+
+### mkstemp like, asynchronously
+
+Creates a new temporary directory with mode `0o700` and a filename like `/tmp/tmp-nk2J1u`.
+
+IMPORTANT NOTE: `template` no longer accepts a path. Use the `dir` option instead if you
+require tmp to create your temporary filesystem object in a place other than the
+default `tmp.tmpdir`.
+
+```javascript
+const tmp = require('tmp');
+
+tmp.dir({ template: 'tmp-XXXXXX' }, function _tempDirCreated(err, path) {
+  if (err) throw err;
+
+  console.log('Dir: ', path);
+});
+```
+
+### mkstemp like, synchronously
+
+This will behave similarly to the asynchronous version.
+
+```javascript
+const tmp = require('tmp');
+
+const tmpobj = tmp.dirSync({ template: 'tmp-XXXXXX' });
+console.log('Dir: ', tmpobj.name);
+```
+
+### Asynchronous filename generation
+
+Using `tmpName()` you can create temporary file names asynchronously.
+The function accepts all standard options, e.g. `prefix`, `postfix`, `dir`, and so on.
+
+You can also leave out the options altogether and just call the function with a callback as the first parameter.
+
+```javascript
+const tmp = require('tmp');
+
+const options = {};
+
+tmp.tmpName(options, function _tempNameGenerated(err, path) {
+  if (err) throw err;
+
+  console.log('Created temporary filename: ', path);
+});
+```
+
+### Synchronous filename generation
+
+The `tmpNameSync()` function works similarly to `tmpName()`.
+Again, you can leave out the options altogether and just invoke the function without any parameters.
+
+```javascript
+const tmp = require('tmp');
+const options = {};
+const tmpname = tmp.tmpNameSync(options);
+console.log('Created temporary filename: ', tmpname);
+```
+
+## Graceful cleanup
+
+If graceful cleanup is set, tmp will remove all controlled temporary objects on process exit; otherwise the
+temporary objects will remain in place, waiting to be cleaned up on system restart or by some other scheduled
+temporary object removal.
+
+To enable this, call the `setGracefulCleanup()` method:
+
+```javascript
+const tmp = require('tmp');
+
+tmp.setGracefulCleanup();
+```
+
+## Options
+
+All options are optional :)
+
+ * `name`: a fixed name that overrides random name generation; the name must be relative and must not contain path segments
+ * `mode`: the file mode to create with; falls back to `0o600` on file creation and `0o700` on directory creation
+ * `prefix`: the optional prefix, defaults to `tmp`
+ * `postfix`: the optional postfix
+ * `template`: [`mkstemp`][3]-like filename template, no default; can be either an absolute or a relative path that resolves
+   to a relative path of the system's default temporary directory, and must include `XXXXXX` once for random name generation, e.g.
+   'foo/bar/XXXXXX'. Absolute paths are also fine as long as they point to a location under `os.tmpdir()`.
+   Any directories along the specified path must already exist, otherwise an ENOENT error will be thrown upon access,
+   as tmp will neither check the availability of the path nor establish the requested path for you.
+ * `dir`: the optional temporary directory that must be relative to the system's default temporary directory.
+   Absolute paths are fine as long as they point to a location under the system's default temporary directory.
+   Any directories along the specified path must already exist, otherwise an ENOENT error will be thrown upon access,
+   as tmp will neither check the availability of the path nor establish the requested path for you.
+ * `tmpdir`: allows you to override the system's root tmp directory
+ * `tries`: how many times the function should try to get a unique filename before giving up, default `3`
+ * `keep`: signals that the temporary file or directory should not be deleted on exit, default is `false`
+   * In order to clean up, you will have to call the provided `cleanupCallback` function manually.
+ * `unsafeCleanup`: recursively removes the created temporary directory, even when it's not empty. Default is `false`.
+ * `detachDescriptor`: detaches the file descriptor; the caller is responsible for closing the file, and tmp will no longer try to close the file during garbage collection
+ * `discardDescriptor`: discards the file descriptor (closes the file, fd is -1); tmp will no longer try to close the file during garbage collection
+
+[1]: http://nodejs.org/
+[2]: https://www.npmjs.com/browse/depended/tmp
+[3]: http://www.kernel.org/doc/man-pages/online/pages/man3/mkstemp.3.html
+[4]: https://raszi.github.io/node-tmp/
+[5]: https://github.com/benjamingr/tmp-promise
diff --git a/_extensions/d2/node_modules/tmp/lib/tmp.js b/_extensions/d2/node_modules/tmp/lib/tmp.js
new file mode 100644
index 00000000..b41c29d4
--- /dev/null
+++ b/_extensions/d2/node_modules/tmp/lib/tmp.js
@@ -0,0 +1,780 @@
+/*!
+ * Tmp
+ *
+ * Copyright (c) 2011-2017 KARASZI Istvan
+ *
+ * MIT Licensed
+ */
+
+/*
+ * Module dependencies.
+ */
+const fs = require('fs');
+const os = require('os');
+const path = require('path');
+const crypto = require('crypto');
+const _c = { fs: fs.constants, os: os.constants };
+const rimraf = require('rimraf');
+
+/*
+ * The working inner variables.
+ */
+const
+  // the random characters to choose from
+  RANDOM_CHARS = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
+
+  TEMPLATE_PATTERN = /XXXXXX/,
+
+  DEFAULT_TRIES = 3,
+
+  CREATE_FLAGS = (_c.O_CREAT || _c.fs.O_CREAT) | (_c.O_EXCL || _c.fs.O_EXCL) | (_c.O_RDWR || _c.fs.O_RDWR),
+
+  // constants are off on the windows platform and will not match the actual errno codes
+  IS_WIN32 = os.platform() === 'win32',
+  EBADF = _c.EBADF || _c.os.errno.EBADF,
+  ENOENT = _c.ENOENT || _c.os.errno.ENOENT,
+
+  DIR_MODE = 0o700 /* 448 */,
+  FILE_MODE = 0o600 /* 384 */,
+
+  EXIT = 'exit',
+
+  // this will hold the objects that need to be removed on exit
+  _removeObjects = [],
+
+  // API change in fs.rmdirSync leads to an error when passing in a second parameter, e.g. the callback
+  FN_RMDIR_SYNC = fs.rmdirSync.bind(fs),
+  FN_RIMRAF_SYNC = rimraf.sync;
+
+let
+  _gracefulCleanup = false;
+
+/**
+ * Gets a temporary file name.
+ * + * @param {(Options|tmpNameCallback)} options options or callback + * @param {?tmpNameCallback} callback the callback function + */ +function tmpName(options, callback) { + const + args = _parseArguments(options, callback), + opts = args[0], + cb = args[1]; + + try { + _assertAndSanitizeOptions(opts); + } catch (err) { + return cb(err); + } + + let tries = opts.tries; + (function _getUniqueName() { + try { + const name = _generateTmpName(opts); + + // check whether the path exists then retry if needed + fs.stat(name, function (err) { + /* istanbul ignore else */ + if (!err) { + /* istanbul ignore else */ + if (tries-- > 0) return _getUniqueName(); + + return cb(new Error('Could not get a unique tmp filename, max tries reached ' + name)); + } + + cb(null, name); + }); + } catch (err) { + cb(err); + } + }()); +} + +/** + * Synchronous version of tmpName. + * + * @param {Object} options + * @returns {string} the generated random name + * @throws {Error} if the options are invalid or could not generate a filename + */ +function tmpNameSync(options) { + const + args = _parseArguments(options), + opts = args[0]; + + _assertAndSanitizeOptions(opts); + + let tries = opts.tries; + do { + const name = _generateTmpName(opts); + try { + fs.statSync(name); + } catch (e) { + return name; + } + } while (tries-- > 0); + + throw new Error('Could not get a unique tmp filename, max tries reached'); +} + +/** + * Creates and opens a temporary file. + * + * @param {(Options|null|undefined|fileCallback)} options the config options or the callback function or null or undefined + * @param {?fileCallback} callback + */ +function file(options, callback) { + const + args = _parseArguments(options, callback), + opts = args[0], + cb = args[1]; + + // gets a temporary filename + tmpName(opts, function _tmpNameCreated(err, name) { + /* istanbul ignore else */ + if (err) return cb(err); + + // create and open the file + fs.open(name, CREATE_FLAGS, opts.mode || FILE_MODE, function _fileCreated(err, fd) { + /* istanbu ignore else */ + if (err) return cb(err); + + if (opts.discardDescriptor) { + return fs.close(fd, function _discardCallback(possibleErr) { + // the chance of getting an error on close here is rather low and might occur in the most edgiest cases only + return cb(possibleErr, name, undefined, _prepareTmpFileRemoveCallback(name, -1, opts, false)); + }); + } else { + // detachDescriptor passes the descriptor whereas discardDescriptor closes it, either way, we no longer care + // about the descriptor + const discardOrDetachDescriptor = opts.discardDescriptor || opts.detachDescriptor; + cb(null, name, fd, _prepareTmpFileRemoveCallback(name, discardOrDetachDescriptor ? -1 : fd, opts, false)); + } + }); + }); +} + +/** + * Synchronous version of file. + * + * @param {Options} options + * @returns {FileSyncObject} object consists of name, fd and removeCallback + * @throws {Error} if cannot create a file + */ +function fileSync(options) { + const + args = _parseArguments(options), + opts = args[0]; + + const discardOrDetachDescriptor = opts.discardDescriptor || opts.detachDescriptor; + const name = tmpNameSync(opts); + var fd = fs.openSync(name, CREATE_FLAGS, opts.mode || FILE_MODE); + /* istanbul ignore else */ + if (opts.discardDescriptor) { + fs.closeSync(fd); + fd = undefined; + } + + return { + name: name, + fd: fd, + removeCallback: _prepareTmpFileRemoveCallback(name, discardOrDetachDescriptor ? -1 : fd, opts, true) + }; +} + +/** + * Creates a temporary directory. 
+ * + * @param {(Options|dirCallback)} options the options or the callback function + * @param {?dirCallback} callback + */ +function dir(options, callback) { + const + args = _parseArguments(options, callback), + opts = args[0], + cb = args[1]; + + // gets a temporary filename + tmpName(opts, function _tmpNameCreated(err, name) { + /* istanbul ignore else */ + if (err) return cb(err); + + // create the directory + fs.mkdir(name, opts.mode || DIR_MODE, function _dirCreated(err) { + /* istanbul ignore else */ + if (err) return cb(err); + + cb(null, name, _prepareTmpDirRemoveCallback(name, opts, false)); + }); + }); +} + +/** + * Synchronous version of dir. + * + * @param {Options} options + * @returns {DirSyncObject} object consists of name and removeCallback + * @throws {Error} if it cannot create a directory + */ +function dirSync(options) { + const + args = _parseArguments(options), + opts = args[0]; + + const name = tmpNameSync(opts); + fs.mkdirSync(name, opts.mode || DIR_MODE); + + return { + name: name, + removeCallback: _prepareTmpDirRemoveCallback(name, opts, true) + }; +} + +/** + * Removes files asynchronously. + * + * @param {Object} fdPath + * @param {Function} next + * @private + */ +function _removeFileAsync(fdPath, next) { + const _handler = function (err) { + if (err && !_isENOENT(err)) { + // reraise any unanticipated error + return next(err); + } + next(); + }; + + if (0 <= fdPath[0]) + fs.close(fdPath[0], function () { + fs.unlink(fdPath[1], _handler); + }); + else fs.unlink(fdPath[1], _handler); +} + +/** + * Removes files synchronously. + * + * @param {Object} fdPath + * @private + */ +function _removeFileSync(fdPath) { + let rethrownException = null; + try { + if (0 <= fdPath[0]) fs.closeSync(fdPath[0]); + } catch (e) { + // reraise any unanticipated error + if (!_isEBADF(e) && !_isENOENT(e)) throw e; + } finally { + try { + fs.unlinkSync(fdPath[1]); + } + catch (e) { + // reraise any unanticipated error + if (!_isENOENT(e)) rethrownException = e; + } + } + if (rethrownException !== null) { + throw rethrownException; + } +} + +/** + * Prepares the callback for removal of the temporary file. + * + * Returns either a sync callback or a async callback depending on whether + * fileSync or file was called, which is expressed by the sync parameter. + * + * @param {string} name the path of the file + * @param {number} fd file descriptor + * @param {Object} opts + * @param {boolean} sync + * @returns {fileCallback | fileCallbackSync} + * @private + */ +function _prepareTmpFileRemoveCallback(name, fd, opts, sync) { + const removeCallbackSync = _prepareRemoveCallback(_removeFileSync, [fd, name], sync); + const removeCallback = _prepareRemoveCallback(_removeFileAsync, [fd, name], sync, removeCallbackSync); + + if (!opts.keep) _removeObjects.unshift(removeCallbackSync); + + return sync ? removeCallbackSync : removeCallback; +} + +/** + * Prepares the callback for removal of the temporary directory. + * + * Returns either a sync callback or a async callback depending on whether + * tmpFileSync or tmpFile was called, which is expressed by the sync parameter. + * + * @param {string} name + * @param {Object} opts + * @param {boolean} sync + * @returns {Function} the callback + * @private + */ +function _prepareTmpDirRemoveCallback(name, opts, sync) { + const removeFunction = opts.unsafeCleanup ? rimraf : fs.rmdir.bind(fs); + const removeFunctionSync = opts.unsafeCleanup ? 
FN_RIMRAF_SYNC : FN_RMDIR_SYNC; + const removeCallbackSync = _prepareRemoveCallback(removeFunctionSync, name, sync); + const removeCallback = _prepareRemoveCallback(removeFunction, name, sync, removeCallbackSync); + if (!opts.keep) _removeObjects.unshift(removeCallbackSync); + + return sync ? removeCallbackSync : removeCallback; +} + +/** + * Creates a guarded function wrapping the removeFunction call. + * + * The cleanup callback is save to be called multiple times. + * Subsequent invocations will be ignored. + * + * @param {Function} removeFunction + * @param {string} fileOrDirName + * @param {boolean} sync + * @param {cleanupCallbackSync?} cleanupCallbackSync + * @returns {cleanupCallback | cleanupCallbackSync} + * @private + */ +function _prepareRemoveCallback(removeFunction, fileOrDirName, sync, cleanupCallbackSync) { + let called = false; + + // if sync is true, the next parameter will be ignored + return function _cleanupCallback(next) { + + /* istanbul ignore else */ + if (!called) { + // remove cleanupCallback from cache + const toRemove = cleanupCallbackSync || _cleanupCallback; + const index = _removeObjects.indexOf(toRemove); + /* istanbul ignore else */ + if (index >= 0) _removeObjects.splice(index, 1); + + called = true; + if (sync || removeFunction === FN_RMDIR_SYNC || removeFunction === FN_RIMRAF_SYNC) { + return removeFunction(fileOrDirName); + } else { + return removeFunction(fileOrDirName, next || function() {}); + } + } + }; +} + +/** + * The garbage collector. + * + * @private + */ +function _garbageCollector() { + /* istanbul ignore else */ + if (!_gracefulCleanup) return; + + // the function being called removes itself from _removeObjects, + // loop until _removeObjects is empty + while (_removeObjects.length) { + try { + _removeObjects[0](); + } catch (e) { + // already removed? + } + } +} + +/** + * Random name generator based on crypto. + * Adapted from http://blog.tompawlak.org/how-to-generate-random-values-nodejs-javascript + * + * @param {number} howMany + * @returns {string} the generated random name + * @private + */ +function _randomChars(howMany) { + let + value = [], + rnd = null; + + // make sure that we do not fail because we ran out of entropy + try { + rnd = crypto.randomBytes(howMany); + } catch (e) { + rnd = crypto.pseudoRandomBytes(howMany); + } + + for (var i = 0; i < howMany; i++) { + value.push(RANDOM_CHARS[rnd[i] % RANDOM_CHARS.length]); + } + + return value.join(''); +} + +/** + * Helper which determines whether a string s is blank, that is undefined, or empty or null. + * + * @private + * @param {string} s + * @returns {Boolean} true whether the string s is blank, false otherwise + */ +function _isBlank(s) { + return s === null || _isUndefined(s) || !s.trim(); +} + +/** + * Checks whether the `obj` parameter is defined or not. + * + * @param {Object} obj + * @returns {boolean} true if the object is undefined + * @private + */ +function _isUndefined(obj) { + return typeof obj === 'undefined'; +} + +/** + * Parses the function arguments. + * + * This function helps to have optional arguments. 
+ * + * @param {(Options|null|undefined|Function)} options + * @param {?Function} callback + * @returns {Array} parsed arguments + * @private + */ +function _parseArguments(options, callback) { + /* istanbul ignore else */ + if (typeof options === 'function') { + return [{}, options]; + } + + /* istanbul ignore else */ + if (_isUndefined(options)) { + return [{}, callback]; + } + + // copy options so we do not leak the changes we make internally + const actualOptions = {}; + for (const key of Object.getOwnPropertyNames(options)) { + actualOptions[key] = options[key]; + } + + return [actualOptions, callback]; +} + +/** + * Generates a new temporary name. + * + * @param {Object} opts + * @returns {string} the new random name according to opts + * @private + */ +function _generateTmpName(opts) { + + const tmpDir = opts.tmpdir; + + /* istanbul ignore else */ + if (!_isUndefined(opts.name)) + return path.join(tmpDir, opts.dir, opts.name); + + /* istanbul ignore else */ + if (!_isUndefined(opts.template)) + return path.join(tmpDir, opts.dir, opts.template).replace(TEMPLATE_PATTERN, _randomChars(6)); + + // prefix and postfix + const name = [ + opts.prefix ? opts.prefix : 'tmp', + '-', + process.pid, + '-', + _randomChars(12), + opts.postfix ? '-' + opts.postfix : '' + ].join(''); + + return path.join(tmpDir, opts.dir, name); +} + +/** + * Asserts whether the specified options are valid, also sanitizes options and provides sane defaults for missing + * options. + * + * @param {Options} options + * @private + */ +function _assertAndSanitizeOptions(options) { + + options.tmpdir = _getTmpDir(options); + + const tmpDir = options.tmpdir; + + /* istanbul ignore else */ + if (!_isUndefined(options.name)) + _assertIsRelative(options.name, 'name', tmpDir); + /* istanbul ignore else */ + if (!_isUndefined(options.dir)) + _assertIsRelative(options.dir, 'dir', tmpDir); + /* istanbul ignore else */ + if (!_isUndefined(options.template)) { + _assertIsRelative(options.template, 'template', tmpDir); + if (!options.template.match(TEMPLATE_PATTERN)) + throw new Error(`Invalid template, found "${options.template}".`); + } + /* istanbul ignore else */ + if (!_isUndefined(options.tries) && isNaN(options.tries) || options.tries < 0) + throw new Error(`Invalid tries, found "${options.tries}".`); + + // if a name was specified we will try once + options.tries = _isUndefined(options.name) ? options.tries || DEFAULT_TRIES : 1; + options.keep = !!options.keep; + options.detachDescriptor = !!options.detachDescriptor; + options.discardDescriptor = !!options.discardDescriptor; + options.unsafeCleanup = !!options.unsafeCleanup; + + // sanitize dir, also keep (multiple) blanks if the user, purportedly sane, requests us to + options.dir = _isUndefined(options.dir) ? '' : path.relative(tmpDir, _resolvePath(options.dir, tmpDir)); + options.template = _isUndefined(options.template) ? undefined : path.relative(tmpDir, _resolvePath(options.template, tmpDir)); + // sanitize further if template is relative to options.dir + options.template = _isBlank(options.template) ? undefined : path.relative(options.dir, options.template); + + // for completeness' sake only, also keep (multiple) blanks if the user, purportedly sane, requests us to + options.name = _isUndefined(options.name) ? undefined : _sanitizeName(options.name); + options.prefix = _isUndefined(options.prefix) ? '' : options.prefix; + options.postfix = _isUndefined(options.postfix) ? '' : options.postfix; +} + +/** + * Resolve the specified path name in respect to tmpDir. 
+ * + * The specified name might include relative path components, e.g. ../ + * so we need to resolve in order to be sure that is is located inside tmpDir + * + * @param name + * @param tmpDir + * @returns {string} + * @private + */ +function _resolvePath(name, tmpDir) { + const sanitizedName = _sanitizeName(name); + if (sanitizedName.startsWith(tmpDir)) { + return path.resolve(sanitizedName); + } else { + return path.resolve(path.join(tmpDir, sanitizedName)); + } +} + +/** + * Sanitize the specified path name by removing all quote characters. + * + * @param name + * @returns {string} + * @private + */ +function _sanitizeName(name) { + if (_isBlank(name)) { + return name; + } + return name.replace(/["']/g, ''); +} + +/** + * Asserts whether specified name is relative to the specified tmpDir. + * + * @param {string} name + * @param {string} option + * @param {string} tmpDir + * @throws {Error} + * @private + */ +function _assertIsRelative(name, option, tmpDir) { + if (option === 'name') { + // assert that name is not absolute and does not contain a path + if (path.isAbsolute(name)) + throw new Error(`${option} option must not contain an absolute path, found "${name}".`); + // must not fail on valid . or .. or similar such constructs + let basename = path.basename(name); + if (basename === '..' || basename === '.' || basename !== name) + throw new Error(`${option} option must not contain a path, found "${name}".`); + } + else { // if (option === 'dir' || option === 'template') { + // assert that dir or template are relative to tmpDir + if (path.isAbsolute(name) && !name.startsWith(tmpDir)) { + throw new Error(`${option} option must be relative to "${tmpDir}", found "${name}".`); + } + let resolvedPath = _resolvePath(name, tmpDir); + if (!resolvedPath.startsWith(tmpDir)) + throw new Error(`${option} option must be relative to "${tmpDir}", found "${resolvedPath}".`); + } +} + +/** + * Helper for testing against EBADF to compensate changes made to Node 7.x under Windows. + * + * @private + */ +function _isEBADF(error) { + return _isExpectedError(error, -EBADF, 'EBADF'); +} + +/** + * Helper for testing against ENOENT to compensate changes made to Node 7.x under Windows. + * + * @private + */ +function _isENOENT(error) { + return _isExpectedError(error, -ENOENT, 'ENOENT'); +} + +/** + * Helper to determine whether the expected error code matches the actual code and errno, + * which will differ between the supported node versions. + * + * - Node >= 7.0: + * error.code {string} + * error.errno {number} any numerical value will be negated + * + * CAVEAT + * + * On windows, the errno for EBADF is -4083 but os.constants.errno.EBADF is different and we must assume that ENOENT + * is no different here. + * + * @param {SystemError} error + * @param {number} errno + * @param {string} code + * @private + */ +function _isExpectedError(error, errno, code) { + return IS_WIN32 ? error.code === code : error.code === code && error.errno === errno; +} + +/** + * Sets the graceful cleanup. + * + * If graceful cleanup is set, tmp will remove all controlled temporary objects on process exit, otherwise the + * temporary objects will remain in place, waiting to be cleaned up on system restart or otherwise scheduled temporary + * object removals. + */ +function setGracefulCleanup() { + _gracefulCleanup = true; +} + +/** + * Returns the currently configured tmp dir from os.tmpdir(). 
+ * + * @private + * @param {?Options} options + * @returns {string} the currently configured tmp dir + */ +function _getTmpDir(options) { + return path.resolve(_sanitizeName(options && options.tmpdir || os.tmpdir())); +} + +// Install process exit listener +process.addListener(EXIT, _garbageCollector); + +/** + * Configuration options. + * + * @typedef {Object} Options + * @property {?boolean} keep the temporary object (file or dir) will not be garbage collected + * @property {?number} tries the number of tries before give up the name generation + * @property (?int) mode the access mode, defaults are 0o700 for directories and 0o600 for files + * @property {?string} template the "mkstemp" like filename template + * @property {?string} name fixed name relative to tmpdir or the specified dir option + * @property {?string} dir tmp directory relative to the root tmp directory in use + * @property {?string} prefix prefix for the generated name + * @property {?string} postfix postfix for the generated name + * @property {?string} tmpdir the root tmp directory which overrides the os tmpdir + * @property {?boolean} unsafeCleanup recursively removes the created temporary directory, even when it's not empty + * @property {?boolean} detachDescriptor detaches the file descriptor, caller is responsible for closing the file, tmp will no longer try closing the file during garbage collection + * @property {?boolean} discardDescriptor discards the file descriptor (closes file, fd is -1), tmp will no longer try closing the file during garbage collection + */ + +/** + * @typedef {Object} FileSyncObject + * @property {string} name the name of the file + * @property {string} fd the file descriptor or -1 if the fd has been discarded + * @property {fileCallback} removeCallback the callback function to remove the file + */ + +/** + * @typedef {Object} DirSyncObject + * @property {string} name the name of the directory + * @property {fileCallback} removeCallback the callback function to remove the directory + */ + +/** + * @callback tmpNameCallback + * @param {?Error} err the error object if anything goes wrong + * @param {string} name the temporary file name + */ + +/** + * @callback fileCallback + * @param {?Error} err the error object if anything goes wrong + * @param {string} name the temporary file name + * @param {number} fd the file descriptor or -1 if the fd had been discarded + * @param {cleanupCallback} fn the cleanup callback function + */ + +/** + * @callback fileCallbackSync + * @param {?Error} err the error object if anything goes wrong + * @param {string} name the temporary file name + * @param {number} fd the file descriptor or -1 if the fd had been discarded + * @param {cleanupCallbackSync} fn the cleanup callback function + */ + +/** + * @callback dirCallback + * @param {?Error} err the error object if anything goes wrong + * @param {string} name the temporary file name + * @param {cleanupCallback} fn the cleanup callback function + */ + +/** + * @callback dirCallbackSync + * @param {?Error} err the error object if anything goes wrong + * @param {string} name the temporary file name + * @param {cleanupCallbackSync} fn the cleanup callback function + */ + +/** + * Removes the temporary created file or directory. + * + * @callback cleanupCallback + * @param {simpleCallback} [next] function to call whenever the tmp object needs to be removed + */ + +/** + * Removes the temporary created file or directory. 
+ * + * @callback cleanupCallbackSync + */ + +/** + * Callback function for function composition. + * @see {@link https://github.com/raszi/node-tmp/issues/57|raszi/node-tmp#57} + * + * @callback simpleCallback + */ + +// exporting all the needed methods + +// evaluate _getTmpDir() lazily, mainly for simplifying testing but it also will +// allow users to reconfigure the temporary directory +Object.defineProperty(module.exports, 'tmpdir', { + enumerable: true, + configurable: false, + get: function () { + return _getTmpDir(); + } +}); + +module.exports.dir = dir; +module.exports.dirSync = dirSync; + +module.exports.file = file; +module.exports.fileSync = fileSync; + +module.exports.tmpName = tmpName; +module.exports.tmpNameSync = tmpNameSync; + +module.exports.setGracefulCleanup = setGracefulCleanup; diff --git a/_extensions/d2/node_modules/tmp/package.json b/_extensions/d2/node_modules/tmp/package.json new file mode 100644 index 00000000..d98a9108 --- /dev/null +++ b/_extensions/d2/node_modules/tmp/package.json @@ -0,0 +1,58 @@ +{ + "name": "tmp", + "version": "0.2.1", + "description": "Temporary file and directory creator", + "author": "KARASZI István (http://raszi.hu/)", + "contributors": [ + "Carsten Klein (https://github.com/silkentrance)" + ], + "keywords": [ + "temporary", + "tmp", + "temp", + "tempdir", + "tempfile", + "tmpdir", + "tmpfile" + ], + "license": "MIT", + "repository": "https://github.com/raszi/node-tmp.git", + "homepage": "http://github.com/raszi/node-tmp", + "bugs": { + "url": "http://github.com/raszi/node-tmp/issues" + }, + "engines": { + "node": ">=8.17.0" + }, + "dependencies": { + "rimraf": "^3.0.0" + }, + "devDependencies": { + "eslint": "^6.3.0", + "eslint-plugin-mocha": "^6.1.1", + "istanbul": "^0.4.5", + "lerna-changelog": "^1.0.1", + "mocha": "^6.2.0" + }, + "main": "lib/tmp.js", + "files": [ + "lib/" + ], + "changelog": { + "labels": { + "breaking": ":boom: Breaking Change", + "enhancement": ":rocket: Enhancement", + "bug": ":bug: Bug Fix", + "documentation": ":memo: Documentation", + "internal": ":house: Internal" + }, + "cacheDir": ".changelog" + }, + "scripts": { + "changelog": "lerna-changelog", + "lint": "eslint lib --env mocha test", + "clean": "rm -Rf ./coverage", + "test": "npm run clean && istanbul cover ./node_modules/mocha/bin/_mocha --report none --print none --dir ./coverage/json -u exports -R test/*-test.js && istanbul report --root ./coverage/json html && istanbul report text-summary", + "doc": "jsdoc -c .jsdoc.json" + } +} diff --git a/_extensions/d2/node_modules/trough/index.d.ts b/_extensions/d2/node_modules/trough/index.d.ts new file mode 100644 index 00000000..64e51e1a --- /dev/null +++ b/_extensions/d2/node_modules/trough/index.d.ts @@ -0,0 +1,49 @@ +/** + * @typedef {(error?: Error|null|undefined, ...output: Array) => void} Callback + * @typedef {(...input: Array) => any} Middleware + * + * @typedef {(...input: Array) => void} Run + * Call all middleware. + * @typedef {(fn: Middleware) => Pipeline} Use + * Add `fn` (middleware) to the list. + * @typedef {{run: Run, use: Use}} Pipeline + * Middleware. + */ +/** + * Create new middleware. + * + * @returns {Pipeline} + */ +export function trough(): Pipeline +/** + * Wrap `middleware`. + * Can be sync or async; return a promise, receive a callback, or return new + * values and errors. 
+ * + * @param {Middleware} middleware + * @param {Callback} callback + */ +export function wrap( + middleware: Middleware, + callback: Callback +): (...parameters: Array) => void +export type Callback = ( + error?: Error | null | undefined, + ...output: Array +) => void +export type Middleware = (...input: Array) => any +/** + * Call all middleware. + */ +export type Run = (...input: Array) => void +/** + * Add `fn` (middleware) to the list. + */ +export type Use = (fn: Middleware) => Pipeline +/** + * Middleware. + */ +export type Pipeline = { + run: Run + use: Use +} diff --git a/_extensions/d2/node_modules/trough/index.js b/_extensions/d2/node_modules/trough/index.js new file mode 100644 index 00000000..6e04a454 --- /dev/null +++ b/_extensions/d2/node_modules/trough/index.js @@ -0,0 +1,160 @@ +/** + * @typedef {(error?: Error|null|undefined, ...output: Array) => void} Callback + * @typedef {(...input: Array) => any} Middleware + * + * @typedef {(...input: Array) => void} Run + * Call all middleware. + * @typedef {(fn: Middleware) => Pipeline} Use + * Add `fn` (middleware) to the list. + * @typedef {{run: Run, use: Use}} Pipeline + * Middleware. + */ + +/** + * Create new middleware. + * + * @returns {Pipeline} + */ +export function trough() { + /** @type {Array} */ + const fns = [] + /** @type {Pipeline} */ + const pipeline = {run, use} + + return pipeline + + /** @type {Run} */ + function run(...values) { + let middlewareIndex = -1 + /** @type {Callback} */ + const callback = values.pop() + + if (typeof callback !== 'function') { + throw new TypeError('Expected function as last argument, not ' + callback) + } + + next(null, ...values) + + /** + * Run the next `fn`, or we’re done. + * + * @param {Error|null|undefined} error + * @param {Array} output + */ + function next(error, ...output) { + const fn = fns[++middlewareIndex] + let index = -1 + + if (error) { + callback(error) + return + } + + // Copy non-nullish input into values. + while (++index < values.length) { + if (output[index] === null || output[index] === undefined) { + output[index] = values[index] + } + } + + // Save the newly created `output` for the next call. + values = output + + // Next or done. + if (fn) { + wrap(fn, next)(...output) + } else { + callback(null, ...output) + } + } + } + + /** @type {Use} */ + function use(middelware) { + if (typeof middelware !== 'function') { + throw new TypeError( + 'Expected `middelware` to be a function, not ' + middelware + ) + } + + fns.push(middelware) + return pipeline + } +} + +/** + * Wrap `middleware`. + * Can be sync or async; return a promise, receive a callback, or return new + * values and errors. + * + * @param {Middleware} middleware + * @param {Callback} callback + */ +export function wrap(middleware, callback) { + /** @type {boolean} */ + let called + + return wrapped + + /** + * Call `middleware`. + * @this {any} + * @param {Array} parameters + * @returns {void} + */ + function wrapped(...parameters) { + const fnExpectsCallback = middleware.length > parameters.length + /** @type {any} */ + let result + + if (fnExpectsCallback) { + parameters.push(done) + } + + try { + result = middleware.apply(this, parameters) + } catch (error) { + const exception = /** @type {Error} */ (error) + + // Well, this is quite the pickle. + // `middleware` received a callback and called it synchronously, but that + // threw an error. + // The only thing left to do is to throw the thing instead. 
+ if (fnExpectsCallback && called) { + throw exception + } + + return done(exception) + } + + if (!fnExpectsCallback) { + if (result instanceof Promise) { + result.then(then, done) + } else if (result instanceof Error) { + done(result) + } else { + then(result) + } + } + } + + /** + * Call `callback`, only once. + * @type {Callback} + */ + function done(error, ...output) { + if (!called) { + called = true + callback(error, ...output) + } + } + + /** + * Call `done` with one value. + * + * @param {any} [value] + */ + function then(value) { + done(null, value) + } +} diff --git a/_extensions/d2/node_modules/trough/license b/_extensions/d2/node_modules/trough/license new file mode 100644 index 00000000..3f0166f6 --- /dev/null +++ b/_extensions/d2/node_modules/trough/license @@ -0,0 +1,21 @@ +(The MIT License) + +Copyright (c) 2016 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/_extensions/d2/node_modules/trough/package.json b/_extensions/d2/node_modules/trough/package.json new file mode 100644 index 00000000..c4dcbdc0 --- /dev/null +++ b/_extensions/d2/node_modules/trough/package.json @@ -0,0 +1,78 @@ +{ + "name": "trough", + "version": "2.1.0", + "description": "Middleware: a channel used to convey a liquid", + "license": "MIT", + "keywords": [ + "middleware", + "ware" + ], + "repository": "wooorm/trough", + "bugs": "https://github.com/wooorm/trough/issues", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "index.d.ts", + "index.js" + ], + "devDependencies": { + "@types/tape": "^4.0.0", + "c8": "^7.0.0", + "prettier": "^2.0.0", + "remark-cli": "^10.0.0", + "remark-preset-wooorm": "^9.0.0", + "rimraf": "^3.0.0", + "tape": "^5.0.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.48.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "rimraf \"*.d.ts\" && tsc && type-coverage", + "format": "remark . -qfo && prettier . 
-w --loglevel warn && xo --fix", + "test-api": "node test.js", + "test-coverage": "c8 --check-coverage --branches 100 --functions 100 --lines 100 --statements 100 --reporter lcov node test.js", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true, + "rules": { + "capitalized-comments": "off" + } + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true, + "#": "some nessecary `any`s", + "ignoreFiles": [ + "index.js", + "index.d.ts" + ] + } +} diff --git a/_extensions/d2/node_modules/trough/readme.md b/_extensions/d2/node_modules/trough/readme.md new file mode 100644 index 00000000..a4961d33 --- /dev/null +++ b/_extensions/d2/node_modules/trough/readme.md @@ -0,0 +1,400 @@ +# trough + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] + +`trough` is middleware. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`trough()`](#trough-1) + * [`wrap(middleware, callback)(…input)`](#wrapmiddleware-callbackinput) + * [`Trough`](#trough-2) +* [Types](#types) +* [Compatibility](#compatibility) +* [Security](#security) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +`trough` is like [`ware`][ware] with less sugar. +Middleware functions can also change the input of the next. + +The word **trough** (`/trôf/`) means a channel used to convey a liquid. + +## When should I use this? + +You can use this package when you’re building something that accepts “plugins”, +which are functions, that can be sync or async, promises or callbacks. + +## Install + +This package is [ESM only][esm]. +In Node.js (version 12.20+, 14.14+, or 16.0+), install with [npm][]: + +```sh +npm install trough +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {trough} from "https://esm.sh/trough@2" +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import process from 'node:process' +import fs from 'node:fs' +import path from 'node:path' +import {trough} from 'trough' + +const pipeline = trough() + .use(function (fileName) { + console.log('Checking… ' + fileName) + }) + .use(function (fileName) { + return path.join(process.cwd(), fileName) + }) + .use(function (filePath, next) { + fs.stat(filePath, function (error, stats) { + next(error, {filePath, stats}) + }) + }) + .use(function (ctx, next) { + if (ctx.stats.isFile()) { + fs.readFile(ctx.filePath, next) + } else { + next(new Error('Expected file')) + } + }) + +pipeline.run('readme.md', console.log) +pipeline.run('node_modules', console.log) +``` + +Yields: + +```txt +Checking… readme.md +Checking… node_modules +Error: Expected file + at ~/example.js:22:12 + at wrapped (~/node_modules/trough/index.js:111:16) + at next (~/node_modules/trough/index.js:62:23) + at done (~/node_modules/trough/index.js:145:7) + at ~/example.js:15:7 + at FSReqCallback.oncomplete (node:fs:199:5) +null +``` + +## API + +This package exports the identifiers `trough` and `wrap`. +There is no default export. + +### `trough()` + +Create a new [`Trough`][trough]. + +### `wrap(middleware, callback)(…input)` + +Call `middleware` with all input. 
+If `middleware` accepts more arguments than given in input, an extra `done` +function is passed in after the input when calling it. +In that case, `done` must be called. + +The first value in `input` is the main input value. +All other input values are the rest input values. +The values given to `callback` are the input values, merged with every +non-nullish output value. + +* If `middleware` throws an error, returns a promise that is rejected, or + calls the given `done` function with an error, `callback` is called with + that error +* If `middleware` returns a value or returns a promise that is resolved, that + value is the main output value +* If `middleware` calls `done`, all non-nullish values except for the first + one (the error) overwrite the output values + +### `Trough` + +A pipeline. + +#### `Trough#run([input…, ]done)` + +Run the pipeline (all [`use()`][use]d middleware). +Calls [`done`][done] on completion with either an error or the output of the +last middleware. + +> 👉 **Note**: as the length of input defines whether [async][] functions get a +> `next` function, it’s recommended to keep `input` at one value normally. + +##### `function done(err?, [output…])` + +The final handler passed to [`run()`][run], called with an error if a +[middleware function][fn] rejected, passed, or threw one, or the output of the +last middleware function. + +#### `Trough#use(fn)` + +Add `fn`, a [middleware function][fn], to the pipeline. + +##### `function fn([input…, ][next])` + +A middleware function called with the output of its predecessor. + +###### Synchronous + +If `fn` returns or throws an error, the pipeline fails and `done` is called +with that error. + +If `fn` returns a value (neither `null` nor `undefined`), the first `input` of +the next function is set to that value (all other `input` is passed through). + +The following example shows how returning an error stops the pipeline: + +```js +import {trough} from 'trough' + +trough() + .use(function (thing) { + return new Error('Got: ' + thing) + }) + .run('some value', console.log) +``` + +Yields: + +```txt +Error: Got: some value + at ~/example.js:5:12 + … +``` + +The following example shows how throwing an error stops the pipeline: + +```js +import {trough} from 'trough' + +trough() + .use(function (thing) { + throw new Error('Got: ' + thing) + }) + .run('more value', console.log) +``` + +Yields: + +```txt +Error: Got: more value + at ~/example.js:5:11 + … +``` + +The following example shows how the first output can be modified: + +```js +import {trough} from 'trough' + +trough() + .use(function (thing) { + return 'even ' + thing + }) + .run('more value', 'untouched', console.log) +``` + +Yields: + +```txt +null 'even more value' 'untouched' +``` + +###### Promise + +If `fn` returns a promise, and that promise rejects, the pipeline fails and +`done` is called with the rejected value. + +If `fn` returns a promise, and that promise resolves with a value (neither +`null` nor `undefined`), the first `input` of the next function is set to that +value (all other `input` is passed through). + +The following example shows how rejecting a promise stops the pipeline: + +```js +import {trough} from 'trough' + +trough() + .use(function (thing) { + return new Promise(function (resolve, reject) { + reject('Got: ' + thing) + }) + }) + .run('thing', console.log) +``` + +Yields: + +```txt +Got: thing +``` + +The following example shows how the input isn’t touched by resolving to `null`. 
+ +```js +import {trough} from 'trough' + +trough() + .use(function () { + return new Promise(function (resolve) { + setTimeout(function () { + resolve(null) + }, 100) + }) + }) + .run('Input', console.log) +``` + +Yields: + +```txt +null 'Input' +``` + +###### Asynchronous + +If `fn` accepts one more argument than the given `input`, a `next` function is +given (after the input). `next` must be called, but doesn’t have to be called +async. + +If `next` is given a value (neither `null` nor `undefined`) as its first +argument, the pipeline fails and `done` is called with that value. + +If `next` is given no value (either `null` or `undefined`) as the first +argument, all following non-nullish values change the input of the following +function, and all nullish values default to the `input`. + +The following example shows how passing a first argument stops the pipeline: + +```js +import {trough} from 'trough' + +trough() + .use(function (thing, next) { + next(new Error('Got: ' + thing)) + }) + .run('thing', console.log) +``` + +Yields: + +```txt +Error: Got: thing + at ~/example.js:5:10 +``` + +The following example shows how more values than the input are passed. + +```js +import {trough} from 'trough' + +trough() + .use(function (thing, next) { + setTimeout(function () { + next(null, null, 'values') + }, 100) + }) + .run('some', console.log) +``` + +Yields: + +```txt +null 'some' 'values' +``` + +## Types + +This package is fully typed with [TypeScript][]. + +## Compatibility + +This package is at least compatible with all maintained versions of Node.js. +As of now, that is Node.js 12.20+, 14.14+, and 16.0+. +It also works in Deno and modern browsers. + +## Security + +This package is safe. + +## Contribute + +Yes please! +See [How to Contribute to Open Source][contribute]. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/wooorm/trough/workflows/main/badge.svg + +[build]: https://github.com/wooorm/trough/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/wooorm/trough.svg + +[coverage]: https://codecov.io/github/wooorm/trough + +[downloads-badge]: https://img.shields.io/npm/dm/trough.svg + +[downloads]: https://www.npmjs.com/package/trough + +[size-badge]: https://img.shields.io/bundlephobia/minzip/trough.svg + +[size]: https://bundlephobia.com/result?p=trough + +[npm]: https://docs.npmjs.com/cli/install + +[license]: license + +[author]: https://wooorm.com + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[typescript]: https://www.typescriptlang.org + +[contribute]: https://opensource.guide/how-to-contribute/ + +[ware]: https://github.com/segmentio/ware + +[trough]: #trough-1 + +[use]: #troughusefn + +[run]: #troughruninput-done + +[fn]: #function-fninput-next + +[done]: #function-doneerr-output + +[async]: #asynchronous diff --git a/_extensions/d2/node_modules/unified/index.d.ts b/_extensions/d2/node_modules/unified/index.d.ts new file mode 100644 index 00000000..09db6f2e --- /dev/null +++ b/_extensions/d2/node_modules/unified/index.d.ts @@ -0,0 +1,863 @@ +// TypeScript Version: 4.0 + +// Note: this is a `.d.ts` file because it is not possible to have default type +// parameters in JSDoc-based TypeScript, which is a feature we use to type that: +// +// ```js +// .use(somePlugin, theOptions) +// ``` +// +// `theOptions` matches the options that `somePlugin` expects and thus is very +// important for making unified usable in TypeScript. 
+// +// Furthermore, this is places in the root of the project because types that +// accept type parameters cannot be re-exported as such easily. + +import {Node} from 'unist' +import {VFile, VFileCompatible} from 'vfile' + +/* eslint-disable @typescript-eslint/naming-convention */ + +type VFileWithOutput = Result extends Uint8Array // Buffer. + ? VFile + : Result extends object // Custom result type + ? VFile & {result: Result} + : VFile + +// Get the right most non-void thing. +type Specific = Right extends void ? Left : Right + +// Create a processor based on the input/output of a plugin. +type UsePlugin< + ParseTree extends Node | void = void, + CurrentTree extends Node | void = void, + CompileTree extends Node | void = void, + CompileResult = void, + Input = void, + Output = void +> = Output extends Node + ? Input extends string + ? // If `Input` is `string` and `Output` is `Node`, then this plugin + // defines a parser, so set `ParseTree`. + Processor< + Output, + Specific, + Specific, + CompileResult + > + : Input extends Node + ? // If `Input` is `Node` and `Output` is `Node`, then this plugin defines a + // transformer, its output defines the input of the next, so set + // `CurrentTree`. + Processor< + Specific, + Output, + Specific, + CompileResult + > + : // Else, `Input` is something else and `Output` is `Node`: + never + : Input extends Node + ? // If `Input` is `Node` and `Output` is not a `Node`, then this plugin + // defines a compiler, so set `CompileTree` and `CompileResult` + Processor< + Specific, + Specific, + Input, + Output + > + : // Else, `Input` is not a `Node` and `Output` is not a `Node`. + // Maybe it’s untyped, or the plugin throws an error (`never`), so lets + // just keep it as it was. + Processor + +/* eslint-enable @typescript-eslint/naming-convention */ + +/** + * Processor allows plugins to be chained together to transform content. + * The chain of plugins defines how content flows through it. + * + * @typeParam ParseTree + * The node that the parser yields (and `run` receives). + * @typeParam CurrentTree + * The node that the last attached plugin yields. + * @typeParam CompileTree + * The node that the compiler receives (and `run` yields). + * @typeParam CompileResult + * The thing that the compiler yields. + */ +export interface Processor< + ParseTree extends Node | void = void, + CurrentTree extends Node | void = void, + CompileTree extends Node | void = void, + CompileResult = void +> extends FrozenProcessor { + /** + * Configure the processor to use a plugin. + * + * @typeParam PluginParameters + * Plugin settings. + * @typeParam Input + * Value that is accepted by the plugin. + * + * * If the plugin returns a transformer, then this should be the node + * type that the transformer expects. + * * If the plugin sets a parser, then this should be `string`. + * * If the plugin sets a compiler, then this should be the node type that + * the compiler expects. + * @typeParam Output + * Value that the plugin yields. + * + * * If the plugin returns a transformer, then this should be the node + * type that the transformer yields, and defaults to `Input`. + * * If the plugin sets a parser, then this should be the node type that + * the parser yields. + * * If the plugin sets a compiler, then this should be the result that + * the compiler yields (`string`, `Buffer`, or something else). + * @param plugin + * Plugin (function) to use. + * Plugins are deduped based on identity: passing a function in twice will + * cause it to run only once. 
+ * @param settings + * Configuration for plugin, optional. + * Plugins typically receive one options object, but could receive other and + * more values. + * It’s also possible to pass a boolean instead of settings: `true` (to turn + * a plugin on) or `false` (to turn a plugin off). + * @returns + * Current processor. + */ + use< + PluginParameters extends any[] = any[], + Input = Specific, + Output = Input + >( + plugin: Plugin, + ...settings: PluginParameters | [boolean] + ): UsePlugin< + ParseTree, + CurrentTree, + CompileTree, + CompileResult, + Input, + Output + > + + /** + * Configure the processor with a tuple of a plugin and setting(s). + * + * @typeParam PluginParameters + * Plugin settings. + * @typeParam Input + * Value that is accepted by the plugin. + * + * * If the plugin returns a transformer, then this should be the node + * type that the transformer expects. + * * If the plugin sets a parser, then this should be `string`. + * * If the plugin sets a compiler, then this should be the node type that + * the compiler expects. + * @typeParam Output + * Value that the plugin yields. + * + * * If the plugin returns a transformer, then this should be the node + * type that the transformer yields, and defaults to `Input`. + * * If the plugin sets a parser, then this should be the node type that + * the parser yields. + * * If the plugin sets a compiler, then this should be the result that + * the compiler yields (`string`, `Buffer`, or something else). + * @param tuple + * A tuple where the first item is a plugin (function) to use and other + * items are options. + * Plugins are deduped based on identity: passing a function in twice will + * cause it to run only once. + * It’s also possible to pass a boolean instead of settings: `true` (to turn + * a plugin on) or `false` (to turn a plugin off). + * @returns + * Current processor. + */ + use< + PluginParameters extends any[] = any[], + Input = Specific, + Output = Input + >( + tuple: + | PluginTuple + | [Plugin, boolean] + ): UsePlugin< + ParseTree, + CurrentTree, + CompileTree, + CompileResult, + Input, + Output + > + + /** + * Configure the processor with a preset or list of plugins and presets. + * + * @param presetOrList + * Either a list of plugins, presets, and tuples, or a single preset: an + * object with a `plugins` (list) and/or `settings` + * (`Record`). + * @returns + * Current processor. + */ + use( + presetOrList: Preset | PluggableList + ): Processor +} + +/** + * A frozen processor is just like a regular processor, except no additional + * plugins can be added. + * A frozen processor can be created by calling `.freeze()` on a processor. + * An unfrozen processor can be created by calling a processor. + */ +export interface FrozenProcessor< + ParseTree extends Node | void = void, + CurrentTree extends Node | void = void, + CompileTree extends Node | void = void, + CompileResult = void +> { + /** + * Clone current processor + * + * @returns + * New unfrozen processor that is configured to function the same as its + * ancestor. + * But when the descendant processor is configured it does not affect the + * ancestral processor. + */ + (): Processor + + /** + * Internal list of configured plugins. + * + * @private + */ + attachers: Array<[Plugin, ...unknown[]]> + + Parser?: Parser> | undefined + Compiler?: + | Compiler, Specific> + | undefined + + /** + * Parse a file. + * + * @param file + * File to parse. + * `VFile` or anything that can be given to `new VFile()`, optional. + * @returns + * Resulting tree. 
+ */ + parse(file?: VFileCompatible | undefined): Specific + + /** + * Compile a file. + * + * @param node + * Node to compile. + * @param file + * `VFile` or anything that can be given to `new VFile()`, optional. + * @returns + * New content: compiled text (`string` or `Buffer`) or something else. + * This depends on which plugins you use: typically text, but could for + * example be a React node. + */ + stringify( + node: Specific, + file?: VFileCompatible | undefined + ): CompileTree extends Node ? CompileResult : unknown + + /** + * Run transforms on the given tree. + * + * @param node + * Tree to transform. + * @param callback + * Callback called with an error or the resulting node. + * @returns + * Nothing. + */ + run( + node: Specific, + callback: RunCallback> + ): void + + /** + * Run transforms on the given node. + * + * @param node + * Tree to transform. + * @param file + * File associated with `node`. + * `VFile` or anything that can be given to `new VFile()`. + * @param callback + * Callback called with an error or the resulting node. + * @returns + * Nothing. + */ + run( + node: Specific, + file: VFileCompatible | undefined, + callback: RunCallback> + ): void + + /** + * Run transforms on the given node. + * + * @param node + * Tree to transform. + * @param file + * File associated with `node`. + * `VFile` or anything that can be given to `new VFile()`. + * @returns + * Promise that resolves to the resulting tree. + */ + run( + node: Specific, + file?: VFileCompatible | undefined + ): Promise> + + /** + * Run transforms on the given node, synchronously. + * Throws when asynchronous transforms are configured. + * + * @param node + * Tree to transform. + * @param file + * File associated with `node`. + * `VFile` or anything that can be given to `new VFile()`, optional. + * @returns + * Resulting tree. + */ + runSync( + node: Specific, + file?: VFileCompatible | undefined + ): Specific + + /** + * Process a file. + * + * This performs all phases of the processor: + * + * 1. Parse a file into a unist node using the configured `Parser` + * 2. Run transforms on that node + * 3. Compile the resulting node using the `Compiler` + * + * The result from the compiler is stored on the file. + * What the result is depends on which plugins you use. + * The result is typically text (`string` or `Buffer`), which can be retrieved + * with `file.toString()` (or `String(file)`). + * In some cases, such as when using `rehypeReact` to create a React node, + * the result is stored on `file.result`. + * + * @param file + * `VFile` or anything that can be given to `new VFile()`. + * @param callback + * Callback called with an error or the resulting file. + * @returns + * Nothing. + */ + process( + file: VFileCompatible | undefined, + callback: ProcessCallback> + ): void + + /** + * Process a file. + * + * This performs all phases of the processor: + * + * 1. Parse a file into a unist node using the configured `Parser` + * 2. Run transforms on that node + * 3. Compile the resulting node using the `Compiler` + * + * The result from the compiler is stored on the file. + * What the result is depends on which plugins you use. + * The result is typically text (`string` or `Buffer`), which can be retrieved + * with `file.toString()` (or `String(file)`). + * In some cases, such as when using `rehypeReact` to create a React node, + * the result is stored on `file.result`. + * + * @param file + * `VFile` or anything that can be given to `new VFile()`. 
+ * @returns + * Promise that resolves to the resulting `VFile`. + */ + process(file: VFileCompatible): Promise> + + /** + * Process a file, synchronously. + * Throws when asynchronous transforms are configured. + * + * This performs all phases of the processor: + * + * 1. Parse a file into a unist node using the configured `Parser` + * 2. Run transforms on that node + * 3. Compile the resulting node using the `Compiler` + * + * The result from the compiler is stored on the file. + * What the result is depends on which plugins you use. + * The result is typically text (`string` or `Buffer`), which can be retrieved + * with `file.toString()` (or `String(file)`). + * In some cases, such as when using `rehypeReact` to create a React node, + * the result is stored on `file.result`. + * + * @param file + * `VFile` or anything that can be given to `new VFile()`, optional. + * @returns + * Resulting file. + */ + processSync( + file?: VFileCompatible | undefined + ): VFileWithOutput + + /** + * Get an in-memory key-value store accessible to all phases of the process. + * + * @returns + * Key-value store. + */ + data(): Record + + /** + * Set an in-memory key-value store accessible to all phases of the process. + * + * @param data + * Key-value store. + * @returns + * Current processor. + */ + data( + data: Record + ): Processor + + /** + * Get an in-memory value by key. + * + * @param key + * Key to get. + * @returns + * The value at `key`. + */ + data(key: string): unknown + + /** + * Set an in-memory value by key. + * + * @param key + * Key to set. + * @param value + * Value to set. + * @returns + * Current processor. + */ + data( + key: string, + value: unknown + ): Processor + + /** + * Freeze a processor. + * Frozen processors are meant to be extended and not to be configured or + * processed directly. + * + * Once a processor is frozen it cannot be unfrozen. + * New processors working just like it can be created by calling the + * processor. + * + * It’s possible to freeze processors explicitly, by calling `.freeze()`, but + * `.parse()`, `.run()`, `.stringify()`, and `.process()` call `.freeze()` to + * freeze a processor too. + * + * @returns + * Frozen processor. + */ + freeze(): FrozenProcessor +} + +/** + * A plugin is a function. + * It configures the processor and in turn can receive options. + * Plugins can configure processors by interacting with parsers and compilers + * (at `this.Parser` or `this.Compiler`) or by specifying how the syntax tree + * is handled (by returning a `Transformer`). + * + * @typeParam PluginParameters + * Plugin settings. + * @typeParam Input + * Value that is accepted by the plugin. + * + * * If the plugin returns a transformer, then this should be the node + * type that the transformer expects. + * * If the plugin sets a parser, then this should be `string`. + * * If the plugin sets a compiler, then this should be the node type that + * the compiler expects. + * @typeParam Output + * Value that the plugin yields. + * + * * If the plugin returns a transformer, then this should be the node + * type that the transformer yields, and defaults to `Input`. + * * If the plugin sets a parser, then this should be the node type that + * the parser yields. + * * If the plugin sets a compiler, then this should be the result that + * the compiler yields (`string`, `Buffer`, or something else). + * @this + * The current processor. 
+ * Plugins can configure the processor by interacting with `this.Parser` or + * `this.Compiler`, or by accessing the data associated with the whole process + * (`this.data`). + * @param settings + * Configuration for plugin. + * Plugins typically receive one options object, but could receive other and + * more values. + * Users can also pass a boolean instead of settings: `true` (to turn + * a plugin on) or `false` (to turn a plugin off). + * When a plugin is turned off, it won’t be called. + * + * When creating your own plugins, please accept only a single object! + * It allows plugins to be reconfigured and it helps users to know that every + * plugin accepts one options object. + * @returns + * Plugins can return a `Transformer` to specify how the syntax tree is + * handled. + */ +export type Plugin< + PluginParameters extends any[] = any[], + Input = Node, + Output = Input +> = ( + this: Input extends Node + ? Output extends Node + ? // This is a transform, so define `Input` as the current tree. + Processor + : // Compiler. + Processor + : Output extends Node + ? // Parser. + Processor + : // No clue. + Processor, + ...settings: PluginParameters +) => // If both `Input` and `Output` are `Node`, expect an optional `Transformer`. +Input extends Node + ? Output extends Node + ? Transformer | void + : void + : void + +/** + * Presets provide a sharable way to configure processors with multiple plugins + * and/or settings. + */ +export interface Preset { + plugins?: PluggableList + settings?: Record +} + +/** + * A tuple of a plugin and its setting(s). + * The first item is a plugin (function) to use and other items are options. + * Plugins are deduped based on identity: passing a function in twice will + * cause it to run only once. + * + * @typeParam PluginParameters + * Plugin settings. + * @typeParam Input + * Value that is accepted by the plugin. + * + * * If the plugin returns a transformer, then this should be the node + * type that the transformer expects. + * * If the plugin sets a parser, then this should be `string`. + * * If the plugin sets a compiler, then this should be the node type that + * the compiler expects. + * @typeParam Output + * Value that the plugin yields. + * + * * If the plugin returns a transformer, then this should be the node + * type that the transformer yields, and defaults to `Input`. + * * If the plugin sets a parser, then this should be the node type that + * the parser yields. + * * If the plugin sets a compiler, then this should be the result that + * the compiler yields (`string`, `Buffer`, or something else). + */ +export type PluginTuple< + PluginParameters extends any[] = any[], + Input = Node, + Output = Input +> = [Plugin, ...PluginParameters] + +/** + * A union of the different ways to add plugins and settings. + * + * @typeParam PluginParameters + * Plugin settings. + */ +export type Pluggable = + | PluginTuple + | Plugin + | Preset + +/** + * A list of plugins and presets. + */ +export type PluggableList = Pluggable[] + +/** + * @deprecated + * Please use `Plugin`. + */ +export type Attacher< + PluginParameters extends any[] = any[], + Input = Node, + Output = Input +> = Plugin + +/** + * Transformers modify the syntax tree or metadata of a file. + * A transformer is a function that is called each time a file is passed + * through the transform phase. + * If an error occurs (either because it’s thrown, returned, rejected, or passed + * to `next`), the process stops. + * + * @typeParam Input + * Node type that the transformer expects. 
+ * @typeParam Output + * Node type that the transformer yields. + * @param node + * Tree to be transformed. + * @param file + * File associated with node. + * @param next + * Callback that you must call when done. + * Note: this is given if you accept three parameters in your transformer. + * If you accept up to two parameters, it’s not given, and you can return + * a promise. + * @returns + * Any of the following: + * + * * `void` — If nothing is returned, the next transformer keeps using same + * tree. + * * `Error` — Can be returned to stop the process. + * * `Node` — Can be returned and results in further transformations and + * `stringify`s to be performed on the new tree. + * * `Promise` — If a promise is returned, the function is asynchronous, and + * must be resolved (optionally with a `Node`) or rejected (optionally with + * an `Error`). + * + * If you accept a `next` callback, nothing should be returned. + */ +export type Transformer< + Input extends Node = Node, + Output extends Node = Input +> = ( + node: Input, + file: VFile, + next: TransformCallback +) => Promise | Output | Error | undefined | void + +/** + * Callback you must call when a transformer is done. + * + * @typeParam Tree + * Node that the plugin yields. + * @param error + * Pass an error to stop the process. + * @param node + * Pass a tree to continue transformations (and `stringify`) on the new tree. + * @param file + * Pass a file to continue transformations (and `stringify`) on the new file. + * @returns + * Nothing. + */ +export type TransformCallback = ( + error?: Error | null | undefined, + node?: Tree | undefined, + file?: VFile | undefined +) => void + +/** + * Function handling the parsing of text to a syntax tree. + * Used in the parse phase in the process and called with a `string` and + * `VFile` representation of the document to parse. + * + * `Parser` can be a normal function, in which case it must return a `Node`: + * the syntax tree representation of the given file. + * + * `Parser` can also be a constructor function (a function with keys in its + * `prototype`), in which case it’s called with `new`. + * Instances must have a parse method that is called without arguments and + * must return a `Node`. + * + * @typeParam Tree + * The node that the parser yields (and `run` receives). + */ +export type Parser = + | ParserClass + | ParserFunction + +/** + * A class to parse files. + * + * @typeParam Tree + * The node that the parser yields. + */ +export class ParserClass { + prototype: { + /** + * Parse a file. + * + * @returns + * Parsed tree. + */ + parse(): Tree + } + + /** + * Constructor. + * + * @param document + * Document to parse. + * @param file + * File associated with `document`. + * @returns + * Instance. + */ + constructor(document: string, file: VFile) +} + +/** + * Normal function to parse a file. + * + * @typeParam Tree + * The node that the parser yields. + * @param document + * Document to parse. + * @param file + * File associated with `document`. + * @returns + * Node representing the given file. + */ +export type ParserFunction = ( + document: string, + file: VFile +) => Tree + +/** + * Function handling the compilation of syntax tree to a text. + * Used in the stringify phase in the process and called with a `Node` and + * `VFile` representation of the document to stringify. + * + * `Compiler` can be a normal function, in which case it must return a + * `string`: the text representation of the given syntax tree. 
+ * + * `Compiler` can also be a constructor function (a function with keys in its + * `prototype`), in which case it’s called with `new`. + * Instances must have a `compile` method that is called without arguments + * and must return a `string`. + * + * @typeParam Tree + * The node that the compiler receives. + * @typeParam Result + * The thing that the compiler yields. + */ +export type Compiler = + | CompilerClass + | CompilerFunction + +/** + * A class to compile trees. + * + * @typeParam Tree + * The node that the compiler receives. + * @typeParam Result + * The thing that the compiler yields. + */ +export class CompilerClass { + prototype: { + /** + * Compile a tree. + * + * @returns + * New content: compiled text (`string` or `Buffer`, for `file.value`) or + * something else (for `file.result`). + */ + compile(): Result + } + + /** + * Constructor. + * + * @param tree + * Tree to compile. + * @param file + * File associated with `tree`. + * @returns + * Instance. + */ + constructor(tree: Tree, file: VFile) +} + +/** + * Normal function to compile a tree. + * + * @typeParam Tree + * The node that the compiler receives. + * @typeParam Result + * The thing that the compiler yields. + * @param tree + * Tree to compile. + * @param file + * File associated with `tree`. + * @returns + * New content: compiled text (`string` or `Buffer`, for `file.value`) or + * something else (for `file.result`). + */ +export type CompilerFunction = ( + tree: Tree, + file: VFile +) => Result + +/** + * Callback called when a done running. + * + * @typeParam Tree + * The tree that the callback receives. + * @param error + * Error passed when unsuccessful. + * @param node + * Tree to transform. + * @param file + * File passed when successful. + * @returns + * Nothing. + */ +export type RunCallback = ( + error?: Error | null | undefined, + node?: Tree | undefined, + file?: VFile | undefined +) => void + +/** + * Callback called when a done processing. + * + * @typeParam File + * The file that the callback receives. + * @param error + * Error passed when unsuccessful. + * @param file + * File passed when successful. + * @returns + * Nothing. + */ +export type ProcessCallback = ( + error?: Error | null | undefined, + file?: File | undefined +) => void + +/** + * A frozen processor. 
+ */ +export function unified(): Processor diff --git a/_extensions/d2/node_modules/unified/index.js b/_extensions/d2/node_modules/unified/index.js new file mode 100644 index 00000000..024230ea --- /dev/null +++ b/_extensions/d2/node_modules/unified/index.js @@ -0,0 +1 @@ +export {unified} from './lib/index.js' diff --git a/_extensions/d2/node_modules/unified/lib/index.d.ts b/_extensions/d2/node_modules/unified/lib/index.d.ts new file mode 100644 index 00000000..07429122 --- /dev/null +++ b/_extensions/d2/node_modules/unified/lib/index.d.ts @@ -0,0 +1,19 @@ +export const unified: import('..').FrozenProcessor +export type Node = import('unist').Node +export type VFileCompatible = import('vfile').VFileCompatible +export type VFileValue = import('vfile').VFileValue +export type Processor = import('..').Processor +export type Plugin = import('..').Plugin +export type Preset = import('..').Preset +export type Pluggable = import('..').Pluggable +export type PluggableList = import('..').PluggableList +export type Transformer = import('..').Transformer +export type Parser = import('..').Parser +export type Compiler = import('..').Compiler +export type RunCallback = import('..').RunCallback +export type ProcessCallback = import('..').ProcessCallback +export type Context = { + tree: Node + file: VFile +} +import {VFile} from 'vfile' diff --git a/_extensions/d2/node_modules/unified/lib/index.js b/_extensions/d2/node_modules/unified/lib/index.js new file mode 100644 index 00000000..ef5748e6 --- /dev/null +++ b/_extensions/d2/node_modules/unified/lib/index.js @@ -0,0 +1,599 @@ +/** + * @typedef {import('unist').Node} Node + * @typedef {import('vfile').VFileCompatible} VFileCompatible + * @typedef {import('vfile').VFileValue} VFileValue + * @typedef {import('..').Processor} Processor + * @typedef {import('..').Plugin} Plugin + * @typedef {import('..').Preset} Preset + * @typedef {import('..').Pluggable} Pluggable + * @typedef {import('..').PluggableList} PluggableList + * @typedef {import('..').Transformer} Transformer + * @typedef {import('..').Parser} Parser + * @typedef {import('..').Compiler} Compiler + * @typedef {import('..').RunCallback} RunCallback + * @typedef {import('..').ProcessCallback} ProcessCallback + * + * @typedef Context + * @property {Node} tree + * @property {VFile} file + */ + +import {bail} from 'bail' +import isBuffer from 'is-buffer' +import extend from 'extend' +import isPlainObj from 'is-plain-obj' +import {trough} from 'trough' +import {VFile} from 'vfile' + +// Expose a frozen processor. +export const unified = base().freeze() + +const own = {}.hasOwnProperty + +// Function to create the first processor. +/** + * @returns {Processor} + */ +function base() { + const transformers = trough() + /** @type {Processor['attachers']} */ + const attachers = [] + /** @type {Record} */ + let namespace = {} + /** @type {boolean|undefined} */ + let frozen + let freezeIndex = -1 + + // Data management. + // @ts-expect-error: overloads are handled. + processor.data = data + processor.Parser = undefined + processor.Compiler = undefined + + // Lock. + processor.freeze = freeze + + // Plugins. + processor.attachers = attachers + // @ts-expect-error: overloads are handled. + processor.use = use + + // API. + processor.parse = parse + processor.stringify = stringify + // @ts-expect-error: overloads are handled. + processor.run = run + processor.runSync = runSync + // @ts-expect-error: overloads are handled. + processor.process = process + processor.processSync = processSync + + // Expose. 
+ return processor + + // Create a new processor based on the processor in the current scope. + /** @type {Processor} */ + function processor() { + const destination = base() + let index = -1 + + while (++index < attachers.length) { + destination.use(...attachers[index]) + } + + destination.data(extend(true, {}, namespace)) + + return destination + } + + /** + * @param {string|Record} [key] + * @param {unknown} [value] + * @returns {unknown} + */ + function data(key, value) { + if (typeof key === 'string') { + // Set `key`. + if (arguments.length === 2) { + assertUnfrozen('data', frozen) + namespace[key] = value + return processor + } + + // Get `key`. + return (own.call(namespace, key) && namespace[key]) || null + } + + // Set space. + if (key) { + assertUnfrozen('data', frozen) + namespace = key + return processor + } + + // Get space. + return namespace + } + + /** @type {Processor['freeze']} */ + function freeze() { + if (frozen) { + return processor + } + + while (++freezeIndex < attachers.length) { + const [attacher, ...options] = attachers[freezeIndex] + + if (options[0] === false) { + continue + } + + if (options[0] === true) { + options[0] = undefined + } + + /** @type {Transformer|void} */ + const transformer = attacher.call(processor, ...options) + + if (typeof transformer === 'function') { + transformers.use(transformer) + } + } + + frozen = true + freezeIndex = Number.POSITIVE_INFINITY + + return processor + } + + /** + * @param {Pluggable|null|undefined} [value] + * @param {...unknown} options + * @returns {Processor} + */ + function use(value, ...options) { + /** @type {Record|undefined} */ + let settings + + assertUnfrozen('use', frozen) + + if (value === null || value === undefined) { + // Empty. + } else if (typeof value === 'function') { + addPlugin(value, ...options) + } else if (typeof value === 'object') { + if (Array.isArray(value)) { + addList(value) + } else { + addPreset(value) + } + } else { + throw new TypeError('Expected usable value, not `' + value + '`') + } + + if (settings) { + namespace.settings = Object.assign(namespace.settings || {}, settings) + } + + return processor + + /** + * @param {import('..').Pluggable} value + * @returns {void} + */ + function add(value) { + if (typeof value === 'function') { + addPlugin(value) + } else if (typeof value === 'object') { + if (Array.isArray(value)) { + const [plugin, ...options] = value + addPlugin(plugin, ...options) + } else { + addPreset(value) + } + } else { + throw new TypeError('Expected usable value, not `' + value + '`') + } + } + + /** + * @param {Preset} result + * @returns {void} + */ + function addPreset(result) { + addList(result.plugins) + + if (result.settings) { + settings = Object.assign(settings || {}, result.settings) + } + } + + /** + * @param {PluggableList|null|undefined} [plugins] + * @returns {void} + */ + function addList(plugins) { + let index = -1 + + if (plugins === null || plugins === undefined) { + // Empty. 
+ } else if (Array.isArray(plugins)) { + while (++index < plugins.length) { + const thing = plugins[index] + add(thing) + } + } else { + throw new TypeError('Expected a list of plugins, not `' + plugins + '`') + } + } + + /** + * @param {Plugin} plugin + * @param {...unknown} [value] + * @returns {void} + */ + function addPlugin(plugin, value) { + let index = -1 + /** @type {Processor['attachers'][number]|undefined} */ + let entry + + while (++index < attachers.length) { + if (attachers[index][0] === plugin) { + entry = attachers[index] + break + } + } + + if (entry) { + if (isPlainObj(entry[1]) && isPlainObj(value)) { + value = extend(true, entry[1], value) + } + + entry[1] = value + } else { + // @ts-expect-error: fine. + attachers.push([...arguments]) + } + } + } + + /** @type {Processor['parse']} */ + function parse(doc) { + processor.freeze() + const file = vfile(doc) + const Parser = processor.Parser + assertParser('parse', Parser) + + if (newable(Parser, 'parse')) { + // @ts-expect-error: `newable` checks this. + return new Parser(String(file), file).parse() + } + + // @ts-expect-error: `newable` checks this. + return Parser(String(file), file) // eslint-disable-line new-cap + } + + /** @type {Processor['stringify']} */ + function stringify(node, doc) { + processor.freeze() + const file = vfile(doc) + const Compiler = processor.Compiler + assertCompiler('stringify', Compiler) + assertNode(node) + + if (newable(Compiler, 'compile')) { + // @ts-expect-error: `newable` checks this. + return new Compiler(node, file).compile() + } + + // @ts-expect-error: `newable` checks this. + return Compiler(node, file) // eslint-disable-line new-cap + } + + /** + * @param {Node} node + * @param {VFileCompatible|RunCallback} [doc] + * @param {RunCallback} [callback] + * @returns {Promise|void} + */ + function run(node, doc, callback) { + assertNode(node) + processor.freeze() + + if (!callback && typeof doc === 'function') { + callback = doc + doc = undefined + } + + if (!callback) { + return new Promise(executor) + } + + executor(null, callback) + + /** + * @param {null|((node: Node) => void)} resolve + * @param {(error: Error) => void} reject + * @returns {void} + */ + function executor(resolve, reject) { + // @ts-expect-error: `doc` can’t be a callback anymore, we checked. + transformers.run(node, vfile(doc), done) + + /** + * @param {Error|null} error + * @param {Node} tree + * @param {VFile} file + * @returns {void} + */ + function done(error, tree, file) { + tree = tree || node + if (error) { + reject(error) + } else if (resolve) { + resolve(tree) + } else { + // @ts-expect-error: `callback` is defined if `resolve` is not. + callback(null, tree, file) + } + } + } + } + + /** @type {Processor['runSync']} */ + function runSync(node, file) { + /** @type {Node|undefined} */ + let result + /** @type {boolean|undefined} */ + let complete + + processor.run(node, file, done) + + assertDone('runSync', 'run', complete) + + // @ts-expect-error: we either bailed on an error or have a tree. 
+ return result + + /** + * @param {Error|null} [error] + * @param {Node} [tree] + * @returns {void} + */ + function done(error, tree) { + bail(error) + result = tree + complete = true + } + } + + /** + * @param {VFileCompatible} doc + * @param {ProcessCallback} [callback] + * @returns {Promise|undefined} + */ + function process(doc, callback) { + processor.freeze() + assertParser('process', processor.Parser) + assertCompiler('process', processor.Compiler) + + if (!callback) { + return new Promise(executor) + } + + executor(null, callback) + + /** + * @param {null|((file: VFile) => void)} resolve + * @param {(error?: Error|null|undefined) => void} reject + * @returns {void} + */ + function executor(resolve, reject) { + const file = vfile(doc) + + processor.run(processor.parse(file), file, (error, tree, file) => { + if (error || !tree || !file) { + done(error) + } else { + /** @type {unknown} */ + const result = processor.stringify(tree, file) + + if (result === undefined || result === null) { + // Empty. + } else if (looksLikeAVFileValue(result)) { + file.value = result + } else { + file.result = result + } + + done(error, file) + } + }) + + /** + * @param {Error|null|undefined} [error] + * @param {VFile|undefined} [file] + * @returns {void} + */ + function done(error, file) { + if (error || !file) { + reject(error) + } else if (resolve) { + resolve(file) + } else { + // @ts-expect-error: `callback` is defined if `resolve` is not. + callback(null, file) + } + } + } + } + + /** @type {Processor['processSync']} */ + function processSync(doc) { + /** @type {boolean|undefined} */ + let complete + + processor.freeze() + assertParser('processSync', processor.Parser) + assertCompiler('processSync', processor.Compiler) + + const file = vfile(doc) + + processor.process(file, done) + + assertDone('processSync', 'process', complete) + + return file + + /** + * @param {Error|null|undefined} [error] + * @returns {void} + */ + function done(error) { + complete = true + bail(error) + } + } +} + +/** + * Check if `value` is a constructor. + * + * @param {unknown} value + * @param {string} name + * @returns {boolean} + */ +function newable(value, name) { + return ( + typeof value === 'function' && + // Prototypes do exist. + // type-coverage:ignore-next-line + value.prototype && + // A function with keys in its prototype is probably a constructor. + // Classes’ prototype methods are not enumerable, so we check if some value + // exists in the prototype. + // type-coverage:ignore-next-line + (keys(value.prototype) || name in value.prototype) + ) +} + +/** + * Check if `value` is an object with keys. + * + * @param {Record} value + * @returns {boolean} + */ +function keys(value) { + /** @type {string} */ + let key + + for (key in value) { + if (own.call(value, key)) { + return true + } + } + + return false +} + +/** + * Assert a parser is available. + * + * @param {string} name + * @param {unknown} value + * @returns {asserts value is Parser} + */ +function assertParser(name, value) { + if (typeof value !== 'function') { + throw new TypeError('Cannot `' + name + '` without `Parser`') + } +} + +/** + * Assert a compiler is available. + * + * @param {string} name + * @param {unknown} value + * @returns {asserts value is Compiler} + */ +function assertCompiler(name, value) { + if (typeof value !== 'function') { + throw new TypeError('Cannot `' + name + '` without `Compiler`') + } +} + +/** + * Assert the processor is not frozen. 
+ * + * @param {string} name + * @param {unknown} frozen + * @returns {asserts frozen is false} + */ +function assertUnfrozen(name, frozen) { + if (frozen) { + throw new Error( + 'Cannot call `' + + name + + '` on a frozen processor.\nCreate a new processor first, by calling it: use `processor()` instead of `processor`.' + ) + } +} + +/** + * Assert `node` is a unist node. + * + * @param {unknown} node + * @returns {asserts node is Node} + */ +function assertNode(node) { + // `isPlainObj` unfortunately uses `any` instead of `unknown`. + // type-coverage:ignore-next-line + if (!isPlainObj(node) || typeof node.type !== 'string') { + throw new TypeError('Expected node, got `' + node + '`') + // Fine. + } +} + +/** + * Assert that `complete` is `true`. + * + * @param {string} name + * @param {string} asyncName + * @param {unknown} complete + * @returns {asserts complete is true} + */ +function assertDone(name, asyncName, complete) { + if (!complete) { + throw new Error( + '`' + name + '` finished async. Use `' + asyncName + '` instead' + ) + } +} + +/** + * @param {VFileCompatible} [value] + * @returns {VFile} + */ +function vfile(value) { + return looksLikeAVFile(value) ? value : new VFile(value) +} + +/** + * @param {VFileCompatible} [value] + * @returns {value is VFile} + */ +function looksLikeAVFile(value) { + return Boolean( + value && + typeof value === 'object' && + 'message' in value && + 'messages' in value + ) +} + +/** + * @param {unknown} [value] + * @returns {value is VFileValue} + */ +function looksLikeAVFileValue(value) { + return typeof value === 'string' || isBuffer(value) +} diff --git a/_extensions/d2/node_modules/unified/license b/_extensions/d2/node_modules/unified/license new file mode 100644 index 00000000..f3722d94 --- /dev/null +++ b/_extensions/d2/node_modules/unified/license @@ -0,0 +1,21 @@ +(The MIT License) + +Copyright (c) 2015 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
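The `lib/index.js` implementation above exports `unified` already frozen (`base().freeze()`) and accepts plain functions as `Parser` and `Compiler`. A minimal sketch of that behavior, using only the API defined in the vendored files; the `toyMarkup` plugin is a made-up illustration, not part of the package:

```js
import {unified} from 'unified'

// Hypothetical plugin for illustration: set a parser and a compiler directly
// on the processor. Plain functions are accepted; a constructor with a
// `parse`/`compile` method on its prototype would be called with `new`.
function toyMarkup() {
  this.Parser = (doc) => ({type: 'root', value: doc.trim()})
  this.Compiler = (tree) => tree.value.toUpperCase()
}

// The exported `unified` is frozen, so call it to obtain a configurable copy;
// calling `.use()` on the frozen export itself throws
// "Cannot call `use` on a frozen processor."
const processor = unified().use(toyMarkup)

const file = processor.processSync('  hi there  ')
console.log(String(file)) // => 'HI THERE'
```

The freeze-then-clone pattern is what `processor()` in `lib/index.js` relies on: a clone copies the `attachers` list and the `data` namespace, so configuring a descendant never mutates its ancestor.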
diff --git a/_extensions/d2/node_modules/unified/package.json b/_extensions/d2/node_modules/unified/package.json new file mode 100644 index 00000000..4be454f0 --- /dev/null +++ b/_extensions/d2/node_modules/unified/package.json @@ -0,0 +1,107 @@ +{ + "name": "unified", + "version": "10.1.2", + "description": "Interface for parsing, inspecting, transforming, and serializing content through syntax trees", + "license": "MIT", + "keywords": [ + "unified", + "process", + "parse", + "transform", + "compile", + "stringify", + "serialize", + "ast", + "cst", + "syntax", + "tree", + "content", + "rehype", + "retext", + "remark" + ], + "homepage": "https://unifiedjs.com", + "repository": "unifiedjs/unified", + "bugs": "https://github.com/unifiedjs/unified/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)", + "Junyoung Choi ", + "Hernan Rajchert ", + "Christian Murphy ", + "Vse Mozhet Byt ", + "Richard Littauer " + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "lib/", + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/unist": "^2.0.0", + "bail": "^2.0.0", + "extend": "^3.0.0", + "is-buffer": "^2.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^5.0.0" + }, + "devDependencies": { + "@types/extend": "^3.0.0", + "@types/tape": "^4.0.0", + "c8": "^7.0.0", + "prettier": "^2.0.0", + "remark-cli": "^10.0.0", + "remark-preset-wooorm": "^9.0.0", + "rimraf": "^3.0.0", + "tape": "^5.0.0", + "tsd": "^0.19.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.48.0" + }, + "scripts": { + "build": "rimraf \"test/**/*.d.ts\" && tsc && tsd && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --unhandled-rejections=strict --conditions development test/index.js", + "test-coverage": "c8 --check-coverage --branches 100 --functions 100 --lines 100 --statements 100 --reporter lcov node --unhandled-rejections=strict --conditions development test/index.js", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true, + "rules": { + "@typescript-eslint/ban-types": "off", + "promise/param-names": "off" + }, + "ignores": [ + "types/" + ] + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/unified/readme.md b/_extensions/d2/node_modules/unified/readme.md new file mode 100644 index 00000000..59eb796c --- /dev/null +++ b/_extensions/d2/node_modules/unified/readme.md @@ -0,0 +1,1450 @@ +# [![unified][logo]][site] + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +**unified** is an interface for processing text using syntax trees. +It’s what powers [**remark**][remark] (Markdown), [**retext**][retext] (natural +language), and [**rehype**][rehype] (HTML), and allows for processing between +formats. 
+ +## Intro + +**unified** enables new exciting projects like [Gatsby][] to pull in Markdown, +[MDX][] to embed [JSX][], and [Prettier][] to format it. +It’s used in about 700k projects on GitHub and has about 35m downloads each +month on npm: you’re probably using it. +Some notable users are [Node.js][], [Vercel][], [Netlify][], [GitHub][], +[Mozilla][], [WordPress][], [Adobe][], [Facebook][], [Google][], and many more. + +* To read about what we are up to, follow us [Twitter][] +* For a less technical and more practical introduction to unified, visit + [`unifiedjs.com`][site] and peruse its [Learn][] section +* Browse [awesome unified][awesome] to find out more about the ecosystem +* Questions? + Get help on [Discussions][chat]! +* Check out [Contribute][] below to find out how to help out, or become a + backer or sponsor on [OpenCollective][collective] + +## Sponsors + +Support this effort and give back by sponsoring on [OpenCollective][collective]! + + + + + + + + + + + + + + + + + + + + + + + + + + +
+[Sponsor logos: Vercel, Motif, HashiCorp, American Express, GitBook, Gatsby,
+Netlify, Coinbase, ThemeIsle, Expo, Boost Note, Holloway, and you?]
+ +## Install + +This package is [ESM only](https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c): +Node 12+ is needed to use it and it must be `import`ed instead of `require`d. + +[npm][]: + +```sh +npm install unified +``` + +## Use + +```js +import {unified} from 'unified' +import remarkParse from 'remark-parse' +import remarkRehype from 'remark-rehype' +import rehypeDocument from 'rehype-document' +import rehypeFormat from 'rehype-format' +import rehypeStringify from 'rehype-stringify' +import {reporter} from 'vfile-reporter' + +unified() + .use(remarkParse) + .use(remarkRehype) + .use(rehypeDocument, {title: '👋🌍'}) + .use(rehypeFormat) + .use(rehypeStringify) + .process('# Hello world!') + .then( + (file) => { + console.error(reporter(file)) + console.log(String(file)) + }, + (error) => { + // Handle your error here! + throw error + } + ) +``` + +Yields: + +```txt +no issues found +``` + +```html + + + + + 👋🌍 + + + +

+    <h1>Hello world!</h1>

+ + +``` + +## Contents + +* [Description](#description) +* [API](#api) + * [`processor()`](#processor) + * [`processor.use(plugin[, options])`](#processoruseplugin-options) + * [`processor.parse(file)`](#processorparsefile) + * [`processor.stringify(node[, file])`](#processorstringifynode-file) + * [`processor.run(node[, file][, done])`](#processorrunnode-file-done) + * [`processor.runSync(node[, file])`](#processorrunsyncnode-file) + * [`processor.process(file[, done])`](#processorprocessfile-done) + * [`processor.processSync(file|value)`](#processorprocesssyncfilevalue) + * [`processor.data([key[, value]])`](#processordatakey-value) + * [`processor.freeze()`](#processorfreeze) +* [`Plugin`](#plugin) + * [`function attacher([options])`](#function-attacheroptions) + * [`function transformer(node, file[, next])`](#function-transformernode-file-next) +* [`Preset`](#preset) +* [Contribute](#contribute) +* [Acknowledgments](#acknowledgments) +* [License](#license) + +## Description + +**unified** is an interface for processing text using syntax trees. +Syntax trees are a representation of text understandable to programs. +Those programs, called [*plugin*][plugin]s, take these trees and inspect and +modify them. +To get to the syntax tree from text, there is a [*parser*][parser]. +To get from that back to text, there is a [*compiler*][compiler]. +This is the [*process*][process] of a *processor*. + +```ascii +| ........................ process ........................... | +| .......... parse ... | ... run ... | ... stringify ..........| + + +--------+ +----------+ +Input ->- | Parser | ->- Syntax Tree ->- | Compiler | ->- Output + +--------+ | +----------+ + X + | + +--------------+ + | Transformers | + +--------------+ +``` + +###### Processors + +Every **processor** implements another processor. +To create a processor, call another processor. +The new processor is configured to work the same as its ancestor. +But when the descendant processor is configured in the future it does not affect +the ancestral processor. + +When processors are exposed from a module (for example, `unified` itself) they +should not be configured directly, as that would change their behavior for all +module users. +Those processors are [*frozen*][freeze] and they should be called to create a +new processor before they are used. + +###### Syntax trees + +The **syntax trees** used in **unified** are [**unist**][unist] nodes. +A [**node**][node] is a plain JavaScript objects with a `type` field. +The semantics of nodes and format of syntax trees is defined by other projects. + +There are several [*utilities*][unist-utilities] for working with nodes. + +* [**esast**][esast] — JS +* [**hast**][hast] — HTML +* [**mdast**][mdast] — Markdown +* [**nlcst**][nlcst] — Natural language +* [**xast**][xast] — XML + +###### List of processors + +The following projects process different [*syntax tree*][syntax-tree] formats. +They parse text to a syntax tree and compile that back to text. +These processors can be used as is, or their parser and compiler can be mixed +and matched with **unified** and plugins to process between different syntaxes. 
+ +* [**rehype**][rehype] ([*hast*][hast]) — HTML +* [**remark**][remark] ([*mdast*][mdast]) — Markdown +* [**retext**][retext] ([*nlcst*][nlcst]) — Natural language + +###### List of plugins + +The below [**plugins**][plugin] work with **unified**, on all [*syntax +tree*][syntax-tree] formats: + +* [`unified-diff`](https://github.com/unifiedjs/unified-diff) + — Ignore messages for unchanged lines in Travis +* [`unified-message-control`](https://github.com/unifiedjs/unified-message-control) + — Enable, disable, and ignore messages + +See [**remark**][remark-plugins], [**rehype**][rehype-plugins], and +[**retext**][retext-plugins] for their lists of plugins. + +###### File + +When processing a document, **metadata** is often gathered about that document. +[**vfile**][vfile] is a virtual file format that stores data, metadata, and +messages about files for **unified** and its plugins. + +There are several [*utilities*][vfile-utilities] for working with these files. + +###### Configuration + +[*Processors*][processors] are configured with [*plugin*][plugin]s or +with the [`data`][data] method. + +###### Integrations + +**unified** can integrate with the file system with [`unified-engine`][engine]. +CLI apps can be created with [`unified-args`][args], Gulp plugins with +[`unified-engine-gulp`][gulp], and Atom Linters with +[`unified-engine-atom`][atom]. + +[`unified-stream`][stream] provides a streaming interface. + +###### Programming interface + +The API provided by **unified** allows multiple files to be processed and gives +access to *metadata* (such as lint messages): + +```js +import {unified} from 'unified' +import remarkParse from 'remark-parse' +import remarkPresetLintMarkdownStyleGuide from 'remark-preset-lint-markdown-style-guide' +import remarkRetext from 'remark-retext' +import retextEnglish from 'retext-english' +import retextEquality from 'retext-equality' +import remarkRehype from 'remark-rehype' +import rehypeStringify from 'rehype-stringify' +import {reporter} from 'vfile-reporter' + +unified() + .use(remarkParse) + .use(remarkPresetLintMarkdownStyleGuide) + .use(remarkRetext, unified().use(retextEnglish).use(retextEquality)) + .use(remarkRehype) + .use(rehypeStringify) + .process('*Emphasis* and _stress_, you guys!') + .then( + (file) => { + console.error(reporter(file)) + console.log(String(file)) + }, + (error) => { + // Handle your error here! + throw error + } + ) +``` + +Yields: + +```txt + 1:16-1:24 warning Emphasis should use `*` as a marker emphasis-marker remark-lint + 1:30-1:34 warning `guys` may be insensitive, use `people`, `persons`, `folks` instead gals-man retext-equality + +⚠ 2 warnings +``` + +```html +

+<p><em>Emphasis</em> and <em>stress</em>, you guys!</p>

+``` + +###### Processing between syntaxes + +[*Processors*][processors] can be combined in two modes. + +**Bridge** mode transforms the [*syntax tree*][syntax-tree] from one format +(*origin*) to another (*destination*). +Another processor runs on the destination tree. +Finally, the original processor continues transforming the origin tree. + +**Mutate** mode also transforms the syntax tree from one format to another. +But the original processor continues transforming the destination tree. + +In the previous example (“Programming interface”), `remark-retext` is used in +*bridge* mode: the origin syntax tree is kept after [**retext**][retext] is +done; whereas `remark-rehype` is used in *mutate* mode: it sets a new syntax +tree and discards the origin tree. + +* [`remark-retext`][remark-retext] +* [`remark-rehype`][remark-rehype] +* [`rehype-retext`][rehype-retext] +* [`rehype-remark`][rehype-remark] + +## API + +This package exports the following identifiers: `unified`. +There is no default export. + +### `processor()` + +[*Processor*][processors] describing how to *process* text. + +###### Returns + +`Function` — New [*unfrozen*][freeze] processor that is configured to work the +same as its ancestor. +When the descendant processor is configured in the future it does not affect the +ancestral processor. + +###### Example + +The following example shows how a new processor can be created (from the remark +processor) and linked to **stdin**(4) and **stdout**(4). + +```js +import {remark} from 'remark' +import concatStream from 'concat-stream' + +process.stdin.pipe( + concatStream((buf) => { + process.stdout.write(remark().processSync(buf).toString()) + }) +) +``` + +### `processor.use(plugin[, options])` + +[*Configure*][configuration] the processor to use a [*plugin*][plugin] and +optionally configure that plugin with options. + +If the processor is already using this plugin, the previous plugin configuration +is changed based on the options that are passed in. +The plugin is not added a second time. + +###### Signatures + +* `processor.use(plugin[, options])` +* `processor.use(preset)` +* `processor.use(list)` + +###### Parameters + +* `plugin` ([`Attacher`][plugin]) +* `options` (`*`, optional) — Configuration for `plugin` +* `preset` (`Object`) — Object with an optional `plugins` (set to `list`), + and/or an optional `settings` object +* `list` (`Array`) — List of plugins, presets, and pairs (`plugin` and + `options` in an array) + +###### Returns + +`processor` — The processor that `use` was called on. + +###### Note + +`use` cannot be called on [*frozen*][freeze] processors. +Call the processor first to create a new unfrozen processor. + +###### Example + +There are many ways to pass plugins to `.use()`. +The below example gives an overview. + +```js +import {unified} from 'unified' + +unified() + // Plugin with options: + .use(pluginA, {x: true, y: true}) + // Passing the same plugin again merges configuration (to `{x: true, y: false, z: true}`): + .use(pluginA, {y: false, z: true}) + // Plugins: + .use([pluginB, pluginC]) + // Two plugins, the second with options: + .use([pluginD, [pluginE, {}]]) + // Preset with plugins and settings: + .use({plugins: [pluginF, [pluginG, {}]], settings: {position: false}}) + // Settings only: + .use({settings: {position: false}}) +``` + +### `processor.parse(file)` + +Parse text to a [*syntax tree*][syntax-tree]. 
+ +###### Parameters + +* `file` ([`VFile`][vfile]) — [*File*][file], any value accepted by `vfile()` + +###### Returns + +[`Node`][node] — Parsed [*syntax tree*][syntax-tree] representing `file`. + +###### Note + +`parse` freezes the processor if not already [*frozen*][freeze]. + +`parse` performs the [*parse phase*][description], not the *run phase* or other +phases. + +###### Example + +The below example shows how `parse` can be used to create a syntax tree from a +file. + +```js +import {unified} from 'unified' +import remarkParse from 'remark-parse' + +const tree = unified().use(remarkParse).parse('# Hello world!') + +console.log(tree) +``` + +Yields: + +```js +{ + type: 'root', + children: [ + {type: 'heading', depth: 1, children: [Array], position: [Position]} + ], + position: { + start: {line: 1, column: 1, offset: 0}, + end: {line: 1, column: 15, offset: 14} + } +} +``` + +#### `processor.Parser` + +A **parser** handles the parsing of text to a [*syntax tree*][syntax-tree]. +Used in the [*parse phase*][description] and called with a `string` and +[`VFile`][vfile] representation of the text to parse. + +`Parser` can be a function, in which case it must return a [`Node`][node]: the +syntax tree representation of the given file. + +`Parser` can also be a constructor function (a function with a `parse` field, or +other fields, in its `prototype`), in which case it’s constructed with `new`. +Instances must have a `parse` method that is called without arguments and must +return a [`Node`][node]. + +### `processor.stringify(node[, file])` + +Compile a [*syntax tree*][syntax-tree]. + +###### Parameters + +* `node` ([`Node`][node]) — [*Syntax tree*][syntax-tree] to compile +* `file` ([`VFile`][vfile], optional) — [*File*][file], any value accepted by + `vfile()` + +###### Returns + +`string` or `Buffer` (see notes) — Textual representation of the [*syntax +tree*][syntax-tree] + +###### Note + +`stringify` freezes the processor if not already [*frozen*][freeze]. + +`stringify` performs the [*stringify phase*][description], not the *run phase* +or other phases. + +unified typically compiles by serializing: most [*compiler*][compiler]s return +`string` (or `Buffer`). +Some compilers, such as the one configured with [`rehype-react`][rehype-react], +return other values (in this case, a React tree). +If you’re using a compiler doesn’t serialize, expect different result values. +When using TypeScript, cast the type on your side. + +###### Example + +The below example shows how `stringify` can be used to serialize a syntax tree. + +```js +import {unified} from 'unified' +import rehypeStringify from 'rehype-stringify' +import {h} from 'hastscript' + +const tree = h('h1', 'Hello world!') + +const doc = unified().use(rehypeStringify).stringify(tree) + +console.log(doc) +``` + +Yields: + +```html +

+<h1>Hello world!</h1>

+``` + +#### `processor.Compiler` + +A **compiler** handles the compiling of a [*syntax tree*][syntax-tree] to text. +Used in the [*stringify phase*][description] and called with a [`Node`][node] +and [`VFile`][file] representation of syntax tree to compile. + +`Compiler` can be a function, in which case it should return a `string`: the +textual representation of the syntax tree. + +`Compiler` can also be a constructor function (a function with a `compile` +field, or other fields, in its `prototype`), in which case it’s constructed with +`new`. +Instances must have a `compile` method that is called without arguments and +should return a `string`. + +### `processor.run(node[, file][, done])` + +Run [*transformers*][transformer] on a [*syntax tree*][syntax-tree]. + +###### Parameters + +* `node` ([`Node`][node]) — [*Syntax tree*][syntax-tree] to run on +* `file` ([`VFile`][vfile], optional) — [*File*][file], any value accepted by + `vfile()` +* `done` ([`Function`][run-done], optional) — Callback + +###### Returns + +[`Promise`][promise] if `done` is not given. +The returned promise is rejected with a fatal error, or resolved with the +transformed [*syntax tree*][syntax-tree]. + +###### Note + +`run` freezes the processor if not already [*frozen*][freeze]. + +`run` performs the [*run phase*][description], not other phases. + +#### `function done(err[, node, file])` + +Callback called when [*transformers*][transformer] are done. +Called with either an error or results. + +###### Parameters + +* `err` (`Error`, optional) — Fatal error +* `node` ([`Node`][node], optional) — Transformed [*syntax tree*][syntax-tree] +* `file` ([`VFile`][vfile], optional) — [*File*][file] + +###### Example + +The below example shows how `run` can be used to transform a syntax tree. + +```js +import {unified} from 'unified' +import remarkReferenceLinks from 'remark-reference-links' +import {u} from 'unist-builder' + +const tree = u('root', [ + u('paragraph', [ + u('link', {href: 'https://example.com'}, [u('text', 'Example Domain')]) + ]) +]) + +unified() + .use(remarkReferenceLinks) + .run(tree) + .then( + (changedTree) => console.log(changedTree), + (error) => { + // Handle your error here! + throw error + } + ) +``` + +Yields: + +```js +{ + type: 'root', + children: [ + {type: 'paragraph', children: [Array]}, + {type: 'definition', identifier: '1', title: undefined, url: undefined} + ] +} +``` + +### `processor.runSync(node[, file])` + +Run [*transformers*][transformer] on a [*syntax tree*][syntax-tree]. + +An error is thrown if asynchronous [*plugin*][plugin]s are configured. + +###### Parameters + +* `node` ([`Node`][node]) — [*Syntax tree*][syntax-tree] to run on +* `file` ([`VFile`][vfile], optional) — [*File*][file], any value accepted by + `vfile()` + +###### Returns + +[`Node`][node] — Transformed [*syntax tree*][syntax-tree]. + +###### Note + +`runSync` freezes the processor if not already [*frozen*][freeze]. + +`runSync` performs the [*run phase*][description], not other phases. + +### `processor.process(file[, done])` + +[*Process*][description] the given [*file*][file] as configured on the +processor. + +###### Parameters + +* `file` ([`VFile`][vfile]) — [*File*][file], any value accepted by `vfile()` +* `done` ([`Function`][process-done], optional) — Callback + +###### Returns + +[`Promise`][promise] if `done` is not given. +The returned promise is rejected with a fatal error, or resolved with the +processed [*file*][file]. 
+ +The parsed, transformed, and compiled value is exposed on +[`file.value`][vfile-value] or `file.result` (see notes). + +###### Note + +`process` freezes the processor if not already [*frozen*][freeze]. + +`process` performs the [*parse*, *run*, and *stringify* phases][description]. + +unified typically compiles by serializing: most [*compiler*][compiler]s return +`string` (or `Buffer`). +Some compilers, such as the one configured with [`rehype-react`][rehype-react], +return other values (in this case, a React tree). +If you’re using a compiler that serializes, the result is available at +`file.value`. +Otherwise, the result is available at `file.result`. + +###### Example + +The below example shows how `process` can be used to process a file, whether +transformers are asynchronous or not, with promises. + +```js +import {unified} from 'unified' +import remarkParse from 'remark-parse' +import remarkRehype from 'remark-rehype' +import rehypeDocument from 'rehype-document' +import rehypeFormat from 'rehype-format' +import rehypeStringify from 'rehype-stringify' + +unified() + .use(remarkParse) + .use(remarkRehype) + .use(rehypeDocument, {title: '👋🌍'}) + .use(rehypeFormat) + .use(rehypeStringify) + .process('# Hello world!') + .then( + (file) => console.log(String(file)), + (error) => { + // Handle your error here! + throw error + } + ) +``` + +Yields: + +```html + + + + + 👋🌍 + + + +

+    <h1>Hello world!</h1>

+ + +``` + +#### `function done(err, file)` + +Callback called when the [*process*][description] is done. +Called with a fatal error, if any, and a [*file*][file]. + +###### Parameters + +* `err` (`Error`, optional) — Fatal error +* `file` ([`VFile`][vfile]) — Processed [*file*][file] + +###### Example + +The below example shows how `process` can be used to process a file, whether +transformers are asynchronous or not, with a callback. + +```js +import {unified} from 'unified' +import remarkParse from 'remark-parse' +import remarkGithub from 'remark-github' +import remarkStringify from 'remark-stringify' +import {reporter} from 'vfile-reporter' + +unified() + .use(remarkParse) + .use(remarkGithub) + .use(remarkStringify) + .process('@unifiedjs') + .then( + (file) => { + console.error(reporter(file)) + console.log(String(file)) + }, + (error) => { + // Handle your error here! + throw error + } + ) +``` + +Yields: + +```txt +no issues found +``` + +```markdown +[**@unifiedjs**](https://github.com/unifiedjs) +``` + +### `processor.processSync(file|value)` + +[*Process*][description] the given [*file*][file] as configured on the +processor. + +An error is thrown if asynchronous [*plugin*][plugin]s are configured. + +###### Parameters + +* `file` ([`VFile`][vfile]) — [*File*][file], any value accepted by `vfile()` + +###### Returns + +([`VFile`][vfile]) — Processed [*file*][file] + +The parsed, transformed, and compiled value is exposed on +[`file.value`][vfile-value] or `file.result` (see notes). + +###### Note + +`processSync` freezes the processor if not already [*frozen*][freeze]. + +`processSync` performs the [*parse*, *run*, and *stringify* +phases][description]. + +unified typically compiles by serializing: most [*compiler*][compiler]s return +`string` (or `Buffer`). +Some compilers, such as the one configured with [`rehype-react`][rehype-react], +return other values (in this case, a React tree). +If you’re using a compiler that serializes, the result is available at +`file.value`. +Otherwise, the result is available at `file.result`. + +###### Example + +The below example shows how `processSync` can be used to process a file, if all +transformers are synchronous. + +```js +import {unified} from 'unified' +import remarkParse from 'remark-parse' +import remarkRehype from 'remark-rehype' +import rehypeDocument from 'rehype-document' +import rehypeFormat from 'rehype-format' +import rehypeStringify from 'rehype-stringify' + +const processor = unified() + .use(remarkParse) + .use(remarkRehype) + .use(rehypeDocument, {title: '👋🌍'}) + .use(rehypeFormat) + .use(rehypeStringify) + +console.log(processor.processSync('# Hello world!').toString()) +``` + +Yields: + +```html + + + + + 👋🌍 + + + +

+    <h1>Hello world!</h1>

+ + +``` + +### `processor.data([key[, value]])` + +[*Configure*][configuration] the processor with information available to all +[*plugin*][plugin]s. +Information is stored in an in-memory key-value store. + +Typically, options can be given to a specific plugin, but sometimes it makes +sense to have information shared with several plugins. +For example, a list of HTML elements that are self-closing, which is needed +during all [*phases*][description] of the *process*. + +###### Signatures + +* `processor = processor.data(key, value)` +* `processor = processor.data(values)` +* `value = processor.data(key)` +* `info = processor.data()` + +###### Parameters + +* `key` (`string`, optional) — Identifier +* `value` (`*`, optional) — Value to set +* `values` (`Object`, optional) — Values to set + +###### Returns + +* `processor` — If setting, the processor that `data` is called on +* `value` (`*`) — If getting, the value at `key` +* `info` (`Object`) — Without arguments, the key-value store + +###### Note + +Setting information cannot occur on [*frozen*][freeze] processors. +Call the processor first to create a new unfrozen processor. + +###### Example + +The following example show how to get and set information: + +```js +import {unified} from 'unified' + +const processor = unified().data('alpha', 'bravo') + +processor.data('alpha') // => 'bravo' + +processor.data() // => {alpha: 'bravo'} + +processor.data({charlie: 'delta'}) + +processor.data() // => {charlie: 'delta'} +``` + +### `processor.freeze()` + +**Freeze** a processor. +*Frozen* processors are meant to be extended and not to be configured directly. + +Once a processor is frozen it cannot be *unfrozen*. +New processors working the same way can be created by calling the processor. + +It’s possible to freeze processors explicitly by calling `.freeze()`. +Processors freeze implicitly when [`.parse()`][parse], [`.run()`][run], +[`.runSync()`][run-sync], [`.stringify()`][stringify], [`.process()`][process], +or [`.processSync()`][process-sync] are called. + +###### Returns + +`processor` — The processor that `freeze` was called on. + +###### Example + +The following example, `index.js`, shows how rehype prevents extensions to +itself: + +```js +import {unified} from 'unified' +import remarkParse from 'rehype-parse' +import remarkStringify from 'rehype-stringify' + +export const rehype = unified().use(remarkParse).use(remarkStringify).freeze() +``` + +The below example, `a.js`, shows how that processor can be used and configured. + +```js +import {rehype} from 'rehype' +import rehypeFormat from 'rehype-format' +// … + +rehype() + .use(rehypeFormat) + // … +``` + +The below example, `b.js`, shows a similar looking example that operates on the +frozen rehype interface because it does not call `rehype`. +If this behavior was allowed it would result in unexpected behavior so an +error is thrown. +**This is invalid**: + +```js +import {rehype} from 'rehype' +import rehypeFormat from 'rehype-format' +// … + +rehype + .use(rehypeFormat) + // … +``` + +Yields: + +```txt +~/node_modules/unified/index.js:426 + throw new Error( + ^ + +Error: Cannot call `use` on a frozen processor. +Create a new processor first, by calling it: use `processor()` instead of `processor`. 
+ at assertUnfrozen (~/node_modules/unified/index.js:426:11) + at Function.use (~/node_modules/unified/index.js:165:5) + at ~/b.js:6:4 +``` + +## `Plugin` + +**Plugins** [*configure*][configuration] the processors they are applied on in +the following ways: + +* They change the processor: such as the [*parser*][parser], the + [*compiler*][compiler], or configuring [*data*][data] +* They specify how to handle [*syntax trees*][syntax-tree] and [*files*][file] + +Plugins are a concept. +They materialize as [`attacher`][attacher]s. + +###### Example + +`move.js`: + +```js +export function move(options = {}) { + const {extname} = options + + if (!extname) { + throw new Error('Missing `extname` in options') + } + + return transformer + + function transformer(tree, file) { + if (file.extname && file.extname !== extname) { + file.extname = extname + } + } +} +``` + +`index.md`: + +```markdown +# Hello, world! +``` + +`index.js`: + +```js +import {unified} from 'unified' +import remarkParse from 'remark-parse' +import remarkRehype from 'remark-rehype' +import rehypeStringify from 'rehype-stringify' +import {toVFile} from 'to-vfile' +import {reporter} from 'vfile-reporter' +import {move} from './move.js' + +unified() + .use(remarkParse) + .use(remarkRehype) + .use(move, {extname: '.html'}) + .use(rehypeStringify) + .process(toVFile.readSync('index.md')) + .then( + (file) => { + console.error(reporter(file)) + toVFile.writeSync(file) // Written to `index.html`. + }, + (error) => { + // Handle your error here! + throw error + } + ) +``` + +Yields: + +```txt +index.md: no issues found +``` + +…and in `index.html`: + +```html +
+<h1>Hello, world!</h1>
+``` + +### `function attacher([options])` + +**Attachers** are materialized [*plugin*][plugin]s. +An attacher is a function that can receive options and +[*configures*][configuration] the processor. + +Attachers change the processor, such as the [*parser*][parser], the +[*compiler*][compiler], configuring [*data*][data], or by specifying how the +[*syntax tree*][syntax-tree] or [*file*][file] are handled. + +###### Context + +The context object (`this`) is set to the processor the attacher is applied on. + +###### Parameters + +* `options` (`*`, optional) — Configuration + +###### Returns + +[`transformer`][transformer] — Optional. + +###### Note + +Attachers are called when the processor is [*frozen*][freeze], not when they are +applied. + +### `function transformer(node, file[, next])` + +**Transformers** handle [*syntax tree*][syntax-tree]s and [*file*][file]s. +A transformer is a function that is called each time a syntax tree and file are +passed through the [*run phase*][description]. +If an error occurs (either because it’s thrown, returned, rejected, or passed to +[`next`][next]), the process stops. + +The *run phase* is handled by [`trough`][trough], see its documentation for the +exact semantics of these functions. + +###### Parameters + +* `node` ([`Node`][node]) — [*Syntax tree*][syntax-tree] to handle +* `file` ([`VFile`][vfile]) — [*File*][file] to handle +* `next` ([`Function`][next], optional) + +###### Returns + +* `void` — If nothing is returned, the next transformer keeps using same tree. +* `Error` — Fatal error to stop the process +* `node` ([`Node`][node]) — New [*syntax tree*][syntax-tree]. + If returned, the next transformer is given this new tree +* `Promise` — Returned to perform an asynchronous operation. + The promise **must** be resolved (optionally with a [`Node`][node]) or + rejected (optionally with an `Error`) + +#### `function next(err[, tree[, file]])` + +If the signature of a [*transformer*][transformer] includes `next` (the third +argument), the transformer **may** perform asynchronous operations, and **must** +call `next()`. + +###### Parameters + +* `err` (`Error`, optional) — Fatal error to stop the process +* `node` ([`Node`][node], optional) — New [*syntax tree*][syntax-tree]. + If given, the next transformer is given this new tree +* `file` ([`VFile`][vfile], optional) — New [*file*][file]. + If given, the next transformer is given this new file + +## `Preset` + +**Presets** are sharable [*configuration*][configuration]. +They can contain [*plugins*][plugin] and settings. + +###### Example + +`preset.js`: + +```js +import remarkPresetLintRecommended from 'remark-preset-lint-recommended' +import remarkPresetLintConsistent from 'remark-preset-lint-consistent' +import remarkCommentConfig from 'remark-comment-config' +import remarkToc from 'remark-toc' +import remarkLicense from 'remark-license' + +export const preset = { + settings: {bullet: '*', emphasis: '*', fences: true}, + plugins: [ + remarkPresetLintRecommended, + remarkPresetLintConsistent, + remarkCommentConfig, + [remarkToc, {maxDepth: 3, tight: true}], + remarkLicense + ] +} +``` + +`example.md`: + +```markdown +# Hello, world! + +_Emphasis_ and **importance**. 
+ +## Table of contents + +## API + +## License +``` + +`index.js`: + +```js +import {remark} from 'remark' +import {toVFile} from 'to-vfile' +import {reporter} from 'vfile-reporter' +import {preset} from './preset.js' + +remark() + .use(preset) + .process(toVFile.readSync('example.md')) + .then( + (file) => { + console.error(reporter(file)) + toVFile.writeSync(file) + }, + (error) => { + // Handle your error here! + throw error + } + ) +``` + +Yields: + +```txt +example.md: no issues found +``` + +`example.md` now contains: + +```markdown +# Hello, world! + +*Emphasis* and **importance**. + +## Table of contents + +* [API](#api) +* [License](#license) + +## API + +## License + +[MIT](license) © [Titus Wormer](https://wooorm.com) +``` + +## Contribute + +See [`contributing.md`][contributing] in [`unifiedjs/.github`][health] for ways +to get started. +See [`support.md`][support] for ways to get help. +Ideas for new plugins and tools can be posted in [`unifiedjs/ideas`][ideas]. + +A curated list of awesome unified resources can be found in [**awesome +unified**][awesome]. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. + +## Acknowledgments + +Preliminary work for unified was done [in 2014][preliminary] for +[**retext**][retext] and inspired by [`ware`][ware]. +Further incubation happened in [**remark**][remark]. +The project was finally [externalised][] in 2015 and [published][] as `unified`. +The project was authored by [**@wooorm**](https://github.com/wooorm). + +Although `unified` since moved its plugin architecture to [`trough`][trough], +thanks to [**@calvinfo**](https://github.com/calvinfo), +[**@ianstormtaylor**](https://github.com/ianstormtaylor), and others for their +work on [`ware`][ware], as it was a huge initial inspiration. 
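+
+As a quick illustration of the `transformer` and `next` interfaces described
+above, here is a minimal sketch. It is an editor's addition rather than part of
+the original documentation: the plugin name `pluginCheckRoot` and its message
+text are made up.
+
+```js
+// Hypothetical plugin: inspects the tree asynchronously and reports through `next`.
+export function pluginCheckRoot() {
+  return transformer
+
+  function transformer(tree, file, next) {
+    // Simulate asynchronous work; because `next` is accepted, it must be called.
+    setTimeout(() => {
+      if (tree.type !== 'root') {
+        file.message('Expected a root node, got `' + tree.type + '`')
+      }
+
+      next()
+    }, 0)
+  }
+}
+```
+
+Because the transformer accepts `next`, the run phase waits until `next()` is
+called before moving on to the next transformer.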
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[logo]: https://raw.githubusercontent.com/unifiedjs/unified/93862e5/logo.svg?sanitize=true + +[build-badge]: https://github.com/unifiedjs/unified/workflows/main/badge.svg + +[build]: https://github.com/unifiedjs/unified/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/unifiedjs/unified.svg + +[coverage]: https://codecov.io/github/unifiedjs/unified + +[downloads-badge]: https://img.shields.io/npm/dm/unified.svg + +[downloads]: https://www.npmjs.com/package/unified + +[size-badge]: https://img.shields.io/bundlephobia/minzip/unified.svg + +[size]: https://bundlephobia.com/result?p=unified + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/unifiedjs/unified/discussions + +[health]: https://github.com/unifiedjs/.github + +[contributing]: https://github.com/unifiedjs/.github/blob/HEAD/contributing.md + +[support]: https://github.com/unifiedjs/.github/blob/HEAD/support.md + +[coc]: https://github.com/unifiedjs/.github/blob/HEAD/code-of-conduct.md + +[awesome]: https://github.com/unifiedjs/awesome-unified + +[license]: license + +[author]: https://wooorm.com + +[npm]: https://docs.npmjs.com/cli/install + +[site]: https://unifiedjs.com + +[twitter]: https://twitter.com/unifiedjs + +[learn]: https://unifiedjs.com/learn/ + +[rehype]: https://github.com/rehypejs/rehype + +[remark]: https://github.com/remarkjs/remark + +[retext]: https://github.com/retextjs/retext + +[esast]: https://github.com/syntax-tree/esast + +[hast]: https://github.com/syntax-tree/hast + +[mdast]: https://github.com/syntax-tree/mdast + +[nlcst]: https://github.com/syntax-tree/nlcst + +[xast]: https://github.com/syntax-tree/xast + +[unist]: https://github.com/syntax-tree/unist + +[engine]: https://github.com/unifiedjs/unified-engine + +[args]: https://github.com/unifiedjs/unified-args + +[gulp]: https://github.com/unifiedjs/unified-engine-gulp + +[atom]: https://github.com/unifiedjs/unified-engine-atom + +[remark-rehype]: https://github.com/remarkjs/remark-rehype + +[remark-retext]: https://github.com/remarkjs/remark-retext + +[rehype-retext]: https://github.com/rehypejs/rehype-retext + +[rehype-remark]: https://github.com/rehypejs/rehype-remark + +[unist-utilities]: https://github.com/syntax-tree/unist#list-of-utilities + +[vfile]: https://github.com/vfile/vfile + +[vfile-value]: https://github.com/vfile/vfile#vfilevalue + +[vfile-utilities]: https://github.com/vfile/vfile#list-of-utilities + +[node]: https://github.com/syntax-tree/unist#node + +[description]: #description + +[syntax-tree]: #syntax-trees + +[configuration]: #configuration + +[file]: #file + +[processors]: #processors + +[process]: #processorprocessfile-done + +[process-sync]: #processorprocesssyncfilevalue + +[parse]: #processorparsefile + +[parser]: #processorparser + +[stringify]: #processorstringifynode-file + +[run]: #processorrunnode-file-done + +[run-sync]: #processorrunsyncnode-file + +[compiler]: #processorcompiler + +[data]: #processordatakey-value + +[attacher]: #function-attacheroptions + +[transformer]: #function-transformernode-file-next + +[next]: #function-nexterr-tree-file + +[freeze]: #processorfreeze + +[plugin]: #plugin + +[run-done]: #function-doneerr-node-file + +[process-done]: #function-doneerr-file + 
+[contribute]: #contribute + +[rehype-react]: https://github.com/rehypejs/rehype-react + +[trough]: https://github.com/wooorm/trough#function-fninput-next + +[promise]: https://developer.mozilla.org/docs/Web/JavaScript/Reference/Global_Objects/Promise + +[remark-plugins]: https://github.com/remarkjs/remark/blob/HEAD/doc/plugins.md#list-of-plugins + +[rehype-plugins]: https://github.com/rehypejs/rehype/blob/HEAD/doc/plugins.md#list-of-plugins + +[retext-plugins]: https://github.com/retextjs/retext/blob/HEAD/doc/plugins.md#list-of-plugins + +[stream]: https://github.com/unifiedjs/unified-stream + +[ideas]: https://github.com/unifiedjs/ideas + +[preliminary]: https://github.com/retextjs/retext/commit/8fcb1f#diff-168726dbe96b3ce427e7fedce31bb0bc + +[externalised]: https://github.com/remarkjs/remark/commit/9892ec#diff-168726dbe96b3ce427e7fedce31bb0bc + +[published]: https://github.com/unifiedjs/unified/commit/2ba1cf + +[ware]: https://github.com/segmentio/ware + +[gatsby]: https://www.gatsbyjs.org + +[mdx]: https://mdxjs.com + +[jsx]: https://reactjs.org/docs/jsx-in-depth.html + +[prettier]: https://prettier.io + +[node.js]: https://nodejs.org + +[vercel]: https://vercel.com + +[netlify]: https://www.netlify.com + +[github]: https://github.com + +[mozilla]: https://www.mozilla.org + +[wordpress]: https://wordpress.com + +[adobe]: https://www.adobe.com + +[facebook]: https://www.facebook.com + +[google]: https://www.google.com diff --git a/_extensions/d2/node_modules/unist-util-is/index.d.ts b/_extensions/d2/node_modules/unist-util-is/index.d.ts new file mode 100644 index 00000000..5f7e8576 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-is/index.d.ts @@ -0,0 +1,13 @@ +export type Test = import('./lib/index.js').Test +export type TestFunctionAnything = import('./lib/index.js').TestFunctionAnything +export type AssertAnything = import('./lib/index.js').AssertAnything +export type PredicateTest< + Kind extends import('unist').Node +> = import('./lib/index.js').PredicateTest +export type TestFunctionPredicate< + Kind extends import('unist').Node +> = import('./lib/index.js').TestFunctionPredicate +export type AssertPredicate< + Kind extends import('unist').Node +> = import('./lib/index.js').AssertPredicate +export {is, convert} from './lib/index.js' diff --git a/_extensions/d2/node_modules/unist-util-is/index.js b/_extensions/d2/node_modules/unist-util-is/index.js new file mode 100644 index 00000000..ba5aa168 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-is/index.js @@ -0,0 +1,22 @@ +/** + * @typedef {import('./lib/index.js').Test} Test + * @typedef {import('./lib/index.js').TestFunctionAnything} TestFunctionAnything + * @typedef {import('./lib/index.js').AssertAnything} AssertAnything + */ + +/** + * @template {import('unist').Node} Kind + * @typedef {import('./lib/index.js').PredicateTest} PredicateTest + */ + +/** + * @template {import('unist').Node} Kind + * @typedef {import('./lib/index.js').TestFunctionPredicate} TestFunctionPredicate + */ + +/** + * @template {import('unist').Node} Kind + * @typedef {import('./lib/index.js').AssertPredicate} AssertPredicate + */ + +export {is, convert} from './lib/index.js' diff --git a/_extensions/d2/node_modules/unist-util-is/lib/index.d.ts b/_extensions/d2/node_modules/unist-util-is/lib/index.d.ts new file mode 100644 index 00000000..cf0b46ed --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-is/lib/index.d.ts @@ -0,0 +1,207 @@ +/** + * @typedef {import('unist').Node} Node + * @typedef {import('unist').Parent} Parent + */ 
+/** + * @typedef {Record} Props + * @typedef {null | undefined | string | Props | TestFunctionAnything | Array} Test + * Check for an arbitrary node, unaware of TypeScript inferral. + * + * @callback TestFunctionAnything + * Check if a node passes a test, unaware of TypeScript inferral. + * @param {unknown} this + * The given context. + * @param {Node} node + * A node. + * @param {number | null | undefined} [index] + * The node’s position in its parent. + * @param {Parent | null | undefined} [parent] + * The node’s parent. + * @returns {boolean | void} + * Whether this node passes the test. + */ +/** + * @template {Node} Kind + * Node type. + * @typedef {Kind['type'] | Partial | TestFunctionPredicate | Array | TestFunctionPredicate>} PredicateTest + * Check for a node that can be inferred by TypeScript. + */ +/** + * Check if a node passes a certain test. + * + * @template {Node} Kind + * Node type. + * @callback TestFunctionPredicate + * Complex test function for a node that can be inferred by TypeScript. + * @param {Node} node + * A node. + * @param {number | null | undefined} [index] + * The node’s position in its parent. + * @param {Parent | null | undefined} [parent] + * The node’s parent. + * @returns {node is Kind} + * Whether this node passes the test. + */ +/** + * @callback AssertAnything + * Check that an arbitrary value is a node, unaware of TypeScript inferral. + * @param {unknown} [node] + * Anything (typically a node). + * @param {number | null | undefined} [index] + * The node’s position in its parent. + * @param {Parent | null | undefined} [parent] + * The node’s parent. + * @returns {boolean} + * Whether this is a node and passes a test. + */ +/** + * Check if a node is a node and passes a certain node test. + * + * @template {Node} Kind + * Node type. + * @callback AssertPredicate + * Check that an arbitrary value is a specific node, aware of TypeScript. + * @param {unknown} [node] + * Anything (typically a node). + * @param {number | null | undefined} [index] + * The node’s position in its parent. + * @param {Parent | null | undefined} [parent] + * The node’s parent. + * @returns {node is Kind} + * Whether this is a node and passes a test. + */ +/** + * Check if `node` is a `Node` and whether it passes the given test. + * + * @param node + * Thing to check, typically `Node`. + * @param test + * A check for a specific node. + * @param index + * The node’s position in its parent. + * @param parent + * The node’s parent. + * @returns + * Whether `node` is a node and passes a test. + */ +export const is: (() => false) & + (< + Kind extends import('unist').Node< + import('unist').Data + > = import('unist').Node + >( + node: unknown, + test: PredicateTest, + index: number, + parent: Parent, + context?: unknown + ) => node is Kind) & + (< + Kind_1 extends import('unist').Node< + import('unist').Data + > = import('unist').Node + >( + node: unknown, + test: PredicateTest, + index?: null | undefined, + parent?: null | undefined, + context?: unknown + ) => node is Kind_1) & + (( + node: unknown, + test: Test, + index: number, + parent: Parent, + context?: unknown + ) => boolean) & + (( + node: unknown, + test?: Test, + index?: null | undefined, + parent?: null | undefined, + context?: unknown + ) => boolean) +/** + * Generate an assertion from a test. + * + * Useful if you’re going to test many nodes, for example when creating a + * utility where something else passes a compatible test. 
+ * + * The created function is a bit faster because it expects valid input only: + * a `node`, `index`, and `parent`. + * + * @param test + * * when nullish, checks if `node` is a `Node`. + * * when `string`, works like passing `(node) => node.type === test`. + * * when `function` checks if function passed the node is true. + * * when `object`, checks that all keys in test are in node, and that they have (strictly) equal values. + * * when `array`, checks if any one of the subtests pass. + * @returns + * An assertion. + */ +export const convert: (< + Kind extends import('unist').Node +>( + test: PredicateTest +) => AssertPredicate) & + ((test?: Test) => AssertAnything) +export type Node = import('unist').Node +export type Parent = import('unist').Parent +export type Props = Record +/** + * Check for an arbitrary node, unaware of TypeScript inferral. + */ +export type Test = + | null + | undefined + | string + | Props + | TestFunctionAnything + | Array +/** + * Check if a node passes a test, unaware of TypeScript inferral. + */ +export type TestFunctionAnything = ( + this: unknown, + node: Node, + index?: number | null | undefined, + parent?: Parent | null | undefined +) => boolean | void +/** + * Check for a node that can be inferred by TypeScript. + */ +export type PredicateTest< + Kind extends import('unist').Node +> = + | Kind['type'] + | Partial + | TestFunctionPredicate + | Array | TestFunctionPredicate> +/** + * Complex test function for a node that can be inferred by TypeScript. + */ +export type TestFunctionPredicate< + Kind extends import('unist').Node +> = ( + node: Node, + index?: number | null | undefined, + parent?: Parent | null | undefined +) => node is Kind +/** + * Check that an arbitrary value is a node, unaware of TypeScript inferral. + */ +export type AssertAnything = ( + node?: unknown, + index?: number | null | undefined, + parent?: Parent | null | undefined +) => boolean +/** + * Check that an arbitrary value is a specific node, aware of TypeScript. + */ +export type AssertPredicate< + Kind extends import('unist').Node +> = ( + node?: unknown, + index?: number | null | undefined, + parent?: Parent | null | undefined +) => node is Kind diff --git a/_extensions/d2/node_modules/unist-util-is/lib/index.js b/_extensions/d2/node_modules/unist-util-is/lib/index.js new file mode 100644 index 00000000..f9a14b0b --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-is/lib/index.js @@ -0,0 +1,301 @@ +/** + * @typedef {import('unist').Node} Node + * @typedef {import('unist').Parent} Parent + */ + +/** + * @typedef {Record} Props + * @typedef {null | undefined | string | Props | TestFunctionAnything | Array} Test + * Check for an arbitrary node, unaware of TypeScript inferral. + * + * @callback TestFunctionAnything + * Check if a node passes a test, unaware of TypeScript inferral. + * @param {unknown} this + * The given context. + * @param {Node} node + * A node. + * @param {number | null | undefined} [index] + * The node’s position in its parent. + * @param {Parent | null | undefined} [parent] + * The node’s parent. + * @returns {boolean | void} + * Whether this node passes the test. + */ + +/** + * @template {Node} Kind + * Node type. + * @typedef {Kind['type'] | Partial | TestFunctionPredicate | Array | TestFunctionPredicate>} PredicateTest + * Check for a node that can be inferred by TypeScript. + */ + +/** + * Check if a node passes a certain test. + * + * @template {Node} Kind + * Node type. 
+ * @callback TestFunctionPredicate + * Complex test function for a node that can be inferred by TypeScript. + * @param {Node} node + * A node. + * @param {number | null | undefined} [index] + * The node’s position in its parent. + * @param {Parent | null | undefined} [parent] + * The node’s parent. + * @returns {node is Kind} + * Whether this node passes the test. + */ + +/** + * @callback AssertAnything + * Check that an arbitrary value is a node, unaware of TypeScript inferral. + * @param {unknown} [node] + * Anything (typically a node). + * @param {number | null | undefined} [index] + * The node’s position in its parent. + * @param {Parent | null | undefined} [parent] + * The node’s parent. + * @returns {boolean} + * Whether this is a node and passes a test. + */ + +/** + * Check if a node is a node and passes a certain node test. + * + * @template {Node} Kind + * Node type. + * @callback AssertPredicate + * Check that an arbitrary value is a specific node, aware of TypeScript. + * @param {unknown} [node] + * Anything (typically a node). + * @param {number | null | undefined} [index] + * The node’s position in its parent. + * @param {Parent | null | undefined} [parent] + * The node’s parent. + * @returns {node is Kind} + * Whether this is a node and passes a test. + */ + +/** + * Check if `node` is a `Node` and whether it passes the given test. + * + * @param node + * Thing to check, typically `Node`. + * @param test + * A check for a specific node. + * @param index + * The node’s position in its parent. + * @param parent + * The node’s parent. + * @returns + * Whether `node` is a node and passes a test. + */ +export const is = + /** + * @type {( + * (() => false) & + * ((node: unknown, test: PredicateTest, index: number, parent: Parent, context?: unknown) => node is Kind) & + * ((node: unknown, test: PredicateTest, index?: null | undefined, parent?: null | undefined, context?: unknown) => node is Kind) & + * ((node: unknown, test: Test, index: number, parent: Parent, context?: unknown) => boolean) & + * ((node: unknown, test?: Test, index?: null | undefined, parent?: null | undefined, context?: unknown) => boolean) + * )} + */ + ( + /** + * @param {unknown} [node] + * @param {Test} [test] + * @param {number | null | undefined} [index] + * @param {Parent | null | undefined} [parent] + * @param {unknown} [context] + * @returns {boolean} + */ + // eslint-disable-next-line max-params + function is(node, test, index, parent, context) { + const check = convert(test) + + if ( + index !== undefined && + index !== null && + (typeof index !== 'number' || + index < 0 || + index === Number.POSITIVE_INFINITY) + ) { + throw new Error('Expected positive finite index') + } + + if ( + parent !== undefined && + parent !== null && + (!is(parent) || !parent.children) + ) { + throw new Error('Expected parent node') + } + + if ( + (parent === undefined || parent === null) !== + (index === undefined || index === null) + ) { + throw new Error('Expected both parent and index') + } + + // @ts-expect-error Looks like a node. + return node && node.type && typeof node.type === 'string' + ? Boolean(check.call(context, node, index, parent)) + : false + } + ) + +/** + * Generate an assertion from a test. + * + * Useful if you’re going to test many nodes, for example when creating a + * utility where something else passes a compatible test. + * + * The created function is a bit faster because it expects valid input only: + * a `node`, `index`, and `parent`. 
+ * + * @param test + * * when nullish, checks if `node` is a `Node`. + * * when `string`, works like passing `(node) => node.type === test`. + * * when `function` checks if function passed the node is true. + * * when `object`, checks that all keys in test are in node, and that they have (strictly) equal values. + * * when `array`, checks if any one of the subtests pass. + * @returns + * An assertion. + */ +export const convert = + /** + * @type {( + * ((test: PredicateTest) => AssertPredicate) & + * ((test?: Test) => AssertAnything) + * )} + */ + ( + /** + * @param {Test} [test] + * @returns {AssertAnything} + */ + function (test) { + if (test === undefined || test === null) { + return ok + } + + if (typeof test === 'string') { + return typeFactory(test) + } + + if (typeof test === 'object') { + return Array.isArray(test) ? anyFactory(test) : propsFactory(test) + } + + if (typeof test === 'function') { + return castFactory(test) + } + + throw new Error('Expected function, string, or object as test') + } + ) + +/** + * @param {Array} tests + * @returns {AssertAnything} + */ +function anyFactory(tests) { + /** @type {Array} */ + const checks = [] + let index = -1 + + while (++index < tests.length) { + checks[index] = convert(tests[index]) + } + + return castFactory(any) + + /** + * @this {unknown} + * @param {Array} parameters + * @returns {boolean} + */ + function any(...parameters) { + let index = -1 + + while (++index < checks.length) { + if (checks[index].call(this, ...parameters)) return true + } + + return false + } +} + +/** + * Turn an object into a test for a node with a certain fields. + * + * @param {Props} check + * @returns {AssertAnything} + */ +function propsFactory(check) { + return castFactory(all) + + /** + * @param {Node} node + * @returns {boolean} + */ + function all(node) { + /** @type {string} */ + let key + + for (key in check) { + // @ts-expect-error: hush, it sure works as an index. + if (node[key] !== check[key]) return false + } + + return true + } +} + +/** + * Turn a string into a test for a node with a certain type. + * + * @param {string} check + * @returns {AssertAnything} + */ +function typeFactory(check) { + return castFactory(type) + + /** + * @param {Node} node + */ + function type(node) { + return node && node.type === check + } +} + +/** + * Turn a custom test into a test for a node that passes that test. + * + * @param {TestFunctionAnything} check + * @returns {AssertAnything} + */ +function castFactory(check) { + return assertion + + /** + * @this {unknown} + * @param {unknown} node + * @param {Array} parameters + * @returns {boolean} + */ + function assertion(node, ...parameters) { + return Boolean( + node && + typeof node === 'object' && + 'type' in node && + // @ts-expect-error: fine. 
+ Boolean(check.call(this, node, ...parameters)) + ) + } +} + +function ok() { + return true +} diff --git a/_extensions/d2/node_modules/unist-util-is/license b/_extensions/d2/node_modules/unist-util-is/license new file mode 100644 index 00000000..cfa79e66 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-is/license @@ -0,0 +1,22 @@ +(The MIT license) + +Copyright (c) 2015 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/_extensions/d2/node_modules/unist-util-is/package.json b/_extensions/d2/node_modules/unist-util-is/package.json new file mode 100644 index 00000000..1a542d02 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-is/package.json @@ -0,0 +1,88 @@ +{ + "name": "unist-util-is", + "version": "5.2.1", + "description": "unist utility to check if a node passes a test", + "license": "MIT", + "keywords": [ + "unist", + "unist-util", + "util", + "utility", + "tree", + "node", + "is", + "equal", + "check", + "test", + "type" + ], + "repository": "syntax-tree/unist-util-is", + "bugs": "https://github.com/syntax-tree/unist-util-is/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)", + "Christian Murphy ", + "Lucas Brandstaetter (https://github.com/Roang-zero1)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "lib/", + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/unist": "^2.0.0" + }, + "devDependencies": { + "@types/lodash": "^4.0.0", + "@types/mdast": "^3.0.0", + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "fast-check": "^3.0.0", + "lodash": "^4.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "tsd": "^0.25.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "unified": "^10.0.0", + "xo": "^0.53.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && tsd && type-coverage", + "format": "remark . -qfo && prettier . 
-w --loglevel warn && xo --fix", + "test-api": "node --conditions development test/index.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true + }, + "remarkConfig": { + "plugins": [ + "remark-preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true + } +} diff --git a/_extensions/d2/node_modules/unist-util-is/readme.md b/_extensions/d2/node_modules/unist-util-is/readme.md new file mode 100644 index 00000000..b5b6bbcd --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-is/readme.md @@ -0,0 +1,419 @@ +# unist-util-is + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +[unist][] utility to check if nodes pass a test. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`is(node[, test[, index, parent[, context]]])`](#isnode-test-index-parent-context) + * [`convert(test)`](#converttest) + * [`AssertAnything`](#assertanything) + * [`AssertPredicate`](#assertpredicate) + * [`Test`](#test) + * [`TestFunctionAnything`](#testfunctionanything) + * [`PredicateTest`](#predicatetest) + * [`TestFunctionPredicate`](#testfunctionpredicate) +* [Examples](#examples) + * [Example of `convert`](#example-of-convert) +* [Types](#types) +* [Compatibility](#compatibility) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This package is a small utility that checks that a node is a certain node. + +## When should I use this? + +Use this small utility if you find yourself repeating code for checking what +nodes are. + +A similar package, [`hast-util-is-element`][hast-util-is-element], works on hast +elements. + +For more advanced tests, [`unist-util-select`][unist-util-select] can be used +to match against CSS selectors. + +## Install + +This package is [ESM only][esm]. +In Node.js (version 14.14+ and 16.0+), install with [npm][]: + +```sh +npm install unist-util-is +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {is} from 'https://esm.sh/unist-util-is@5' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {is} from 'unist-util-is' + +const node = {type: 'strong'} +const parent = {type: 'paragraph', children: [node]} + +is() // => false +is({children: []}) // => false +is(node) // => true +is(node, 'strong') // => true +is(node, 'emphasis') // => false + +is(node, node) // => true +is(parent, {type: 'paragraph'}) // => true +is(parent, {type: 'strong'}) // => false + +is(node, test) // => false +is(node, test, 4, parent) // => false +is(node, test, 5, parent) // => true + +function test(node, n) { + return n === 5 +} +``` + +## API + +This package exports the identifiers [`convert`][convert] and [`is`][is]. +There is no default export. + +### `is(node[, test[, index, parent[, context]]])` + +Check if `node` is a `Node` and whether it passes the given test. 
+ +###### Parameters + +* `node` (`unknown`) + — thing to check, typically [`Node`][node] +* `test` ([`Test`][test] or [`PredicateTest`][predicatetest], optional) + — a check for a specific element +* `index` (`number`, optional) + — the node’s position in its parent +* `parent` ([`Node`][node], optional) + — the node’s parent +* `context` (`any`, optional) + — context object (`this`) to call `test` with + +###### Returns + +Whether `node` is a [`Node`][node] and passes a test (`boolean`). + +###### Throws + +When an incorrect `test`, `index`, or `parent` is given. +There is no error thrown when `node` is not a node. + +### `convert(test)` + +Generate a check from a test. + +Useful if you’re going to test many nodes, for example when creating a +utility where something else passes a compatible test. + +The created function is a bit faster because it expects valid input only: +a `node`, `index`, and `parent`. + +###### Parameters + +* `test` ([`Test`][test] or [`PredicateTest`][predicatetest], optional) + — a check for a specific node + +###### Returns + +An assertion ([`AssertAnything`][assertanything] or +[`AssertPredicate`][assertpredicate]). + +### `AssertAnything` + +Check that an arbitrary value is a node, unaware of TypeScript inferral +(TypeScript type). + +###### Parameters + +* `node` (`unknown`) + — anything (typically a node) +* `index` (`number`, optional) + — the node’s position in its parent +* `parent` ([`Node`][node], optional) + — the node’s parent + +###### Returns + +Whether this is a node and passes a test (`boolean`). + +### `AssertPredicate` + +Check that an arbitrary value is a specific node, aware of TypeScript +(TypeScript type). + +###### Type parameters + +* `Kind` ([`Node`][node]) + — node type + +###### Parameters + +* `node` (`unknown`) + — anything (typically a node) +* `index` (`number`, optional) + — the node’s position in its parent +* `parent` ([`Node`][node], optional) + — the node’s parent + +###### Returns + +Whether this is a node and passes a test (`node is Kind`). + +### `Test` + +Check for an arbitrary node, unaware of TypeScript inferral (TypeScript +type). + +###### Type + +```ts +type Test = + | null + | undefined + | string + | Record + | TestFunctionAnything + | Array | TestFunctionAnything> +``` + +Checks that the given thing is a node, and then: + +* when `string`, checks that the node has that tag name +* when `function`, see [`TestFunctionAnything`][testfunctionanything] +* when `object`, checks that all keys in test are in node, and that they have + (strictly) equal values +* when `Array`, checks if one of the subtests pass + +### `TestFunctionAnything` + +Check if a node passes a test, unaware of TypeScript inferral (TypeScript +type). + +###### Parameters + +* `node` ([`Node`][node]) + — a node +* `index` (`number`, optional) + — the node’s position in its parent +* `parent` ([`Node`][node], optional) + — the node’s parent + +###### Returns + +Whether this node passes the test (`boolean`). + +### `PredicateTest` + +Check for a node that can be inferred by TypeScript (TypeScript type). + +###### Type + +```ts +type PredicateTest = + | Kind['type'] + | Partial + | TestFunctionPredicate + | Array | TestFunctionPredicate> +``` + +See [`TestFunctionPredicate`][testfunctionpredicate]. + +### `TestFunctionPredicate` + +Check if a node passes a certain node test (TypeScript type). 
+ +###### Type parameters + +* `Kind` ([`Node`][node]) + — node type + +###### Parameters + +* `node` ([`Node`][node]) + — a node +* `index` (`number`, optional) + — the node’s position in its parent +* `parent` ([`Node`][node], optional) + — the node’s parent + +###### Returns + +Whether this node passes the test (`node is Kind`). + +## Examples + +### Example of `convert` + +```js +import {u} from 'unist-builder' +import {convert} from 'unist-util-is' + +const test = convert('leaf') + +const tree = u('tree', [ + u('node', [u('leaf', '1')]), + u('leaf', '2'), + u('node', [u('leaf', '3'), u('leaf', '4')]), + u('leaf', '5') +]) + +const leafs = tree.children.filter((child, index) => test(child, index, tree)) + +console.log(leafs) +``` + +Yields: + +```js +[{type: 'leaf', value: '2'}, {type: 'leaf', value: '5'}] +``` + +## Types + +This package is fully typed with [TypeScript][]. +It exports the additional types [`AssertAnything`][assertanything], +[`AssertPredicate`][assertpredicate], [`Test`][test], +[`TestFunctionAnything`][testfunctionanything], +[`TestFunctionPredicate`][testfunctionpredicate], and +[`PredicateTest`][predicatetest]. + +## Compatibility + +Projects maintained by the unified collective are compatible with all maintained +versions of Node.js. +As of now, that is Node.js 14.14+ and 16.0+. +Our projects sometimes work with older versions, but this is not guaranteed. + +## Related + +* [`unist-util-find-after`](https://github.com/syntax-tree/unist-util-find-after) + — find a node after another node +* [`unist-util-find-before`](https://github.com/syntax-tree/unist-util-find-before) + — find a node before another node +* [`unist-util-find-all-after`](https://github.com/syntax-tree/unist-util-find-all-after) + — find all nodes after another node +* [`unist-util-find-all-before`](https://github.com/syntax-tree/unist-util-find-all-before) + — find all nodes before another node +* [`unist-util-find-all-between`](https://github.com/mrzmmr/unist-util-find-all-between) + — find all nodes between two nodes +* [`unist-util-filter`](https://github.com/syntax-tree/unist-util-filter) + — create a new tree with nodes that pass a check +* [`unist-util-remove`](https://github.com/syntax-tree/unist-util-remove) + — remove nodes from tree + +## Contribute + +See [`contributing.md`][contributing] in [`syntax-tree/.github`][health] for +ways to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. 
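+
+As an editor's sketch (not part of the upstream readme), the `Test` forms
+documented above can be compared side by side; the example node below is made
+up:
+
+```js
+import {is} from 'unist-util-is'
+
+const node = {type: 'emphasis', children: [{type: 'text', value: 'hi'}]}
+
+// Array test: passes when any of the subtests pass.
+is(node, ['strong', 'emphasis']) // => true
+
+// Object test: all keys in the test must be present with strictly equal values.
+is(node, {type: 'emphasis'}) // => true
+is(node, {type: 'emphasis', value: 'hi'}) // => false
+```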
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/syntax-tree/unist-util-is/workflows/main/badge.svg + +[build]: https://github.com/syntax-tree/unist-util-is/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/syntax-tree/unist-util-is.svg + +[coverage]: https://codecov.io/github/syntax-tree/unist-util-is + +[downloads-badge]: https://img.shields.io/npm/dm/unist-util-is.svg + +[downloads]: https://www.npmjs.com/package/unist-util-is + +[size-badge]: https://img.shields.io/bundlephobia/minzip/unist-util-is.svg + +[size]: https://bundlephobia.com/result?p=unist-util-is + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/syntax-tree/unist/discussions + +[npm]: https://docs.npmjs.com/cli/install + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[typescript]: https://www.typescriptlang.org + +[license]: license + +[author]: https://wooorm.com + +[health]: https://github.com/syntax-tree/.github + +[contributing]: https://github.com/syntax-tree/.github/blob/main/contributing.md + +[support]: https://github.com/syntax-tree/.github/blob/main/support.md + +[coc]: https://github.com/syntax-tree/.github/blob/main/code-of-conduct.md + +[unist]: https://github.com/syntax-tree/unist + +[node]: https://github.com/syntax-tree/unist#node + +[hast-util-is-element]: https://github.com/syntax-tree/hast-util-is-element + +[unist-util-select]: https://github.com/syntax-tree/unist-util-select + +[is]: #isnode-test-index-parent-context + +[convert]: #converttest + +[assertanything]: #assertanything + +[assertpredicate]: #assertpredicate + +[test]: #test + +[testfunctionanything]: #testfunctionanything + +[testfunctionpredicate]: #testfunctionpredicate + +[predicatetest]: #predicatetest diff --git a/_extensions/d2/node_modules/unist-util-stringify-position/index.d.ts b/_extensions/d2/node_modules/unist-util-stringify-position/index.d.ts new file mode 100644 index 00000000..93163eae --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-stringify-position/index.d.ts @@ -0,0 +1 @@ +export {stringifyPosition} from './lib/index.js' diff --git a/_extensions/d2/node_modules/unist-util-stringify-position/index.js b/_extensions/d2/node_modules/unist-util-stringify-position/index.js new file mode 100644 index 00000000..93163eae --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-stringify-position/index.js @@ -0,0 +1 @@ +export {stringifyPosition} from './lib/index.js' diff --git a/_extensions/d2/node_modules/unist-util-stringify-position/lib/index.d.ts b/_extensions/d2/node_modules/unist-util-stringify-position/lib/index.d.ts new file mode 100644 index 00000000..7a64b47a --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-stringify-position/lib/index.d.ts @@ -0,0 +1,61 @@ +/** + * @typedef {import('unist').Node} Node + * @typedef {import('unist').Point} Point + * @typedef {import('unist').Position} Position + */ +/** + * @typedef NodeLike + * @property {string} type + * @property {PositionLike | null | undefined} [position] + * + * @typedef PositionLike + * @property {PointLike | null | undefined} [start] + * @property {PointLike | null | undefined} [end] + * + * @typedef PointLike + * @property {number | null | undefined} [line] + * 
@property {number | null | undefined} [column] + * @property {number | null | undefined} [offset] + */ +/** + * Serialize the positional info of a point, position (start and end points), + * or node. + * + * @param {Node | NodeLike | Position | PositionLike | Point | PointLike | null | undefined} [value] + * Node, position, or point. + * @returns {string} + * Pretty printed positional info of a node (`string`). + * + * In the format of a range `ls:cs-le:ce` (when given `node` or `position`) + * or a point `l:c` (when given `point`), where `l` stands for line, `c` for + * column, `s` for `start`, and `e` for end. + * An empty string (`''`) is returned if the given value is neither `node`, + * `position`, nor `point`. + */ +export function stringifyPosition( + value?: + | Node + | NodeLike + | Position + | PositionLike + | Point + | PointLike + | null + | undefined +): string +export type Node = import('unist').Node +export type Point = import('unist').Point +export type Position = import('unist').Position +export type NodeLike = { + type: string + position?: PositionLike | null | undefined +} +export type PositionLike = { + start?: PointLike | null | undefined + end?: PointLike | null | undefined +} +export type PointLike = { + line?: number | null | undefined + column?: number | null | undefined + offset?: number | null | undefined +} diff --git a/_extensions/d2/node_modules/unist-util-stringify-position/lib/index.js b/_extensions/d2/node_modules/unist-util-stringify-position/lib/index.js new file mode 100644 index 00000000..7474343c --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-stringify-position/lib/index.js @@ -0,0 +1,84 @@ +/** + * @typedef {import('unist').Node} Node + * @typedef {import('unist').Point} Point + * @typedef {import('unist').Position} Position + */ + +/** + * @typedef NodeLike + * @property {string} type + * @property {PositionLike | null | undefined} [position] + * + * @typedef PositionLike + * @property {PointLike | null | undefined} [start] + * @property {PointLike | null | undefined} [end] + * + * @typedef PointLike + * @property {number | null | undefined} [line] + * @property {number | null | undefined} [column] + * @property {number | null | undefined} [offset] + */ + +/** + * Serialize the positional info of a point, position (start and end points), + * or node. + * + * @param {Node | NodeLike | Position | PositionLike | Point | PointLike | null | undefined} [value] + * Node, position, or point. + * @returns {string} + * Pretty printed positional info of a node (`string`). + * + * In the format of a range `ls:cs-le:ce` (when given `node` or `position`) + * or a point `l:c` (when given `point`), where `l` stands for line, `c` for + * column, `s` for `start`, and `e` for end. + * An empty string (`''`) is returned if the given value is neither `node`, + * `position`, nor `point`. + */ +export function stringifyPosition(value) { + // Nothing. + if (!value || typeof value !== 'object') { + return '' + } + + // Node. + if ('position' in value || 'type' in value) { + return position(value.position) + } + + // Position. + if ('start' in value || 'end' in value) { + return position(value) + } + + // Point. + if ('line' in value || 'column' in value) { + return point(value) + } + + // ? 
+ return '' +} + +/** + * @param {Point | PointLike | null | undefined} point + * @returns {string} + */ +function point(point) { + return index(point && point.line) + ':' + index(point && point.column) +} + +/** + * @param {Position | PositionLike | null | undefined} pos + * @returns {string} + */ +function position(pos) { + return point(pos && pos.start) + '-' + point(pos && pos.end) +} + +/** + * @param {number | null | undefined} value + * @returns {number} + */ +function index(value) { + return value && typeof value === 'number' ? value : 1 +} diff --git a/_extensions/d2/node_modules/unist-util-stringify-position/license b/_extensions/d2/node_modules/unist-util-stringify-position/license new file mode 100644 index 00000000..8d8660d3 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-stringify-position/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2016 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/_extensions/d2/node_modules/unist-util-stringify-position/package.json b/_extensions/d2/node_modules/unist-util-stringify-position/package.json new file mode 100644 index 00000000..51e32c22 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-stringify-position/package.json @@ -0,0 +1,80 @@ +{ + "name": "unist-util-stringify-position", + "version": "3.0.3", + "description": "unist utility to serialize a node, position, or point as a human readable location", + "license": "MIT", + "keywords": [ + "unist", + "unist-util", + "util", + "utility", + "position", + "location", + "point", + "node", + "stringify", + "tostring" + ], + "repository": "syntax-tree/unist-util-stringify-position", + "bugs": "https://github.com/syntax-tree/unist-util-stringify-position/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "lib/", + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/unist": "^2.0.0" + }, + "devDependencies": { + "@types/mdast": "^3.0.0", + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.53.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true + } +} diff --git a/_extensions/d2/node_modules/unist-util-stringify-position/readme.md b/_extensions/d2/node_modules/unist-util-stringify-position/readme.md new file mode 100644 index 00000000..323f9605 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-stringify-position/readme.md @@ -0,0 +1,203 @@ +# unist-util-stringify-position + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +[unist][] utility to pretty print the positional info of a node. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`stringifyPosition(node|position|point)`](#stringifypositionnodepositionpoint) +* [Types](#types) +* [Compatibility](#compatibility) +* [Security](#security) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This package is a utility that takes any [unist][] (whether mdast, hast, etc) +node, position, or point, and serializes its positional info. + +## When should I use this? 
+ +This utility is useful to display where something occurred in the original +document, in one standard way, for humans. +For example, when throwing errors or warning messages about something. + +## Install + +This package is [ESM only][esm]. +In Node.js (version 14.14+ and 16.0+), install with [npm][]: + +```sh +npm install unist-util-stringify-position +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {stringifyPosition} from 'https://esm.sh/unist-util-stringify-position@3' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {stringifyPosition} from 'unist-util-stringify-position' + +stringifyPosition({line: 2, column: 3}) // => '2:3' (point) +stringifyPosition({start: {line: 2}, end: {line: 3}}) // => '2:1-3:1' (position) +stringifyPosition({ + type: 'text', + value: '!', + position: { + start: {line: 5, column: 11}, + end: {line: 5, column: 12} + } +}) // => '5:11-5:12' (node) +``` + +## API + +This package exports the identifier [`stringifyPosition`][stringifyposition]. +There is no default export. + +### `stringifyPosition(node|position|point)` + +Serialize the positional info of a point, position (start and end points), or +node. + +###### Parameters + +* `node` ([`Node`][node]) + — node whose `position` fields to serialize +* `position` ([`Position`][position]) + — position whose `start` and `end` points to serialize +* `point` ([`Point`][point]) + — point whose `line` and `column` fields to serialize + +###### Returns + +Pretty printed positional info of a node (`string`). + +In the format of a range `ls:cs-le:ce` (when given `node` or `position`) or a +point `l:c` (when given `point`), where `l` stands for line, `c` for column, `s` +for `start`, and `e` for end. +An empty string (`''`) is returned if the given value is neither `node`, +`position`, nor `point`. + +## Types + +This package is fully typed with [TypeScript][]. +It exports no additional types. + +## Compatibility + +Projects maintained by the unified collective are compatible with all maintained +versions of Node.js. +As of now, that is Node.js 14.14+ and 16.0+. +Our projects sometimes work with older versions, but this is not guaranteed. + +## Security + +This project is safe. + +## Related + +* [`unist-util-generated`](https://github.com/syntax-tree/unist-util-generated) + — check if a node is generated +* [`unist-util-position`](https://github.com/syntax-tree/unist-util-position) + — get positional info of nodes +* [`unist-util-remove-position`](https://github.com/syntax-tree/unist-util-remove-position) + — remove positional info from trees +* [`unist-util-source`](https://github.com/syntax-tree/unist-util-source) + — get the source of a value (node or position) in a file + +## Contribute + +See [`contributing.md` in `syntax-tree/.github`][contributing] for ways to get +started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. 
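+
+As a small editor's sketch of the “warning messages” use case mentioned above
+(not part of the upstream readme; the message text is made up):
+
+```js
+import {stringifyPosition} from 'unist-util-stringify-position'
+
+const node = {
+  type: 'text',
+  value: '!',
+  position: {
+    start: {line: 5, column: 11},
+    end: {line: 5, column: 12}
+  }
+}
+
+// Prefix a human readable location to a diagnostic message.
+console.warn('Unexpected `!` at ' + stringifyPosition(node))
+// => Unexpected `!` at 5:11-5:12
+```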
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/syntax-tree/unist-util-stringify-position/workflows/main/badge.svg + +[build]: https://github.com/syntax-tree/unist-util-stringify-position/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/syntax-tree/unist-util-stringify-position.svg + +[coverage]: https://codecov.io/github/syntax-tree/unist-util-stringify-position + +[downloads-badge]: https://img.shields.io/npm/dm/unist-util-stringify-position.svg + +[downloads]: https://www.npmjs.com/package/unist-util-stringify-position + +[size-badge]: https://img.shields.io/bundlephobia/minzip/unist-util-stringify-position.svg + +[size]: https://bundlephobia.com/result?p=unist-util-stringify-position + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/syntax-tree/unist/discussions + +[npm]: https://docs.npmjs.com/cli/install + +[license]: license + +[author]: https://wooorm.com + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[typescript]: https://www.typescriptlang.org + +[contributing]: https://github.com/syntax-tree/.github/blob/HEAD/contributing.md + +[support]: https://github.com/syntax-tree/.github/blob/HEAD/support.md + +[coc]: https://github.com/syntax-tree/.github/blob/HEAD/code-of-conduct.md + +[unist]: https://github.com/syntax-tree/unist + +[node]: https://github.com/syntax-tree/unist#node + +[position]: https://github.com/syntax-tree/unist#position + +[point]: https://github.com/syntax-tree/unist#point + +[stringifyposition]: #stringifypositionnodepositionpoint diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/complex-types.d.ts b/_extensions/d2/node_modules/unist-util-visit-parents/complex-types.d.ts new file mode 100644 index 00000000..9a244844 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/complex-types.d.ts @@ -0,0 +1,14 @@ +// To do: next major: remove this file? 
+export type { + // Used in `unist-util-visit`: + VisitorResult, + + // Documented: + Visitor, + BuildVisitor +} from './index.js' +export type { + // Used in `unist-util-visit`: + Matches, + InclusiveDescendant +} from './lib/complex-types.js' diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/index.d.ts b/_extensions/d2/node_modules/unist-util-visit-parents/index.d.ts new file mode 100644 index 00000000..29af1e66 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/index.d.ts @@ -0,0 +1,10 @@ +export type {Test} from 'unist-util-is' +export type { + Action, + ActionTuple, + BuildVisitor, + Index, + Visitor, + VisitorResult +} from './lib/index.js' +export {CONTINUE, EXIT, SKIP, visitParents} from './lib/index.js' diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/index.js b/_extensions/d2/node_modules/unist-util-visit-parents/index.js new file mode 100644 index 00000000..995e97b6 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/index.js @@ -0,0 +1,2 @@ +// Note: types exported from `index.d.ts` +export {CONTINUE, EXIT, SKIP, visitParents} from './lib/index.js' diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.browser.d.ts b/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.browser.d.ts new file mode 100644 index 00000000..70797262 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.browser.d.ts @@ -0,0 +1,5 @@ +/** + * @param {string} d + * @returns {string} + */ +export function color(d: string): string; diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.browser.js b/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.browser.js new file mode 100644 index 00000000..1685a155 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.browser.js @@ -0,0 +1,7 @@ +/** + * @param {string} d + * @returns {string} + */ +export function color(d) { + return d +} diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.d.ts b/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.d.ts new file mode 100644 index 00000000..70797262 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.d.ts @@ -0,0 +1,5 @@ +/** + * @param {string} d + * @returns {string} + */ +export function color(d: string): string; diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.js b/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.js new file mode 100644 index 00000000..3634ae94 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/lib/color.js @@ -0,0 +1,7 @@ +/** + * @param {string} d + * @returns {string} + */ +export function color(d) { + return '\u001B[33m' + d + '\u001B[39m' +} diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/lib/complex-types.d.ts b/_extensions/d2/node_modules/unist-util-visit-parents/lib/complex-types.d.ts new file mode 100644 index 00000000..ef06b3ac --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/lib/complex-types.d.ts @@ -0,0 +1,67 @@ +/* eslint-disable @typescript-eslint/ban-types */ + +import type {Node, Parent} from 'unist' +import type {Test} from 'unist-util-is' +import type {Visitor} from './index.js' + +/** + * Internal utility to collect all descendants of in `Tree`. + */ +export type InclusiveDescendant< + Tree extends Node = never, + Found = void +> = Tree extends Parent + ? 
+ | Tree + | InclusiveDescendant< + Exclude, + Found | Tree + > + : Tree + +/** + * Infer the thing that is asserted from a type guard. + */ +type Predicate = Fn extends ( + value: any +) => value is infer Thing + ? Thing + : Fallback + +/** + * Check if a node matches a test. + * + * Returns either the node if it matches or `never` otherwise. + */ +type MatchesOne = + // Is this a node? + Value extends Node + ? // No test. + Check extends null + ? Value + : // No test. + Check extends undefined + ? Value + : // Function test. + Check extends Function + ? Extract> + : // String (type) test. + Value['type'] extends Check + ? Value + : // Partial test. + Value extends Check + ? Value + : never + : never + +/** + * Check if a node matches one or more tests. + * + * Returns either the node if it matches or `never` otherwise. + */ +export type Matches = + // Is this a list? + Check extends Array + ? MatchesOne + : MatchesOne +/* eslint-enable @typescript-eslint/ban-types */ diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/lib/index.d.ts b/_extensions/d2/node_modules/unist-util-visit-parents/lib/index.d.ts new file mode 100644 index 00000000..3fb4fbee --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/lib/index.d.ts @@ -0,0 +1,92 @@ +/** + * Continue traversing as normal. + */ +export const CONTINUE: true; +/** + * Stop traversing immediately. + */ +export const EXIT: false; +/** + * Do not traverse this node’s children. + */ +export const SKIP: "skip"; +/** + * Visit nodes, with ancestral information. + * + * This algorithm performs *depth-first* *tree traversal* in *preorder* + * (**NLR**) or if `reverse` is given, in *reverse preorder* (**NRL**). + * + * You can choose for which nodes `visitor` is called by passing a `test`. + * For complex tests, you should test yourself in `visitor`, as it will be + * faster and will have improved type information. + * + * Walking the tree is an intensive task. + * Make use of the return values of the visitor when possible. + * Instead of walking a tree multiple times, walk it once, use `unist-util-is` + * to check if a node matches, and then perform different operations. + * + * You can change the tree. + * See `Visitor` for more info. + * + * @param tree + * Tree to traverse. + * @param test + * `unist-util-is`-compatible test + * @param visitor + * Handle each node. + * @param reverse + * Traverse in reverse preorder (NRL) instead of the default preorder (NLR). + * @returns + * Nothing. + */ +export const visitParents: (, Check extends import("unist-util-is/lib/index.js").Test>(tree: Tree, test: Check, visitor: BuildVisitor, reverse?: boolean | null | undefined) => void) & (>(tree: Tree_1, visitor: BuildVisitor, reverse?: boolean | null | undefined) => void); +export type Node = import('unist').Node; +export type Parent = import('unist').Parent; +export type Test = import('unist-util-is').Test; +/** + * Union of the action types. + */ +export type Action = boolean | 'skip'; +/** + * Move to the sibling at `index` next (after node itself is completely + * traversed). + * + * Useful if mutating the tree, such as removing the node the visitor is + * currently on, or any of its previous siblings. + * Results less than 0 or greater than or equal to `children.length` stop + * traversing the parent. + */ +export type Index = number; +/** + * List with one or two values, the first an action, the second an index. 
+ */ +export type ActionTuple = [(Action | null | undefined | void)?, (Index | null | undefined)?]; +/** + * Any value that can be returned from a visitor. + */ +export type VisitorResult = Action | [(void | Action | null | undefined)?, (number | null | undefined)?] | Index | null | undefined | void; +/** + * Handle a node (matching `test`, if given). + * + * Visitors are free to transform `node`. + * They can also transform the parent of node (the last of `ancestors`). + * + * Replacing `node` itself, if `SKIP` is not returned, still causes its + * descendants to be walked (which is a bug). + * + * When adding or removing previous siblings of `node` (or next siblings, in + * case of reverse), the `Visitor` should return a new `Index` to specify the + * sibling to traverse after `node` is traversed. + * Adding or removing next siblings of `node` (or previous siblings, in case + * of reverse) is handled as expected without needing to return a new `Index`. + * + * Removing the children property of an ancestor still results in them being + * traversed. + */ +export type Visitor = import("unist").Node, Ancestor extends import("unist").Parent, import("unist").Data> = import("unist").Parent, import("unist").Data>> = (node: Visited, ancestors: Array) => VisitorResult; +/** + * Build a typed `Visitor` function from a tree and a test. + * + * It will infer which values are passed as `node` and which as `parents`. + */ +export type BuildVisitor = import("unist").Node, Check extends import("unist-util-is/lib/index.js").Test = string> = Visitor, Check>, Extract, Parent>>; diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/lib/index.js b/_extensions/d2/node_modules/unist-util-visit-parents/lib/index.js new file mode 100644 index 00000000..e8b72ed3 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/lib/index.js @@ -0,0 +1,241 @@ +/** + * @typedef {import('unist').Node} Node + * @typedef {import('unist').Parent} Parent + * @typedef {import('unist-util-is').Test} Test + */ + +/** + * @typedef {boolean | 'skip'} Action + * Union of the action types. + * + * @typedef {number} Index + * Move to the sibling at `index` next (after node itself is completely + * traversed). + * + * Useful if mutating the tree, such as removing the node the visitor is + * currently on, or any of its previous siblings. + * Results less than 0 or greater than or equal to `children.length` stop + * traversing the parent. + * + * @typedef {[(Action | null | undefined | void)?, (Index | null | undefined)?]} ActionTuple + * List with one or two values, the first an action, the second an index. + * + * @typedef {Action | ActionTuple | Index | null | undefined | void} VisitorResult + * Any value that can be returned from a visitor. + */ + +/** + * @template {Node} [Visited=Node] + * Visited node type. + * @template {Parent} [Ancestor=Parent] + * Ancestor type. + * @callback Visitor + * Handle a node (matching `test`, if given). + * + * Visitors are free to transform `node`. + * They can also transform the parent of node (the last of `ancestors`). + * + * Replacing `node` itself, if `SKIP` is not returned, still causes its + * descendants to be walked (which is a bug). + * + * When adding or removing previous siblings of `node` (or next siblings, in + * case of reverse), the `Visitor` should return a new `Index` to specify the + * sibling to traverse after `node` is traversed. 
+ * Adding or removing next siblings of `node` (or previous siblings, in case + * of reverse) is handled as expected without needing to return a new `Index`. + * + * Removing the children property of an ancestor still results in them being + * traversed. + * @param {Visited} node + * Found node. + * @param {Array} ancestors + * Ancestors of `node`. + * @returns {VisitorResult} + * What to do next. + * + * An `Index` is treated as a tuple of `[CONTINUE, Index]`. + * An `Action` is treated as a tuple of `[Action]`. + * + * Passing a tuple back only makes sense if the `Action` is `SKIP`. + * When the `Action` is `EXIT`, that action can be returned. + * When the `Action` is `CONTINUE`, `Index` can be returned. + */ + +/** + * @template {Node} [Tree=Node] + * Tree type. + * @template {Test} [Check=string] + * Test type. + * @typedef {Visitor, Check>, Extract, Parent>>} BuildVisitor + * Build a typed `Visitor` function from a tree and a test. + * + * It will infer which values are passed as `node` and which as `parents`. + */ + +import {convert} from 'unist-util-is' +import {color} from './color.js' + +/** + * Continue traversing as normal. + */ +export const CONTINUE = true + +/** + * Stop traversing immediately. + */ +export const EXIT = false + +/** + * Do not traverse this node’s children. + */ +export const SKIP = 'skip' + +/** + * Visit nodes, with ancestral information. + * + * This algorithm performs *depth-first* *tree traversal* in *preorder* + * (**NLR**) or if `reverse` is given, in *reverse preorder* (**NRL**). + * + * You can choose for which nodes `visitor` is called by passing a `test`. + * For complex tests, you should test yourself in `visitor`, as it will be + * faster and will have improved type information. + * + * Walking the tree is an intensive task. + * Make use of the return values of the visitor when possible. + * Instead of walking a tree multiple times, walk it once, use `unist-util-is` + * to check if a node matches, and then perform different operations. + * + * You can change the tree. + * See `Visitor` for more info. + * + * @param tree + * Tree to traverse. + * @param test + * `unist-util-is`-compatible test + * @param visitor + * Handle each node. + * @param reverse + * Traverse in reverse preorder (NRL) instead of the default preorder (NLR). + * @returns + * Nothing. + */ +export const visitParents = + /** + * @type {( + * ((tree: Tree, test: Check, visitor: BuildVisitor, reverse?: boolean | null | undefined) => void) & + * ((tree: Tree, visitor: BuildVisitor, reverse?: boolean | null | undefined) => void) + * )} + */ + ( + /** + * @param {Node} tree + * @param {Test} test + * @param {Visitor} visitor + * @param {boolean | null | undefined} [reverse] + * @returns {void} + */ + function (tree, test, visitor, reverse) { + if (typeof test === 'function' && typeof visitor !== 'function') { + reverse = visitor + // @ts-expect-error no visitor given, so `visitor` is test. + visitor = test + test = null + } + + const is = convert(test) + const step = reverse ? -1 : 1 + + factory(tree, undefined, [])() + + /** + * @param {Node} node + * @param {number | undefined} index + * @param {Array} parents + */ + function factory(node, index, parents) { + /** @type {Record} */ + // @ts-expect-error: hush + const value = node && typeof node === 'object' ? node : {} + + if (typeof value.type === 'string') { + const name = + // `hast` + typeof value.tagName === 'string' + ? value.tagName + : // `xast` + typeof value.name === 'string' + ? 
value.name + : undefined + + Object.defineProperty(visit, 'name', { + value: + 'node (' + color(node.type + (name ? '<' + name + '>' : '')) + ')' + }) + } + + return visit + + function visit() { + /** @type {ActionTuple} */ + let result = [] + /** @type {ActionTuple} */ + let subresult + /** @type {number} */ + let offset + /** @type {Array} */ + let grandparents + + if (!test || is(node, index, parents[parents.length - 1] || null)) { + result = toResult(visitor(node, parents)) + + if (result[0] === EXIT) { + return result + } + } + + // @ts-expect-error looks like a parent. + if (node.children && result[0] !== SKIP) { + // @ts-expect-error looks like a parent. + offset = (reverse ? node.children.length : -1) + step + // @ts-expect-error looks like a parent. + grandparents = parents.concat(node) + + // @ts-expect-error looks like a parent. + while (offset > -1 && offset < node.children.length) { + // @ts-expect-error looks like a parent. + subresult = factory(node.children[offset], offset, grandparents)() + + if (subresult[0] === EXIT) { + return subresult + } + + offset = + typeof subresult[1] === 'number' ? subresult[1] : offset + step + } + } + + return result + } + } + } + ) + +/** + * Turn a return value into a clean result. + * + * @param {VisitorResult} value + * Valid return values from visitors. + * @returns {ActionTuple} + * Clean result. + */ +function toResult(value) { + if (Array.isArray(value)) { + return value + } + + if (typeof value === 'number') { + return [CONTINUE, value] + } + + return [value] +} diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/license b/_extensions/d2/node_modules/unist-util-visit-parents/license new file mode 100644 index 00000000..8d8660d3 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2016 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
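+
+A minimal sketch of how the traversal above interprets visitor return values
+(the tree is hand-built purely for illustration): returning `SKIP` keeps a
+node but does not descend into its children, while returning `EXIT` would stop
+the walk entirely.
+
+```js
+import {visitParents, SKIP} from 'unist-util-visit-parents'
+
+// Small invented unist tree.
+const tree = {
+  type: 'root',
+  children: [
+    {type: 'section', children: [{type: 'text', value: 'a'}]},
+    {type: 'text', value: 'b'}
+  ]
+}
+
+visitParents(tree, (node, ancestors) => {
+  console.log(node.type, ancestors.map((ancestor) => ancestor.type))
+  // Do not descend into sections.
+  if (node.type === 'section') return SKIP
+})
+
+// Logs:
+// root []
+// section [ 'root' ]
+// text [ 'root' ]
+```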
diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/package.json b/_extensions/d2/node_modules/unist-util-visit-parents/package.json new file mode 100644 index 00000000..3aa8dc37 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/package.json @@ -0,0 +1,104 @@ +{ + "name": "unist-util-visit-parents", + "version": "5.1.3", + "description": "unist utility to recursively walk over nodes, with ancestral information", + "license": "MIT", + "keywords": [ + "unist", + "unist-util", + "util", + "utility", + "tree", + "ast", + "visit", + "traverse", + "walk", + "check", + "parent", + "parents" + ], + "repository": "syntax-tree/unist-util-visit-parents", + "bugs": "https://github.com/syntax-tree/unist-util-visit-parents/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "browser": { + "./lib/color.js": "./lib/color.browser.js" + }, + "react-native": { + "./lib/color.js": "./lib/color.browser.js" + }, + "types": "index.d.ts", + "files": [ + "lib/", + "complex-types.d.ts", + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0" + }, + "devDependencies": { + "@types/hast": "^2.0.0", + "@types/mdast": "^3.0.0", + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-gfm": "^2.0.0", + "micromark-extension-gfm": "^2.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "strip-ansi": "^7.0.0", + "tsd": "^0.25.0", + "type-coverage": "^2.0.0", + "typescript": "^4.7.0", + "xo": "^0.53.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && tsd && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true, + "rules": { + "@typescript-eslint/array-type": "off" + } + }, + "remarkConfig": { + "plugins": [ + "remark-preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true, + "#": "needed `any`s", + "ignoreFiles": [ + "lib/complex-types.d.ts" + ] + } +} diff --git a/_extensions/d2/node_modules/unist-util-visit-parents/readme.md b/_extensions/d2/node_modules/unist-util-visit-parents/readme.md new file mode 100644 index 00000000..d90beb79 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit-parents/readme.md @@ -0,0 +1,385 @@ +# unist-util-visit-parents + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +[unist][] utility to walk the tree with a stack of parents. 
+ +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`visitParents(tree[, test], visitor[, reverse])`](#visitparentstree-test-visitor-reverse) + * [`CONTINUE`](#continue) + * [`EXIT`](#exit) + * [`SKIP`](#skip) + * [`Action`](#action) + * [`ActionTuple`](#actiontuple) + * [`BuildVisitor`](#buildvisitor) + * [`Index`](#index) + * [`Test`](#test) + * [`Visitor`](#visitor) + * [`VisitorResult`](#visitorresult) +* [Types](#types) +* [Compatibility](#compatibility) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This is a very important utility for working with unist as it lets you walk the +tree. + +## When should I use this? + +You can use this utility when you want to walk the tree and want to know about +every parent of each node. +You can use [`unist-util-visit`][unist-util-visit] if you don’t care about the +entire stack of parents. + +## Install + +This package is [ESM only][esm]. +In Node.js (version 14.14+ and 16.0+), install with [npm][]: + +```sh +npm install unist-util-visit-parents +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {visitParents} from 'https://esm.sh/unist-util-visit-parents@5' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {visitParents} from 'unist-util-visit-parents' +import {fromMarkdown} from 'mdast-util-from-markdown' + +const tree = fromMarkdown('Some *emphasis*, **strong**, and `code`.') + +visitParents(tree, 'strong', (node, ancestors) => { + console.log(node.type, ancestors.map(ancestor => ancestor.type)) +}) +``` + +Yields: + +```js +strong ['root', 'paragraph'] +``` + +## API + +This package exports the identifiers [`CONTINUE`][api-continue], +[`EXIT`][api-exit], [`SKIP`][api-skip], and [`visitParents`][api-visitparents]. +There is no default export. + +### `visitParents(tree[, test], visitor[, reverse])` + +Visit nodes, with ancestral information. + +This algorithm performs *[depth-first][]* *[tree traversal][tree-traversal]* +in *[preorder][]* (**NLR**) or if `reverse` is given, in *reverse preorder* +(**NRL**). + +You can choose for which nodes `visitor` is called by passing a `test`. +For complex tests, you should test yourself in `visitor`, as it will be +faster and will have improved type information. + +Walking the tree is an intensive task. +Make use of the return values of the visitor when possible. +Instead of walking a tree multiple times, walk it once, use +[`unist-util-is`][unist-util-is] to check if a node matches, and then perform +different operations. + +You can change the tree. +See [`Visitor`][api-visitor] for more info. + +###### Parameters + +* `tree` ([`Node`][node]) + — tree to traverse +* `test` ([`Test`][api-test], optional) + — [`unist-util-is`][unist-util-is]-compatible test +* `visitor` ([`Visitor`][api-visitor]) + — handle each node +* `reverse` (`boolean`, default: `false`) + — traverse in reverse preorder (NRL) instead of the default preorder (NLR) + +###### Returns + +Nothing (`void`). + +### `CONTINUE` + +Continue traversing as normal (`true`). + +### `EXIT` + +Stop traversing immediately (`false`). + +### `SKIP` + +Do not traverse this node’s children (`'skip'`). + +### `Action` + +Union of the action types (TypeScript type). 
+ +###### Type + +```ts +type Action = typeof CONTINUE | typeof EXIT | typeof SKIP +``` + +### `ActionTuple` + +List with one or two values, the first an action, the second an index +(TypeScript type). + +###### Type + +```ts +type ActionTuple = [ + (Action | null | undefined | void)?, + (Index | null | undefined)? +] +``` + +### `BuildVisitor` + +Build a typed `Visitor` function from a tree and a test (TypeScript type). + +It will infer which values are passed as `node` and which as `parents`. + +###### Type parameters + +* `Tree` ([`Node`][node], default: `Node`) + — tree type +* `Check` ([`Test`][api-test], default: `string`) + — test type + +###### Returns + +[`Visitor`][api-visitor]. + +### `Index` + +Move to the sibling at `index` next (after node itself is completely +traversed) (TypeScript type). + +Useful if mutating the tree, such as removing the node the visitor is currently +on, or any of its previous siblings. +Results less than `0` or greater than or equal to `children.length` stop +traversing the parent. + +###### Type + +```ts +type Index = number +``` + +### `Test` + +[`unist-util-is`][unist-util-is] compatible test (TypeScript type). + +### `Visitor` + +Handle a node (matching `test`, if given) (TypeScript type). + +Visitors are free to transform `node`. +They can also transform the parent of node (the last of `ancestors`). + +Replacing `node` itself, if `SKIP` is not returned, still causes its +descendants to be walked (which is a bug). + +When adding or removing previous siblings of `node` (or next siblings, in +case of reverse), the `Visitor` should return a new `Index` to specify the +sibling to traverse after `node` is traversed. +Adding or removing next siblings of `node` (or previous siblings, in case +of reverse) is handled as expected without needing to return a new `Index`. + +Removing the children property of an ancestor still results in them being +traversed. + +###### Parameters + +* `node` ([`Node`][node]) + — found node +* `parents` ([`Array`][node]) + — ancestors of `node` + +###### Returns + +What to do next. + +An `Index` is treated as a tuple of `[CONTINUE, Index]`. +An `Action` is treated as a tuple of `[Action]`. + +Passing a tuple back only makes sense if the `Action` is `SKIP`. +When the `Action` is `EXIT`, that action can be returned. +When the `Action` is `CONTINUE`, `Index` can be returned. + +### `VisitorResult` + +Any value that can be returned from a visitor (TypeScript type). + +###### Type + +```ts +type VisitorResult = + | Action + | ActionTuple + | Index + | null + | undefined + | void +``` + +## Types + +This package is fully typed with [TypeScript][]. +It exports the additional types [`Action`][api-action], +[`ActionTuple`][api-actiontuple], [`BuildVisitor`][api-buildvisitor], +[`Index`][api-index], [`Test`][api-test], [`Visitor`][api-visitor], and +[`VisitorResult`][api-visitorresult]. + +## Compatibility + +Projects maintained by the unified collective are compatible with all maintained +versions of Node.js. +As of now, that is Node.js 14.14+ and 16.0+. +Our projects sometimes work with older versions, but this is not guaranteed. 
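+
+The `Index` and `Visitor` sections above describe how to keep the walk
+consistent while mutating the tree. A minimal sketch of that pattern, using an
+invented `comment` node type: remove each matching node and return its old
+index so that the sibling which shifted into that slot is still visited.
+
+```js
+import {visitParents} from 'unist-util-visit-parents'
+
+const tree = {
+  type: 'root',
+  children: [
+    {type: 'comment', value: 'one'},
+    {type: 'comment', value: 'two'},
+    {type: 'text', value: 'kept'}
+  ]
+}
+
+visitParents(tree, 'comment', (node, ancestors) => {
+  const parent = ancestors[ancestors.length - 1]
+  const index = parent.children.indexOf(node)
+  parent.children.splice(index, 1)
+  // Continue with whatever moved into this slot.
+  return index
+})
+
+console.log(tree.children) // => [{type: 'text', value: 'kept'}]
+```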
+ +## Related + +* [`unist-util-visit`](https://github.com/syntax-tree/unist-util-visit) + — walk the tree with one parent +* [`unist-util-filter`](https://github.com/syntax-tree/unist-util-filter) + — create a new tree with all nodes that pass a test +* [`unist-util-map`](https://github.com/syntax-tree/unist-util-map) + — create a new tree with all nodes mapped by a given function +* [`unist-util-flatmap`](https://gitlab.com/staltz/unist-util-flatmap) + — create a new tree by mapping (to an array) with the given function +* [`unist-util-remove`](https://github.com/syntax-tree/unist-util-remove) + — remove nodes from a tree that pass a test +* [`unist-util-select`](https://github.com/syntax-tree/unist-util-select) + — select nodes with CSS-like selectors + +## Contribute + +See [`contributing.md`][contributing] in [`syntax-tree/.github`][health] for +ways to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/syntax-tree/unist-util-visit-parents/workflows/main/badge.svg + +[build]: https://github.com/syntax-tree/unist-util-visit-parents/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/syntax-tree/unist-util-visit-parents.svg + +[coverage]: https://codecov.io/github/syntax-tree/unist-util-visit-parents + +[downloads-badge]: https://img.shields.io/npm/dm/unist-util-visit-parents.svg + +[downloads]: https://www.npmjs.com/package/unist-util-visit-parents + +[size-badge]: https://img.shields.io/bundlephobia/minzip/unist-util-visit-parents.svg + +[size]: https://bundlephobia.com/result?p=unist-util-visit-parents + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/syntax-tree/unist/discussions + +[npm]: https://docs.npmjs.com/cli/install + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[typescript]: https://www.typescriptlang.org + +[license]: license + +[author]: https://wooorm.com + +[health]: https://github.com/syntax-tree/.github + +[contributing]: https://github.com/syntax-tree/.github/blob/HEAD/contributing.md + +[support]: https://github.com/syntax-tree/.github/blob/HEAD/support.md + +[coc]: https://github.com/syntax-tree/.github/blob/HEAD/code-of-conduct.md + +[unist]: https://github.com/syntax-tree/unist + +[node]: https://github.com/syntax-tree/unist#node + +[depth-first]: https://github.com/syntax-tree/unist#depth-first-traversal + +[tree-traversal]: https://github.com/syntax-tree/unist#tree-traversal + +[preorder]: https://github.com/syntax-tree/unist#preorder + +[unist-util-visit]: https://github.com/syntax-tree/unist-util-visit + +[unist-util-is]: https://github.com/syntax-tree/unist-util-is + +[api-visitparents]: #visitparentstree-test-visitor-reverse + +[api-continue]: #continue + +[api-exit]: #exit + +[api-skip]: #skip + +[api-action]: #action + +[api-actiontuple]: #actiontuple + +[api-buildvisitor]: #buildvisitor + +[api-index]: #index + +[api-test]: #test + +[api-visitor]: #visitor + +[api-visitorresult]: #visitorresult diff --git a/_extensions/d2/node_modules/unist-util-visit/complex-types.d.ts 
b/_extensions/d2/node_modules/unist-util-visit/complex-types.d.ts new file mode 100644 index 00000000..5085b4bb --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit/complex-types.d.ts @@ -0,0 +1,2 @@ +// To do: next major: remove this file. +export type {Visitor, BuildVisitor} from './index.js' diff --git a/_extensions/d2/node_modules/unist-util-visit/index.d.ts b/_extensions/d2/node_modules/unist-util-visit/index.d.ts new file mode 100644 index 00000000..c6ebdb0a --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit/index.d.ts @@ -0,0 +1,9 @@ +export type {Test} from 'unist-util-is' +export type { + Action, + ActionTuple, + Index, + VisitorResult +} from 'unist-util-visit-parents' +export type {Visitor, BuildVisitor} from './lib/index.js' +export {CONTINUE, EXIT, SKIP, visit} from './lib/index.js' diff --git a/_extensions/d2/node_modules/unist-util-visit/index.js b/_extensions/d2/node_modules/unist-util-visit/index.js new file mode 100644 index 00000000..47479999 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit/index.js @@ -0,0 +1,2 @@ +// Note: types exported from `index.d.ts` +export {CONTINUE, EXIT, SKIP, visit} from './lib/index.js' diff --git a/_extensions/d2/node_modules/unist-util-visit/lib/index.d.ts b/_extensions/d2/node_modules/unist-util-visit/lib/index.d.ts new file mode 100644 index 00000000..da1908ee --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit/lib/index.d.ts @@ -0,0 +1,137 @@ +/** + * Visit nodes. + * + * This algorithm performs *depth-first* *tree traversal* in *preorder* + * (**NLR**) or if `reverse` is given, in *reverse preorder* (**NRL**). + * + * You can choose for which nodes `visitor` is called by passing a `test`. + * For complex tests, you should test yourself in `visitor`, as it will be + * faster and will have improved type information. + * + * Walking the tree is an intensive task. + * Make use of the return values of the visitor when possible. + * Instead of walking a tree multiple times, walk it once, use `unist-util-is` + * to check if a node matches, and then perform different operations. + * + * You can change the tree. + * See `Visitor` for more info. + * + * @param tree + * Tree to traverse. + * @param test + * `unist-util-is`-compatible test + * @param visitor + * Handle each node. + * @param reverse + * Traverse in reverse preorder (NRL) instead of the default preorder (NLR). + * @returns + * Nothing. + */ +export const visit: (< + Tree extends import('unist').Node, + Check extends import('unist-util-is/lib/index.js').Test +>( + tree: Tree, + test: Check, + visitor: BuildVisitor, + reverse?: boolean | null | undefined +) => void) & + (>( + tree: Tree_1, + visitor: BuildVisitor, + reverse?: boolean | null | undefined + ) => void) +export type Node = import('unist').Node +export type Parent = import('unist').Parent +export type Test = import('unist-util-is').Test +export type VisitorResult = import('unist-util-visit-parents').VisitorResult +/** + * Check if `Child` can be a child of `Ancestor`. + * + * Returns the ancestor when `Child` can be a child of `Ancestor`, or returns + * `never`. + */ +export type ParentsOf< + Ancestor extends import('unist').Node, + Child extends import('unist').Node +> = Ancestor extends Parent + ? Child extends Ancestor['children'][number] + ? Ancestor + : never + : never +/** + * Handle a node (matching `test`, if given). + * + * Visitors are free to transform `node`. + * They can also transform `parent`. 
+ * + * Replacing `node` itself, if `SKIP` is not returned, still causes its + * descendants to be walked (which is a bug). + * + * When adding or removing previous siblings of `node` (or next siblings, in + * case of reverse), the `Visitor` should return a new `Index` to specify the + * sibling to traverse after `node` is traversed. + * Adding or removing next siblings of `node` (or previous siblings, in case + * of reverse) is handled as expected without needing to return a new `Index`. + * + * Removing the children property of `parent` still results in them being + * traversed. + */ +export type Visitor< + Visited extends import('unist').Node< + import('unist').Data + > = import('unist').Node, + Ancestor extends import('unist').Parent< + import('unist').Node, + import('unist').Data + > = import('unist').Parent< + import('unist').Node, + import('unist').Data + > +> = ( + node: Visited, + index: Visited extends Node ? number | null : never, + parent: Ancestor extends Node ? Ancestor | null : never +) => VisitorResult +/** + * Build a typed `Visitor` function from a node and all possible parents. + * + * It will infer which values are passed as `node` and which as `parent`. + */ +export type BuildVisitorFromMatch< + Visited extends import('unist').Node, + Ancestor extends import('unist').Parent< + import('unist').Node, + import('unist').Data + > +> = Visitor> +/** + * Build a typed `Visitor` function from a list of descendants and a test. + * + * It will infer which values are passed as `node` and which as `parent`. + */ +export type BuildVisitorFromDescendants< + Descendant extends import('unist').Node, + Check extends import('unist-util-is/lib/index.js').Test +> = BuildVisitorFromMatch< + import('unist-util-visit-parents/complex-types.js').Matches< + Descendant, + Check + >, + Extract +> +/** + * Build a typed `Visitor` function from a tree and a test. + * + * It will infer which values are passed as `node` and which as `parent`. + */ +export type BuildVisitor< + Tree extends import('unist').Node< + import('unist').Data + > = import('unist').Node, + Check extends import('unist-util-is/lib/index.js').Test = string +> = BuildVisitorFromDescendants< + import('unist-util-visit-parents/complex-types.js').InclusiveDescendant, + Check +> +export {CONTINUE, EXIT, SKIP} from 'unist-util-visit-parents' diff --git a/_extensions/d2/node_modules/unist-util-visit/lib/index.js b/_extensions/d2/node_modules/unist-util-visit/lib/index.js new file mode 100644 index 00000000..180bd142 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit/lib/index.js @@ -0,0 +1,182 @@ +/** + * @typedef {import('unist').Node} Node + * @typedef {import('unist').Parent} Parent + * @typedef {import('unist-util-is').Test} Test + * @typedef {import('unist-util-visit-parents').VisitorResult} VisitorResult + */ + +/** + * Check if `Child` can be a child of `Ancestor`. + * + * Returns the ancestor when `Child` can be a child of `Ancestor`, or returns + * `never`. + * + * @template {Node} Ancestor + * Node type. + * @template {Node} Child + * Node type. + * @typedef {( + * Ancestor extends Parent + * ? Child extends Ancestor['children'][number] + * ? Ancestor + * : never + * : never + * )} ParentsOf + */ + +/** + * @template {Node} [Visited=Node] + * Visited node type. + * @template {Parent} [Ancestor=Parent] + * Ancestor type. + * @callback Visitor + * Handle a node (matching `test`, if given). + * + * Visitors are free to transform `node`. + * They can also transform `parent`. 
+ * + * Replacing `node` itself, if `SKIP` is not returned, still causes its + * descendants to be walked (which is a bug). + * + * When adding or removing previous siblings of `node` (or next siblings, in + * case of reverse), the `Visitor` should return a new `Index` to specify the + * sibling to traverse after `node` is traversed. + * Adding or removing next siblings of `node` (or previous siblings, in case + * of reverse) is handled as expected without needing to return a new `Index`. + * + * Removing the children property of `parent` still results in them being + * traversed. + * @param {Visited} node + * Found node. + * @param {Visited extends Node ? number | null : never} index + * Index of `node` in `parent`. + * @param {Ancestor extends Node ? Ancestor | null : never} parent + * Parent of `node`. + * @returns {VisitorResult} + * What to do next. + * + * An `Index` is treated as a tuple of `[CONTINUE, Index]`. + * An `Action` is treated as a tuple of `[Action]`. + * + * Passing a tuple back only makes sense if the `Action` is `SKIP`. + * When the `Action` is `EXIT`, that action can be returned. + * When the `Action` is `CONTINUE`, `Index` can be returned. + */ + +/** + * Build a typed `Visitor` function from a node and all possible parents. + * + * It will infer which values are passed as `node` and which as `parent`. + * + * @template {Node} Visited + * Node type. + * @template {Parent} Ancestor + * Parent type. + * @typedef {Visitor>} BuildVisitorFromMatch + */ + +/** + * Build a typed `Visitor` function from a list of descendants and a test. + * + * It will infer which values are passed as `node` and which as `parent`. + * + * @template {Node} Descendant + * Node type. + * @template {Test} Check + * Test type. + * @typedef {( + * BuildVisitorFromMatch< + * import('unist-util-visit-parents/complex-types.js').Matches, + * Extract + * > + * )} BuildVisitorFromDescendants + */ + +/** + * Build a typed `Visitor` function from a tree and a test. + * + * It will infer which values are passed as `node` and which as `parent`. + * + * @template {Node} [Tree=Node] + * Node type. + * @template {Test} [Check=string] + * Test type. + * @typedef {( + * BuildVisitorFromDescendants< + * import('unist-util-visit-parents/complex-types.js').InclusiveDescendant, + * Check + * > + * )} BuildVisitor + */ + +import {visitParents} from 'unist-util-visit-parents' + +/** + * Visit nodes. + * + * This algorithm performs *depth-first* *tree traversal* in *preorder* + * (**NLR**) or if `reverse` is given, in *reverse preorder* (**NRL**). + * + * You can choose for which nodes `visitor` is called by passing a `test`. + * For complex tests, you should test yourself in `visitor`, as it will be + * faster and will have improved type information. + * + * Walking the tree is an intensive task. + * Make use of the return values of the visitor when possible. + * Instead of walking a tree multiple times, walk it once, use `unist-util-is` + * to check if a node matches, and then perform different operations. + * + * You can change the tree. + * See `Visitor` for more info. + * + * @param tree + * Tree to traverse. + * @param test + * `unist-util-is`-compatible test + * @param visitor + * Handle each node. + * @param reverse + * Traverse in reverse preorder (NRL) instead of the default preorder (NLR). + * @returns + * Nothing. 
+ */ +export const visit = + /** + * @type {( + * ((tree: Tree, test: Check, visitor: BuildVisitor, reverse?: boolean | null | undefined) => void) & + * ((tree: Tree, visitor: BuildVisitor, reverse?: boolean | null | undefined) => void) + * )} + */ + ( + /** + * @param {Node} tree + * @param {Test} test + * @param {Visitor} visitor + * @param {boolean | null | undefined} [reverse] + * @returns {void} + */ + function (tree, test, visitor, reverse) { + if (typeof test === 'function' && typeof visitor !== 'function') { + reverse = visitor + visitor = test + test = null + } + + visitParents(tree, test, overload, reverse) + + /** + * @param {Node} node + * @param {Array} parents + */ + function overload(node, parents) { + const parent = parents[parents.length - 1] + return visitor( + node, + parent ? parent.children.indexOf(node) : null, + parent + ) + } + } + ) + +export {CONTINUE, EXIT, SKIP} from 'unist-util-visit-parents' diff --git a/_extensions/d2/node_modules/unist-util-visit/license b/_extensions/d2/node_modules/unist-util-visit/license new file mode 100644 index 00000000..32e7a3d9 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2015 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
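+
+The wrapper above turns the ancestor stack from `unist-util-visit-parents`
+into the `(node, index, parent)` signature. A small sketch of how that
+signature is typically used (the tree is invented for illustration): replace a
+matching node in place through `parent.children[index]` and return `SKIP` so
+the replaced node's descendants are not walked.
+
+```js
+import {visit, SKIP} from 'unist-util-visit'
+
+const tree = {
+  type: 'root',
+  children: [
+    {type: 'text', value: 'hello '},
+    {type: 'emphasis', children: [{type: 'text', value: 'world'}]}
+  ]
+}
+
+visit(tree, 'emphasis', (node, index, parent) => {
+  // Swap the node for a `strong` node in the same position.
+  parent.children[index] = {type: 'strong', children: node.children}
+  return SKIP
+})
+
+console.log(tree.children[1].type) // => 'strong'
+```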
diff --git a/_extensions/d2/node_modules/unist-util-visit/package.json b/_extensions/d2/node_modules/unist-util-visit/package.json new file mode 100644 index 00000000..3cb969c8 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit/package.json @@ -0,0 +1,103 @@ +{ + "name": "unist-util-visit", + "version": "4.1.2", + "description": "unist utility to visit nodes", + "license": "MIT", + "keywords": [ + "unist", + "unist-util", + "util", + "utility", + "remark", + "retext", + "rehype", + "mdast", + "hast", + "xast", + "nlcst", + "natural", + "language", + "markdown", + "html", + "xml", + "tree", + "ast", + "node", + "visit", + "walk" + ], + "repository": "syntax-tree/unist-util-visit", + "bugs": "https://github.com/syntax-tree/unist-util-visit/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)", + "Eugene Sharygin ", + "Richard Gibson " + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "lib/", + "complex-types.d.ts", + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-is": "^5.0.0", + "unist-util-visit-parents": "^5.1.1" + }, + "devDependencies": { + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "mdast-util-from-markdown": "^1.0.0", + "mdast-util-gfm": "^2.0.0", + "micromark-extension-gfm": "^2.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "tsd": "^0.25.0", + "type-coverage": "^2.0.0", + "typescript": "^4.7.0", + "xo": "^0.53.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && tsd && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true, + "rules": { + "@typescript-eslint/ban-types": "off", + "@typescript-eslint/array-type": "off" + } + }, + "remarkConfig": { + "plugins": [ + "remark-preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true + } +} diff --git a/_extensions/d2/node_modules/unist-util-visit/readme.md b/_extensions/d2/node_modules/unist-util-visit/readme.md new file mode 100644 index 00000000..6e1c1451 --- /dev/null +++ b/_extensions/d2/node_modules/unist-util-visit/readme.md @@ -0,0 +1,318 @@ +# unist-util-visit + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +[unist][] utility to walk the tree. 
+ +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`visit(tree[, test], visitor[, reverse])`](#visittree-test-visitor-reverse) + * [`CONTINUE`](#continue) + * [`EXIT`](#exit) + * [`SKIP`](#skip) + * [`Action`](#action) + * [`ActionTuple`](#actiontuple) + * [`BuildVisitor`](#buildvisitor) + * [`Index`](#index) + * [`Test`](#test) + * [`Visitor`](#visitor) + * [`VisitorResult`](#visitorresult) +* [Types](#types) +* [Compatibility](#compatibility) +* [Related](#related) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This is a very important utility for working with unist as it lets you walk the +tree. + +## When should I use this? + +You can use this utility when you want to walk the tree. +You can use [`unist-util-visit-parents`][vp] if you care about the entire stack +of parents. + +## Install + +This package is [ESM only][esm]. +In Node.js (version 14.14+ and 16.0+), install with [npm][]: + +```sh +npm install unist-util-visit +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {visit} from 'https://esm.sh/unist-util-visit@4' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {u} from 'unist-builder' +import {visit} from 'unist-util-visit' + +const tree = u('tree', [ + u('leaf', '1'), + u('node', [u('leaf', '2')]), + u('void'), + u('leaf', '3') +]) + +visit(tree, 'leaf', (node) => { + console.log(node) +}) +``` + +Yields: + +```js +{type: 'leaf', value: '1'} +{type: 'leaf', value: '2'} +{type: 'leaf', value: '3'} +``` + +## API + +This package exports the identifiers [`CONTINUE`][api-continue], +[`EXIT`][api-exit], [`SKIP`][api-skip], and [`visit`][api-visit]. +There is no default export. + +### `visit(tree[, test], visitor[, reverse])` + +This function works exactly the same as [`unist-util-visit-parents`][vp], +but [`Visitor`][api-visitor] has a different signature. + +### `CONTINUE` + +Continue traversing as normal (`true`). + +### `EXIT` + +Stop traversing immediately (`false`). + +### `SKIP` + +Do not traverse this node’s children (`'skip'`). + +### `Action` + +Union of the action types (TypeScript type). +See [`Action` in `unist-util-visit-parents`][vp-action]. + +### `ActionTuple` + +List with an action and an index (TypeScript type). +See [`ActionTuple` in `unist-util-visit-parents`][vp-actiontuple]. + +### `BuildVisitor` + +Build a typed `Visitor` function from a tree and a test (TypeScript type). +See [`BuildVisitor` in `unist-util-visit-parents`][vp-buildvisitor]. + +### `Index` + +Move to the sibling at `index` next (TypeScript type). +See [`Index` in `unist-util-visit-parents`][vp-index]. + +### `Test` + +[`unist-util-is`][unist-util-is] compatible test (TypeScript type). + +### `Visitor` + +Handle a node (matching `test`, if given) (TypeScript type). + +Visitors are free to transform `node`. +They can also transform `parent`. + +Replacing `node` itself, if `SKIP` is not returned, still causes its +descendants to be walked (which is a bug). + +When adding or removing previous siblings of `node` (or next siblings, in +case of reverse), the `Visitor` should return a new `Index` to specify the +sibling to traverse after `node` is traversed. +Adding or removing next siblings of `node` (or previous siblings, in case +of reverse) is handled as expected without needing to return a new `Index`. + +Removing the children property of `parent` still results in them being +traversed. 
+ +###### Parameters + +* `node` ([`Node`][node]) + — found node +* `index` (`number` or `null`) + — index of `node` in `parent` +* `parent` ([`Node`][node] or `null`) + — parent of `node` + +###### Returns + +What to do next. + +An `Index` is treated as a tuple of `[CONTINUE, Index]`. +An `Action` is treated as a tuple of `[Action]`. + +Passing a tuple back only makes sense if the `Action` is `SKIP`. +When the `Action` is `EXIT`, that action can be returned. +When the `Action` is `CONTINUE`, `Index` can be returned. + +### `VisitorResult` + +Any value that can be returned from a visitor (TypeScript type). +See [`VisitorResult` in `unist-util-visit-parents`][vp-visitorresult]. + +## Types + +This package is fully typed with [TypeScript][]. +It exports the additional types [`Action`][api-action], +[`ActionTuple`][api-actiontuple], [`BuildVisitor`][api-buildvisitor], +[`Index`][api-index], [`Test`][api-test], [`Visitor`][api-visitor], and +[`VisitorResult`][api-visitorresult]. + +## Compatibility + +Projects maintained by the unified collective are compatible with all maintained +versions of Node.js. +As of now, that is Node.js 12.20+, 14.14+, 16.0+, and 18.0+. +Our projects sometimes work with older versions, but this is not guaranteed. + +## Related + +* [`unist-util-visit-parents`][vp] + — walk the tree with a stack of parents +* [`unist-util-filter`](https://github.com/syntax-tree/unist-util-filter) + — create a new tree with all nodes that pass a test +* [`unist-util-map`](https://github.com/syntax-tree/unist-util-map) + — create a new tree with all nodes mapped by a given function +* [`unist-util-flatmap`](https://gitlab.com/staltz/unist-util-flatmap) + — create a new tree by mapping (to an array) with the given function +* [`unist-util-remove`](https://github.com/syntax-tree/unist-util-remove) + — remove nodes from a tree that pass a test +* [`unist-util-select`](https://github.com/syntax-tree/unist-util-select) + — select nodes with CSS-like selectors + +## Contribute + +See [`contributing.md`][contributing] in [`syntax-tree/.github`][health] for +ways to get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. 
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/syntax-tree/unist-util-visit/workflows/main/badge.svg + +[build]: https://github.com/syntax-tree/unist-util-visit/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/syntax-tree/unist-util-visit.svg + +[coverage]: https://codecov.io/github/syntax-tree/unist-util-visit + +[downloads-badge]: https://img.shields.io/npm/dm/unist-util-visit.svg + +[downloads]: https://www.npmjs.com/package/unist-util-visit + +[size-badge]: https://img.shields.io/bundlephobia/minzip/unist-util-visit.svg + +[size]: https://bundlephobia.com/result?p=unist-util-visit + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/syntax-tree/unist/discussions + +[npm]: https://docs.npmjs.com/cli/install + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[typescript]: https://www.typescriptlang.org + +[license]: license + +[author]: https://wooorm.com + +[health]: https://github.com/syntax-tree/.github + +[contributing]: https://github.com/syntax-tree/.github/blob/main/contributing.md + +[support]: https://github.com/syntax-tree/.github/blob/main/support.md + +[coc]: https://github.com/syntax-tree/.github/blob/main/code-of-conduct.md + +[unist]: https://github.com/syntax-tree/unist + +[node]: https://github.com/syntax-tree/unist#nodes + +[unist-util-is]: https://github.com/syntax-tree/unist-util-is + +[vp]: https://github.com/syntax-tree/unist-util-visit-parents + +[vp-action]: https://github.com/syntax-tree/unist-util-visit-parents#action + +[vp-actiontuple]: https://github.com/syntax-tree/unist-util-visit-parents#actiontuple + +[vp-buildvisitor]: https://github.com/syntax-tree/unist-util-visit-parents#buildvisitor + +[vp-index]: https://github.com/syntax-tree/unist-util-visit-parents#index + +[vp-visitorresult]: https://github.com/syntax-tree/unist-util-visit-parents#visitorresult + +[api-visit]: #visittree-test-visitor-reverse + +[api-continue]: #continue + +[api-exit]: #exit + +[api-skip]: #skip + +[api-action]: #action + +[api-actiontuple]: #actiontuple + +[api-buildvisitor]: #buildvisitor + +[api-index]: #index + +[api-test]: #test + +[api-visitor]: #visitor + +[api-visitorresult]: #visitorresult diff --git a/_extensions/d2/node_modules/uvu/assert/index.d.ts b/_extensions/d2/node_modules/uvu/assert/index.d.ts new file mode 100644 index 00000000..11279d8c --- /dev/null +++ b/_extensions/d2/node_modules/uvu/assert/index.d.ts @@ -0,0 +1,47 @@ +type Types = 'string' | 'number' | 'boolean' | 'object' | 'undefined' | 'function'; + +export type Message = string | Error; +export function ok(actual: any, msg?: Message): asserts actual; +export function is(actual: any, expects: any, msg?: Message): void; +export function equal(actual: any, expects: any, msg?: Message): void; +export function type(actual: any, expects: Types, msg?: Message): void; +export function instance(actual: any, expects: any, msg?: Message): void; +export function snapshot(actual: string, expects: string, msg?: Message): void; +export function fixture(actual: string, expects: string, msg?: Message): void; +export function match(actual: string, expects: string | RegExp, msg?: Message): void; +export function throws(fn: Function, expects?: 
Message | RegExp | Function, msg?: Message): void; +export function not(actual: any, msg?: Message): void; +export function unreachable(msg?: Message): void; + +export namespace is { + function not(actual: any, expects: any, msg?: Message): void; +} + +export namespace not { + function ok(actual: any, msg?: Message): void; + function equal(actual: any, expects: any, msg?: Message): void; + function type(actual: any, expects: Types, msg?: Message): void; + function instance(actual: any, expects: any, msg?: Message): void; + function snapshot(actual: string, expects: string, msg?: Message): void; + function fixture(actual: string, expects: string, msg?: Message): void; + function match(actual: string, expects: string | RegExp, msg?: Message): void; + function throws(fn: Function, expects?: Message | RegExp | Function, msg?: Message): void; +} + +export class Assertion extends Error { + name: 'Assertion'; + code: 'ERR_ASSERTION'; + details: false | string; + generated: boolean; + operator: string; + expects: any; + actual: any; + constructor(options?: { + message: string; + details?: string; + generated?: boolean; + operator: string; + expects: any; + actual: any; + }); +} diff --git a/_extensions/d2/node_modules/uvu/assert/index.js b/_extensions/d2/node_modules/uvu/assert/index.js new file mode 100644 index 00000000..3b21d501 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/assert/index.js @@ -0,0 +1,173 @@ +const { dequal } = require('dequal'); +const { compare, lines } = require('uvu/diff'); + +function dedent(str) { + str = str.replace(/\r?\n/g, '\n'); + let arr = str.match(/^[ \t]*(?=\S)/gm); + let i = 0, min = 1/0, len = (arr||[]).length; + for (; i < len; i++) min = Math.min(min, arr[i].length); + return len && min ? str.replace(new RegExp(`^[ \\t]{${min}}`, 'gm'), '') : str; +} + +class Assertion extends Error { + constructor(opts={}) { + super(opts.message); + this.name = 'Assertion'; + this.code = 'ERR_ASSERTION'; + if (Error.captureStackTrace) { + Error.captureStackTrace(this, this.constructor); + } + this.details = opts.details || false; + this.generated = !!opts.generated; + this.operator = opts.operator; + this.expects = opts.expects; + this.actual = opts.actual; + } +} + +function assert(bool, actual, expects, operator, detailer, backup, msg) { + if (bool) return; + let message = msg || backup; + if (msg instanceof Error) throw msg; + let details = detailer && detailer(actual, expects); + throw new Assertion({ actual, expects, operator, message, details, generated: !msg }); +} + +function ok(val, msg) { + assert(!!val, false, true, 'ok', false, 'Expected value to be truthy', msg); +} + +function is(val, exp, msg) { + assert(val === exp, val, exp, 'is', compare, 'Expected values to be strictly equal:', msg); +} + +function equal(val, exp, msg) { + assert(dequal(val, exp), val, exp, 'equal', compare, 'Expected values to be deeply equal:', msg); +} + +function unreachable(msg) { + assert(false, true, false, 'unreachable', false, 'Expected not to be reached!', msg); +} + +function type(val, exp, msg) { + let tmp = typeof val; + assert(tmp === exp, tmp, exp, 'type', false, `Expected "${tmp}" to be "${exp}"`, msg); +} + +function instance(val, exp, msg) { + let name = '`' + (exp.name || exp.constructor.name) + '`'; + assert(val instanceof exp, val, exp, 'instance', false, `Expected value to be an instance of ${name}`, msg); +} + +function match(val, exp, msg) { + if (typeof exp === 'string') { + assert(val.includes(exp), val, exp, 'match', false, `Expected value to include 
"${exp}" substring`, msg); + } else { + assert(exp.test(val), val, exp, 'match', false, `Expected value to match \`${String(exp)}\` pattern`, msg); + } +} + +function snapshot(val, exp, msg) { + val=dedent(val); exp=dedent(exp); + assert(val === exp, val, exp, 'snapshot', lines, 'Expected value to match snapshot:', msg); +} + +const lineNums = (x, y) => lines(x, y, 1); +function fixture(val, exp, msg) { + val=dedent(val); exp=dedent(exp); + assert(val === exp, val, exp, 'fixture', lineNums, 'Expected value to match fixture:', msg); +} + +function throws(blk, exp, msg) { + if (!msg && typeof exp === 'string') { + msg = exp; exp = null; + } + + try { + blk(); + assert(false, false, true, 'throws', false, 'Expected function to throw', msg); + } catch (err) { + if (err instanceof Assertion) throw err; + + if (typeof exp === 'function') { + assert(exp(err), false, true, 'throws', false, 'Expected function to throw matching exception', msg); + } else if (exp instanceof RegExp) { + assert(exp.test(err.message), false, true, 'throws', false, `Expected function to throw exception matching \`${String(exp)}\` pattern`, msg); + } + } +} + +// --- + +function not(val, msg) { + assert(!val, true, false, 'not', false, 'Expected value to be falsey', msg); +} + +not.ok = not; + +is.not = function (val, exp, msg) { + assert(val !== exp, val, exp, 'is.not', false, 'Expected values not to be strictly equal', msg); +} + +not.equal = function (val, exp, msg) { + assert(!dequal(val, exp), val, exp, 'not.equal', false, 'Expected values not to be deeply equal', msg); +} + +not.type = function (val, exp, msg) { + let tmp = typeof val; + assert(tmp !== exp, tmp, exp, 'not.type', false, `Expected "${tmp}" not to be "${exp}"`, msg); +} + +not.instance = function (val, exp, msg) { + let name = '`' + (exp.name || exp.constructor.name) + '`'; + assert(!(val instanceof exp), val, exp, 'not.instance', false, `Expected value not to be an instance of ${name}`, msg); +} + +not.snapshot = function (val, exp, msg) { + val=dedent(val); exp=dedent(exp); + assert(val !== exp, val, exp, 'not.snapshot', false, 'Expected value not to match snapshot', msg); +} + +not.fixture = function (val, exp, msg) { + val=dedent(val); exp=dedent(exp); + assert(val !== exp, val, exp, 'not.fixture', false, 'Expected value not to match fixture', msg); +} + +not.match = function (val, exp, msg) { + if (typeof exp === 'string') { + assert(!val.includes(exp), val, exp, 'not.match', false, `Expected value not to include "${exp}" substring`, msg); + } else { + assert(!exp.test(val), val, exp, 'not.match', false, `Expected value not to match \`${String(exp)}\` pattern`, msg); + } +} + +not.throws = function (blk, exp, msg) { + if (!msg && typeof exp === 'string') { + msg = exp; exp = null; + } + + try { + blk(); + } catch (err) { + if (typeof exp === 'function') { + assert(!exp(err), true, false, 'not.throws', false, 'Expected function not to throw matching exception', msg); + } else if (exp instanceof RegExp) { + assert(!exp.test(err.message), true, false, 'not.throws', false, `Expected function not to throw exception matching \`${String(exp)}\` pattern`, msg); + } else if (!exp) { + assert(false, true, false, 'not.throws', false, 'Expected function not to throw', msg); + } + } +} + +exports.Assertion = Assertion; +exports.equal = equal; +exports.fixture = fixture; +exports.instance = instance; +exports.is = is; +exports.match = match; +exports.not = not; +exports.ok = ok; +exports.snapshot = snapshot; +exports.throws = throws; +exports.type = type; 
+exports.unreachable = unreachable; \ No newline at end of file diff --git a/_extensions/d2/node_modules/uvu/assert/index.mjs b/_extensions/d2/node_modules/uvu/assert/index.mjs new file mode 100644 index 00000000..2a2c0157 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/assert/index.mjs @@ -0,0 +1,160 @@ +import { dequal } from 'dequal'; +import { compare, lines } from 'uvu/diff'; + +function dedent(str) { + str = str.replace(/\r?\n/g, '\n'); + let arr = str.match(/^[ \t]*(?=\S)/gm); + let i = 0, min = 1/0, len = (arr||[]).length; + for (; i < len; i++) min = Math.min(min, arr[i].length); + return len && min ? str.replace(new RegExp(`^[ \\t]{${min}}`, 'gm'), '') : str; +} + +export class Assertion extends Error { + constructor(opts={}) { + super(opts.message); + this.name = 'Assertion'; + this.code = 'ERR_ASSERTION'; + if (Error.captureStackTrace) { + Error.captureStackTrace(this, this.constructor); + } + this.details = opts.details || false; + this.generated = !!opts.generated; + this.operator = opts.operator; + this.expects = opts.expects; + this.actual = opts.actual; + } +} + +function assert(bool, actual, expects, operator, detailer, backup, msg) { + if (bool) return; + let message = msg || backup; + if (msg instanceof Error) throw msg; + let details = detailer && detailer(actual, expects); + throw new Assertion({ actual, expects, operator, message, details, generated: !msg }); +} + +export function ok(val, msg) { + assert(!!val, false, true, 'ok', false, 'Expected value to be truthy', msg); +} + +export function is(val, exp, msg) { + assert(val === exp, val, exp, 'is', compare, 'Expected values to be strictly equal:', msg); +} + +export function equal(val, exp, msg) { + assert(dequal(val, exp), val, exp, 'equal', compare, 'Expected values to be deeply equal:', msg); +} + +export function unreachable(msg) { + assert(false, true, false, 'unreachable', false, 'Expected not to be reached!', msg); +} + +export function type(val, exp, msg) { + let tmp = typeof val; + assert(tmp === exp, tmp, exp, 'type', false, `Expected "${tmp}" to be "${exp}"`, msg); +} + +export function instance(val, exp, msg) { + let name = '`' + (exp.name || exp.constructor.name) + '`'; + assert(val instanceof exp, val, exp, 'instance', false, `Expected value to be an instance of ${name}`, msg); +} + +export function match(val, exp, msg) { + if (typeof exp === 'string') { + assert(val.includes(exp), val, exp, 'match', false, `Expected value to include "${exp}" substring`, msg); + } else { + assert(exp.test(val), val, exp, 'match', false, `Expected value to match \`${String(exp)}\` pattern`, msg); + } +} + +export function snapshot(val, exp, msg) { + val=dedent(val); exp=dedent(exp); + assert(val === exp, val, exp, 'snapshot', lines, 'Expected value to match snapshot:', msg); +} + +const lineNums = (x, y) => lines(x, y, 1); +export function fixture(val, exp, msg) { + val=dedent(val); exp=dedent(exp); + assert(val === exp, val, exp, 'fixture', lineNums, 'Expected value to match fixture:', msg); +} + +export function throws(blk, exp, msg) { + if (!msg && typeof exp === 'string') { + msg = exp; exp = null; + } + + try { + blk(); + assert(false, false, true, 'throws', false, 'Expected function to throw', msg); + } catch (err) { + if (err instanceof Assertion) throw err; + + if (typeof exp === 'function') { + assert(exp(err), false, true, 'throws', false, 'Expected function to throw matching exception', msg); + } else if (exp instanceof RegExp) { + assert(exp.test(err.message), false, true, 'throws', false, `Expected 
function to throw exception matching \`${String(exp)}\` pattern`, msg); + } + } +} + +// --- + +export function not(val, msg) { + assert(!val, true, false, 'not', false, 'Expected value to be falsey', msg); +} + +not.ok = not; + +is.not = function (val, exp, msg) { + assert(val !== exp, val, exp, 'is.not', false, 'Expected values not to be strictly equal', msg); +} + +not.equal = function (val, exp, msg) { + assert(!dequal(val, exp), val, exp, 'not.equal', false, 'Expected values not to be deeply equal', msg); +} + +not.type = function (val, exp, msg) { + let tmp = typeof val; + assert(tmp !== exp, tmp, exp, 'not.type', false, `Expected "${tmp}" not to be "${exp}"`, msg); +} + +not.instance = function (val, exp, msg) { + let name = '`' + (exp.name || exp.constructor.name) + '`'; + assert(!(val instanceof exp), val, exp, 'not.instance', false, `Expected value not to be an instance of ${name}`, msg); +} + +not.snapshot = function (val, exp, msg) { + val=dedent(val); exp=dedent(exp); + assert(val !== exp, val, exp, 'not.snapshot', false, 'Expected value not to match snapshot', msg); +} + +not.fixture = function (val, exp, msg) { + val=dedent(val); exp=dedent(exp); + assert(val !== exp, val, exp, 'not.fixture', false, 'Expected value not to match fixture', msg); +} + +not.match = function (val, exp, msg) { + if (typeof exp === 'string') { + assert(!val.includes(exp), val, exp, 'not.match', false, `Expected value not to include "${exp}" substring`, msg); + } else { + assert(!exp.test(val), val, exp, 'not.match', false, `Expected value not to match \`${String(exp)}\` pattern`, msg); + } +} + +not.throws = function (blk, exp, msg) { + if (!msg && typeof exp === 'string') { + msg = exp; exp = null; + } + + try { + blk(); + } catch (err) { + if (typeof exp === 'function') { + assert(!exp(err), true, false, 'not.throws', false, 'Expected function not to throw matching exception', msg); + } else if (exp instanceof RegExp) { + assert(!exp.test(err.message), true, false, 'not.throws', false, `Expected function not to throw exception matching \`${String(exp)}\` pattern`, msg); + } else if (!exp) { + assert(false, true, false, 'not.throws', false, 'Expected function not to throw', msg); + } + } +} diff --git a/_extensions/d2/node_modules/uvu/bin.js b/_extensions/d2/node_modules/uvu/bin.js new file mode 100644 index 00000000..3ba0e3b9 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/bin.js @@ -0,0 +1,35 @@ +#!/usr/bin/env node +const sade = require('sade'); +const pkg = require('./package'); +const { parse } = require('./parse'); + +const dimport = x => new Function(`return import(${ JSON.stringify(x) })`).call(0); + +const hasImport = (() => { + try { new Function('import').call(0) } + catch (err) { return !/unexpected/i.test(err.message) } +})(); + +sade('uvu [dir] [pattern]') + .version(pkg.version) + .option('-b, --bail', 'Exit on first failure') + .option('-i, --ignore', 'Any file patterns to ignore') + .option('-r, --require', 'Additional module(s) to preload') + .option('-C, --cwd', 'The current directory to resolve from', '.') + .option('-c, --color', 'Print colorized output', true) + .action(async (dir, pattern, opts) => { + try { + if (opts.color) process.env.FORCE_COLOR = '1'; + let ctx = await parse(dir, pattern, opts); + + if (!ctx.requires && hasImport) { + await dimport('uvu/run').then(m => m.run(ctx.suites, opts)); + } else { + await require('uvu/run').run(ctx.suites, opts); + } + } catch (err) { + console.error(err.stack || err.message); + process.exit(1); + } + }) + 
.parse(process.argv); diff --git a/_extensions/d2/node_modules/uvu/diff/index.d.ts b/_extensions/d2/node_modules/uvu/diff/index.d.ts new file mode 100644 index 00000000..97adcd23 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/diff/index.d.ts @@ -0,0 +1,5 @@ +export function chars(input: any, expects: any): string; +export function lines(input: any, expects: any, linenum?: number): string; +export function direct(input: any, expects: any, lenA?: number, lenB?: number): string; +export function compare(input: any, expects: any): string; +export function arrays(input: any, expects: any): string; diff --git a/_extensions/d2/node_modules/uvu/diff/index.js b/_extensions/d2/node_modules/uvu/diff/index.js new file mode 100644 index 00000000..d51e311c --- /dev/null +++ b/_extensions/d2/node_modules/uvu/diff/index.js @@ -0,0 +1,228 @@ +const kleur = require('kleur'); +const diff = require('diff'); + +const colors = { + '--': kleur.red, + '··': kleur.grey, + '++': kleur.green, +}; + +const TITLE = kleur.dim().italic; +const TAB=kleur.dim('→'), SPACE=kleur.dim('·'), NL=kleur.dim('↵'); +const LOG = (sym, str) => colors[sym](sym + PRETTY(str)) + '\n'; +const LINE = (num, x) => kleur.dim('L' + String(num).padStart(x, '0') + ' '); +const PRETTY = str => str.replace(/[ ]/g, SPACE).replace(/\t/g, TAB).replace(/(\r?\n)/g, NL); + +function line(obj, prev, pad) { + let char = obj.removed ? '--' : obj.added ? '++' : '··'; + let arr = obj.value.replace(/\r?\n$/, '').split('\n'); + let i=0, tmp, out=''; + + if (obj.added) out += colors[char]().underline(TITLE('Expected:')) + '\n'; + else if (obj.removed) out += colors[char]().underline(TITLE('Actual:')) + '\n'; + + for (; i < arr.length; i++) { + tmp = arr[i]; + if (tmp != null) { + if (prev) out += LINE(prev + i, pad); + out += LOG(char, tmp || '\n'); + } + } + + return out; +} + +// TODO: want better diffing +//~> complex items bail outright +function arrays(input, expect) { + let arr = diff.diffArrays(input, expect); + let i=0, j=0, k=0, tmp, val, char, isObj, str; + let out = LOG('··', '['); + + for (; i < arr.length; i++) { + char = (tmp = arr[i]).removed ? '--' : tmp.added ? '++' : '··'; + + if (tmp.added) { + out += colors[char]().underline(TITLE('Expected:')) + '\n'; + } else if (tmp.removed) { + out += colors[char]().underline(TITLE('Actual:')) + '\n'; + } + + for (j=0; j < tmp.value.length; j++) { + isObj = (tmp.value[j] && typeof tmp.value[j] === 'object'); + val = stringify(tmp.value[j]).split(/\r?\n/g); + for (k=0; k < val.length;) { + str = ' ' + val[k++] + (isObj ? 
'' : ','); + if (isObj && k === val.length && (j + 1) < tmp.value.length) str += ','; + out += LOG(char, str); + } + } + } + + return out + LOG('··', ']'); +} + +function lines(input, expect, linenum = 0) { + let i=0, tmp, output=''; + let arr = diff.diffLines(input, expect); + let pad = String(expect.split(/\r?\n/g).length - linenum).length; + + for (; i < arr.length; i++) { + output += line(tmp = arr[i], linenum, pad); + if (linenum && !tmp.removed) linenum += tmp.count; + } + + return output; +} + +function chars(input, expect) { + let arr = diff.diffChars(input, expect); + let i=0, output='', tmp; + + let l1 = input.length; + let l2 = expect.length; + + let p1 = PRETTY(input); + let p2 = PRETTY(expect); + + tmp = arr[i]; + + if (l1 === l2) { + // no length offsets + } else if (tmp.removed && arr[i + 1]) { + let del = tmp.count - arr[i + 1].count; + if (del == 0) { + // wash~ + } else if (del > 0) { + expect = ' '.repeat(del) + expect; + p2 = ' '.repeat(del) + p2; + l2 += del; + } else if (del < 0) { + input = ' '.repeat(-del) + input; + p1 = ' '.repeat(-del) + p1; + l1 += -del; + } + } + + output += direct(p1, p2, l1, l2); + + if (l1 === l2) { + for (tmp=' '; i < l1; i++) { + tmp += input[i] === expect[i] ? ' ' : '^'; + } + } else { + for (tmp=' '; i < arr.length; i++) { + tmp += ((arr[i].added || arr[i].removed) ? '^' : ' ').repeat(Math.max(arr[i].count, 0)); + if (i + 1 < arr.length && ((arr[i].added && arr[i+1].removed) || (arr[i].removed && arr[i+1].added))) { + arr[i + 1].count -= arr[i].count; + } + } + } + + return output + kleur.red(tmp); +} + +function direct(input, expect, lenA = String(input).length, lenB = String(expect).length) { + let gutter = 4; + let lenC = Math.max(lenA, lenB); + let typeA=typeof input, typeB=typeof expect; + + if (typeA !== typeB) { + gutter = 2; + + let delA = gutter + lenC - lenA; + let delB = gutter + lenC - lenB; + + input += ' '.repeat(delA) + kleur.dim(`[${typeA}]`); + expect += ' '.repeat(delB) + kleur.dim(`[${typeB}]`); + + lenA += delA + typeA.length + 2; + lenB += delB + typeB.length + 2; + lenC = Math.max(lenA, lenB); + } + + let output = colors['++']('++' + expect + ' '.repeat(gutter + lenC - lenB) + TITLE('(Expected)')) + '\n'; + return output + colors['--']('--' + input + ' '.repeat(gutter + lenC - lenA) + TITLE('(Actual)')) + '\n'; +} + +function sort(input, expect) { + var k, i=0, tmp, isArr = Array.isArray(input); + var keys=[], out=isArr ? 
Array(input.length) : {}; + + if (isArr) { + for (i=0; i < out.length; i++) { + tmp = input[i]; + if (!tmp || typeof tmp !== 'object') out[i] = tmp; + else out[i] = sort(tmp, expect[i]); // might not be right + } + } else { + for (k in expect) + keys.push(k); + + for (; i < keys.length; i++) { + if (Object.prototype.hasOwnProperty.call(input, k = keys[i])) { + if (!(tmp = input[k]) || typeof tmp !== 'object') out[k] = tmp; + else out[k] = sort(tmp, expect[k]); + } + } + + for (k in input) { + if (!out.hasOwnProperty(k)) { + out[k] = input[k]; // expect didnt have + } + } + } + + return out; +} + +function circular() { + var cache = new Set; + return function print(key, val) { + if (val === void 0) return '[__VOID__]'; + if (typeof val === 'number' && val !== val) return '[__NAN__]'; + if (typeof val === 'bigint') return val.toString(); + if (!val || typeof val !== 'object') return val; + if (cache.has(val)) return '[Circular]'; + cache.add(val); return val; + } +} + +function stringify(input) { + return JSON.stringify(input, circular(), 2).replace(/"\[__NAN__\]"/g, 'NaN').replace(/"\[__VOID__\]"/g, 'undefined'); +} + +function compare(input, expect) { + if (Array.isArray(expect) && Array.isArray(input)) return arrays(input, expect); + if (expect instanceof RegExp) return chars(''+input, ''+expect); + + let isA = input && typeof input == 'object'; + let isB = expect && typeof expect == 'object'; + + if (isA && isB) input = sort(input, expect); + if (isB) expect = stringify(expect); + if (isA) input = stringify(input); + + if (expect && typeof expect == 'object') { + input = stringify(sort(input, expect)); + expect = stringify(expect); + } + + isA = typeof input == 'string'; + isB = typeof expect == 'string'; + + if (isA && /\r?\n/.test(input)) return lines(input, ''+expect); + if (isB && /\r?\n/.test(expect)) return lines(''+input, expect); + if (isA && isB) return chars(input, expect); + + return direct(input, expect); +} + +exports.arrays = arrays; +exports.chars = chars; +exports.circular = circular; +exports.compare = compare; +exports.direct = direct; +exports.lines = lines; +exports.sort = sort; +exports.stringify = stringify; \ No newline at end of file diff --git a/_extensions/d2/node_modules/uvu/diff/index.mjs b/_extensions/d2/node_modules/uvu/diff/index.mjs new file mode 100644 index 00000000..c16f7b57 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/diff/index.mjs @@ -0,0 +1,219 @@ +import kleur from 'kleur'; +import * as diff from 'diff'; + +const colors = { + '--': kleur.red, + '··': kleur.grey, + '++': kleur.green, +}; + +const TITLE = kleur.dim().italic; +const TAB=kleur.dim('→'), SPACE=kleur.dim('·'), NL=kleur.dim('↵'); +const LOG = (sym, str) => colors[sym](sym + PRETTY(str)) + '\n'; +const LINE = (num, x) => kleur.dim('L' + String(num).padStart(x, '0') + ' '); +const PRETTY = str => str.replace(/[ ]/g, SPACE).replace(/\t/g, TAB).replace(/(\r?\n)/g, NL); + +function line(obj, prev, pad) { + let char = obj.removed ? '--' : obj.added ? 
'++' : '··'; + let arr = obj.value.replace(/\r?\n$/, '').split('\n'); + let i=0, tmp, out=''; + + if (obj.added) out += colors[char]().underline(TITLE('Expected:')) + '\n'; + else if (obj.removed) out += colors[char]().underline(TITLE('Actual:')) + '\n'; + + for (; i < arr.length; i++) { + tmp = arr[i]; + if (tmp != null) { + if (prev) out += LINE(prev + i, pad); + out += LOG(char, tmp || '\n'); + } + } + + return out; +} + +// TODO: want better diffing +//~> complex items bail outright +export function arrays(input, expect) { + let arr = diff.diffArrays(input, expect); + let i=0, j=0, k=0, tmp, val, char, isObj, str; + let out = LOG('··', '['); + + for (; i < arr.length; i++) { + char = (tmp = arr[i]).removed ? '--' : tmp.added ? '++' : '··'; + + if (tmp.added) { + out += colors[char]().underline(TITLE('Expected:')) + '\n'; + } else if (tmp.removed) { + out += colors[char]().underline(TITLE('Actual:')) + '\n'; + } + + for (j=0; j < tmp.value.length; j++) { + isObj = (tmp.value[j] && typeof tmp.value[j] === 'object'); + val = stringify(tmp.value[j]).split(/\r?\n/g); + for (k=0; k < val.length;) { + str = ' ' + val[k++] + (isObj ? '' : ','); + if (isObj && k === val.length && (j + 1) < tmp.value.length) str += ','; + out += LOG(char, str); + } + } + } + + return out + LOG('··', ']'); +} + +export function lines(input, expect, linenum = 0) { + let i=0, tmp, output=''; + let arr = diff.diffLines(input, expect); + let pad = String(expect.split(/\r?\n/g).length - linenum).length; + + for (; i < arr.length; i++) { + output += line(tmp = arr[i], linenum, pad); + if (linenum && !tmp.removed) linenum += tmp.count; + } + + return output; +} + +export function chars(input, expect) { + let arr = diff.diffChars(input, expect); + let i=0, output='', tmp; + + let l1 = input.length; + let l2 = expect.length; + + let p1 = PRETTY(input); + let p2 = PRETTY(expect); + + tmp = arr[i]; + + if (l1 === l2) { + // no length offsets + } else if (tmp.removed && arr[i + 1]) { + let del = tmp.count - arr[i + 1].count; + if (del == 0) { + // wash~ + } else if (del > 0) { + expect = ' '.repeat(del) + expect; + p2 = ' '.repeat(del) + p2; + l2 += del; + } else if (del < 0) { + input = ' '.repeat(-del) + input; + p1 = ' '.repeat(-del) + p1; + l1 += -del; + } + } + + output += direct(p1, p2, l1, l2); + + if (l1 === l2) { + for (tmp=' '; i < l1; i++) { + tmp += input[i] === expect[i] ? ' ' : '^'; + } + } else { + for (tmp=' '; i < arr.length; i++) { + tmp += ((arr[i].added || arr[i].removed) ? 
'^' : ' ').repeat(Math.max(arr[i].count, 0)); + if (i + 1 < arr.length && ((arr[i].added && arr[i+1].removed) || (arr[i].removed && arr[i+1].added))) { + arr[i + 1].count -= arr[i].count; + } + } + } + + return output + kleur.red(tmp); +} + +export function direct(input, expect, lenA = String(input).length, lenB = String(expect).length) { + let gutter = 4; + let lenC = Math.max(lenA, lenB); + let typeA=typeof input, typeB=typeof expect; + + if (typeA !== typeB) { + gutter = 2; + + let delA = gutter + lenC - lenA; + let delB = gutter + lenC - lenB; + + input += ' '.repeat(delA) + kleur.dim(`[${typeA}]`); + expect += ' '.repeat(delB) + kleur.dim(`[${typeB}]`); + + lenA += delA + typeA.length + 2; + lenB += delB + typeB.length + 2; + lenC = Math.max(lenA, lenB); + } + + let output = colors['++']('++' + expect + ' '.repeat(gutter + lenC - lenB) + TITLE('(Expected)')) + '\n'; + return output + colors['--']('--' + input + ' '.repeat(gutter + lenC - lenA) + TITLE('(Actual)')) + '\n'; +} + +export function sort(input, expect) { + var k, i=0, tmp, isArr = Array.isArray(input); + var keys=[], out=isArr ? Array(input.length) : {}; + + if (isArr) { + for (i=0; i < out.length; i++) { + tmp = input[i]; + if (!tmp || typeof tmp !== 'object') out[i] = tmp; + else out[i] = sort(tmp, expect[i]); // might not be right + } + } else { + for (k in expect) + keys.push(k); + + for (; i < keys.length; i++) { + if (Object.prototype.hasOwnProperty.call(input, k = keys[i])) { + if (!(tmp = input[k]) || typeof tmp !== 'object') out[k] = tmp; + else out[k] = sort(tmp, expect[k]); + } + } + + for (k in input) { + if (!out.hasOwnProperty(k)) { + out[k] = input[k]; // expect didnt have + } + } + } + + return out; +} + +export function circular() { + var cache = new Set; + return function print(key, val) { + if (val === void 0) return '[__VOID__]'; + if (typeof val === 'number' && val !== val) return '[__NAN__]'; + if (typeof val === 'bigint') return val.toString(); + if (!val || typeof val !== 'object') return val; + if (cache.has(val)) return '[Circular]'; + cache.add(val); return val; + } +} + +export function stringify(input) { + return JSON.stringify(input, circular(), 2).replace(/"\[__NAN__\]"/g, 'NaN').replace(/"\[__VOID__\]"/g, 'undefined'); +} + +export function compare(input, expect) { + if (Array.isArray(expect) && Array.isArray(input)) return arrays(input, expect); + if (expect instanceof RegExp) return chars(''+input, ''+expect); + + let isA = input && typeof input == 'object'; + let isB = expect && typeof expect == 'object'; + + if (isA && isB) input = sort(input, expect); + if (isB) expect = stringify(expect); + if (isA) input = stringify(input); + + if (expect && typeof expect == 'object') { + input = stringify(sort(input, expect)); + expect = stringify(expect); + } + + isA = typeof input == 'string'; + isB = typeof expect == 'string'; + + if (isA && /\r?\n/.test(input)) return lines(input, ''+expect); + if (isB && /\r?\n/.test(expect)) return lines(''+input, expect); + if (isA && isB) return chars(input, expect); + + return direct(input, expect); +} diff --git a/_extensions/d2/node_modules/uvu/dist/index.js b/_extensions/d2/node_modules/uvu/dist/index.js new file mode 100644 index 00000000..4be54664 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/dist/index.js @@ -0,0 +1,167 @@ +const kleur = require('kleur'); +const { compare } = require('uvu/diff'); + +let isCLI = false, isNode = false; +let hrtime = (now = Date.now()) => () => (Date.now() - now).toFixed(2) + 'ms'; +let write = console.log; + +const 
into = (ctx, key) => (name, handler) => ctx[key].push({ name, handler }); +const context = (state) => ({ tests:[], before:[], after:[], bEach:[], aEach:[], only:[], skips:0, state }); +const milli = arr => (arr[0]*1e3 + arr[1]/1e6).toFixed(2) + 'ms'; +const hook = (ctx, key) => handler => ctx[key].push(handler); + +if (isNode = typeof process < 'u' && typeof process.stdout < 'u') { + // globalThis polyfill; Node < 12 + if (typeof globalThis !== 'object') { + Object.defineProperty(global, 'globalThis', { + get: function () { return this } + }); + } + + let rgx = /(\.bin[\\+\/]uvu$|uvu[\\+\/]bin\.js)/i; + isCLI = process.argv.some(x => rgx.test(x)); + + // attach node-specific utils + write = x => process.stdout.write(x); + hrtime = (now = process.hrtime()) => () => milli(process.hrtime(now)); +} else if (typeof performance < 'u') { + hrtime = (now = performance.now()) => () => (performance.now() - now).toFixed(2) + 'ms'; +} + +globalThis.UVU_QUEUE = globalThis.UVU_QUEUE || []; +isCLI = isCLI || !!globalThis.UVU_DEFER; +isCLI || UVU_QUEUE.push([null]); + +const QUOTE = kleur.dim('"'), GUTTER = '\n '; +const FAIL = kleur.red('✘ '), PASS = kleur.gray('• '); +const IGNORE = /^\s*at.*(?:\(|\s)(?:node|(internal\/[\w/]*))/; +const FAILURE = kleur.bold().bgRed(' FAIL '); +const FILE = kleur.bold().underline().white; +const SUITE = kleur.bgWhite().bold; + +function stack(stack, idx) { + let i=0, line, out=''; + let arr = stack.substring(idx).replace(/\\/g, '/').split('\n'); + for (; i < arr.length; i++) { + line = arr[i].trim(); + if (line.length && !IGNORE.test(line)) { + out += '\n ' + line; + } + } + return kleur.grey(out) + '\n'; +} + +function format(name, err, suite = '') { + let { details, operator='' } = err; + let idx = err.stack && err.stack.indexOf('\n'); + if (err.name.startsWith('AssertionError') && !operator.includes('not')) details = compare(err.actual, err.expected); // TODO? + let str = ' ' + FAILURE + (suite ? kleur.red(SUITE(` ${suite} `)) : '') + ' ' + QUOTE + kleur.red().bold(name) + QUOTE; + str += '\n ' + err.message + (operator ? kleur.italic().dim(` (${operator})`) : '') + '\n'; + if (details) str += GUTTER + details.split('\n').join(GUTTER); + if (!!~idx) str += stack(err.stack, idx); + return str + '\n'; +} + +async function runner(ctx, name) { + let { only, tests, before, after, bEach, aEach, state } = ctx; + let hook, test, arr = only.length ? only : tests; + let num=0, errors='', total=arr.length; + + try { + if (name) write(SUITE(kleur.black(` ${name} `)) + ' '); + for (hook of before) await hook(state); + + for (test of arr) { + state.__test__ = test.name; + try { + for (hook of bEach) await hook(state); + await test.handler(state); + for (hook of aEach) await hook(state); + write(PASS); + num++; + } catch (err) { + for (hook of aEach) await hook(state); + if (errors.length) errors += '\n'; + errors += format(test.name, err, name); + write(FAIL); + } + } + } finally { + state.__test__ = ''; + for (hook of after) await hook(state); + let msg = ` (${num} / ${total})\n`; + let skipped = (only.length ? tests.length : 0) + ctx.skips; + write(errors.length ? 
kleur.red(msg) : kleur.green(msg)); + return [errors || true, num, skipped, total]; + } +} + +let timer; +function defer() { + clearTimeout(timer); + timer = setTimeout(exec); +} + +function setup(ctx, name = '') { + ctx.state.__test__ = ''; + ctx.state.__suite__ = name; + const test = into(ctx, 'tests'); + test.before = hook(ctx, 'before'); + test.before.each = hook(ctx, 'bEach'); + test.after = hook(ctx, 'after'); + test.after.each = hook(ctx, 'aEach'); + test.only = into(ctx, 'only'); + test.skip = () => { ctx.skips++ }; + test.run = () => { + let copy = { ...ctx }; + let run = runner.bind(0, copy, name); + Object.assign(ctx, context(copy.state)); + UVU_QUEUE[globalThis.UVU_INDEX || 0].push(run); + isCLI || defer(); + }; + return test; +} + +const suite = (name = '', state = {}) => setup(context(state), name); +const test = suite(); + +let isRunning = false; +async function exec(bail) { + let timer = hrtime(); + let done=0, total=0, skips=0, code=0; + + isRunning = true; + for (let group of UVU_QUEUE) { + if (total) write('\n'); + + let name = group.shift(); + if (name != null) write(FILE(name) + '\n'); + + for (let test of group) { + let [errs, ran, skip, max] = await test(); + total += max; done += ran; skips += skip; + if (errs.length) { + write('\n' + errs + '\n'); code=1; + if (bail) return isNode && process.exit(1); + } + } + } + + isRunning = false; + write('\n Total: ' + total); + write((code ? kleur.red : kleur.green)('\n Passed: ' + done)); + write('\n Skipped: ' + (skips ? kleur.yellow(skips) : skips)); + write('\n Duration: ' + timer() + '\n\n'); + + if (isNode) process.exitCode = code; +} + +if (isNode) process.on('exit', () => { + if (!isRunning) return; // okay to exit + process.exitCode = process.exitCode || 1; + console.error('Exiting early before testing is finished.'); +}); + +exports.exec = exec; +exports.suite = suite; +exports.test = test; \ No newline at end of file diff --git a/_extensions/d2/node_modules/uvu/dist/index.mjs b/_extensions/d2/node_modules/uvu/dist/index.mjs new file mode 100644 index 00000000..ced88a8e --- /dev/null +++ b/_extensions/d2/node_modules/uvu/dist/index.mjs @@ -0,0 +1,163 @@ +import kleur from 'kleur'; +import { compare } from 'uvu/diff'; + +let isCLI = false, isNode = false; +let hrtime = (now = Date.now()) => () => (Date.now() - now).toFixed(2) + 'ms'; +let write = console.log; + +const into = (ctx, key) => (name, handler) => ctx[key].push({ name, handler }); +const context = (state) => ({ tests:[], before:[], after:[], bEach:[], aEach:[], only:[], skips:0, state }); +const milli = arr => (arr[0]*1e3 + arr[1]/1e6).toFixed(2) + 'ms'; +const hook = (ctx, key) => handler => ctx[key].push(handler); + +if (isNode = typeof process < 'u' && typeof process.stdout < 'u') { + // globalThis polyfill; Node < 12 + if (typeof globalThis !== 'object') { + Object.defineProperty(global, 'globalThis', { + get: function () { return this } + }); + } + + let rgx = /(\.bin[\\+\/]uvu$|uvu[\\+\/]bin\.js)/i; + isCLI = process.argv.some(x => rgx.test(x)); + + // attach node-specific utils + write = x => process.stdout.write(x); + hrtime = (now = process.hrtime()) => () => milli(process.hrtime(now)); +} else if (typeof performance < 'u') { + hrtime = (now = performance.now()) => () => (performance.now() - now).toFixed(2) + 'ms'; +} + +globalThis.UVU_QUEUE = globalThis.UVU_QUEUE || []; +isCLI = isCLI || !!globalThis.UVU_DEFER; +isCLI || UVU_QUEUE.push([null]); + +const QUOTE = kleur.dim('"'), GUTTER = '\n '; +const FAIL = kleur.red('✘ '), PASS = kleur.gray('• '); 
+const IGNORE = /^\s*at.*(?:\(|\s)(?:node|(internal\/[\w/]*))/; +const FAILURE = kleur.bold().bgRed(' FAIL '); +const FILE = kleur.bold().underline().white; +const SUITE = kleur.bgWhite().bold; + +function stack(stack, idx) { + let i=0, line, out=''; + let arr = stack.substring(idx).replace(/\\/g, '/').split('\n'); + for (; i < arr.length; i++) { + line = arr[i].trim(); + if (line.length && !IGNORE.test(line)) { + out += '\n ' + line; + } + } + return kleur.grey(out) + '\n'; +} + +function format(name, err, suite = '') { + let { details, operator='' } = err; + let idx = err.stack && err.stack.indexOf('\n'); + if (err.name.startsWith('AssertionError') && !operator.includes('not')) details = compare(err.actual, err.expected); // TODO? + let str = ' ' + FAILURE + (suite ? kleur.red(SUITE(` ${suite} `)) : '') + ' ' + QUOTE + kleur.red().bold(name) + QUOTE; + str += '\n ' + err.message + (operator ? kleur.italic().dim(` (${operator})`) : '') + '\n'; + if (details) str += GUTTER + details.split('\n').join(GUTTER); + if (!!~idx) str += stack(err.stack, idx); + return str + '\n'; +} + +async function runner(ctx, name) { + let { only, tests, before, after, bEach, aEach, state } = ctx; + let hook, test, arr = only.length ? only : tests; + let num=0, errors='', total=arr.length; + + try { + if (name) write(SUITE(kleur.black(` ${name} `)) + ' '); + for (hook of before) await hook(state); + + for (test of arr) { + state.__test__ = test.name; + try { + for (hook of bEach) await hook(state); + await test.handler(state); + for (hook of aEach) await hook(state); + write(PASS); + num++; + } catch (err) { + for (hook of aEach) await hook(state); + if (errors.length) errors += '\n'; + errors += format(test.name, err, name); + write(FAIL); + } + } + } finally { + state.__test__ = ''; + for (hook of after) await hook(state); + let msg = ` (${num} / ${total})\n`; + let skipped = (only.length ? tests.length : 0) + ctx.skips; + write(errors.length ? kleur.red(msg) : kleur.green(msg)); + return [errors || true, num, skipped, total]; + } +} + +let timer; +function defer() { + clearTimeout(timer); + timer = setTimeout(exec); +} + +function setup(ctx, name = '') { + ctx.state.__test__ = ''; + ctx.state.__suite__ = name; + const test = into(ctx, 'tests'); + test.before = hook(ctx, 'before'); + test.before.each = hook(ctx, 'bEach'); + test.after = hook(ctx, 'after'); + test.after.each = hook(ctx, 'aEach'); + test.only = into(ctx, 'only'); + test.skip = () => { ctx.skips++ }; + test.run = () => { + let copy = { ...ctx }; + let run = runner.bind(0, copy, name); + Object.assign(ctx, context(copy.state)); + UVU_QUEUE[globalThis.UVU_INDEX || 0].push(run); + isCLI || defer(); + }; + return test; +} + +export const suite = (name = '', state = {}) => setup(context(state), name); +export const test = suite(); + +let isRunning = false; +export async function exec(bail) { + let timer = hrtime(); + let done=0, total=0, skips=0, code=0; + + isRunning = true; + for (let group of UVU_QUEUE) { + if (total) write('\n'); + + let name = group.shift(); + if (name != null) write(FILE(name) + '\n'); + + for (let test of group) { + let [errs, ran, skip, max] = await test(); + total += max; done += ran; skips += skip; + if (errs.length) { + write('\n' + errs + '\n'); code=1; + if (bail) return isNode && process.exit(1); + } + } + } + + isRunning = false; + write('\n Total: ' + total); + write((code ? kleur.red : kleur.green)('\n Passed: ' + done)); + write('\n Skipped: ' + (skips ? 
kleur.yellow(skips) : skips)); + write('\n Duration: ' + timer() + '\n\n'); + + if (isNode) process.exitCode = code; +} + +if (isNode) process.on('exit', () => { + if (!isRunning) return; // okay to exit + process.exitCode = process.exitCode || 1; + console.error('Exiting early before testing is finished.'); +}); diff --git a/_extensions/d2/node_modules/uvu/index.d.ts b/_extensions/d2/node_modules/uvu/index.d.ts new file mode 100644 index 00000000..f92e5500 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/index.d.ts @@ -0,0 +1,27 @@ +declare namespace uvu { + type Crumbs = { __suite__: string; __test__: string }; + type Callback = (context: T & Crumbs) => Promise | void; + + interface Hook { + (hook: Callback): void; + each(hook: Callback): void; + } + + interface Test { + (name: string, test: Callback): void; + only(name: string, test: Callback): void; + skip(name?: string, test?: Callback): void; + before: Hook; + after: Hook + run(): void; + } +} + +type Context = Record; + +export type Test = uvu.Test; +export type Callback = uvu.Callback; + +export const test: uvu.Test; +export function suite(title?: string, context?: T): uvu.Test; +export function exec(bail?: boolean): Promise; diff --git a/_extensions/d2/node_modules/uvu/license b/_extensions/d2/node_modules/uvu/license new file mode 100644 index 00000000..a3f96f82 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/license @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) Luke Edwards (lukeed.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
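
The `index.d.ts` typings just above declare the suite-level surface (`suite`, per-suite hooks, `only`/`skip`, and `run`). As a rough, illustrative sketch of how those pieces are typically wired together (the suite and file names below are hypothetical, not part of the package):

```js
// demo.suite.js (hypothetical file name)
import { suite } from 'uvu';
import * as assert from 'uvu/assert';

const math = suite('math');

// Suite-level hooks, per the Hook interface in index.d.ts
math.before(() => { /* runs once, before any test in this suite */ });
math.after.each(() => { /* runs after every test in this suite */ });

// A regular test registered on the suite
math('sqrt', () => {
  assert.is(Math.sqrt(144), 12);
});

// Skipped tests are counted but not executed
math.skip('cbrt (not implemented yet)');

// Queue the suite for execution
math.run();
```

Each suite queues its own tests and `run()` schedules them, which is what keeps individual test files independently executable, as the bundled readme further down also notes.
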
diff --git a/_extensions/d2/node_modules/uvu/package.json b/_extensions/d2/node_modules/uvu/package.json new file mode 100644 index 00000000..1f9503c2 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/package.json @@ -0,0 +1,85 @@ +{ + "name": "uvu", + "version": "0.5.6", + "repository": "lukeed/uvu", + "description": "uvu is an extremely fast and lightweight test runner for Node.js and the browser", + "module": "dist/index.mjs", + "main": "dist/index.js", + "types": "index.d.ts", + "license": "MIT", + "bin": { + "uvu": "bin.js" + }, + "exports": { + ".": { + "types": "./index.d.ts", + "require": "./dist/index.js", + "import": "./dist/index.mjs" + }, + "./assert": { + "types": "./assert/index.d.ts", + "require": "./assert/index.js", + "import": "./assert/index.mjs" + }, + "./diff": { + "types": "./diff/index.d.ts", + "require": "./diff/index.js", + "import": "./diff/index.mjs" + }, + "./parse": { + "types": "./parse/index.d.ts", + "require": "./parse/index.js", + "import": "./parse/index.mjs" + }, + "./run": { + "types": "./run/index.d.ts", + "require": "./run/index.js", + "import": "./run/index.mjs" + } + }, + "files": [ + "*.js", + "*.d.ts", + "assert", + "parse", + "diff", + "dist", + "run" + ], + "modes": { + "diff": "src/diff.js", + "assert": "src/assert.js", + "default": "src/index.js" + }, + "scripts": { + "build": "bundt", + "test": "node test" + }, + "engines": { + "node": ">=8" + }, + "keywords": [ + "assert", + "diffs", + "runner", + "snapshot", + "test" + ], + "dependencies": { + "dequal": "^2.0.0", + "diff": "^5.0.0", + "kleur": "^4.0.3", + "sade": "^1.7.3" + }, + "devDependencies": { + "bundt": "1.1.1", + "esm": "3.2.25", + "module-alias": "2.2.2", + "totalist": "2.0.0" + }, + "_moduleAliases": { + "uvu": "src/index.js", + "uvu/diff": "src/diff.js", + "uvu/assert": "src/assert.js" + } +} diff --git a/_extensions/d2/node_modules/uvu/parse/index.d.ts b/_extensions/d2/node_modules/uvu/parse/index.d.ts new file mode 100644 index 00000000..bcaac0de --- /dev/null +++ b/_extensions/d2/node_modules/uvu/parse/index.d.ts @@ -0,0 +1,22 @@ +type Arrayable = T[] | T; + +export interface Suite { + /** The relative file path */ + name: string; + /** The absolute file path */ + file: string; +} + +export interface Options { + cwd: string; + require: Arrayable; + ignore: Arrayable; +} + +export interface Argv { + dir: string; + suites: Suite[]; + requires: boolean; +} + +export function parse(dir?: string, pattern?: string|RegExp, opts?: Partial): Promise; diff --git a/_extensions/d2/node_modules/uvu/parse/index.js b/_extensions/d2/node_modules/uvu/parse/index.js new file mode 100644 index 00000000..a02f1118 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/parse/index.js @@ -0,0 +1,50 @@ +// @ts-check +const { readdir, stat } = require('fs'); +const { resolve, join } = require('path'); +const { promisify } = require('util'); + +const ls = promisify(readdir); +const toStat = promisify(stat); +const toRegex = x => new RegExp(x, 'i'); + +async function parse(dir, pattern, opts = {}) { + if (pattern) pattern = toRegex(pattern); + else if (dir) pattern = /(((?:[^\/]*(?:\/|$))*)[\\\/])?\w+\.([mc]js|[jt]sx?)$/; + else pattern = /((\/|^)(tests?|__tests?__)\/.*|\.(tests?|spec)|^\/?tests?)\.([mc]js|[jt]sx?)$/i; + dir = resolve(opts.cwd || '.', dir || '.'); + + let suites = []; + let requires = [].concat(opts.require || []).filter(Boolean); + let ignores = ['^.git', 'node_modules'].concat(opts.ignore || []).map(toRegex); + + requires.forEach(name => { + try { return require(name) } + catch (e) { 
throw new Error(`Cannot find module "${name}"`) } + }); + + // NOTE: Node 8.x support + // @modified lukeed/totalist + await (async function collect(d, p) { + await ls(d).then(files => { + return Promise.all( + files.map(async str => { + let name = join(p, str); + for (let i = ignores.length; i--;) { + if (ignores[i].test(name)) return; + } + + let file = join(d, str); + let stats = await toStat(file); + if (stats.isDirectory()) return collect(file, name); + else if (pattern.test(name)) suites.push({ name, file }); + }) + ); + }); + })(dir, ''); + + suites.sort((a, b) => a.name.localeCompare(b.name)); + + return { dir, suites, requires: requires.length > 0 }; +} + +exports.parse = parse; diff --git a/_extensions/d2/node_modules/uvu/parse/index.mjs b/_extensions/d2/node_modules/uvu/parse/index.mjs new file mode 100644 index 00000000..ae2c9b01 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/parse/index.mjs @@ -0,0 +1,51 @@ +import { readdir, stat } from 'fs'; +import { createRequire } from 'module'; +import { resolve, join } from 'path'; +import { promisify } from 'util'; + +const ls = promisify(readdir); +const toStat = promisify(stat); +const toRegex = x => new RegExp(x, 'i'); + +export async function parse(dir, pattern, opts = {}) { + if (pattern) pattern = toRegex(pattern); + else if (dir) pattern = /(((?:[^\/]*(?:\/|$))*)[\\\/])?\w+\.([mc]js|[jt]sx?)$/; + else pattern = /((\/|^)(tests?|__tests?__)\/.*|\.(tests?|spec)|^\/?tests?)\.([mc]js|[jt]sx?)$/i; + dir = resolve(opts.cwd || '.', dir || '.'); + + let suites = []; + let requires = [].concat(opts.require || []).filter(Boolean); + let ignores = ['^.git', 'node_modules'].concat(opts.ignore || []).map(toRegex); + + if (requires.length) { + let $require = createRequire(import.meta.url); + requires.forEach(name => { + try { return $require(name) } + catch (e) { throw new Error(`Cannot find module "${name}"`) } + }); + } + + // NOTE: Node 8.x support + // @modified lukeed/totalist + await (async function collect(d, p) { + await ls(d).then(files => { + return Promise.all( + files.map(async str => { + let name = join(p, str); + for (let i = ignores.length; i--;) { + if (ignores[i].test(name)) return; + } + + let file = join(d, str); + let stats = await toStat(file); + if (stats.isDirectory()) return collect(file, name); + else if (pattern.test(name)) suites.push({ name, file }); + }) + ); + }); + })(dir, ''); + + suites.sort((a, b) => a.name.localeCompare(b.name)); + + return { dir, suites, requires: requires.length > 0 }; +} diff --git a/_extensions/d2/node_modules/uvu/readme.md b/_extensions/d2/node_modules/uvu/readme.md new file mode 100644 index 00000000..338781a4 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/readme.md @@ -0,0 +1,137 @@ +
+ [logo: uvu]
+
+ uvu is an extremely fast and lightweight test runner for Node.js and the browser
+ Ultimate Velocity, Unleashed
+
+ [screenshot: example with suites]
+ + +## Features + +* Super [lightweight](https://npm.anvaka.com/#/view/2d/uvu) +* Extremely [performant](#benchmarks) +* Individually executable test files +* Supports `async`/`await` tests +* Supports native ES Modules +* Browser-Compatible +* Familiar API + + +## Install + +``` +$ npm install --save-dev uvu +``` + + +## Usage + +> Check out [`/examples`](/examples) for a list of working demos! + +```js +// tests/demo.js +import { test } from 'uvu'; +import * as assert from 'uvu/assert'; + +test('Math.sqrt()', () => { + assert.is(Math.sqrt(4), 2); + assert.is(Math.sqrt(144), 12); + assert.is(Math.sqrt(2), Math.SQRT2); +}); + +test('JSON', () => { + const input = { + foo: 'hello', + bar: 'world' + }; + + const output = JSON.stringify(input); + + assert.snapshot(output, `{"foo":"hello","bar":"world"}`); + assert.equal(JSON.parse(output), input, 'matches original'); +}); + +test.run(); +``` + +Then execute this test file: + +```sh +# via `uvu` cli, for all `/tests/**` files +$ uvu -r esm tests + +# via `node` directly, for file isolation +$ node -r esm tests/demo.js +``` + +> **Note:** The `-r esm` is for legacy Node.js versions. [Learn More](/docs/esm.md) + +> [View the `uvu` CLI documentation](/docs/cli.md) + + +## Assertions + +The [`uvu/assert`](/docs/api.assert.md) module is _completely_ optional. + +In fact, you may use any assertion library, including Node's native [`assert`](https://nodejs.org/api/assert.html) module! This works because `uvu` relies on thrown Errors to detect failures. Implicitly, this also means that any uncaught exceptions and/or unhandled `Promise` rejections will result in a failure, which is what you want! + + +## API + +### Module: `uvu` + +> [View `uvu` API documentation](/docs/api.uvu.md) + +The main entry from which you will import the `test` or `suite` methods. + +### Module: `uvu/assert` + +> [View `uvu/assert` API documentation](/docs/api.assert.md) + +A collection of assertion methods to use within your tests. Please note that: + +* these are browser compatible +* these are _completely_ optional + + +## Benchmarks + +> via the [`/bench`](/bench) directory with Node v10.21.0 + +Below you'll find each test runner with two timing values: + +* the `took ___` value is the total process execution time – from startup to termination +* the parenthesis value (`(___)`) is the self-reported execution time, if known + +Each test runner's `stdout` is printed to the console to verify all assertions pass.
Said output is excluded below for brevity. + +``` +~> "ava" took 594ms ( ??? ) +~> "jest" took 962ms (356 ms) +~> "mocha" took 209ms ( 4 ms) +~> "tape" took 122ms ( ??? ) +~> "uvu" took 72ms ( 1.3ms) +``` + + +## License + +MIT © [Luke Edwards](https://lukeed.com) diff --git a/_extensions/d2/node_modules/uvu/run/index.d.ts b/_extensions/d2/node_modules/uvu/run/index.d.ts new file mode 100644 index 00000000..56c982a3 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/run/index.d.ts @@ -0,0 +1,2 @@ +import type { Suite } from 'uvu/parse'; +export function run(suites: Suite[], options?: { bail: boolean }): Promise; diff --git a/_extensions/d2/node_modules/uvu/run/index.js b/_extensions/d2/node_modules/uvu/run/index.js new file mode 100644 index 00000000..c387dbe0 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/run/index.js @@ -0,0 +1,12 @@ +exports.run = async function (suites, opts={}) { + globalThis.UVU_DEFER = 1; + const uvu = require('uvu'); + + suites.forEach((suite, idx) => { + globalThis.UVU_QUEUE.push([suite.name]); + globalThis.UVU_INDEX = idx; + require(suite.file); + }); + + await uvu.exec(opts.bail); +} diff --git a/_extensions/d2/node_modules/uvu/run/index.mjs b/_extensions/d2/node_modules/uvu/run/index.mjs new file mode 100644 index 00000000..7edaee26 --- /dev/null +++ b/_extensions/d2/node_modules/uvu/run/index.mjs @@ -0,0 +1,13 @@ +export async function run(suites, opts={}) { + globalThis.UVU_DEFER = 1; + const uvu = await import('uvu'); + + let suite, idx=0; + for (suite of suites) { + globalThis.UVU_INDEX = idx++; + globalThis.UVU_QUEUE.push([suite.name]); + await import('file:///' + suite.file); + } + + await uvu.exec(opts.bail); +} diff --git a/_extensions/d2/node_modules/vfile-message/index.d.ts b/_extensions/d2/node_modules/vfile-message/index.d.ts new file mode 100644 index 00000000..17019f02 --- /dev/null +++ b/_extensions/d2/node_modules/vfile-message/index.d.ts @@ -0,0 +1 @@ +export {VFileMessage} from './lib/index.js' diff --git a/_extensions/d2/node_modules/vfile-message/index.js b/_extensions/d2/node_modules/vfile-message/index.js new file mode 100644 index 00000000..17019f02 --- /dev/null +++ b/_extensions/d2/node_modules/vfile-message/index.js @@ -0,0 +1 @@ +export {VFileMessage} from './lib/index.js' diff --git a/_extensions/d2/node_modules/vfile-message/lib/index.d.ts b/_extensions/d2/node_modules/vfile-message/lib/index.d.ts new file mode 100644 index 00000000..f7b61de5 --- /dev/null +++ b/_extensions/d2/node_modules/vfile-message/lib/index.d.ts @@ -0,0 +1,125 @@ +/** + * Message. + */ +export class VFileMessage extends Error { + /** + * Create a message for `reason` at `place` from `origin`. + * + * When an error is passed in as `reason`, the `stack` is copied. + * + * @param {string | Error | VFileMessage} reason + * Reason for message, uses the stack and message of the error if given. + * + * > 👉 **Note**: you should use markdown. + * @param {Node | NodeLike | Position | Point | null | undefined} [place] + * Place in file where the message occurred. + * @param {string | null | undefined} [origin] + * Place in code where the message originates (example: + * `'my-package:my-rule'` or `'my-rule'`). + * @returns + * Instance of `VFileMessage`. + */ + constructor( + reason: string | Error | VFileMessage, + place?: Node | NodeLike | Position | Point | null | undefined, + origin?: string | null | undefined + ) + /** + * Stack of message. 
+ * + * This is used by normal errors to show where something happened in + * programming code, irrelevant for `VFile` messages, + * + * @type {string} + */ + stack: string + /** + * Reason for message. + * + * > 👉 **Note**: you should use markdown. + * + * @type {string} + */ + reason: string + /** + * State of problem. + * + * * `true` — marks associated file as no longer processable (error) + * * `false` — necessitates a (potential) change (warning) + * * `null | undefined` — for things that might not need changing (info) + * + * @type {boolean | null | undefined} + */ + fatal: boolean | null | undefined + /** + * Starting line of error. + * + * @type {number | null} + */ + line: number | null + /** + * Starting column of error. + * + * @type {number | null} + */ + column: number | null + /** + * Full unist position. + * + * @type {Position | null} + */ + position: Position | null + /** + * Namespace of message (example: `'my-package'`). + * + * @type {string | null} + */ + source: string | null + /** + * Category of message (example: `'my-rule'`). + * + * @type {string | null} + */ + ruleId: string | null + /** + * Path of a file (used throughout the `VFile` ecosystem). + * + * @type {string | null} + */ + file: string | null + /** + * Specify the source value that’s being reported, which is deemed + * incorrect. + * + * @type {string | null} + */ + actual: string | null + /** + * Suggest acceptable values that can be used instead of `actual`. + * + * @type {Array | null} + */ + expected: Array | null + /** + * Link to docs for the message. + * + * > 👉 **Note**: this must be an absolute URL that can be passed as `x` + * > to `new URL(x)`. + * + * @type {string | null} + */ + url: string | null + /** + * Long form description of the message (you should use markdown). + * + * @type {string | null} + */ + note: string | null +} +export type Node = import('unist').Node +export type Position = import('unist').Position +export type Point = import('unist').Point +export type NodeLike = object & { + type: string + position?: Position | undefined +} diff --git a/_extensions/d2/node_modules/vfile-message/lib/index.js b/_extensions/d2/node_modules/vfile-message/lib/index.js new file mode 100644 index 00000000..8c8bcc30 --- /dev/null +++ b/_extensions/d2/node_modules/vfile-message/lib/index.js @@ -0,0 +1,225 @@ +/** + * @typedef {import('unist').Node} Node + * @typedef {import('unist').Position} Position + * @typedef {import('unist').Point} Point + * @typedef {object & {type: string, position?: Position | undefined}} NodeLike + */ + +import {stringifyPosition} from 'unist-util-stringify-position' + +/** + * Message. + */ +export class VFileMessage extends Error { + /** + * Create a message for `reason` at `place` from `origin`. + * + * When an error is passed in as `reason`, the `stack` is copied. + * + * @param {string | Error | VFileMessage} reason + * Reason for message, uses the stack and message of the error if given. + * + * > 👉 **Note**: you should use markdown. + * @param {Node | NodeLike | Position | Point | null | undefined} [place] + * Place in file where the message occurred. + * @param {string | null | undefined} [origin] + * Place in code where the message originates (example: + * `'my-package:my-rule'` or `'my-rule'`). + * @returns + * Instance of `VFileMessage`. + */ + // To do: next major: expose `undefined` everywhere instead of `null`. 
+ constructor(reason, place, origin) { + /** @type {[string | null, string | null]} */ + const parts = [null, null] + /** @type {Position} */ + let position = { + // @ts-expect-error: we always follows the structure of `position`. + start: {line: null, column: null}, + // @ts-expect-error: " + end: {line: null, column: null} + } + + super() + + if (typeof place === 'string') { + origin = place + place = undefined + } + + if (typeof origin === 'string') { + const index = origin.indexOf(':') + + if (index === -1) { + parts[1] = origin + } else { + parts[0] = origin.slice(0, index) + parts[1] = origin.slice(index + 1) + } + } + + if (place) { + // Node. + if ('type' in place || 'position' in place) { + if (place.position) { + // To do: next major: deep clone. + // @ts-expect-error: looks like a position. + position = place.position + } + } + // Position. + else if ('start' in place || 'end' in place) { + // @ts-expect-error: looks like a position. + // To do: next major: deep clone. + position = place + } + // Point. + else if ('line' in place || 'column' in place) { + // To do: next major: deep clone. + position.start = place + } + } + + // Fields from `Error`. + /** + * Serialized positional info of error. + * + * On normal errors, this would be something like `ParseError`, buit in + * `VFile` messages we use this space to show where an error happened. + */ + this.name = stringifyPosition(place) || '1:1' + + /** + * Reason for message. + * + * @type {string} + */ + this.message = typeof reason === 'object' ? reason.message : reason + + /** + * Stack of message. + * + * This is used by normal errors to show where something happened in + * programming code, irrelevant for `VFile` messages, + * + * @type {string} + */ + this.stack = '' + + if (typeof reason === 'object' && reason.stack) { + this.stack = reason.stack + } + + /** + * Reason for message. + * + * > 👉 **Note**: you should use markdown. + * + * @type {string} + */ + this.reason = this.message + + /* eslint-disable no-unused-expressions */ + /** + * State of problem. + * + * * `true` — marks associated file as no longer processable (error) + * * `false` — necessitates a (potential) change (warning) + * * `null | undefined` — for things that might not need changing (info) + * + * @type {boolean | null | undefined} + */ + this.fatal + + /** + * Starting line of error. + * + * @type {number | null} + */ + this.line = position.start.line + + /** + * Starting column of error. + * + * @type {number | null} + */ + this.column = position.start.column + + /** + * Full unist position. + * + * @type {Position | null} + */ + this.position = position + + /** + * Namespace of message (example: `'my-package'`). + * + * @type {string | null} + */ + this.source = parts[0] + + /** + * Category of message (example: `'my-rule'`). + * + * @type {string | null} + */ + this.ruleId = parts[1] + + /** + * Path of a file (used throughout the `VFile` ecosystem). + * + * @type {string | null} + */ + this.file + + // The following fields are “well known”. + // Not standard. + // Feel free to add other non-standard fields to your messages. + + /** + * Specify the source value that’s being reported, which is deemed + * incorrect. + * + * @type {string | null} + */ + this.actual + + /** + * Suggest acceptable values that can be used instead of `actual`. + * + * @type {Array | null} + */ + this.expected + + /** + * Link to docs for the message. + * + * > 👉 **Note**: this must be an absolute URL that can be passed as `x` + * > to `new URL(x)`. 
+ * + * @type {string | null} + */ + this.url + + /** + * Long form description of the message (you should use markdown). + * + * @type {string | null} + */ + this.note + /* eslint-enable no-unused-expressions */ + } +} + +VFileMessage.prototype.file = '' +VFileMessage.prototype.name = '' +VFileMessage.prototype.reason = '' +VFileMessage.prototype.message = '' +VFileMessage.prototype.stack = '' +VFileMessage.prototype.fatal = null +VFileMessage.prototype.column = null +VFileMessage.prototype.line = null +VFileMessage.prototype.source = null +VFileMessage.prototype.ruleId = null +VFileMessage.prototype.position = null diff --git a/_extensions/d2/node_modules/vfile-message/license b/_extensions/d2/node_modules/vfile-message/license new file mode 100644 index 00000000..045ffe0e --- /dev/null +++ b/_extensions/d2/node_modules/vfile-message/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2017 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/_extensions/d2/node_modules/vfile-message/package.json b/_extensions/d2/node_modules/vfile-message/package.json new file mode 100644 index 00000000..0a58c323 --- /dev/null +++ b/_extensions/d2/node_modules/vfile-message/package.json @@ -0,0 +1,78 @@ +{ + "name": "vfile-message", + "version": "3.1.4", + "description": "vfile utility to create a virtual message", + "license": "MIT", + "keywords": [ + "vfile", + "vfile-util", + "util", + "utility", + "virtual", + "file", + "message" + ], + "repository": "vfile/vfile-message", + "bugs": "https://github.com/vfile/vfile-message/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "lib/", + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/unist": "^2.0.0", + "unist-util-stringify-position": "^3.0.0" + }, + "devDependencies": { + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.53.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && type-coverage", + "format": "remark . -qfo && prettier . 
-w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true + } +} diff --git a/_extensions/d2/node_modules/vfile-message/readme.md b/_extensions/d2/node_modules/vfile-message/readme.md new file mode 100644 index 00000000..35cb0525 --- /dev/null +++ b/_extensions/d2/node_modules/vfile-message/readme.md @@ -0,0 +1,244 @@ +# vfile-message + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +Create [vfile][] messages. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`VFileMessage(reason[, place][, origin])`](#vfilemessagereason-place-origin) + * [Well-known](#well-known) +* [Types](#types) +* [Compatibility](#compatibility) +* [Contribute](#contribute) +* [License](#license) + +## What is this? + +This package provides a (lint) message format. + +## When should I use this? + +In most cases, you can use `file.message` from `VFile` itself, but in some +cases you might not have a file, and still want to emit warnings or errors, +in which case this can be used directly. + +## Install + +This package is [ESM only][esm]. +In Node.js (version 14.14+ and 16.0+), install with [npm][]: + +```sh +npm install vfile-message +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {VFileMessage} from 'https://esm.sh/vfile-message@3' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {VFileMessage} from 'vfile-message' + +const message = new VFileMessage( + 'Unexpected unknown word `braavo`, did you mean `bravo`?', + {line: 1, column: 8}, + 'spell:typo' +) + +console.log(message) +``` + +Yields: + +```txt +[1:8: Unexpected unknown word `braavo`, did you mean `bravo`?] { + reason: 'Unexpected unknown word `braavo`, did you mean `bravo`?', + line: 1, + column: 8, + source: 'spell', + ruleId: 'typo', + position: {start: {line: 1, column: 8}, end: {line: null, column: null}} +} +``` + +## API + +This package exports the identifier [`VFileMessage`][api-vfile-message]. +There is no default export. + +### `VFileMessage(reason[, place][, origin])` + +Create a message for `reason` at `place` from `origin`. + +When an error is passed in as `reason`, the `stack` is copied. + +###### Parameters + +* `reason` (`string` or `Error`) + — reason for message, uses the stack and message of the error if given +* `place` ([`Node`][node], [`Position`][position], or [`Point`][point], + optional) + — place in file where the message occurred +* `origin` (`string`, optional) + — place in code where the message originates (example: + `'my-package:my-rule'` or `'my-rule'`) + +###### Extends + +[`Error`][error]. + +###### Returns + +Instance of `VFileMessage`. 
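+
+###### Example
+
+A minimal sketch of the signatures above (the `'spell:typo'` origin and the
+word `braavo` are purely illustrative):
+
+```js
+import {VFileMessage} from 'vfile-message'
+
+// A string reason, a point, and a combined origin: the origin is split on
+// `:` into `source` (`'spell'`) and `ruleId` (`'typo'`).
+const typo = new VFileMessage(
+  'Unexpected unknown word `braavo`',
+  {line: 1, column: 8},
+  'spell:typo'
+)
+
+console.log(typo.source, typo.ruleId) // => 'spell' 'typo'
+
+// An `Error` as reason: its `message` and `stack` are copied over.
+const fromError = new VFileMessage(new Error('boom'), undefined, 'my-rule')
+
+console.log(fromError.reason) // => 'boom'
+console.log(fromError.ruleId) // => 'my-rule'
+```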
+ +###### Fields + +* `reason` (`string`) + — reason for message (you should use markdown) +* `fatal` (`boolean | null | undefined`) + — state of problem; `true` marks associated file as no longer processable + (error); `false` necessitates a (potential) change (warning); + `null | undefined` for things that might not need changing (info) +* `line` (`number | null`) + — starting line of error +* `column` (`number | null`) + — starting column of error +* `position` ([`Position | null`][position]) + — full unist position +* `source` (`string | null`, example: `'my-package'`) + — namespace of message +* `ruleId` (`string | null`, example: `'my-rule'`) + — category of message +* `stack` (`string | null`) + — stack of message in code +* `file` (`string | null`) + — path of a file (used throughout the `VFile` ecosystem) + +### Well-known + +It’s OK to store custom data directly on the `VFileMessage`, some of those are +handled by [utilities][util]. +The following fields are documented and typed here. + +###### Fields + +* `actual` (`string | null`) + — specify the source value that’s being reported, which is deemed incorrect +* `expected` (`Array | null`) + — suggest acceptable values that can be used instead of `actual` +* `url` (`string | null`) + — link to docs for the message (this must be an absolute URL that can be + passed as `x` to `new URL(x)`) +* `note` (`string | null`) + — long form description of the message (you should use markdown) + +## Types + +This package is fully typed with [TypeScript][]. +It exports no additional types. + +## Compatibility + +Projects maintained by the unified collective are compatible with all maintained +versions of Node.js. +As of now, that is Node.js 14.14+ and 16.0+. +Our projects sometimes work with older versions, but this is not guaranteed. + +## Contribute + +See [`contributing.md`][contributing] in [`vfile/.github`][health] for ways to +get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. 
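+
+As a supplement to the [Well-known](#well-known) fields above, a hypothetical
+sketch of a rule filling them in (the values and the docs URL are
+illustrative, not real endpoints):
+
+```js
+import {VFileMessage} from 'vfile-message'
+
+const message = new VFileMessage(
+  'Unexpected unknown word `braavo`, did you mean `bravo`?',
+  {line: 1, column: 8},
+  'spell:typo'
+)
+
+// Well-known (optional) fields are set directly on the message:
+message.actual = 'braavo'
+message.expected = ['bravo']
+message.url = 'https://example.com/rules/typo' // Hypothetical docs link.
+message.note = 'Long form description; markdown is allowed here.'
+```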
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/vfile/vfile-message/workflows/main/badge.svg + +[build]: https://github.com/vfile/vfile-message/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/vfile/vfile-message.svg + +[coverage]: https://codecov.io/github/vfile/vfile-message + +[downloads-badge]: https://img.shields.io/npm/dm/vfile-message.svg + +[downloads]: https://www.npmjs.com/package/vfile-message + +[size-badge]: https://img.shields.io/bundlephobia/minzip/vfile-message.svg + +[size]: https://bundlephobia.com/result?p=vfile-message + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/vfile/vfile/discussions + +[npm]: https://docs.npmjs.com/cli/install + +[contributing]: https://github.com/vfile/.github/blob/main/contributing.md + +[support]: https://github.com/vfile/.github/blob/main/support.md + +[health]: https://github.com/vfile/.github + +[coc]: https://github.com/vfile/.github/blob/main/code-of-conduct.md + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[typescript]: https://www.typescriptlang.org + +[license]: license + +[author]: https://wooorm.com + +[error]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Error + +[node]: https://github.com/syntax-tree/unist#node + +[position]: https://github.com/syntax-tree/unist#position + +[point]: https://github.com/syntax-tree/unist#point + +[vfile]: https://github.com/vfile/vfile + +[util]: https://github.com/vfile/vfile#utilities + +[api-vfile-message]: #vfilemessagereason-place-origin diff --git a/_extensions/d2/node_modules/vfile/index.d.ts b/_extensions/d2/node_modules/vfile/index.d.ts new file mode 100644 index 00000000..6b5215b4 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/index.d.ts @@ -0,0 +1,64 @@ +import type {Reporter} from './lib/index.js' + +/** + * This is the same as `Buffer` if node types are included, `never` otherwise. + */ +// eslint-disable-next-line @typescript-eslint/ban-ts-comment, @typescript-eslint/prefer-ts-expect-error +// @ts-ignore It’s important to preserve this ignore statement. This makes sure +// it works both with and without node types. +// eslint-disable-next-line n/prefer-global/buffer +type MaybeBuffer = any extends Buffer ? never : Buffer + +/** + * Contents of the file. + * + * Can either be text or a `Buffer` structure. + */ +// Note: this does not directly use type `Buffer`, because it can also be used +// in a browser context. +// Instead this leverages `Uint8Array` which is the base type for `Buffer`, +// and a native JavaScript construct. +export type Value = string | MaybeBuffer + +/** + * This map registers the type of the `data` key of a `VFile`. + * + * This type can be augmented to register custom `data` types. + * + * @example + * declare module 'vfile' { + * interface DataMap { + * // `file.data.name` is typed as `string` + * name: string + * } + * } + */ + +// eslint-disable-next-line @typescript-eslint/consistent-type-definitions, @typescript-eslint/no-empty-interface +export interface DataMap {} + +/** + * Custom information. 
+ * + * Known attributes can be added to @see {@link DataMap} + */ +export type Data = Record & Partial + +// Deprecated names (w/ prefix): +export type {Data as VFileData, DataMap as VFileDataMap, Value as VFileValue} + +export {VFile} from './lib/index.js' + +export type { + BufferEncoding, + Map, + Compatible, + Options, + Reporter, + ReporterSettings, + // Deprecated names (w/ prefix): + Compatible as VFileCompatible, + Options as VFileOptions, + Reporter as VFileReporter, + ReporterSettings as VFileReporterSettings +} from './lib/index.js' diff --git a/_extensions/d2/node_modules/vfile/index.js b/_extensions/d2/node_modules/vfile/index.js new file mode 100644 index 00000000..73c0d380 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/index.js @@ -0,0 +1 @@ +export {VFile} from './lib/index.js' diff --git a/_extensions/d2/node_modules/vfile/lib/index.d.ts b/_extensions/d2/node_modules/vfile/lib/index.d.ts new file mode 100644 index 00000000..03ab38c3 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/index.d.ts @@ -0,0 +1,355 @@ +export class VFile { + /** + * Create a new virtual file. + * + * `options` is treated as: + * + * * `string` or `Buffer` — `{value: options}` + * * `URL` — `{path: options}` + * * `VFile` — shallow copies its data over to the new file + * * `object` — all fields are shallow copied over to the new file + * + * Path related fields are set in the following order (least specific to + * most specific): `history`, `path`, `basename`, `stem`, `extname`, + * `dirname`. + * + * You cannot set `dirname` or `extname` without setting either `history`, + * `path`, `basename`, or `stem` too. + * + * @param {Compatible | null | undefined} [value] + * File value. + * @returns + * New instance. + */ + constructor(value?: Compatible | null | undefined) + /** + * Place to store custom information (default: `{}`). + * + * It’s OK to store custom data directly on the file but moving it to + * `data` is recommended. + * + * @type {Data} + */ + data: Data + /** + * List of messages associated with the file. + * + * @type {Array} + */ + messages: Array + /** + * List of filepaths the file moved between. + * + * The first is the original path and the last is the current path. + * + * @type {Array} + */ + history: Array + /** + * Base of `path` (default: `process.cwd()` or `'/'` in browsers). + * + * @type {string} + */ + cwd: string + /** + * Raw value. + * + * @type {Value} + */ + value: Value + /** + * Whether a file was saved to disk. + * + * This is used by vfile reporters. + * + * @type {boolean} + */ + stored: boolean + /** + * Custom, non-string, compiled, representation. + * + * This is used by unified to store non-string results. + * One example is when turning markdown into React nodes. + * + * @type {unknown} + */ + result: unknown + /** + * Source map. + * + * This type is equivalent to the `RawSourceMap` type from the `source-map` + * module. + * + * @type {Map | null | undefined} + */ + map: Map | null | undefined + /** + * Set the full path (example: `'~/index.min.js'`). + * + * Cannot be nullified. + * You can set a file URL (a `URL` object with a `file:` protocol) which will + * be turned into a path with `url.fileURLToPath`. + * + * @param {string | URL} path + */ + set path(arg: string) + /** + * Get the full path (example: `'~/index.min.js'`). + * + * @returns {string} + */ + get path(): string + /** + * Set the parent path (example: `'~'`). + * + * Cannot be set if there’s no `path` yet. 
+ */ + set dirname(arg: string | undefined) + /** + * Get the parent path (example: `'~'`). + */ + get dirname(): string | undefined + /** + * Set basename (including extname) (`'index.min.js'`). + * + * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` + * on windows). + * Cannot be nullified (use `file.path = file.dirname` instead). + */ + set basename(arg: string | undefined) + /** + * Get the basename (including extname) (example: `'index.min.js'`). + */ + get basename(): string | undefined + /** + * Set the extname (including dot) (example: `'.js'`). + * + * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` + * on windows). + * Cannot be set if there’s no `path` yet. + */ + set extname(arg: string | undefined) + /** + * Get the extname (including dot) (example: `'.js'`). + */ + get extname(): string | undefined + /** + * Set the stem (basename w/o extname) (example: `'index.min'`). + * + * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` + * on windows). + * Cannot be nullified (use `file.path = file.dirname` instead). + */ + set stem(arg: string | undefined) + /** + * Get the stem (basename w/o extname) (example: `'index.min'`). + */ + get stem(): string | undefined + /** + * Serialize the file. + * + * @param {BufferEncoding | null | undefined} [encoding='utf8'] + * Character encoding to understand `value` as when it’s a `Buffer` + * (default: `'utf8'`). + * @returns {string} + * Serialized file. + */ + toString(encoding?: BufferEncoding | null | undefined): string + /** + * Create a warning message associated with the file. + * + * Its `fatal` is set to `false` and `file` is set to the current file path. + * Its added to `file.messages`. + * + * @param {string | Error | VFileMessage} reason + * Reason for message, uses the stack and message of the error if given. + * @param {Node | NodeLike | Position | Point | null | undefined} [place] + * Place in file where the message occurred. + * @param {string | null | undefined} [origin] + * Place in code where the message originates (example: + * `'my-package:my-rule'` or `'my-rule'`). + * @returns {VFileMessage} + * Message. + */ + message( + reason: string | Error | VFileMessage, + place?: Node | NodeLike | Position | Point | null | undefined, + origin?: string | null | undefined + ): VFileMessage + /** + * Create an info message associated with the file. + * + * Its `fatal` is set to `null` and `file` is set to the current file path. + * Its added to `file.messages`. + * + * @param {string | Error | VFileMessage} reason + * Reason for message, uses the stack and message of the error if given. + * @param {Node | NodeLike | Position | Point | null | undefined} [place] + * Place in file where the message occurred. + * @param {string | null | undefined} [origin] + * Place in code where the message originates (example: + * `'my-package:my-rule'` or `'my-rule'`). + * @returns {VFileMessage} + * Message. + */ + info( + reason: string | Error | VFileMessage, + place?: Node | NodeLike | Position | Point | null | undefined, + origin?: string | null | undefined + ): VFileMessage + /** + * Create a fatal error associated with the file. + * + * Its `fatal` is set to `true` and `file` is set to the current file path. + * Its added to `file.messages`. + * + * > 👉 **Note**: a fatal error means that a file is no longer processable. + * + * @param {string | Error | VFileMessage} reason + * Reason for message, uses the stack and message of the error if given. 
+ * @param {Node | NodeLike | Position | Point | null | undefined} [place] + * Place in file where the message occurred. + * @param {string | null | undefined} [origin] + * Place in code where the message originates (example: + * `'my-package:my-rule'` or `'my-rule'`). + * @returns {never} + * Message. + * @throws {VFileMessage} + * Message. + */ + fail( + reason: string | Error | VFileMessage, + place?: Node | NodeLike | Position | Point | null | undefined, + origin?: string | null | undefined + ): never +} +export type Node = import('unist').Node +export type Position = import('unist').Position +export type Point = import('unist').Point +export type URL = import('./minurl.shared.js').URL +export type Data = import('../index.js').Data +export type Value = import('../index.js').Value +export type NodeLike = Record & { + type: string + position?: Position | undefined +} +/** + * Encodings supported by the buffer class. + * + * This is a copy of the types from Node, copied to prevent Node globals from + * being needed. + * Copied from: + */ +export type BufferEncoding = + | 'ascii' + | 'utf8' + | 'utf-8' + | 'utf16le' + | 'ucs2' + | 'ucs-2' + | 'base64' + | 'base64url' + | 'latin1' + | 'binary' + | 'hex' +/** + * Things that can be passed to the constructor. + */ +export type Compatible = Options | URL | Value | VFile +/** + * Set multiple values. + */ +export type VFileCoreOptions = { + /** + * Set `value`. + */ + value?: Value | null | undefined + /** + * Set `cwd`. + */ + cwd?: string | null | undefined + /** + * Set `history`. + */ + history?: Array | null | undefined + /** + * Set `path`. + */ + path?: URL | string | null | undefined + /** + * Set `basename`. + */ + basename?: string | null | undefined + /** + * Set `stem`. + */ + stem?: string | null | undefined + /** + * Set `extname`. + */ + extname?: string | null | undefined + /** + * Set `dirname`. + */ + dirname?: string | null | undefined + /** + * Set `data`. + */ + data?: Data | null | undefined +} +/** + * Raw source map. + * + * See: + * . + */ +export type Map = { + /** + * Which version of the source map spec this map is following. + */ + version: number + /** + * An array of URLs to the original source files. + */ + sources: Array + /** + * An array of identifiers which can be referenced by individual mappings. + */ + names: Array + /** + * The URL root from which all sources are relative. + */ + sourceRoot?: string | undefined + /** + * An array of contents of the original source files. + */ + sourcesContent?: Array | undefined + /** + * A string of base64 VLQs which contain the actual mappings. + */ + mappings: string + /** + * The generated file this source map is associated with. + */ + file: string +} +/** + * Configuration. + * + * A bunch of keys that will be shallow copied over to the new file. + */ +export type Options = { + [key: string]: unknown +} & VFileCoreOptions +/** + * Configuration for reporters. + */ +export type ReporterSettings = Record +/** + * Type for a reporter. 
+ */ +export type Reporter = ( + files: Array, + options: Settings +) => string +import {VFileMessage} from 'vfile-message' diff --git a/_extensions/d2/node_modules/vfile/lib/index.js b/_extensions/d2/node_modules/vfile/lib/index.js new file mode 100644 index 00000000..e74a58c1 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/index.js @@ -0,0 +1,520 @@ +/** + * @typedef {import('unist').Node} Node + * @typedef {import('unist').Position} Position + * @typedef {import('unist').Point} Point + * @typedef {import('./minurl.shared.js').URL} URL + * @typedef {import('../index.js').Data} Data + * @typedef {import('../index.js').Value} Value + */ + +/** + * @typedef {Record & {type: string, position?: Position | undefined}} NodeLike + * + * @typedef {'ascii' | 'utf8' | 'utf-8' | 'utf16le' | 'ucs2' | 'ucs-2' | 'base64' | 'base64url' | 'latin1' | 'binary' | 'hex'} BufferEncoding + * Encodings supported by the buffer class. + * + * This is a copy of the types from Node, copied to prevent Node globals from + * being needed. + * Copied from: + * + * @typedef {Options | URL | Value | VFile} Compatible + * Things that can be passed to the constructor. + * + * @typedef VFileCoreOptions + * Set multiple values. + * @property {Value | null | undefined} [value] + * Set `value`. + * @property {string | null | undefined} [cwd] + * Set `cwd`. + * @property {Array | null | undefined} [history] + * Set `history`. + * @property {URL | string | null | undefined} [path] + * Set `path`. + * @property {string | null | undefined} [basename] + * Set `basename`. + * @property {string | null | undefined} [stem] + * Set `stem`. + * @property {string | null | undefined} [extname] + * Set `extname`. + * @property {string | null | undefined} [dirname] + * Set `dirname`. + * @property {Data | null | undefined} [data] + * Set `data`. + * + * @typedef Map + * Raw source map. + * + * See: + * . + * @property {number} version + * Which version of the source map spec this map is following. + * @property {Array} sources + * An array of URLs to the original source files. + * @property {Array} names + * An array of identifiers which can be referenced by individual mappings. + * @property {string | undefined} [sourceRoot] + * The URL root from which all sources are relative. + * @property {Array | undefined} [sourcesContent] + * An array of contents of the original source files. + * @property {string} mappings + * A string of base64 VLQs which contain the actual mappings. + * @property {string} file + * The generated file this source map is associated with. + * + * @typedef {{[key: string]: unknown} & VFileCoreOptions} Options + * Configuration. + * + * A bunch of keys that will be shallow copied over to the new file. + * + * @typedef {Record} ReporterSettings + * Configuration for reporters. + */ + +/** + * @template {ReporterSettings} Settings + * Options type. + * @callback Reporter + * Type for a reporter. + * @param {Array} files + * Files to report. + * @param {Settings} options + * Configuration. + * @returns {string} + * Report. + */ + +import bufferLike from 'is-buffer' +import {VFileMessage} from 'vfile-message' +import {path} from './minpath.js' +import {proc} from './minproc.js' +import {urlToPath, isUrl} from './minurl.js' + +/** + * Order of setting (least specific to most), we need this because otherwise + * `{stem: 'a', path: '~/b.js'}` would throw, as a path is needed before a + * stem can be set. 
+ * + * @type {Array<'basename' | 'dirname' | 'extname' | 'history' | 'path' | 'stem'>} + */ +const order = ['history', 'path', 'basename', 'stem', 'extname', 'dirname'] + +export class VFile { + /** + * Create a new virtual file. + * + * `options` is treated as: + * + * * `string` or `Buffer` — `{value: options}` + * * `URL` — `{path: options}` + * * `VFile` — shallow copies its data over to the new file + * * `object` — all fields are shallow copied over to the new file + * + * Path related fields are set in the following order (least specific to + * most specific): `history`, `path`, `basename`, `stem`, `extname`, + * `dirname`. + * + * You cannot set `dirname` or `extname` without setting either `history`, + * `path`, `basename`, or `stem` too. + * + * @param {Compatible | null | undefined} [value] + * File value. + * @returns + * New instance. + */ + constructor(value) { + /** @type {Options | VFile} */ + let options + + if (!value) { + options = {} + } else if (typeof value === 'string' || buffer(value)) { + options = {value} + } else if (isUrl(value)) { + options = {path: value} + } else { + options = value + } + + /** + * Place to store custom information (default: `{}`). + * + * It’s OK to store custom data directly on the file but moving it to + * `data` is recommended. + * + * @type {Data} + */ + this.data = {} + + /** + * List of messages associated with the file. + * + * @type {Array} + */ + this.messages = [] + + /** + * List of filepaths the file moved between. + * + * The first is the original path and the last is the current path. + * + * @type {Array} + */ + this.history = [] + + /** + * Base of `path` (default: `process.cwd()` or `'/'` in browsers). + * + * @type {string} + */ + this.cwd = proc.cwd() + + /* eslint-disable no-unused-expressions */ + /** + * Raw value. + * + * @type {Value} + */ + this.value + + // The below are non-standard, they are “well-known”. + // As in, used in several tools. + + /** + * Whether a file was saved to disk. + * + * This is used by vfile reporters. + * + * @type {boolean} + */ + this.stored + + /** + * Custom, non-string, compiled, representation. + * + * This is used by unified to store non-string results. + * One example is when turning markdown into React nodes. + * + * @type {unknown} + */ + this.result + + /** + * Source map. + * + * This type is equivalent to the `RawSourceMap` type from the `source-map` + * module. + * + * @type {Map | null | undefined} + */ + this.map + /* eslint-enable no-unused-expressions */ + + // Set path related properties in the correct order. + let index = -1 + + while (++index < order.length) { + const prop = order[index] + + // Note: we specifically use `in` instead of `hasOwnProperty` to accept + // `vfile`s too. + if ( + prop in options && + options[prop] !== undefined && + options[prop] !== null + ) { + // @ts-expect-error: TS doesn’t understand basic reality. + this[prop] = prop === 'history' ? [...options[prop]] : options[prop] + } + } + + /** @type {string} */ + let prop + + // Set non-path related properties. + for (prop in options) { + // @ts-expect-error: fine to set other things. + if (!order.includes(prop)) { + // @ts-expect-error: fine to set other things. + this[prop] = options[prop] + } + } + } + + /** + * Get the full path (example: `'~/index.min.js'`). + * + * @returns {string} + */ + get path() { + return this.history[this.history.length - 1] + } + + /** + * Set the full path (example: `'~/index.min.js'`). + * + * Cannot be nullified. 
+ * You can set a file URL (a `URL` object with a `file:` protocol) which will + * be turned into a path with `url.fileURLToPath`. + * + * @param {string | URL} path + */ + set path(path) { + if (isUrl(path)) { + path = urlToPath(path) + } + + assertNonEmpty(path, 'path') + + if (this.path !== path) { + this.history.push(path) + } + } + + /** + * Get the parent path (example: `'~'`). + */ + get dirname() { + return typeof this.path === 'string' ? path.dirname(this.path) : undefined + } + + /** + * Set the parent path (example: `'~'`). + * + * Cannot be set if there’s no `path` yet. + */ + set dirname(dirname) { + assertPath(this.basename, 'dirname') + this.path = path.join(dirname || '', this.basename) + } + + /** + * Get the basename (including extname) (example: `'index.min.js'`). + */ + get basename() { + return typeof this.path === 'string' ? path.basename(this.path) : undefined + } + + /** + * Set basename (including extname) (`'index.min.js'`). + * + * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` + * on windows). + * Cannot be nullified (use `file.path = file.dirname` instead). + */ + set basename(basename) { + assertNonEmpty(basename, 'basename') + assertPart(basename, 'basename') + this.path = path.join(this.dirname || '', basename) + } + + /** + * Get the extname (including dot) (example: `'.js'`). + */ + get extname() { + return typeof this.path === 'string' ? path.extname(this.path) : undefined + } + + /** + * Set the extname (including dot) (example: `'.js'`). + * + * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` + * on windows). + * Cannot be set if there’s no `path` yet. + */ + set extname(extname) { + assertPart(extname, 'extname') + assertPath(this.dirname, 'extname') + + if (extname) { + if (extname.charCodeAt(0) !== 46 /* `.` */) { + throw new Error('`extname` must start with `.`') + } + + if (extname.includes('.', 1)) { + throw new Error('`extname` cannot contain multiple dots') + } + } + + this.path = path.join(this.dirname, this.stem + (extname || '')) + } + + /** + * Get the stem (basename w/o extname) (example: `'index.min'`). + */ + get stem() { + return typeof this.path === 'string' + ? path.basename(this.path, this.extname) + : undefined + } + + /** + * Set the stem (basename w/o extname) (example: `'index.min'`). + * + * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` + * on windows). + * Cannot be nullified (use `file.path = file.dirname` instead). + */ + set stem(stem) { + assertNonEmpty(stem, 'stem') + assertPart(stem, 'stem') + this.path = path.join(this.dirname || '', stem + (this.extname || '')) + } + + /** + * Serialize the file. + * + * @param {BufferEncoding | null | undefined} [encoding='utf8'] + * Character encoding to understand `value` as when it’s a `Buffer` + * (default: `'utf8'`). + * @returns {string} + * Serialized file. + */ + toString(encoding) { + return (this.value || '').toString(encoding || undefined) + } + + /** + * Create a warning message associated with the file. + * + * Its `fatal` is set to `false` and `file` is set to the current file path. + * Its added to `file.messages`. + * + * @param {string | Error | VFileMessage} reason + * Reason for message, uses the stack and message of the error if given. + * @param {Node | NodeLike | Position | Point | null | undefined} [place] + * Place in file where the message occurred. 
+ * @param {string | null | undefined} [origin] + * Place in code where the message originates (example: + * `'my-package:my-rule'` or `'my-rule'`). + * @returns {VFileMessage} + * Message. + */ + message(reason, place, origin) { + const message = new VFileMessage(reason, place, origin) + + if (this.path) { + message.name = this.path + ':' + message.name + message.file = this.path + } + + message.fatal = false + + this.messages.push(message) + + return message + } + + /** + * Create an info message associated with the file. + * + * Its `fatal` is set to `null` and `file` is set to the current file path. + * Its added to `file.messages`. + * + * @param {string | Error | VFileMessage} reason + * Reason for message, uses the stack and message of the error if given. + * @param {Node | NodeLike | Position | Point | null | undefined} [place] + * Place in file where the message occurred. + * @param {string | null | undefined} [origin] + * Place in code where the message originates (example: + * `'my-package:my-rule'` or `'my-rule'`). + * @returns {VFileMessage} + * Message. + */ + info(reason, place, origin) { + const message = this.message(reason, place, origin) + + message.fatal = null + + return message + } + + /** + * Create a fatal error associated with the file. + * + * Its `fatal` is set to `true` and `file` is set to the current file path. + * Its added to `file.messages`. + * + * > 👉 **Note**: a fatal error means that a file is no longer processable. + * + * @param {string | Error | VFileMessage} reason + * Reason for message, uses the stack and message of the error if given. + * @param {Node | NodeLike | Position | Point | null | undefined} [place] + * Place in file where the message occurred. + * @param {string | null | undefined} [origin] + * Place in code where the message originates (example: + * `'my-package:my-rule'` or `'my-rule'`). + * @returns {never} + * Message. + * @throws {VFileMessage} + * Message. + */ + fail(reason, place, origin) { + const message = this.message(reason, place, origin) + + message.fatal = true + + throw message + } +} + +/** + * Assert that `part` is not a path (as in, does not contain `path.sep`). + * + * @param {string | null | undefined} part + * File path part. + * @param {string} name + * Part name. + * @returns {void} + * Nothing. + */ +function assertPart(part, name) { + if (part && part.includes(path.sep)) { + throw new Error( + '`' + name + '` cannot be a path: did not expect `' + path.sep + '`' + ) + } +} + +/** + * Assert that `part` is not empty. + * + * @param {string | undefined} part + * Thing. + * @param {string} name + * Part name. + * @returns {asserts part is string} + * Nothing. + */ +function assertNonEmpty(part, name) { + if (!part) { + throw new Error('`' + name + '` cannot be empty') + } +} + +/** + * Assert `path` exists. + * + * @param {string | undefined} path + * Path. + * @param {string} name + * Dependency name. + * @returns {asserts path is string} + * Nothing. + */ +function assertPath(path, name) { + if (!path) { + throw new Error('Setting `' + name + '` requires `path` to be set too') + } +} + +/** + * Assert `value` is a buffer. + * + * @param {unknown} value + * thing. + * @returns {value is Buffer} + * Whether `value` is a Node.js buffer. 
+ */ +function buffer(value) { + return bufferLike(value) +} diff --git a/_extensions/d2/node_modules/vfile/lib/minpath.browser.d.ts b/_extensions/d2/node_modules/vfile/lib/minpath.browser.d.ts new file mode 100644 index 00000000..a3fa641b --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minpath.browser.d.ts @@ -0,0 +1,46 @@ +export namespace path { + export {basename} + export {dirname} + export {extname} + export {join} + export const sep: string +} +/** + * Get the basename from a path. + * + * @param {string} path + * File path. + * @param {string | undefined} [ext] + * Extension to strip. + * @returns {string} + * Stem or basename. + */ +declare function basename(path: string, ext?: string | undefined): string +/** + * Get the dirname from a path. + * + * @param {string} path + * File path. + * @returns {string} + * File path. + */ +declare function dirname(path: string): string +/** + * Get an extname from a path. + * + * @param {string} path + * File path. + * @returns {string} + * Extname. + */ +declare function extname(path: string): string +/** + * Join segments from a path. + * + * @param {Array} segments + * Path segments. + * @returns {string} + * File path. + */ +declare function join(...segments: Array): string +export {} diff --git a/_extensions/d2/node_modules/vfile/lib/minpath.browser.js b/_extensions/d2/node_modules/vfile/lib/minpath.browser.js new file mode 100644 index 00000000..48c23fb2 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minpath.browser.js @@ -0,0 +1,422 @@ +// A derivative work based on: +// . +// Which is licensed: +// +// MIT License +// +// Copyright (c) 2013 James Halliday +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +// A derivative work based on: +// +// Parts of that are extracted from Node’s internal `path` module: +// . +// Which is licensed: +// +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +export const path = {basename, dirname, extname, join, sep: '/'} + +/* eslint-disable max-depth, complexity */ + +/** + * Get the basename from a path. + * + * @param {string} path + * File path. + * @param {string | undefined} [ext] + * Extension to strip. + * @returns {string} + * Stem or basename. + */ +function basename(path, ext) { + if (ext !== undefined && typeof ext !== 'string') { + throw new TypeError('"ext" argument must be a string') + } + + assertPath(path) + let start = 0 + let end = -1 + let index = path.length + /** @type {boolean | undefined} */ + let seenNonSlash + + if (ext === undefined || ext.length === 0 || ext.length > path.length) { + while (index--) { + if (path.charCodeAt(index) === 47 /* `/` */) { + // If we reached a path separator that was not part of a set of path + // separators at the end of the string, stop now. + if (seenNonSlash) { + start = index + 1 + break + } + } else if (end < 0) { + // We saw the first non-path separator, mark this as the end of our + // path component. + seenNonSlash = true + end = index + 1 + } + } + + return end < 0 ? '' : path.slice(start, end) + } + + if (ext === path) { + return '' + } + + let firstNonSlashEnd = -1 + let extIndex = ext.length - 1 + + while (index--) { + if (path.charCodeAt(index) === 47 /* `/` */) { + // If we reached a path separator that was not part of a set of path + // separators at the end of the string, stop now. + if (seenNonSlash) { + start = index + 1 + break + } + } else { + if (firstNonSlashEnd < 0) { + // We saw the first non-path separator, remember this index in case + // we need it if the extension ends up not matching. + seenNonSlash = true + firstNonSlashEnd = index + 1 + } + + if (extIndex > -1) { + // Try to match the explicit extension. + if (path.charCodeAt(index) === ext.charCodeAt(extIndex--)) { + if (extIndex < 0) { + // We matched the extension, so mark this as the end of our path + // component + end = index + } + } else { + // Extension does not match, so our result is the entire path + // component + extIndex = -1 + end = firstNonSlashEnd + } + } + } + } + + if (start === end) { + end = firstNonSlashEnd + } else if (end < 0) { + end = path.length + } + + return path.slice(start, end) +} + +/** + * Get the dirname from a path. + * + * @param {string} path + * File path. + * @returns {string} + * File path. + */ +function dirname(path) { + assertPath(path) + + if (path.length === 0) { + return '.' + } + + let end = -1 + let index = path.length + /** @type {boolean | undefined} */ + let unmatchedSlash + + // Prefix `--` is important to not run on `0`. + while (--index) { + if (path.charCodeAt(index) === 47 /* `/` */) { + if (unmatchedSlash) { + end = index + break + } + } else if (!unmatchedSlash) { + // We saw the first non-path separator + unmatchedSlash = true + } + } + + return end < 0 + ? path.charCodeAt(0) === 47 /* `/` */ + ? '/' + : '.' + : end === 1 && path.charCodeAt(0) === 47 /* `/` */ + ? '//' + : path.slice(0, end) +} + +/** + * Get an extname from a path. 
+ * + * @param {string} path + * File path. + * @returns {string} + * Extname. + */ +function extname(path) { + assertPath(path) + + let index = path.length + + let end = -1 + let startPart = 0 + let startDot = -1 + // Track the state of characters (if any) we see before our first dot and + // after any path separator we find. + let preDotState = 0 + /** @type {boolean | undefined} */ + let unmatchedSlash + + while (index--) { + const code = path.charCodeAt(index) + + if (code === 47 /* `/` */) { + // If we reached a path separator that was not part of a set of path + // separators at the end of the string, stop now. + if (unmatchedSlash) { + startPart = index + 1 + break + } + + continue + } + + if (end < 0) { + // We saw the first non-path separator, mark this as the end of our + // extension. + unmatchedSlash = true + end = index + 1 + } + + if (code === 46 /* `.` */) { + // If this is our first dot, mark it as the start of our extension. + if (startDot < 0) { + startDot = index + } else if (preDotState !== 1) { + preDotState = 1 + } + } else if (startDot > -1) { + // We saw a non-dot and non-path separator before our dot, so we should + // have a good chance at having a non-empty extension. + preDotState = -1 + } + } + + if ( + startDot < 0 || + end < 0 || + // We saw a non-dot character immediately before the dot. + preDotState === 0 || + // The (right-most) trimmed path component is exactly `..`. + (preDotState === 1 && startDot === end - 1 && startDot === startPart + 1) + ) { + return '' + } + + return path.slice(startDot, end) +} + +/** + * Join segments from a path. + * + * @param {Array} segments + * Path segments. + * @returns {string} + * File path. + */ +function join(...segments) { + let index = -1 + /** @type {string | undefined} */ + let joined + + while (++index < segments.length) { + assertPath(segments[index]) + + if (segments[index]) { + joined = + joined === undefined ? segments[index] : joined + '/' + segments[index] + } + } + + return joined === undefined ? '.' : normalize(joined) +} + +/** + * Normalize a basic file path. + * + * @param {string} path + * File path. + * @returns {string} + * File path. + */ +// Note: `normalize` is not exposed as `path.normalize`, so some code is +// manually removed from it. +function normalize(path) { + assertPath(path) + + const absolute = path.charCodeAt(0) === 47 /* `/` */ + + // Normalize the path according to POSIX rules. + let value = normalizeString(path, !absolute) + + if (value.length === 0 && !absolute) { + value = '.' + } + + if (value.length > 0 && path.charCodeAt(path.length - 1) === 47 /* / */) { + value += '/' + } + + return absolute ? '/' + value : value +} + +/** + * Resolve `.` and `..` elements in a path with directory names. + * + * @param {string} path + * File path. + * @param {boolean} allowAboveRoot + * Whether `..` can move above root. + * @returns {string} + * File path. + */ +function normalizeString(path, allowAboveRoot) { + let result = '' + let lastSegmentLength = 0 + let lastSlash = -1 + let dots = 0 + let index = -1 + /** @type {number | undefined} */ + let code + /** @type {number} */ + let lastSlashIndex + + while (++index <= path.length) { + if (index < path.length) { + code = path.charCodeAt(index) + } else if (code === 47 /* `/` */) { + break + } else { + code = 47 /* `/` */ + } + + if (code === 47 /* `/` */) { + if (lastSlash === index - 1 || dots === 1) { + // Empty. 
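+        // Nothing to add: an empty segment (e.g. `a//b`) or a `.` segment.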
+ } else if (lastSlash !== index - 1 && dots === 2) { + if ( + result.length < 2 || + lastSegmentLength !== 2 || + result.charCodeAt(result.length - 1) !== 46 /* `.` */ || + result.charCodeAt(result.length - 2) !== 46 /* `.` */ + ) { + if (result.length > 2) { + lastSlashIndex = result.lastIndexOf('/') + + if (lastSlashIndex !== result.length - 1) { + if (lastSlashIndex < 0) { + result = '' + lastSegmentLength = 0 + } else { + result = result.slice(0, lastSlashIndex) + lastSegmentLength = result.length - 1 - result.lastIndexOf('/') + } + + lastSlash = index + dots = 0 + continue + } + } else if (result.length > 0) { + result = '' + lastSegmentLength = 0 + lastSlash = index + dots = 0 + continue + } + } + + if (allowAboveRoot) { + result = result.length > 0 ? result + '/..' : '..' + lastSegmentLength = 2 + } + } else { + if (result.length > 0) { + result += '/' + path.slice(lastSlash + 1, index) + } else { + result = path.slice(lastSlash + 1, index) + } + + lastSegmentLength = index - lastSlash - 1 + } + + lastSlash = index + dots = 0 + } else if (code === 46 /* `.` */ && dots > -1) { + dots++ + } else { + dots = -1 + } + } + + return result +} + +/** + * Make sure `path` is a string. + * + * @param {string} path + * File path. + * @returns {asserts path is string} + * Nothing. + */ +function assertPath(path) { + if (typeof path !== 'string') { + throw new TypeError( + 'Path must be a string. Received ' + JSON.stringify(path) + ) + } +} + +/* eslint-enable max-depth, complexity */ diff --git a/_extensions/d2/node_modules/vfile/lib/minpath.d.ts b/_extensions/d2/node_modules/vfile/lib/minpath.d.ts new file mode 100644 index 00000000..80824cc5 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minpath.d.ts @@ -0,0 +1 @@ +export {default as path} from 'path' diff --git a/_extensions/d2/node_modules/vfile/lib/minpath.js b/_extensions/d2/node_modules/vfile/lib/minpath.js new file mode 100644 index 00000000..80824cc5 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minpath.js @@ -0,0 +1 @@ +export {default as path} from 'path' diff --git a/_extensions/d2/node_modules/vfile/lib/minproc.browser.d.ts b/_extensions/d2/node_modules/vfile/lib/minproc.browser.d.ts new file mode 100644 index 00000000..0c24c916 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minproc.browser.d.ts @@ -0,0 +1,5 @@ +export namespace proc { + export {cwd} +} +declare function cwd(): string +export {} diff --git a/_extensions/d2/node_modules/vfile/lib/minproc.browser.js b/_extensions/d2/node_modules/vfile/lib/minproc.browser.js new file mode 100644 index 00000000..e40ec9ef --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minproc.browser.js @@ -0,0 +1,8 @@ +// Somewhat based on: +// . +// But I don’t think one tiny line of code can be copyrighted. 
😅 +export const proc = {cwd} + +function cwd() { + return '/' +} diff --git a/_extensions/d2/node_modules/vfile/lib/minproc.d.ts b/_extensions/d2/node_modules/vfile/lib/minproc.d.ts new file mode 100644 index 00000000..c0d1d270 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minproc.d.ts @@ -0,0 +1 @@ +export {default as proc} from 'process' diff --git a/_extensions/d2/node_modules/vfile/lib/minproc.js b/_extensions/d2/node_modules/vfile/lib/minproc.js new file mode 100644 index 00000000..c0d1d270 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minproc.js @@ -0,0 +1 @@ +export {default as proc} from 'process' diff --git a/_extensions/d2/node_modules/vfile/lib/minurl.browser.d.ts b/_extensions/d2/node_modules/vfile/lib/minurl.browser.d.ts new file mode 100644 index 00000000..48b85a44 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minurl.browser.d.ts @@ -0,0 +1,9 @@ +/// +/** + * @param {string | URL} path + * File URL. + * @returns {string} + * File URL. + */ +export function urlToPath(path: string | URL): string +export {isUrl} from './minurl.shared.js' diff --git a/_extensions/d2/node_modules/vfile/lib/minurl.browser.js b/_extensions/d2/node_modules/vfile/lib/minurl.browser.js new file mode 100644 index 00000000..6438d0db --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minurl.browser.js @@ -0,0 +1,78 @@ +/// + +import {isUrl} from './minurl.shared.js' + +// See: + +/** + * @param {string | URL} path + * File URL. + * @returns {string} + * File URL. + */ +export function urlToPath(path) { + if (typeof path === 'string') { + path = new URL(path) + } else if (!isUrl(path)) { + /** @type {NodeJS.ErrnoException} */ + const error = new TypeError( + 'The "path" argument must be of type string or an instance of URL. Received `' + + path + + '`' + ) + error.code = 'ERR_INVALID_ARG_TYPE' + throw error + } + + if (path.protocol !== 'file:') { + /** @type {NodeJS.ErrnoException} */ + const error = new TypeError('The URL must be of scheme file') + error.code = 'ERR_INVALID_URL_SCHEME' + throw error + } + + return getPathFromURLPosix(path) +} + +/** + * Get a path from a POSIX URL. + * + * @param {URL} url + * URL. + * @returns {string} + * File path. 
+ */ +function getPathFromURLPosix(url) { + if (url.hostname !== '') { + /** @type {NodeJS.ErrnoException} */ + const error = new TypeError( + 'File URL host must be "localhost" or empty on darwin' + ) + error.code = 'ERR_INVALID_FILE_URL_HOST' + throw error + } + + const pathname = url.pathname + let index = -1 + + while (++index < pathname.length) { + if ( + pathname.charCodeAt(index) === 37 /* `%` */ && + pathname.charCodeAt(index + 1) === 50 /* `2` */ + ) { + const third = pathname.charCodeAt(index + 2) + if (third === 70 /* `F` */ || third === 102 /* `f` */) { + /** @type {NodeJS.ErrnoException} */ + const error = new TypeError( + 'File URL path must not include encoded / characters' + ) + error.code = 'ERR_INVALID_FILE_URL_PATH' + throw error + } + } + } + + return decodeURIComponent(pathname) +} + +export {isUrl} from './minurl.shared.js' diff --git a/_extensions/d2/node_modules/vfile/lib/minurl.d.ts b/_extensions/d2/node_modules/vfile/lib/minurl.d.ts new file mode 100644 index 00000000..3c1da376 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minurl.d.ts @@ -0,0 +1,2 @@ +export {fileURLToPath as urlToPath} from 'url' +export {isUrl} from './minurl.shared.js' diff --git a/_extensions/d2/node_modules/vfile/lib/minurl.js b/_extensions/d2/node_modules/vfile/lib/minurl.js new file mode 100644 index 00000000..3c1da376 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minurl.js @@ -0,0 +1,2 @@ +export {fileURLToPath as urlToPath} from 'url' +export {isUrl} from './minurl.shared.js' diff --git a/_extensions/d2/node_modules/vfile/lib/minurl.shared.d.ts b/_extensions/d2/node_modules/vfile/lib/minurl.shared.d.ts new file mode 100644 index 00000000..5c6aff6a --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minurl.shared.d.ts @@ -0,0 +1,42 @@ +/** + * @typedef URL + * @property {string} hash + * @property {string} host + * @property {string} hostname + * @property {string} href + * @property {string} origin + * @property {string} password + * @property {string} pathname + * @property {string} port + * @property {string} protocol + * @property {string} search + * @property {any} searchParams + * @property {string} username + * @property {() => string} toString + * @property {() => string} toJSON + */ +/** + * Check if `fileUrlOrPath` looks like a URL. + * + * @param {unknown} fileUrlOrPath + * File path or URL. + * @returns {fileUrlOrPath is URL} + * Whether it’s a URL. 
+ */ +export function isUrl(fileUrlOrPath: unknown): fileUrlOrPath is URL +export type URL = { + hash: string + host: string + hostname: string + href: string + origin: string + password: string + pathname: string + port: string + protocol: string + search: string + searchParams: any + username: string + toString: () => string + toJSON: () => string +} diff --git a/_extensions/d2/node_modules/vfile/lib/minurl.shared.js b/_extensions/d2/node_modules/vfile/lib/minurl.shared.js new file mode 100644 index 00000000..7954129a --- /dev/null +++ b/_extensions/d2/node_modules/vfile/lib/minurl.shared.js @@ -0,0 +1,37 @@ +/** + * @typedef URL + * @property {string} hash + * @property {string} host + * @property {string} hostname + * @property {string} href + * @property {string} origin + * @property {string} password + * @property {string} pathname + * @property {string} port + * @property {string} protocol + * @property {string} search + * @property {any} searchParams + * @property {string} username + * @property {() => string} toString + * @property {() => string} toJSON + */ + +/** + * Check if `fileUrlOrPath` looks like a URL. + * + * @param {unknown} fileUrlOrPath + * File path or URL. + * @returns {fileUrlOrPath is URL} + * Whether it’s a URL. + */ +// From: +export function isUrl(fileUrlOrPath) { + return ( + fileUrlOrPath !== null && + typeof fileUrlOrPath === 'object' && + // @ts-expect-error: indexable. + fileUrlOrPath.href && + // @ts-expect-error: indexable. + fileUrlOrPath.origin + ) +} diff --git a/_extensions/d2/node_modules/vfile/license b/_extensions/d2/node_modules/vfile/license new file mode 100644 index 00000000..f3722d94 --- /dev/null +++ b/_extensions/d2/node_modules/vfile/license @@ -0,0 +1,21 @@ +(The MIT License) + +Copyright (c) 2015 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/_extensions/d2/node_modules/vfile/package.json b/_extensions/d2/node_modules/vfile/package.json new file mode 100644 index 00000000..e6721aef --- /dev/null +++ b/_extensions/d2/node_modules/vfile/package.json @@ -0,0 +1,111 @@ +{ + "name": "vfile", + "version": "5.3.7", + "description": "Virtual file format for text processing", + "license": "MIT", + "keywords": [ + "vfile", + "virtual", + "file", + "text", + "processing", + "message", + "warning", + "error", + "remark", + "retext", + "rehype" + ], + "repository": "vfile/vfile", + "bugs": "https://github.com/vfile/vfile/issues", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)", + "Brendan Abbott ", + "Denys Dovhan ", + "Kyle Mathews ", + "Shinnosuke Watanabe ", + "Sindre Sorhus " + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "browser": { + "./lib/minpath.js": "./lib/minpath.browser.js", + "./lib/minproc.js": "./lib/minproc.browser.js", + "./lib/minurl.js": "./lib/minurl.browser.js" + }, + "react-native": { + "./lib/minpath.js": "./lib/minpath.browser.js", + "./lib/minproc.js": "./lib/minproc.browser.js", + "./lib/minurl.js": "./lib/minurl.browser.js" + }, + "files": [ + "lib/", + "index.d.ts", + "index.js" + ], + "dependencies": { + "@types/unist": "^2.0.0", + "is-buffer": "^2.0.0", + "unist-util-stringify-position": "^3.0.0", + "vfile-message": "^3.0.0" + }, + "devDependencies": { + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.53.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true, + "rules": { + "unicorn/prefer-code-point": "off", + "unicorn/prefer-node-protocol": "off" + } + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm", + [ + "lint-no-html", + false + ] + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreCatch": true, + "#": "needed `any`s", + "ignoreFiles": [ + "lib/minurl.shared.d.ts" + ] + } +} diff --git a/_extensions/d2/node_modules/vfile/readme.md b/_extensions/d2/node_modules/vfile/readme.md new file mode 100644 index 00000000..f6e873ce --- /dev/null +++ b/_extensions/d2/node_modules/vfile/readme.md @@ -0,0 +1,786 @@ +
+  vfile
+ +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] +[![Sponsors][sponsors-badge]][collective] +[![Backers][backers-badge]][collective] +[![Chat][chat-badge]][chat] + +**vfile** is a small and browser friendly virtual file format that tracks +metadata about files (such as its `path` and `value`) and lint [messages][]. + +## Contents + +* [unified](#unified) +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`VFile(options?)`](#vfileoptions) + * [`file.value`](#filevalue) + * [`file.cwd`](#filecwd) + * [`file.path`](#filepath) + * [`file.dirname`](#filedirname) + * [`file.basename`](#filebasename) + * [`file.extname`](#fileextname) + * [`file.stem`](#filestem) + * [`file.history`](#filehistory) + * [`file.messages`](#filemessages) + * [`file.data`](#filedata) + * [`VFile#toString(encoding?)`](#vfiletostringencoding) + * [`VFile#message(reason[, position][, origin])`](#vfilemessagereason-position-origin) + * [`VFile#info(reason[, position][, origin])`](#vfileinforeason-position-origin) + * [`VFile#fail(reason[, position][, origin])`](#vfilefailreason-position-origin) + * [`BufferEncoding`](#bufferencoding) + * [`Compatible`](#compatible) + * [`Data`](#data) + * [`DataMap`](#datamap) + * [`Map`](#map) + * [`Options`](#options) + * [`Reporter`](#reporter) + * [`ReporterSettings`](#reportersettings) + * [`Value`](#value) + * [Well-known](#well-known) +* [List of utilities](#list-of-utilities) +* [Reporters](#reporters) +* [Types](#types) +* [Compatibility](#compatibility) +* [Contribute](#contribute) +* [Sponsor](#sponsor) +* [Acknowledgments](#acknowledgments) +* [License](#license) + +## unified + +**vfile** is part of the unified collective. + +* for more about us, see [`unifiedjs.com`][site] +* for how the collective is governed, see [`unifiedjs/collective`][governance] +* for updates, see [@unifiedjs][twitter] on Twitter + +## What is this? + +This package provides a virtual file format. +It exposes an API to access the file value, path, metadata about the file, and +specifically supports attaching lint messages and errors to certain places in +these files. + +## When should I use this? + +The virtual file format is useful when dealing with the concept of files in +places where you might not be able to access the file system. +The message API is particularly useful when making things that check files (as +in, linting). + +vfile is made for [unified][], which amongst other things checks files. +However, vfile can be used in other projects that deal with parsing, +transforming, and serializing data, to build linters, compilers, static site +generators, and other build tools. + +This is different from the excellent [`vinyl`][vinyl] in that vfile has a +smaller API, a smaller size, and focuses on messages. + +## Install + +This package is [ESM only][esm]. +In Node.js (version 14.14 and 16.0+), install with [npm][]: + +```sh +npm install vfile +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {VFile} from 'https://esm.sh/vfile@5' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {VFile} from 'vfile' + +const file = new VFile({ + path: '~/example.txt', + value: 'Alpha *braavo* charlie.' 
+}) + +console.log(file.path) // => '~/example.txt' +console.log(file.dirname) // => '~' + +file.extname = '.md' + +console.log(file.basename) // => 'example.md' + +file.basename = 'index.text' + +console.log(file.history) // => ['~/example.txt', '~/example.md', '~/index.text'] + +file.message('Unexpected unknown word `braavo`, did you mean `bravo`?', { + line: 1, + column: 8 +}) + +console.log(file.messages) +``` + +Yields: + +```txt +[ + [~/index.text:1:8: Unexpected unknown word `braavo`, did you mean `bravo`?] { + reason: 'Unexpected unknown word `braavo`, did you mean `bravo`?', + line: 1, + column: 8, + source: null, + ruleId: null, + position: {start: [Object], end: [Object]}, + file: '~/index.text', + fatal: false + } +] +``` + +## API + +This package exports the identifier [`VFile`][api-vfile]. +There is no default export. + +### `VFile(options?)` + +Create a new virtual file. + +`options` is treated as: + +* `string` or [`Buffer`][buffer] — `{value: options}` +* `URL` — `{path: options}` +* `VFile` — shallow copies its data over to the new file +* `object` — all fields are shallow copied over to the new file + +Path related fields are set in the following order (least specific to +most specific): `history`, `path`, `basename`, `stem`, `extname`, +`dirname`. + +You cannot set `dirname` or `extname` without setting either `history`, +`path`, `basename`, or `stem` too. + +###### Parameters + +* `options` ([`Compatible`][api-compatible], optional) + — file value + +###### Returns + +New instance (`VFile`). + +###### Example + +```js +new VFile() +new VFile('console.log("alpha");') +new VFile(Buffer.from('exit 1')) +new VFile({path: path.join('path', 'to', 'readme.md')}) +new VFile({stem: 'readme', extname: '.md', dirname: path.join('path', 'to')}) +new VFile({other: 'properties', are: 'copied', ov: {e: 'r'}}) +``` + +### `file.value` + +Raw value ([`Buffer`][buffer], `string`, `null`). + +### `file.cwd` + +Base of `path` (`string`, default: `process.cwd()` or `'/'` in browsers). + +### `file.path` + +Get or set the full path (`string?`, example: `'~/index.min.js'`). + +Cannot be nullified. +You can set a file URL (a `URL` object with a `file:` protocol) which will be +turned into a path with [`url.fileURLToPath`][file-url-to-path]. + +### `file.dirname` + +Get or set the parent path (`string?`, example: `'~'`). + +Cannot be set if there’s no `path` yet. + +### `file.basename` + +Get or set the basename (including extname) (`string?`, example: `'index.min.js'`). + +Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` on +windows). +Cannot be nullified (use `file.path = file.dirname` instead). + +### `file.extname` + +Get or set the extname (including dot) (`string?`, example: `'.js'`). + +Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` on +windows). +Cannot be set if there’s no `path` yet. + +### `file.stem` + +Get or set the stem (basename w/o extname) (`string?`, example: `'index.min'`). + +Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\'` on +windows). +Cannot be nullified. + +### `file.history` + +List of filepaths the file moved between (`Array`). + +The first is the original path and the last is the current path. + +### `file.messages` + +List of messages associated with the file ([`Array`][message]). + +### `file.data` + +Place to store custom information (`Record`, default: `{}`). + +It’s OK to store custom data directly on the file but moving it to `data` is +recommended. 
+ +### `VFile#toString(encoding?)` + +Serialize the file. + +###### Parameters + +* `encoding` ([`BufferEncoding`][api-buffer-encoding], default: `'utf8'`) + — character encoding to understand `value` as when it’s a + [`Buffer`][buffer] + +###### Returns + +Serialized file (`string`). + +### `VFile#message(reason[, position][, origin])` + +Create a warning message associated with the file. + +Its `fatal` is set to `false` and `file` is set to the current file path. +Its added to `file.messages`. + +###### Parameters + +* `reason` (`string` or `Error`) + — reason for message, uses the stack and message of the error if given +* `place` (`Node`, `Position`, or `Point`, optional) + — place in file where the message occurred +* `origin` (`string?`, optional, example: `'my-package:my-rule'` or `'my-rule'`) + — place in code where the message originates + +###### Returns + +Message ([`VFileMessage`][vmessage]). + +### `VFile#info(reason[, position][, origin])` + +Create an info message associated with the file. + +Its `fatal` is set to `null` and `file` is set to the current file path. +Its added to `file.messages`. + +###### Parameters + +* `reason` (`string` or `Error`) + — reason for message, uses the stack and message of the error if given +* `place` (`Node`, `Position`, or `Point`, optional) + — place in file where the message occurred +* `origin` (`string?`, optional, example: `'my-package:my-rule'` or `'my-rule'`) + — place in code where the message originates + +###### Returns + +Message ([`VFileMessage`][vmessage]). + +### `VFile#fail(reason[, position][, origin])` + +Create a fatal error associated with the file. + +Its `fatal` is set to `true` and `file` is set to the current file path. +Its added to `file.messages`. + +> 👉 **Note**: a fatal error means that a file is no longer processable. + +###### Parameters + +* `reason` (`string` or `Error`) + — reason for message, uses the stack and message of the error if given +* `place` (`Node`, `Position`, or `Point`, optional) + — place in file where the message occurred +* `origin` (`string?`, optional, example: `'my-package:my-rule'` or `'my-rule'`) + — place in code where the message originates + +###### Returns + +Nothing (`never`). + +###### Throws + +Message ([`VFileMessage`][vmessage]). + +### `BufferEncoding` + +[Encodings][encoding] supported by the [buffer][] class (TypeScript type). + +This is a copy of the types from Node. + +###### Type + +```ts +type BufferEncoding = + | 'ascii' + | 'utf8' + | 'utf-8' + | 'utf16le' + | 'ucs2' + | 'ucs-2' + | 'base64' + | 'base64url' + | 'latin1' + | 'binary' + | 'hex' +``` + +### `Compatible` + +Things that can be passed to the constructor (TypeScript type). + +###### Type + +```ts +type Compatible = Options | URL | Value | VFile +``` + +### `Data` + +Custom information (TypeScript type). + +Known attributes can be added to [`DataMap`][api-data-map]. + +###### Type + +```ts +type Data = Record & Partial +``` + +### `DataMap` + +This map registers the type of the `data` key of a `VFile` (TypeScript type). + +This type can be augmented to register custom `data` types. + +###### Type + +```ts +interface DataMap {} +``` + +###### Example + +```ts +declare module 'vfile' { + interface DataMap { + // `file.data.name` is typed as `string` + name: string + } +} +``` + +### `Map` + +Raw source map (TypeScript type). + +See [`source-map`][source-map]. 
+ +###### Fields + +* `version` (`number`) + — which version of the source map spec this map is following +* `sources` (`Array`) + — an array of URLs to the original source files +* `names` (`Array`) + — an array of identifiers which can be referenced by individual mappings +* `sourceRoot` (`string`, optional) + — the URL root from which all sources are relative +* `sourcesContent` (`Array`, optional) + — an array of contents of the original source files +* `mappings` (`string`) + — a string of base64 VLQs which contain the actual mappings +* `file` (`string`) + — the generated file this source map is associated with + +### `Options` + +An object with arbitrary fields and the following known fields (TypeScript +type). + +###### Fields + +* `value` ([`Value`][api-value], optional) + — set `value` +* `cwd` (`string`, optional) + — set `cwd` +* `history` (`Array`, optional) + — set `history` +* `path` (`URL | string`, optional) + — set `path` +* `basename` (`string`, optional) + — set `basename` +* `stem` (`string`, optional) + — set `stem` +* `extname` (`string`, optional) + — set `extname` +* `dirname` (`string`, optional) + — set `dirname` +* `data` ([`Data`][api-data], optional) + — set `data` + +### `Reporter` + +Type for a reporter (TypeScript type). + +###### Type + +```ts +type Reporter = ( + files: Array, + options: Settings +) => string +``` + +### `ReporterSettings` + +Configuration for reporters (TypeScript type). + +###### Type + +```ts +type ReporterSettings = Record +``` + +### `Value` + +Contents of the file (TypeScript type). + +Can either be text or a `Buffer` structure. + +###### Type + +```ts +type Value = string | Buffer +``` + +### Well-known + +The following fields are considered “non-standard”, but they are allowed, and +some utilities use them: + +* `stored` (`boolean`) + — whether a file was saved to disk; this is used by vfile reporters +* `result` (`unknown`) + — custom, non-string, compiled, representation; this is used by unified to + store non-string results; one example is when turning markdown into React + nodes +* `map` ([`Map`][api-map]) + — source map; this type is equivalent to the `RawSourceMap` type from the + `source-map` module + +There are also well-known fields on messages, see +[them in a similar section of +`vfile-message`](https://github.com/vfile/vfile-message#well-known). 
+ + + +## List of utilities + +* [`convert-vinyl-to-vfile`](https://github.com/dustinspecker/convert-vinyl-to-vfile) + — transform from [Vinyl][] +* [`to-vfile`](https://github.com/vfile/to-vfile) + — create a file from a filepath and read and write to the file system +* [`vfile-find-down`](https://github.com/vfile/vfile-find-down) + — find files by searching the file system downwards +* [`vfile-find-up`](https://github.com/vfile/vfile-find-up) + — find files by searching the file system upwards +* [`vfile-glob`](https://github.com/shinnn/vfile-glob) + — find files by glob patterns +* [`vfile-is`](https://github.com/vfile/vfile-is) + — check if a file passes a test +* [`vfile-location`](https://github.com/vfile/vfile-location) + — convert between positional and offset locations +* [`vfile-matter`](https://github.com/vfile/vfile-matter) + — parse the YAML front matter +* [`vfile-message`](https://github.com/vfile/vfile-message) + — create a file message +* [`vfile-messages-to-vscode-diagnostics`](https://github.com/shinnn/vfile-messages-to-vscode-diagnostics) + — transform file messages to VS Code diagnostics +* [`vfile-mkdirp`](https://github.com/vfile/vfile-mkdirp) + — make sure the directory of a file exists on the file system +* [`vfile-rename`](https://github.com/vfile/vfile-rename) + — rename the path parts of a file +* [`vfile-sort`](https://github.com/vfile/vfile-sort) + — sort messages by line/column +* [`vfile-statistics`](https://github.com/vfile/vfile-statistics) + — count messages per category: failures, warnings, etc +* [`vfile-to-eslint`](https://github.com/vfile/vfile-to-eslint) + — convert to ESLint formatter compatible output + +> 👉 **Note**: see [unist][] for projects that work with nodes. + +## Reporters + +* [`vfile-reporter`][reporter] + — create a report +* [`vfile-reporter-json`](https://github.com/vfile/vfile-reporter-json) + — create a JSON report +* [`vfile-reporter-folder-json`](https://github.com/vfile/vfile-reporter-folder-json) + — create a JSON representation of vfiles +* [`vfile-reporter-pretty`](https://github.com/vfile/vfile-reporter-pretty) + — create a pretty report +* [`vfile-reporter-junit`](https://github.com/kellyselden/vfile-reporter-junit) + — create a jUnit report +* [`vfile-reporter-position`](https://github.com/Hocdoc/vfile-reporter-position) + — create a report with content excerpts + +> 👉 **Note**: want to make your own reporter? +> Reporters *must* accept `Array` as their first argument, and return +> `string`. +> Reporters *may* accept other values too, in which case it’s suggested to stick +> to `vfile-reporter`s interface. + +## Types + +This package is fully typed with [TypeScript][]. +It exports the additional types +[`BufferEncoding`][api-buffer-encoding], +[`Compatible`][api-compatible], +[`Data`][api-data], +[`DataMap`][api-data-map], +[`Map`][api-map], +[`Options`][api-options], +[`Reporter`][api-reporter], +[`ReporterSettings`][api-reporter-settings], and +[`Value`][api-value]. + +## Compatibility + +Projects maintained by the unified collective are compatible with all maintained +versions of Node.js. +As of now, that is Node.js 14.14+ and 16.0+. +Our projects sometimes work with older versions, but this is not guaranteed. + +## Contribute + +See [`contributing.md`][contributing] in [`vfile/.github`][health] for ways to +get started. +See [`support.md`][support] for ways to get help. + +This project has a [code of conduct][coc]. +By interacting with this repository, organization, or community you agree to +abide by its terms. 
+ +## Sponsor + +Support this effort and give back by sponsoring on [OpenCollective][collective]! + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Vercel

+ +
+ Motif

+ +
+ HashiCorp

+ +
+ GitBook

+ +
+ Gatsby

+ +
+ Netlify

+ + +
+ Coinbase

+ +
+ ThemeIsle

+ +
+ Expo

+ +
+ Boost Note

+ +
+ Holloway

+ +
+
+ You? +

+
+ +## Acknowledgments + +The initial release of this project was authored by +[**@wooorm**](https://github.com/wooorm). + +Thanks to [**@contra**](https://github.com/contra), +[**@phated**](https://github.com/phated), and others for their work on +[Vinyl][], which was a huge inspiration. + +Thanks to +[**@brendo**](https://github.com/brendo), +[**@shinnn**](https://github.com/shinnn), +[**@KyleAMathews**](https://github.com/KyleAMathews), +[**@sindresorhus**](https://github.com/sindresorhus), and +[**@denysdovhan**](https://github.com/denysdovhan) +for contributing commits since! + +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/vfile/vfile/workflows/main/badge.svg + +[build]: https://github.com/vfile/vfile/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/vfile/vfile.svg + +[coverage]: https://codecov.io/github/vfile/vfile + +[downloads-badge]: https://img.shields.io/npm/dm/vfile.svg + +[downloads]: https://www.npmjs.com/package/vfile + +[size-badge]: https://img.shields.io/bundlephobia/minzip/vfile.svg + +[size]: https://bundlephobia.com/result?p=vfile + +[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg + +[backers-badge]: https://opencollective.com/unified/backers/badge.svg + +[collective]: https://opencollective.com/unified + +[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg + +[chat]: https://github.com/vfile/vfile/discussions + +[npm]: https://docs.npmjs.com/cli/install + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[typescript]: https://www.typescriptlang.org + +[health]: https://github.com/vfile/.github + +[contributing]: https://github.com/vfile/.github/blob/main/contributing.md + +[support]: https://github.com/vfile/.github/blob/main/support.md + +[coc]: https://github.com/vfile/.github/blob/main/code-of-conduct.md + +[license]: license + +[author]: https://wooorm.com + +[unified]: https://github.com/unifiedjs/unified + +[vinyl]: https://github.com/gulpjs/vinyl + +[site]: https://unifiedjs.com + +[twitter]: https://twitter.com/unifiedjs + +[unist]: https://github.com/syntax-tree/unist#list-of-utilities + +[reporter]: https://github.com/vfile/vfile-reporter + +[vmessage]: https://github.com/vfile/vfile-message + +[messages]: #filemessages + +[message]: #vfilemessagereason-position-origin + +[encoding]: https://nodejs.org/api/buffer.html#buffer_buffers_and_character_encodings + +[buffer]: https://nodejs.org/api/buffer.html + +[source-map]: https://github.com/mozilla/source-map/blob/58819f0/source-map.d.ts#L15-L23 + +[file-url-to-path]: https://nodejs.org/api/url.html#url_url_fileurltopath_url + +[governance]: https://github.com/unifiedjs/collective + +[api-vfile]: #vfileoptions + +[api-buffer-encoding]: #bufferencoding + +[api-compatible]: #compatible + +[api-data]: #data + +[api-data-map]: #datamap + +[api-map]: #map + +[api-options]: #options + +[api-reporter]: #reporter + +[api-reporter-settings]: #reportersettings + +[api-value]: #value diff --git a/_extensions/d2/node_modules/which/LICENSE b/_extensions/d2/node_modules/which/LICENSE new file mode 100644 index 00000000..19129e31 --- /dev/null +++ b/_extensions/d2/node_modules/which/LICENSE @@ -0,0 +1,15 @@ +The ISC License + +Copyright (c) Isaac Z. 
Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/_extensions/d2/node_modules/which/README.md b/_extensions/d2/node_modules/which/README.md new file mode 100644 index 00000000..323aaf29 --- /dev/null +++ b/_extensions/d2/node_modules/which/README.md @@ -0,0 +1,51 @@ +# which + +Like the unix `which` utility. + +Finds the first instance of a specified executable in the PATH +environment variable. Does not cache the results, so `hash -r` is not +needed when the PATH changes. + +## USAGE + +```javascript +const which = require('which') + +// async usage +// rejects if not found +const resolved = await which('node') + +// if nothrow option is used, returns null if not found +const resolvedOrNull = await which('node', { nothrow: true }) + +// sync usage +// throws if not found +const resolved = which.sync('node') + +// if nothrow option is used, returns null if not found +const resolvedOrNull = which.sync('node', { nothrow: true }) + +// Pass options to override the PATH and PATHEXT environment vars. +await which('node', { path: someOtherPath, pathExt: somePathExt }) +``` + +## CLI USAGE + +Just like the BSD `which(1)` binary but using `node-which`. + +``` +usage: node-which [-as] program ... +``` + +You can learn more about why the binary is `node-which` and not `which` +[here](https://github.com/npm/node-which/pull/67) + +## OPTIONS + +You may pass an options object as the second argument. + +- `path`: Use instead of the `PATH` environment variable. +- `pathExt`: Use instead of the `PATHEXT` environment variable. +- `all`: Return all matches, instead of just the first one. Note that + this means the function returns an array of strings instead of a + single string. 
diff --git a/_extensions/d2/node_modules/which/bin/which.js b/_extensions/d2/node_modules/which/bin/which.js new file mode 100644 index 00000000..6df16f21 --- /dev/null +++ b/_extensions/d2/node_modules/which/bin/which.js @@ -0,0 +1,52 @@ +#!/usr/bin/env node + +const which = require('../lib') +const argv = process.argv.slice(2) + +const usage = (err) => { + if (err) { + console.error(`which: ${err}`) + } + console.error('usage: which [-as] program ...') + process.exit(1) +} + +if (!argv.length) { + return usage() +} + +let dashdash = false +const [commands, flags] = argv.reduce((acc, arg) => { + if (dashdash || arg === '--') { + dashdash = true + return acc + } + + if (!/^-/.test(arg)) { + acc[0].push(arg) + return acc + } + + for (const flag of arg.slice(1).split('')) { + if (flag === 's') { + acc[1].silent = true + } else if (flag === 'a') { + acc[1].all = true + } else { + usage(`illegal option -- ${flag}`) + } + } + + return acc +}, [[], {}]) + +for (const command of commands) { + try { + const res = which.sync(command, { all: flags.all }) + if (!flags.silent) { + console.log([].concat(res).join('\n')) + } + } catch (err) { + process.exitCode = 1 + } +} diff --git a/_extensions/d2/node_modules/which/lib/index.js b/_extensions/d2/node_modules/which/lib/index.js new file mode 100644 index 00000000..52e9ea62 --- /dev/null +++ b/_extensions/d2/node_modules/which/lib/index.js @@ -0,0 +1,115 @@ +const isexe = require('isexe') +const { join, delimiter, sep, posix } = require('path') + +const isWindows = process.platform === 'win32' + +// used to check for slashed in commands passed in. always checks for the posix +// seperator on all platforms, and checks for the current separator when not on +// a posix platform. don't use the isWindows check for this since that is mocked +// in tests but we still need the code to actually work when called. that is also +// why it is ignored from coverage. +/* istanbul ignore next */ +const rSlash = new RegExp(`[${posix.sep}${sep === posix.sep ? '' : sep}]`.replace(/(\\)/g, '\\$1')) +const rRel = new RegExp(`^\\.${rSlash.source}`) + +const getNotFoundError = (cmd) => + Object.assign(new Error(`not found: ${cmd}`), { code: 'ENOENT' }) + +const getPathInfo = (cmd, { + path: optPath = process.env.PATH, + pathExt: optPathExt = process.env.PATHEXT, + delimiter: optDelimiter = delimiter, +}) => { + // If it has a slash, then we don't bother searching the pathenv. + // just check the file itself, and that's it. + const pathEnv = cmd.match(rSlash) ? [''] : [ + // windows always checks the cwd first + ...(isWindows ? [process.cwd()] : []), + ...(optPath || /* istanbul ignore next: very unusual */ '').split(optDelimiter), + ] + + if (isWindows) { + const pathExtExe = optPathExt || + ['.EXE', '.CMD', '.BAT', '.COM'].join(optDelimiter) + const pathExt = pathExtExe.split(optDelimiter).reduce((acc, item) => { + acc.push(item) + acc.push(item.toLowerCase()) + return acc + }, []) + if (cmd.includes('.') && pathExt[0] !== '') { + pathExt.unshift('') + } + return { pathEnv, pathExt, pathExtExe } + } + + return { pathEnv, pathExt: [''] } +} + +const getPathPart = (raw, cmd) => { + const pathPart = /^".*"$/.test(raw) ? raw.slice(1, -1) : raw + const prefix = !pathPart && rRel.test(cmd) ? 
cmd.slice(0, 2) : '' + return prefix + join(pathPart, cmd) +} + +const which = async (cmd, opt = {}) => { + const { pathEnv, pathExt, pathExtExe } = getPathInfo(cmd, opt) + const found = [] + + for (const envPart of pathEnv) { + const p = getPathPart(envPart, cmd) + + for (const ext of pathExt) { + const withExt = p + ext + const is = await isexe(withExt, { pathExt: pathExtExe, ignoreErrors: true }) + if (is) { + if (!opt.all) { + return withExt + } + found.push(withExt) + } + } + } + + if (opt.all && found.length) { + return found + } + + if (opt.nothrow) { + return null + } + + throw getNotFoundError(cmd) +} + +const whichSync = (cmd, opt = {}) => { + const { pathEnv, pathExt, pathExtExe } = getPathInfo(cmd, opt) + const found = [] + + for (const pathEnvPart of pathEnv) { + const p = getPathPart(pathEnvPart, cmd) + + for (const ext of pathExt) { + const withExt = p + ext + const is = isexe.sync(withExt, { pathExt: pathExtExe, ignoreErrors: true }) + if (is) { + if (!opt.all) { + return withExt + } + found.push(withExt) + } + } + } + + if (opt.all && found.length) { + return found + } + + if (opt.nothrow) { + return null + } + + throw getNotFoundError(cmd) +} + +module.exports = which +which.sync = whichSync diff --git a/_extensions/d2/node_modules/which/package.json b/_extensions/d2/node_modules/which/package.json new file mode 100644 index 00000000..989e01c9 --- /dev/null +++ b/_extensions/d2/node_modules/which/package.json @@ -0,0 +1,51 @@ +{ + "author": "GitHub Inc.", + "name": "which", + "description": "Like which(1) unix command. Find the first instance of an executable in the PATH.", + "version": "3.0.1", + "repository": { + "type": "git", + "url": "https://github.com/npm/node-which.git" + }, + "main": "lib/index.js", + "bin": { + "node-which": "./bin/which.js" + }, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "devDependencies": { + "@npmcli/eslint-config": "^4.0.0", + "@npmcli/template-oss": "4.14.1", + "tap": "^16.3.0" + }, + "scripts": { + "test": "tap", + "lint": "eslint \"**/*.js\"", + "postlint": "template-oss-check", + "template-oss-apply": "template-oss-apply --force", + "lintfix": "npm run lint -- --fix", + "snap": "tap", + "posttest": "npm run lint" + }, + "files": [ + "bin/", + "lib/" + ], + "tap": { + "check-coverage": true, + "nyc-arg": [ + "--exclude", + "tap-snapshots/**" + ] + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + }, + "templateOSS": { + "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.", + "version": "4.14.1", + "publish": "true" + } +} diff --git a/_extensions/d2/node_modules/wrappy/LICENSE b/_extensions/d2/node_modules/wrappy/LICENSE new file mode 100644 index 00000000..19129e31 --- /dev/null +++ b/_extensions/d2/node_modules/wrappy/LICENSE @@ -0,0 +1,15 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/_extensions/d2/node_modules/wrappy/README.md b/_extensions/d2/node_modules/wrappy/README.md new file mode 100644 index 00000000..98eab252 --- /dev/null +++ b/_extensions/d2/node_modules/wrappy/README.md @@ -0,0 +1,36 @@ +# wrappy + +Callback wrapping utility + +## USAGE + +```javascript +var wrappy = require("wrappy") + +// var wrapper = wrappy(wrapperFunction) + +// make sure a cb is called only once +// See also: http://npm.im/once for this specific use case +var once = wrappy(function (cb) { + var called = false + return function () { + if (called) return + called = true + return cb.apply(this, arguments) + } +}) + +function printBoo () { + console.log('boo') +} +// has some rando property +printBoo.iAmBooPrinter = true + +var onlyPrintOnce = once(printBoo) + +onlyPrintOnce() // prints 'boo' +onlyPrintOnce() // does nothing + +// random property is retained! +assert.equal(onlyPrintOnce.iAmBooPrinter, true) +``` diff --git a/_extensions/d2/node_modules/wrappy/package.json b/_extensions/d2/node_modules/wrappy/package.json new file mode 100644 index 00000000..13075204 --- /dev/null +++ b/_extensions/d2/node_modules/wrappy/package.json @@ -0,0 +1,29 @@ +{ + "name": "wrappy", + "version": "1.0.2", + "description": "Callback wrapping utility", + "main": "wrappy.js", + "files": [ + "wrappy.js" + ], + "directories": { + "test": "test" + }, + "dependencies": {}, + "devDependencies": { + "tap": "^2.3.1" + }, + "scripts": { + "test": "tap --coverage test/*.js" + }, + "repository": { + "type": "git", + "url": "https://github.com/npm/wrappy" + }, + "author": "Isaac Z. Schlueter (http://blog.izs.me/)", + "license": "ISC", + "bugs": { + "url": "https://github.com/npm/wrappy/issues" + }, + "homepage": "https://github.com/npm/wrappy" +} diff --git a/_extensions/d2/node_modules/wrappy/wrappy.js b/_extensions/d2/node_modules/wrappy/wrappy.js new file mode 100644 index 00000000..bb7e7d6f --- /dev/null +++ b/_extensions/d2/node_modules/wrappy/wrappy.js @@ -0,0 +1,33 @@ +// Returns a wrapper function that returns a wrapped callback +// The wrapper function should do some stuff, and return a +// presumably different callback function. +// This makes sure that own properties are retained, so that +// decorations and such are not lost along the way. +module.exports = wrappy +function wrappy (fn, cb) { + if (fn && cb) return wrappy(fn)(cb) + + if (typeof fn !== 'function') + throw new TypeError('need wrapper function') + + Object.keys(fn).forEach(function (k) { + wrapper[k] = fn[k] + }) + + return wrapper + + function wrapper() { + var args = new Array(arguments.length) + for (var i = 0; i < args.length; i++) { + args[i] = arguments[i] + } + var ret = fn.apply(this, args) + var cb = args[args.length-1] + if (typeof ret === 'function' && ret !== cb) { + Object.keys(cb).forEach(function (k) { + ret[k] = cb[k] + }) + } + return ret + } +} diff --git a/_extensions/d2/node_modules/zwitch/index.d.ts b/_extensions/d2/node_modules/zwitch/index.d.ts new file mode 100644 index 00000000..cee48bce --- /dev/null +++ b/_extensions/d2/node_modules/zwitch/index.d.ts @@ -0,0 +1,67 @@ +/** + * Handle values based on a field. 
+ * + * @template {InvalidHandler} [Invalid=InvalidHandler] + * @template {UnknownHandler} [Unknown=UnknownHandler] + * @template {Record} [Handlers=Record] + * @param {string} key + * Field to switch on. + * @param {Options} [options] + * Configuration (required). + * @returns {{unknown: Unknown, invalid: Invalid, handlers: Handlers, (...parameters: Parameters): ReturnType, (...parameters: Parameters): ReturnType}} + */ +export function zwitch< + Invalid extends InvalidHandler = InvalidHandler, + Unknown extends UnknownHandler = UnknownHandler, + Handlers extends Record = Record +>( + key: string, + options?: Options | undefined +): { + (...parameters: Parameters): ReturnType< + Handlers[keyof Handlers] + > + (...parameters: Parameters): ReturnType + unknown: Unknown + invalid: Invalid + handlers: Handlers +} +/** + * Handle a value, with a certain ID field set to a certain value. + * The ID field is passed to `zwitch`, and it’s value is this function’s + * place on the `handlers` record. + */ +export type Handler = (...parameters: any[]) => any +/** + * Handle values that do have a certain ID field, but it’s set to a value + * that is not listed in the `handlers` record. + */ +export type UnknownHandler = (value: unknown, ...rest: any[]) => any +/** + * Handle values that do not have a certain ID field. + */ +export type InvalidHandler = ( + value: unknown, + ...rest: any[] +) => void | null | undefined | never +/** + * Configuration (required). + */ +export type Options< + Invalid extends InvalidHandler = InvalidHandler, + Unknown extends UnknownHandler = UnknownHandler, + Handlers extends Record = Record +> = { + /** + * Handler to use for invalid values. + */ + invalid?: Invalid | undefined + /** + * Handler to use for unknown values. + */ + unknown?: Unknown | undefined + /** + * Handlers to use. + */ + handlers?: Handlers | undefined +} diff --git a/_extensions/d2/node_modules/zwitch/index.js b/_extensions/d2/node_modules/zwitch/index.js new file mode 100644 index 00000000..c6bf1b89 --- /dev/null +++ b/_extensions/d2/node_modules/zwitch/index.js @@ -0,0 +1,118 @@ +/** + * @callback Handler + * Handle a value, with a certain ID field set to a certain value. + * The ID field is passed to `zwitch`, and it’s value is this function’s + * place on the `handlers` record. + * @param {...any} parameters + * Arbitrary parameters passed to the zwitch. + * The first will be an object with a certain ID field set to a certain value. + * @returns {any} + * Anything! + */ + +/** + * @callback UnknownHandler + * Handle values that do have a certain ID field, but it’s set to a value + * that is not listed in the `handlers` record. + * @param {unknown} value + * An object with a certain ID field set to an unknown value. + * @param {...any} rest + * Arbitrary parameters passed to the zwitch. + * @returns {any} + * Anything! + */ + +/** + * @callback InvalidHandler + * Handle values that do not have a certain ID field. + * @param {unknown} value + * Any unknown value. + * @param {...any} rest + * Arbitrary parameters passed to the zwitch. + * @returns {void|null|undefined|never} + * This should crash or return nothing. + */ + +/** + * @template {InvalidHandler} [Invalid=InvalidHandler] + * @template {UnknownHandler} [Unknown=UnknownHandler] + * @template {Record} [Handlers=Record] + * @typedef Options + * Configuration (required). + * @property {Invalid} [invalid] + * Handler to use for invalid values. + * @property {Unknown} [unknown] + * Handler to use for unknown values. 
+ * @property {Handlers} [handlers] + * Handlers to use. + */ + +const own = {}.hasOwnProperty + +/** + * Handle values based on a field. + * + * @template {InvalidHandler} [Invalid=InvalidHandler] + * @template {UnknownHandler} [Unknown=UnknownHandler] + * @template {Record} [Handlers=Record] + * @param {string} key + * Field to switch on. + * @param {Options} [options] + * Configuration (required). + * @returns {{unknown: Unknown, invalid: Invalid, handlers: Handlers, (...parameters: Parameters): ReturnType, (...parameters: Parameters): ReturnType}} + */ +export function zwitch(key, options) { + const settings = options || {} + + /** + * Handle one value. + * + * Based on the bound `key`, a respective handler will be called. + * If `value` is not an object, or doesn’t have a `key` property, the special + * “invalid” handler will be called. + * If `value` has an unknown `key`, the special “unknown” handler will be + * called. + * + * All arguments, and the context object, are passed through to the handler, + * and it’s result is returned. + * + * @this {unknown} + * Any context object. + * @param {unknown} [value] + * Any value. + * @param {...unknown} parameters + * Arbitrary parameters passed to the zwitch. + * @property {Handler} invalid + * Handle for values that do not have a certain ID field. + * @property {Handler} unknown + * Handle values that do have a certain ID field, but it’s set to a value + * that is not listed in the `handlers` record. + * @property {Handlers} handlers + * Record of handlers. + * @returns {unknown} + * Anything. + */ + function one(value, ...parameters) { + /** @type {Handler|undefined} */ + let fn = one.invalid + const handlers = one.handlers + + if (value && own.call(value, key)) { + // @ts-expect-error Indexable. + const id = String(value[key]) + // @ts-expect-error Indexable. + fn = own.call(handlers, id) ? handlers[id] : one.unknown + } + + if (fn) { + return fn.call(this, value, ...parameters) + } + } + + one.handlers = settings.handlers || {} + one.invalid = settings.invalid + one.unknown = settings.unknown + + // @ts-expect-error: matches! + return one +} diff --git a/_extensions/d2/node_modules/zwitch/license b/_extensions/d2/node_modules/zwitch/license new file mode 100644 index 00000000..8d8660d3 --- /dev/null +++ b/_extensions/d2/node_modules/zwitch/license @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2016 Titus Wormer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/_extensions/d2/node_modules/zwitch/package.json b/_extensions/d2/node_modules/zwitch/package.json new file mode 100644 index 00000000..058a0c48 --- /dev/null +++ b/_extensions/d2/node_modules/zwitch/package.json @@ -0,0 +1,72 @@ +{ + "name": "zwitch", + "version": "2.0.4", + "description": "Handle values based on a property", + "license": "MIT", + "keywords": [ + "handle", + "switch", + "property" + ], + "repository": "wooorm/zwitch", + "bugs": "https://github.com/wooorm/zwitch/issues", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + }, + "author": "Titus Wormer (https://wooorm.com)", + "contributors": [ + "Titus Wormer (https://wooorm.com)" + ], + "sideEffects": false, + "type": "module", + "main": "index.js", + "types": "index.d.ts", + "files": [ + "index.d.ts", + "index.js" + ], + "devDependencies": { + "@types/node": "^18.0.0", + "c8": "^7.0.0", + "prettier": "^2.0.0", + "remark-cli": "^11.0.0", + "remark-preset-wooorm": "^9.0.0", + "tsd": "^0.24.0", + "type-coverage": "^2.0.0", + "typescript": "^4.0.0", + "xo": "^0.52.0" + }, + "scripts": { + "prepack": "npm run build && npm run format", + "build": "tsc --build --clean && tsc --build && tsd && type-coverage", + "format": "remark . -qfo && prettier . -w --loglevel warn && xo --fix", + "test-api": "node --conditions development test.js", + "test-coverage": "c8 --check-coverage --100 --reporter lcov npm run test-api", + "test": "npm run build && npm run format && npm run test-coverage" + }, + "prettier": { + "tabWidth": 2, + "useTabs": false, + "singleQuote": true, + "bracketSpacing": false, + "semi": false, + "trailingComma": "none" + }, + "xo": { + "prettier": true + }, + "remarkConfig": { + "plugins": [ + "preset-wooorm" + ] + }, + "typeCoverage": { + "atLeast": 100, + "detail": true, + "strict": true, + "ignoreFiles": [ + "index.d.ts" + ] + } +} diff --git a/_extensions/d2/node_modules/zwitch/readme.md b/_extensions/d2/node_modules/zwitch/readme.md new file mode 100644 index 00000000..813dc3d4 --- /dev/null +++ b/_extensions/d2/node_modules/zwitch/readme.md @@ -0,0 +1,226 @@ +# zwitch + +[![Build][build-badge]][build] +[![Coverage][coverage-badge]][coverage] +[![Downloads][downloads-badge]][downloads] +[![Size][size-badge]][size] + +Handle values based on a field. + +## Contents + +* [What is this?](#what-is-this) +* [When should I use this?](#when-should-i-use-this) +* [Install](#install) +* [Use](#use) +* [API](#api) + * [`zwitch(key[, options])`](#zwitchkey-options) + * [`one(value[, rest…])`](#onevalue-rest) + * [`function handler(value[, rest…])`](#function-handlervalue-rest) +* [Types](#types) +* [Compatibility](#compatibility) +* [Related](#related) +* [Contribute](#contribute) +* [Security](#security) +* [License](#license) + +## What is this? + +This is a tiny package that lets you `switch` between some field on objects. + +## When should I use this? + +This package is very useful when mapping one AST to another. +It’s a lot like a `switch` statement on one field, but it’s extensible. + +## Install + +This package is [ESM only][esm]. 
+In Node.js (version 14.14+, 16.0+), install with [npm][]: + +```sh +npm install zwitch +``` + +In Deno with [`esm.sh`][esmsh]: + +```js +import {zwitch} from 'https://esm.sh/zwitch@2' +``` + +In browsers with [`esm.sh`][esmsh]: + +```html + +``` + +## Use + +```js +import {zwitch} from 'zwitch' + +const handle = zwitch('type', {invalid, unknown, handlers: {alpha: handleAlpha}}) + +handle({type: 'alpha'}) + +function handleAlpha() { /* … */ } +``` + +Or, with a `switch` statement: + +```js +const field = 'type' + +function handle(value) { + let fn = invalid + + if (value && typeof value === 'object' && field in value) { + switch (value[field]) { + case 'alpha': + fn = handleAlpha + break + default: + fn = unknown + break + } + } + + return fn.apply(this, arguments) +} + +handle({type: 'alpha'}) + +function handleAlpha() { /* … */ } +function unknown() { /* … */ } +function invalid() { /* … */ } +``` + +## API + +This package exports the identifier `zwitch`. +There is no default export. + +### `zwitch(key[, options])` + +Create a switch, based on a `key` (`string`). + +##### `options` + +Options can be omitted and added later to `one`. + +###### `options.handlers` + +Handlers to use, stored on `one.handlers` (`Record`, +optional). + +###### `options.unknown` + +Handler to use for unknown values, stored on `one.unknown` (`Function`, +optional). + +###### `options.invalid` + +Handler to use for invalid values, stored on `one.invalid` (`Function`, +optional). + +###### Returns + +See [`one`][one] (`Function`). + +### `one(value[, rest…])` + +Handle one value. +Based on the bound `key`, a respective handler will be called. +If `value` is not an object, or doesn’t have a `key` property, the special +“invalid” handler will be called. +If `value` has an unknown `key`, the special “unknown” handler will be called. + +All arguments, and the context object (`this`), are passed through to the +[handler][], and it’s result is returned. + +###### `one.handlers` + +Map of [handler][]s (`Record`). + +###### `one.invalid` + +Special [`handler`][handler] called if a value doesn’t have a `key` property. +If not set, `undefined` is returned for invalid values. + +###### `one.unknown` + +Special [`handler`][handler] called if a value does not have a matching +handler. +If not set, `undefined` is returned for unknown values. + +### `function handler(value[, rest…])` + +Handle one value. + +## Types + +This package is fully typed with [TypeScript][]. +It exports the types `Handler`, `UnknownHandler`, `InvalidHandler`, and +`Options`. + +## Compatibility + +This package is at least compatible with all maintained versions of Node.js. +As of now, that is Node.js 14.14+ and 16.0+. +It also works in Deno and modern browsers. + +## Related + +* [`mapz`](https://github.com/wooorm/mapz) + — functional map + +## Contribute + +Yes please! +See [How to Contribute to Open Source][contribute]. + +## Security + +This package is safe. 
+ +## License + +[MIT][license] © [Titus Wormer][author] + + + +[build-badge]: https://github.com/wooorm/zwitch/workflows/main/badge.svg + +[build]: https://github.com/wooorm/zwitch/actions + +[coverage-badge]: https://img.shields.io/codecov/c/github/wooorm/zwitch.svg + +[coverage]: https://codecov.io/github/wooorm/zwitch + +[downloads-badge]: https://img.shields.io/npm/dm/zwitch.svg + +[downloads]: https://www.npmjs.com/package/zwitch + +[size-badge]: https://img.shields.io/bundlephobia/minzip/zwitch.svg + +[size]: https://bundlephobia.com/result?p=zwitch + +[npm]: https://docs.npmjs.com/cli/install + +[esm]: https://gist.github.com/sindresorhus/a39789f98801d908bbc7ff3ecc99d99c + +[esmsh]: https://esm.sh + +[typescript]: https://www.typescriptlang.org + +[contribute]: https://opensource.guide/how-to-contribute/ + +[license]: license + +[author]: https://wooorm.com + +[one]: #onevalue-rest + +[handler]: #function-handlervalue-rest diff --git a/_quarto.yml b/_quarto.yml index d14f7c36..b3f38f70 100644 --- a/_quarto.yml +++ b/_quarto.yml @@ -57,4 +57,6 @@ format: toc: true +filters: + - d2 diff --git a/contributing/creating_components.qmd b/contributing/creating_components.qmd index 5637423c..caea09a0 100644 --- a/contributing/creating_components.qmd +++ b/contributing/creating_components.qmd @@ -5,39 +5,106 @@ order: 20 # A common file format -One of the core principals of OpenPipelines is to use [MuData](https://mudata.readthedocs.io/) as a common data format troughout the whole pipeline. This means that the input and output for most components and workflows will be a MuData file and converters from and to other common data formats are provided to improve compatibility with downstream applications. Choosing a common data format greatly diminishes the development complexity because it facilitates interfacing between different tools in a pipeline without needing to convert multiple times. +One of the core principles of OpenPipelines is to use [MuData](https://mudata.readthedocs.io/) as a common data format throughout the whole pipeline. See [the concepts page](../fundamentals/concepts.qmd#sec-common-file-format) for more information on how openpipelines uses MuData to store single-cell data. -MuData is a format to store annotated multimodal data. It is derived from the [AnnData](https://anndata.readthedocs.io/en/latest/) format. If you are unfamiliar with AnnData or MuData, it is recommended to read up on AnnData first as it is the unimodal counterpart of MuData. MuData can be roughly described as collection of several AnnData objects (stored as a associative array in the `.mod` attribute). MuData provides a hierarchical way to store the data: +# Component location +As discussed in [the project structure](project_structure.qmd#sec-project-structure), components in the repository are stored within `src`. Additionally, components are grouped into namespaces according to common functionality. An example of such a namespace is the dimensionality reduction namespace (`dimred`), of which the components `pca` and `umap` are members. This means that within `src`, you will find the namespace folders that store the components belonging to these namespaces. +In order to create a new component in OpenPipelines, you will need to create a new folder that will contain the different elements of the component: + +```bash +mkdir src/my_namespace/my_component ``` -MuData -├─ .mod -│ ├─ modality_1 (AnnData Object) -│ ├─ .X -│ ├─ .layers -│ ├─ layer_1 -│ ├─ ... -│ ├─ .var -│ ├─ .obs -│ ├─ .obsm -│ ├─ .varm -│ ├─ .uns -│ ├─ modality_1 (AnnData Object) -├─ .var -├─ .obs -├─ .obms -├─ .varm -├─ .uns + +:::{.callout-tip} +Take a look at the components that are already in `src/`! There might be a component that already does something similar to what you need. +::: + +# The elements of a component +A component consists of one or more scripts that provide the functionality of the component, together with a configuration file containing the component's metadata. The [Viash config](https://viash.io/reference/config/) contains metadata of your component, which script is used to run it, and the required dependencies. An in-depth guide on how to create components is available on the [viash website](https://viash.io/guide/component/create-component.html), but a few specifics and guidelines will be discussed here. + +## The config + +```yaml +functionality: + name: "my_component" + namespace: "my_namespace" + description: "My new custom component" + authors: + - __merge__: ../../authors/my_name.yaml + roles: [ author ] + arguments: + - name: "--output" + type: file + example: "output_file.h5mu" + description: "Location where the output file should be written to." + direction: "output" + resources: + - type: python_script + path: script.py +platforms: + - type: docker + image: python:3.11 + setup: + - type: python + packages: mudata~=0.2.3 + - type: nextflow + directives: + label: [highcpu, midmem] ``` +### Basic information + +Each component should have a name, a namespace, a description and author information defined in the config. Because a single author can contribute to multiple components, the author information is often duplicated across components, which caused issues with author information becoming outdated and hard to maintain. Therefore, it was decided to move author information to `./src/authors`. Each author has a `yaml` file containing the author information, and the viash `__merge__` property is used to merge this information into the viash configs. + +Basic information checklist: + + - Give the component a name + - Add the component to an appropriate namespace + - Add a description + - Add author information + +### Arguments and argument groups + +If your component requires arguments, they should be defined in `arguments` or `argument_groups`. Try to group individual arguments into `argument_groups` when the number of arguments becomes too large (10 or more as a rule of thumb). + +Argument checklist: + + - Add a description and name + - Each argument should have the appropriate type. + - Input and output files should be of type `file` instead of `string` and use the appropriate `direction:` + - If possible: add an example + - If the argument can accept multiple values, add `multiple: true` + - If the possible input for an argument is limited to a certain set of values, use `choices:` + +### (Test)resources + +Resources define files that are required for a component to perform its function. These can be scripts, but also additional files like settings for tools you might require. Defining resources is a necessity because viash needs to know what code to execute, but it also has the added benefit that these resources are automatically made available, regardless of the build environment. For example, resources are automatically mounted within a running docker container.
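To make this concrete, below is a minimal, hypothetical sketch of how `resources` and `test_resources` can be declared in a component config; the file and folder names are purely illustrative:

```yaml
functionality:
  # ...name, namespace, arguments, etc. omitted...
  resources:
    # needed whenever the component runs
    - type: python_script
      path: script.py
  test_resources:
    # only needed when the component is tested, e.g. with `viash test`
    - type: python_script
      path: test.py
    # an illustrative folder with small example input files
    - path: test_data
```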
There is a difference between defining `resources` and `test_resources`. While resources are required for a component to function, `test_resources` only need to be included when testing the component (with, for example, `viash test`) in addition to the regular resources. As shown in the example above, resources are defined using the `resources:` property, which takes a list of files or folders. + +In openpipelines, it was decided not to use a service like `git lfs` to include large resources in the repository. Instead, if large resources are required, there are two possibilities: + * Large resources required for _testing_ are to be uploaded to an `s3` bucket that is synced automatically before running tests (both locally and on github). Please ping a maintainer when you open a PR and ask them to upload the files for you. + * Other large resources that are not needed for testing can be considered as input. This means that an argument of `type: file` needs to be created. The downside of this method is that viash is not able to natively support remote files. + +Resources checklist: + - Script resources are located next to the config and added to the config with the correct type (`python_script`, `r_script`, ...) + - Small resources (<50MB) that are not scripts can also be checked into the repo, next to the config. + +### Build information + +TODO + +## The script file + +TODO + +## Author information + +TODO -* `.mod`: an associative array of AnnData objects. Used in OpenPipelines to store the different modalities (CITE-seq, RNA abundance, ...) -* `.X` and `.layers`: matrices storing the measurements with the columns being the variables measured and the rows being the observations (cells in most cases). -* `.var`: metadata for the variables (i.e. annotation for the columns of `.X` or any matrix in `.layers`). The number of rows in the .var datafame (or the length of each entry in the dictionairy) is equal to the number of columns in the measurement matrices. -* `.obs`: metadata for the observations (i.e. annotation for the rows of `.X` or any matrix in `.layers`). The number of rows in the .obs datafame (or the length of each entry in the dictionairy) is equal to the number of rows in the measurement matrices. -* `varm`: multi-dimensional the variable annotation. A key-dataframe mapping where the number of rows in each dataframe is equal to the number of columns in the measurement matrices. -* `obsm`: multi-dimensional the observation annotation. A key-dataframe mapping where the number of rows in each dataframe is equal to the number of rows in the measurement matrices. -* `.uns`: A mapping where no restrictions are enforced on the dimensions of the data. +# Adding dependencies +TODO # Building components from their source When running or [testing individual components](#running-component-unittests), it is not necessary to execute an extra command to run the build step, `viash test` and `viash run` will build the component on the fly. However, before integrating components into a pipeline, you will need to build the components. More specifically, openpipelines uses Nextflow to combine components into pipelines, so we need to have at least the components build for `nextflow` platform as target.
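For illustration, a single component could be built for the Nextflow platform with the project's `./bin/viash` executable. This is only a sketch: the component path, the output directory and the `config.vsh.yaml` file name are assumptions made for the sake of the example:

```bash
# Build a single component for the nextflow platform (illustrative paths).
./bin/viash build src/dimred/pca/config.vsh.yaml \
  -p nextflow \
  -o target/nextflow/dimred/pca
```

Building components one by one quickly becomes impractical for a whole repository, however.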
The easiest method to build the components is to use: diff --git a/contributing/project_structure.qmd b/contributing/project_structure.qmd index 38492b2d..17f914f9 100644 --- a/contributing/project_structure.qmd +++ b/contributing/project_structure.qmd @@ -3,7 +3,7 @@ title: Project structure order: 10 --- -## Project structure +## Project structure {#sec-project-structure} The root of the repository contains three main folders: 1. `src`, which contains the source code for individual components. @@ -15,7 +15,13 @@ Each subfolder from `src` contains a Viash [namespace](https://viash.io/guides/p As will become apparent later on, Viash not only provides commands to perform operations on individual components, but also on groups of components in a namespace and all components in a project. As a rule of thumb, the basic Viash commands (like `./bin/viash test`) are designated for running commands on individual components, while `ns` commands are (`./bin/viash ns test`) are for namespaces. When cloning a fresh repository, there will be no `target` folder present. This is because the target folder will only be created after components have been build. -## Branching strategy +## Versioning and branching strategy {#sec-versioning} + +OpenPipeline tries to use of [semantic versioning](https://semver.org/) to govern changes between versions. An release of openpipelines uses a version number in the format `MAJOR.MINOR.PATCH`. Currenly, openpipelines is still at major version `0.x.y`, meaning that public-facing breaking changes are possible on `MINOR` releases. These breaking changes will be documented in a dedicated section of the CHANGELOG that is published with each release. A `PATCH` release (i.e. a release where the `MAJOR` and `MINOR` version number stay the same), is used to resolve bugs with the pipeline but should not introduce breaking changes. Keep in mind that patches might introduce behavioral changes that may look breaking but are actually rectifying changes that were inadvertently introduced previously (and were in fact also 'breaking changes'). In this case, a bug can also be released without changing the `MINOR` version, in a `PATH` release. + +Between releases, development progress is tracked on Git branches. A git branch represents a snapshot of a codebase in time, to which changes can be added (i.e. committed). Eventually, all new feature or bugfixes must be reconsiled into a single branch so that a new release can be created. This process is called merging and the process of requesting the merging of two branches is called a pull request. Openpipelines follows the convention that the target branch for all pull requests is the `main` branch. Thus, the `main` branch contains the latest changes for the code and it can be considered the development branch. + +Once a pull request has been approved and merged, Github Actions CI will automatically build all components (creating the `target` directory) and push the result to the `main_build` branch. In essence, the `main_build` branch is a copy of the `main` branch, but also containing the build components. Once it is time to create a openpipelines release, the Github CI release workflow is manually triggered, the components on the `main` branch will be build and tested. Then, the result will be pushed to the `release` branch and the integration tests will be run. If all tests succeeded, a new github tag and release can be created manually from the `release` branch. 
```{mermaid} %%{init: { 'logLevel': 'debug', 'theme': 'default'} } }%% @@ -61,7 +67,3 @@ gitGraph checkout release merge main tag: "0.2" ``` - -The develoment version of openpipelines is available on the `main` branch. This is where all new code should be merged into by creating a pull request. -Once a pull request has been approved and merged, Github Actions CI will automatically build all components (creating the `target` directory) and push the result to the `main_build` branch. -In essence, the `main_build` branch is a copy of the `main` branch, but also containing the build components. Once it is time to create a openpipelines release, the Github CI release workflow is manually triggered, the components on the `main` branch will be build and tested. Then, the result will be pushed to the `release` branch and the integration tests will be run. If all tests succeeded, a new github tag and release can be created manually from the `release` branch. \ No newline at end of file diff --git a/fundamentals/architecture.qmd b/fundamentals/architecture.qmd index 58b1c4b2..743b6914 100644 --- a/fundamentals/architecture.qmd +++ b/fundamentals/architecture.qmd @@ -3,173 +3,887 @@ title: Architecture order: 30 --- -The single-cell workflow is comprised of multiple (standalone) subworkflows. - -:::{.column-screen-inset-shaded} +OpenPipeline is a pipeline for the processing of multimodal single-cell data that scales to a great many of samples. Covering the architecture requires us to explain many angles, including: what the expected inputs and outputs are for each workflow are, how do the workflows relate to each other, and what the state of the data is at each step of the pipeline. Here is an overview of the general steps involved in processing sequencing data into a single integrated object. We will discuss each of the steps further below. ```{mermaid} %%| label: fig-architecture -%%| fig-cap: Overview single cell processing steps in OpenPipeline. Rectangles are data objects, parallelograms are Viash modules or subworkflows. +%%| fig-cap: Overview of the steps included in OpenPipeline for the analysis of single cell multiomics data. flowchart TD - subgraph ingest ["Ingestion"] - direction LR + ingest["Ingestion"] --> split --> unimodalsinglesample["Unimodal Single Sample Processing"] --> concat --> unimodalmultisample["Unimodal Multi Sample Processing"] --> merging --> integation_setup["Integration Setup"] --> integration["Integration"] --> downstreamprocessing["Downstream Processing"] +``` + +1. [Ingestion](#ingestion): Convert raw sequencing data or count tables into MuData data for further processing. +2. [Splitting modalities](#sec-splitting): Creating several MuData objects, one per modality, out of a multimodal input sample. +3. [Unimodal Single Sample Processing](#sec-single-sample): tools applied to each modality of samples individually. Mostly involes the selection of true from false cells. +4. [Unimodal Multi Sample Processing](#sec-multisample-processing): steps that require information from all samples together. Processing is still performed per-modality. +5. [Merging](#sec-merging): Creating one MuData object from several unimodal MuData input files. +6. [Initializing Integration](#sec-initializing-integration): Performs dimensionality reduction and cell type clustering on non-integrated samples. These are popular steps that would otherwise be executed manually or they provide input for downstream integration methods. +7. 
[Integration](#sec-intergration): The alignment of cell types across samples. Can be performed per modality or based on multiple modalities. +8. Downstream Processing: Extra analyses performed on the integrated dataset and conversion to other file formats. + +# Available workflows +The sections laid out below follow a logical grouping of the processing according to the state _of the data_. However, even though this grouping makes sense from a data perspective, it does not mean that a workflow exists for each section. For example, the [processing a single sample](#sec-single-sample) section describes how the processing of a single sample is performed as part of the [full pipeline](#sec-full-pipeline), but there is no `singlesample` workflow that a user can execute. The inverse is also possible: while there exists a [multisample](../components/workflows/multiomics/multisample.qmd) pipeline, its functionality is not limited to what has been described in the section [Multisample Processing](#sec-multisample-processing). This section lists all the available workflows and describes how they relate to the relevant sections below. + +## Ingestion workflows +All of the following workflows from the ingestion namespace are discussed in more detail in the [ingestion](#sec-ingestion) section: + +* [ingestion/bd_rhapsody](../components/workflows/ingestion/bd_rhapsody.qmd) +* [ingestion/cellranger_mapping](../components/workflows/ingestion/cellranger_mapping.qmd) +* [ingestion/cellranger_multi](../components/workflows/ingestion/cellranger_multi.qmd) +* [ingestion/demux](../components/workflows/ingestion/demux.qmd) +* [ingestion/make_reference](../components/workflows/ingestion/make_reference.qmd) + + +## Multiomics workflows +There exists no `singlesample` workflow. However, the `prot_singlesample` and `rna_singlesample` pipelines do exist and they map identically to the functionality described in the [single-sample antibody capture processing](#sec-single-sample-adt) and [single-sample gene expression processing](#sec-single-sample-gex) sections respectively. If you would like to process your samples as described in the [unimodal single sample processing](#sec-single-sample) section, you can execute both workflows in tandem for the two modalities. + +Contrary to the workflows for single sample processing, there does exist a [multiomics/multisample](../components/workflows/multiomics/multisample.qmd) workflow. However, this workflow is not simply a combination of the [multiomics/prot_multisample](../components/workflows/multiomics/prot_multisample.qmd) and [multiomics/rna_multisample](../components/workflows/multiomics/rna_multisample.qmd) workflows. Instead, it combines the [multiomics/prot_multisample](../components/workflows/multiomics/prot_multisample.qmd), [multiomics/rna_multisample](../components/workflows/multiomics/rna_multisample.qmd) and [multiomics/integration/initialize_integration](../components/workflows/multiomics/integration/initialize_integration.qmd) workflows. The purpose of this pipeline is to provide an extra 'entrypoint' into the full pipeline that skips the single-sample processing, allowing reprocessing of samples that have already been processed before. Popular use cases are manually selecting one or more cell types that need to be processed again, or integrating observations from multiple experiments into a single dataset.
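For the first of these use cases, preparing an input for the multisample workflow can be as simple as subsetting an already processed MuData file to the cell types of interest and writing the result back to disk. The snippet below is a minimal sketch of that idea; the file names, the `cell_type` column and the label values are illustrative assumptions and depend on the annotations present in your own data.

```python
# Minimal sketch: select a subset of cell types from a previously processed
# MuData file so it can be fed back into the multisample workflow.
# File names, the "cell_type" column and the labels are assumptions.
import mudata as md

mdata = md.read_h5mu("processed_experiment.h5mu")   # output of an earlier pipeline run (hypothetical path)
annotations = mdata.mod["rna"].obs["cell_type"]     # assumed annotation column on the RNA modality

# Barcodes of the cells to keep, based on the assumed cell type labels.
selected = mdata.mod["rna"].obs_names[annotations.isin(["T cells", "NK cells"])]

subset = mdata[mdata.obs_names.isin(selected), :].copy()  # subset all modalities to the selected cells
subset.write_h5mu("tcell_nk_subset.h5mu")                 # pass this file to the multisample workflow
```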
Keep in mind that concatenation is not included in the multisample pipeline, so when multiple input files are specified they are processed in parallel. If you would like to integrate multiple experiments, you need to first concatenate them in a separate step: + +```{.d2} +file_input_1: "Experiment 1\n(multisample)" { + shape: page +} + +file_input_2: "Experiment 2\n(multisample)" { + shape: page +} + +file_output: "Output" { + shape: page +} + +pipeline_out_1: "Full pipeline \n integration \n..." { + shape: parallelogram + style.stroke-dash: 5 +} + +pipeline_out_2: "Full pipeline \n integration \n..." { + shape: parallelogram + style.stroke-dash: 5 +} + +concat: "Concatenation" { + shape: parallelogram +} + +multisample: "Multisample" { + shape: parallelogram +} + +pipeline_out_1 -> file_input_1 +pipeline_out_2 -> file_input_2 +file_input_1 -> concat <- file_input_2 + +concat -> multisample -> file_output + +style: { + fill: "#FCFCFC" +} +``` + +## The "full" pipeline +The name of this pipeline is a bit of a misnomer, because it does not include all the steps from ingestion to integration. As will be discussed in the [ingestion](#sec-ingestion) section, which ingestion strategy you need is dependent on your technology provider and the chosen platform. For [integration](#sec-integration-methods), there exist many methods and combinations of methods, and you may wish to choose which integration methods are applicable to your use case. As a consequence, these two stages in the analysis of single-cell data need to be executed separately and not as part of a single unified pipeline. All other steps outlined below, on the other hand, are included in the "full" pipeline, which can therefore be summarized in the following figure: + +```{mermaid} +%%| label: fig-full-pipeline +%%| fig-cap: Overview of the steps included in the full pipelines from OpenPipeline. +flowchart TD + split --> unimodalsinglesample["Unimodal Single Sample Processing"] --> concat --> unimodalmultisample["Unimodal Multi Sample Processing"] --> merging --> integation_setup +``` + +## Integration workflows +For each of the integration methods (and their optional combination with other tools), a separate pipeline is defined. More information on each of the pipelines is available in the [integration methods section](#sec-integration-methods). + +* [multiomics/integration/bbknn_leiden](../components/workflows/multiomics/integration/bbknn_leiden.qmd) +* [multiomics/integration/harmony_leiden](../components/workflows/multiomics/integration/harmony_leiden.qmd) +* [multiomics/integration/scanorama_leiden](../components/workflows/multiomics/integration/scanorama_leiden.qmd) +* [multiomics/integration/scvi_leiden](../components/workflows/multiomics/integration/scvi_leiden.qmd) +* [multiomics/integration/totalvi_leiden](../components/workflows/multiomics/integration/totalvi_leiden.qmd) +* [multiomics/integration/initialize_integration](../components/workflows/multiomics/integration/initialize_integration.qmd) + + +# Important dataflow components +While most components included in openpipelines are involved in data analysis, the sole purpose of other components is to facilitate data flow throughout the pipelines. In a workflow, the output of a component is written to disk after it has performed its task and is read back in by the next component. However, the relation between one component and the next is not always a simple one-to-one relationship.
For example: some tools are capable of analyzing a single sample, while others require the input of all samples together. Additionally, not only are the input requirements of tools limiting, performance also needs to be taken into account. Tasks which are performed on each sample separately can be executed in parallel, whereas this is not possible when the same task is performed on a single file that contains the data for all samples. In order to facilitate one-to-many or many-to-one relations between components and to allow parallel execution of tasks, components that are specialized in dataflow were implemented. + +## Splitting modalities {#sec-splitting} +We refer to splitting modalities when a multimodal MuData file is split into several unimodal MuData files. The number of output files is equal to the number of modalities present in the input file. Splitting the modalities works both on MuData files containing data for multiple samples and on single-sample files. + +~~~{.d2 layout=elk} +file_input: MuData Input{ + shape: page +} + +file_input.mudata_input: |||md + ``` + └─.mod + └─ rna + └─ prot + └─ ... + ``` +||| +file_input.style.font-size: 20 + +file_output_rna: MuData Output\nGene Expression{ + shape: page +} +file_output_rna.mudata_output_rna: |||md + ``` + └─.mod + └─ rna + ``` +||| +file_output_rna.style.font-size: 20 + +file_output_prot: MuData Output\nAntibody Capture{ + shape: page +} +file_output_prot.mudata_output_prot: |||md + ``` + └─.mod + └─ prot + ``` +||| +file_output_prot.style.font-size: 20 + +file_output_other: MuData Output\nOther{ + shape: page +} +file_output_other.mudata_output_other: |||md + ``` + └─.mod + └─ ... + ``` +||| +file_output_other.style.font-size: 20 + +split_modalities: Split Modalities { + shape: parallelogram +} + + +file_input -> split_modalities +split_modalities -> file_output_rna +split_modalities -> file_output_prot +split_modalities -> file_output_other + +style: { + fill: "#FCFCFC" +} + +~~~ + +## Merging of modalities {#sec-merging} +Merging refers to combining multiple files, each containing data for one modality, into a single output file that contains all input modalities. It is the inverse operation of splitting the modalities. + +~~~{.d2 layout=elk} +file_output: MuData Output{ + shape: page +} + +file_output.mudata_input: |||md + ``` + └─.mod + └─ rna + └─ prot + └─ ... + ``` +||| +file_output.style.font-size: 20 + +file_input_rna: MuData Input\nGene Expression{ + shape: page +} +file_input_rna.mudata_input_rna: |||md + ``` + └─.mod + └─ rna + ``` +||| +file_input_rna.style.font-size: 20 + +file_input_prot: MuData Input\nAntibody Capture{ + shape: page +} +file_input_prot.mudata_input_prot: |||md + ``` + └─.mod + └─ prot + ``` +||| +file_input_prot.style.font-size: 20 + +file_input_other: "MuData Input\nOther"{ + shape: page +} +file_input_other.mudata_input_other: |||md + ``` + └─.mod + └─ ... + ``` +||| +file_input_other.style.font-size: 20 + +merge_modalities: Merge Modalities { + shape: parallelogram +} + +file_input_rna -> merge_modalities +file_input_prot -> merge_modalities +file_input_other -> merge_modalities +merge_modalities -> file_output + +style: { + fill: "#FCFCFC" +} + +~~~ + +## Concatenation of samples + +Joining the observations of different samples, each stored in their own MuData file, into a single MuData file for all samples together is called sample concatenation. In practice, this operation is performed for each modality separately.
An extra column (with default name `sample_id`) is added to the annotation of the observations (`.obs`) to indicate where each observation originated from. + +~~~{.d2 layout=elk} +file_output: MuData Output{ + shape: page +} + +file_output.mudata_input: |||md + ``` + └─.mod + └─ rna + └─ prot + └─ vdj + + └─.obs + [sample_id] + ``` +||| +file_output.style.font-size: 20 + +file_input_sample1: "MuData Input\nSample 1"{ + shape: page +} +file_input_sample1.mudata_input_sample1: |||md + ``` + └─.mod + └─ rna + └─ vdj + ``` +||| +file_input_sample1.style.font-size: 20 + +file_input_sample2: "MuData Input\nSample 2"{ + shape: page +} +file_input_sample2.mudata_input_prot: |||md + ``` + └─.mod + └─ rna + └─ prot + ``` +||| +file_input_sample2.style.font-size: 20 + +file_input_sample3: "MuData Input\nSample 3"{ + shape: page +} +file_input_sample3.mudata_input_sample3: |||md + ``` + └─.mod + └─ rna + ``` +||| +file_input_sample3.style.font-size: 20 + +concatenate_samples: Concatenation { + shape: parallelogram +} + +file_input_sample1 -> concatenate_samples +file_input_sample2 -> concatenate_samples +file_input_sample3 -> concatenate_samples +concatenate_samples -> file_output + +style: { + fill: "#FCFCFC" +} +~~~ + +Special care must be taken when considering annotations for observations and features while concatenating the samples. Indeed, the data from different samples can contain conflicting information. Openpipeline's `concat` component provides an argument `other_axis_mode` that allows a user to specify what happens when conflicting information is found. The `move` option for this argument is the default behavior. In this mode, each annotation column (from `.obs` and `.var`) is compared across samples. When no conflicts are found or the column is unique for a sample, the column is added output object. When a conflict does occur, all of the columns are gathered from the samples and stored into a dataframe. This dataframe is then stored into `.obsm` for annotations for the observations and `.varm` for feature annotations. This way, a user can have a look at the conflicts and decide what to do with them. + +# Ingestion {#sec-ingestion} + +Ingestion is the conversion of raw sequencing data or count tables into MuData objects that can be used for further processing. 
+ +```{mermaid} +flowchart LR RawCounts1["Raw counts"] - BCL["BCL"] - Demux["Demux"] + BCL[/"BCL"/] + Demux[/"Demultiplexing"/] Fastq["Fastq"] Ref["Reference"] - Mapping["Mapping"] + Mapping[/"Mapping"/] RawDir["Raw out"] - Convert["Convert"] + Convert[/"Convert"/] RawCounts1["Raw counts"] BCL --> Demux --> Fastq Fastq & Ref --> Mapping --> RawDir --> Convert --> RawCounts1 - end - subgraph unimodalsinglesample ["Unimodal Single Sample Processing"] - direction LR - RawCounts2["Raw counts for one modality"] - ProccessedCounts1["Processed\nCounts"] - AmbientRNACorr[/"Ambient RNA\ncorrection"/] - RawCounts2 --> CellFiltering --> DoubletCalling --> AmbientRNACorr --> ProccessedCounts1 - end - subgraph unimodalmultisample ["Unimodal Multi Sample Processing"] - direction LR - FeatureAnnotation[/"Feature annotation"/] - BatchCorrection["Batch correction"] - Normalization["Normalisation"] - FeatureSelection["Feature selection"] - ProccessedCounts2["Processed\nCounts"] - DoubletCalling["Doublet\ncalling"] - CellFiltering["Cell\nfiltering"] - NormalisedOut["Normalised\ncounts"] - - ProccessedCounts2 --> FeatureAnnotation --> BatchCorrection --> Normalization --> FeatureSelection --> NormalisedOut - end - subgraph integration ["Integration"] - direction LR +``` - Normalised["Normalised\ncounts"] - Integrated["Integrated\ndata"] +Demultiplexing refers to a two-step process: - DataIntegration[/"Data integration"/] - DimRed[/"Dimensionality\nreduction"/] - - Normalised --> DataIntegration --> DimRed --> Integrated +(@) The conversion of the binary base call (BCL) files, output by the sequencing machines, into the text-based FASTQ format. +(@) The sorting of reads into different FASTQ files for different libraries pooled together into a single sequencing run. - end - subgraph downstreamprocessing ["Downstream Processing"] - direction LR - Interpretation["Interpretation"] - Conversion["Conversion"] - Dataset["Dataset"] +In order to perform demultiplexing, several tools have been made available in the [demux](../components/workflows/ingestion/demux.qmd) workflow, where the `--demultiplexer` can be used to choose your demultiplexer of choice. Currently, three options have been made available: - Interpretation --> Conversion --> Dataset - end +* [bcl2fastq(2)](../components/modules/demux/bcl2fastq.qmd): a legacy tool from Illumina that has been replaced by BCL Convert +* [BCL Convert](../components/modules/demux/bcl_convert.qmd): general demultiplexing software by Illumina. +* Cellranger's [mkfastq](../components/modules/demux/cellranger_mkfastq.qmd): a wrapper around BCL Convert that provides extra convenience features for the processing of 10X single-cell data. - ingest --> split --> unimodalsinglesample --> concat --> unimodalmultisample --> merging --> integration --> downstreamprocessing - %% Do not change the ID for these boxes because this is linked to custom css - titlebox_ingestion["Convert raw sequencing data\nor count tables into MuData data\nfor further processing."] - titlebox_empty[" "] - titlebox2["Per modality fitering pipelines\nare available to select true\nfrom false cells."] - titlebox_empty2[" "] - titlebox3["..."] - titlebox_empty3[" "] - titlebox4["Performs an integration pipeline for single\ncell data based on a single or multiple modalities."] - titlebox5["Take different dataset annotations and combine\nthem together into a single enriched dataset.\n The idea is to have a diff_muon object,\n i.e. 
a muon object containing the changes \n of the original objectwhere data from the\ndiff_muon will be pushed to the original muon object."] - titlebox_ingestion --> titlebox_empty --> titlebox2 --> titlebox_empty2 --> titlebox3 --> titlebox_empty3--> titlebox4 --> titlebox5 -``` +The alignment of reads from the FASTQ files to an appropriate genome reference is called mapping. The result of the mapping process are tables that count the number of times a read has been mapped to a certain feature and metadata information for the cells (observations) and features. There are different format that can be used to store this information together. Since OpenPipeline uses [MuData](./concepts.qmd#sec-common-file-format) as a common file format throughout its pipelines, a conversion to MuData is included in the mapping pipelines.The choice between workflows for mapping is dependant on your single-cell library provider and technology: -::: +* For DB Genomics libraries, the [BD Rhapsody](../components/workflows/ingestion/bd_rhapsody.qmd) pipeline can be used. +* For 10X based libraries, either [cellranger count](../components/workflows/ingestion/cellranger_mapping.qmd) or [cellranger multi](../components/workflows/ingestion/cellranger_multi.qmd) is provided. For more information about the differences between the two and when to use which mapping software, please consult the [10X genomics website](https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/using/multi#when-to-use-multi). -# Use-cases +## Creating a transcriptomics reference +Mapping reads from the FASTQ files to features requires a reference that needs to be provided to the mapping component. Depending on the usecase, you might even need to provide references specific for the modalities that you are trying to analyze. For gene expression data, the reference is a reference genome, together with its appropriate gene annotation. A genome reference is often indexed in order to improve the mapping speed. Additionally, some mapping frameworks provided by the single-cell technology providers require extra preprocessing of the reference before they can be used with their worklow. OpenPipelines provides a [make_reference](../components/workflows/ingestion/make_reference.qmd) that allows you to create references in many formats which can be used to map your reads to. +# Processing a single sample {#sec-single-sample} +Some processing can (or must) be performed without concatenating samples together. Even when having the choice, adding a tool to the single-sample processing is preferred because multiple samples can be processed in parallel, improving the processing speed. In general, the processing is modality specific, meaning that a multi-modal sample is first split into its unimodal counterparts. As described in the [multi-sample processing](#sec-multisample-processing), the resulting files are _not_ merged back together after the single-sample processing is done. Instead, the output files for all samples are gathered per modality and concatenated to create a multi-sample unimodal object. -:::{.callout-note} -In these use-cases, `Interpretation`, `Conversion` and `Dataset` are omitted because they are the same in every use case. -::: +~~~{.d2 layout=elk} +file_input: Input File{ + shape: page +} -### A single unimodal sample +split: Split Modalities { + shape: parallelogram +} -:::{.column-screen-inset-shaded} -```{mermaid} -%%| label: fig-example1 -%%| fig-cap: "Example of how the concatenation and merges work. 
\nGEX: Gene-expression." +unimodal_gex: "Unimodal Processing\nGene Expression"{ + shape: parallelogram +} -flowchart LR +unimodal_prot: "Unimodal Processing\nAntibody Capture"{ + shape: parallelogram +} - Raw1[Sample 1] --> Split1[/Split\nmodalities/] --> ProcGEX1[/Process GEX\nprofile/] - ProcGEX1 --> ConcatGEX[/Concatenate\nprofiles/] --> ProcGEX[/Process GEX\nprofiles/] - ProcGEX --> Merge[/Merge\nmodalities/] --> Integration[/Integration/] -``` +multisample: "To Multi-sample Processing"{ + shape: parallelogram + style.stroke-dash: 5 +} -::: +file_input -> split +split -> unimodal_gex +split -> unimodal_prot +split -> multisample -### Multiple unimodal samples +unimodal_gex -> multisample { + style.stroke-dash: 3 +} -:::{.column-screen-inset-shaded} +unimodal_prot -> multisample { + style.stroke-dash: 3 +} -```{mermaid} -%%| label: fig-example3 -%%| fig-cap: "Example of how the concatenation and merges work. \nGEX: Gene-expression." +style: { + fill: "#FCFCFC" +} -flowchart LR +~~~ - Raw1[Sample 1] --> Split1[/Split\nmodalities/] --> ProcGEX1[/Process GEX\nprofile/] - Raw2[Sample 2] --> Split2[/Split\nmodalities/] --> ProcGEX2[/Process GEX\nprofile/] - Raw3[Sample 3] --> Split3[/Split\nmodalities/] --> ProcGEX3[/Process GEX\nprofile/] - ProcGEX1 & ProcGEX2 & ProcGEX3 --> ConcatGEX[/Concatenate\nprofiles/] --> ProcGEX[/Process GEX\nprofiles/] - ProcGEX --> Merge[/Merge\nmodalities/] --> Integration[/Integration/] +## Single-sample Gene Expression Processing {#sec-single-sample-gex} +Single-sample gene expression processing involves two steps: removing cells based on count statistics and flagging observations originating from doublets. -``` +The removal of cells based on basic count statistics is split up into two parts: first, cells are flagged for removal by [filter_with_counts](../components/modules/filter/filter_with_counts.qmd). It flags observations based on several thresholds: -::: +* The number of genes that have a least a single count. Both a maximum and minimum number of genes for a cell to be removed can be specified. +* The percentage of read counts that originated from a mitochodrial genes. Cells can be filtered based on both a maximum or minimum fraction of mitochondrial genes. +* The minimum or maximum total number of counts captured per cell. Cells with 0 total counts are always removed. +Flagging cells for removal involved adding a boolean column to the `.obs` dataframe. After the cells have been flagged for removal, the cells are actually filtered using [do_filter](../components/modules/filter/do_filter.qmd), which reads the values in `.obs` and removed the cells labeled `True`. This applies the general phylosophy of "separation of concerns": one component is responsible for labeling the cells, another for removing them. This keeps the codebase for a single component small and its functionality testable. -### A single multimodal sample +The next and final step in the single-sample gene expression processing is doublet detection using [filter_with_scrublet](../components/modules/filter/filter_with_scrublet.qmd). Like `filter_with_counts`, it will not remove cells but add a column to `.obs` (which have the name `filter_with_scrublet` by default). The single-sample GEX workflow will not remove not be removed during the processing (hence no `do_filter`). Howver, you can choose to remove them yourself before doing your analyses by applying a filter with the column in `.obs` yourself. 
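If you want to drop the flagged doublets before continuing with your own analyses, a few lines of Python are enough. The sketch below assumes the default column name and, following the description above, treats `True` as "flagged for removal"; the file names are placeholders, and the exact semantics of the column should be checked against the component's documentation.

```python
# Minimal sketch (assumptions: file names, default column name, True = flagged
# for removal) of manually removing cells flagged by the doublet detection step.
import mudata as md

mdata = md.read_h5mu("sample1.singlesample_output.h5mu")   # hypothetical single-sample output
rna = mdata.mod["rna"]

flagged = rna.obs["filter_with_scrublet"].astype(bool)     # default column added by filter_with_scrublet
doublet_barcodes = rna.obs_names[flagged]

keep = ~mdata.obs_names.isin(doublet_barcodes)             # drop the flagged cells from every modality
mdata = mdata[keep, :].copy()
mdata.write_h5mu("sample1.doublets_removed.h5mu")
```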
+~~~{.d2 layout=elk} +direction: right +file_input: "Input File" { + shape: page +} -:::{.column-screen-inset-shaded} +count_filtering: "Count Filtering" { + shape: parallelogram +} -```{mermaid} -%%| label: fig-example2 -%%| fig-cap: "Example of how the concatenation and merges work. \nGEX: Gene-expression. ADT: Antibody-Derived Tags. RNAV: RNA Velocity." +doublet_removal: "Doublet Removal" { + shape: parallelogram +} -flowchart LR +doublet_removal.filter_with_scrublet { + shape: parallelogram +} - Raw1[Sample 1] --> Split1[/Split\nmodalities/] --> ProcGEX1 & ProcADT1 & ProcRNAV1 - ProcGEX1[/Process GEX\nprofile/] --> ConcatGEX[/Concatenate\nprofiles/] --> ProcGEX[/Process GEX\nprofiles/] - ProcADT1[/Process ADT\nprofile/] --> ConcatADT[/Concatenate\nprofiles/] --> ProcADT[/Process ADT\nprofiles/] - ProcRNAV1[/Process RNAV\nprofile/] --> ConcatRNAV[/Concatenate\nprofiles/] --> ProcRNAV[/Process RNAV\nprofiles/] - ProcGEX & ProcADT & ProcRNAV--> Merge[/Merge\nmodalities/] --> Integration[/Integration/] +file_output: "Output File" { + shape: page +} -``` +count_filtering.filter_with_counts { + shape: parallelogram +} -::: +count_filtering.do_filter { + shape: parallelogram +} + +file_input -> count_filtering +count_filtering -> doublet_removal +doublet_removal -> file_output + + +style: { + fill: "#FCFCFC" +} +~~~ + +## Single-sample Antibody Capture Processing {#sec-single-sample-adt} +The process of filtering antibody capture data is similar to the filtering in [the single-sample gene-expression processing](#sec-single-sample-gex), but without doublet detection. In some particular cases you can use your ADT data to perform doublet detection using for example cell-type maskers. More information can be found in [the single-cell best practices book](https://www.sc-best-practices.org/surface_protein/doublet_detection.html). + +~~~{.d2 layout=elk} +direction: right +file_input: "Input File" { + shape: page +} + +count_filtering: "Count Filtering" { + shape: parallelogram +} + +file_output: "Output File" { + shape: page +} + +count_filtering.filter_with_counts { + shape: parallelogram +} + +count_filtering.do_filter { + shape: parallelogram +} + +file_input -> count_filtering +count_filtering -> file_output + + +style: { + fill: "#FCFCFC" +} + +~~~ + +# Multisample Processing {#sec-multisample-processing} +After the processing of individual samples has been concluded, samples can be concatenated for further processing. Like with the single-sample processing the multisample processing is not performed on multimodal objects, but each modality separately in order to tailor for the specific modality in question. This means that the result from the singlesample processing is merged together per-modality to create unimodal multisample objects. After processing each modality, all of the modalities can finally be merged and a single object is created that is ready for the integration. 
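Expressed in terms of the underlying data structures rather than the actual openpipelines components, the dataflow described above (and shown in the diagram that follows) roughly corresponds to the sketch below. The file names and the `sample_id` label column are assumptions for illustration only.

```python
# Rough illustration (not the actual openpipelines components): concatenate the
# single-sample results per modality, then merge the modalities into one object.
# File names and the "sample_id" label column are assumptions.
import anndata as ad
import mudata as md

samples = {
    "sample_1": md.read_h5mu("sample_1.singlesample.h5mu"),  # hypothetical single-sample outputs
    "sample_2": md.read_h5mu("sample_2.singlesample.h5mu"),
}

# Concatenate each modality across samples, recording the origin in .obs["sample_id"].
rna = ad.concat({name: s.mod["rna"] for name, s in samples.items()}, label="sample_id")
prot = ad.concat({name: s.mod["prot"] for name, s in samples.items()}, label="sample_id")

# ... the per-modality multisample processing would happen here ...

# Merge the unimodal multi-sample objects into a single multimodal object.
multisample = md.MuData({"rna": rna, "prot": prot})
multisample.write_h5mu("multisample.h5mu")
```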
+ +~~~{.d2 layout=elk} +direction: down + + +input_gex_2: "Processed Sample 2\nGene Expression" { + shape: page +} +input_adt_2: "Processed Sample 1\nAntibody Capture" { + shape: page +} + +input_gex_1: "Processed Sample 1\nGene Expression" { + shape: page +} +input_adt_1: "Processed Sample 2\nAntibody Capture" { + shape: page +} + +input_other_1: "Processed Sample 1\nOther Modality" { + shape: page +} +input_other_2: "Processed Sample 2\nOther Modality" { + shape: page +} + + +concat_gex: "Concatenate\nGene Expression" { + shape: parallelogram +} + +concat_adt: "Concatenate\nAntibody Capture" { + shape: parallelogram +} + +concat_other: "Concatenate\nOther Modality" { + shape: parallelogram +} + +multisample_gex: "Multisample Processing\nGene Expression" { + shape: parallelogram +} + +multisample_adt: "Multisample Processing\nAntibody Capture" { + shape: parallelogram +} +merge { + shape: parallelogram +} +integration: "To Integration"{ + shape: parallelogram + style.stroke-dash: 5 +} +input_gex_1 -> concat_gex: "unimodal\nsingle-sample" +input_gex_2 -> concat_gex: "unimodal\nsingle-sample" +input_adt_1 -> concat_adt: "unimodal\nsingle-sample" +input_adt_2 -> concat_adt: "unimodal\nsingle-sample" +input_other_1 -> concat_other: "unimodal\nsingle-sample" +input_other_2 -> concat_other: "unimodal\nsingle-sample" +concat_gex -> multisample_gex: unimodal multi-sample +concat_adt -> multisample_adt: unimodal multi-sample +concat_other -> merge: unimodal multi-sample +multisample_gex -> merge +multisample_adt -> merge +merge -> integration: "multimodal multisample" { + style.stroke-dash: 3 +} +style: { + fill: "#FCFCFC" +} -### Multiple multimodal samples +~~~ + + +## Multisample Gene Expression Processing +Processing multisample gene expression involved the following steps: + +1. [Normalization](../components/modules/transform/normalize_total.qmd): Normalization aims to adjust the raw counts in the dataset for variable sampling effects by scaling the observable variance to a specified range. There are different ways to transform the data, but the normalization method is to make sure each observation (cell) has a total count equal to the median of total counts over all genes for observations (cells) before normalization. +2. [Log transformation](../components/modules/transform/log1p.qmd): Calculates $X = ln(X + 1)$, which converts multiplicative relative changes to additive differences. This allows for interpreting the gene expression in terms of relative, rather than absolute, abundances of genes. +3. [Highly variable gene detection](../components/modules/filter/filter_with_hvg.qmd): Detects genes that have a large change in expression between samples. By default, OpenPipeline uses the method from Seurat [(Satija et al.)](https://doi.org/10.1038/nbt.3192). As with other "filtering" components, the `filter_with_hvg` component does not remove features, but rather annotates genes of interest by adding a boolean column to `.var`. +4. 
[QC metric calculations](../components/modules/qc/calculate_qc_metrics.qmd) + +~~~{.d2 layout=elk height=1000px} +direction: down + +input: "Input" { + shape: page +} + +output: "Output" { + shape: page +} + +normalize: "Normalization" { + shape: parallelogram +} + +log: "Log Transformation" { + shape: parallelogram +} + +filter_with_hvg: "Highly Variable\nGene Detection" { + shape: parallelogram +} + +qc_metrics: "Calculating QC Metrics" { + shape: parallelogram +} + +input -> normalize -> log -> filter_with_hvg -> qc_metrics -> output + +style: { + fill: "#FCFCFC" +} + +~~~ + + +## Multisample Antibody Capture Processing +Processing the ADT modality for multiple samples involves two steps: normalization of the counts, followed by the calculation of QC metrics. + +~~~{.d2 layout=elk pad=0} +direction: right + +input: "Input" { + shape: page +} + +output: "Output" { + shape: page +} + +normalize: "Normalization" { + shape: parallelogram +} + + +qc_metrics: "Calculating QC Metrics" { + shape: parallelogram +} + +input -> normalize -> qc_metrics -> output + +style: { + fill: "#FCFCFC" +} +~~~ + +# Integration {#sec-intergration} + +## Dimensionality Reduction {#sec-dimensionality-reduction} +scRNA-seq is a high-throughput sequencing technology that produces datasets with high dimensions in the number of cells and genes. More data should in principle provide more information, but it also contains more noise and redundant information, making it harder to distill the useful signal. The number of genes and cells can already be reduced by gene filtering, but further reduction is a necessity for downstream analysis. Dimensionality reduction projects high-dimensional data into a lower dimensional space (like taking a photo (2D) of some 3D structure). The lower dimensional representation still captures the underlying information of the data, while having fewer dimensions. + +Several dimensionality reduction methods have been developed and applied to single-cell data analysis, two of which are applied in OpenPipeline: + +1. [Principal Component Analysis (PCA)](../components/modules/dimred/pca.qmd): PCA reduces the dimension of a dataset by creating a new set of variables (principal components, PCs) from a linear combination of the original features in such a way that they are as uncorrelated as possible. The PCs can be ranked by how much of the variability in the original dataset they explain. By keeping the top _n_ PCs, the PCs with the lowest variance are discarded to effectively reduce the dimensionality of the data with minimal loss of information. +2. [Uniform manifold approximation and projection (UMAP)](../components/modules/dimred/umap.qmd): a non-linear dimensionality reduction technique. It constructs a high dimensional graph representation of the dataset and optimizes the low-dimensional graph representation to be structurally as similar as possible to the original graph. In a review by [Xiang et al., 2021](https://doi.org/10.3389/fgene.2021.646936), UMAP showed the highest stability and best separated the original cell populations. +3. t-SNE is another popular non-linear, graph-based dimensionality reduction technique which is very similar to UMAP, but it has not yet been implemented in OpenPipeline. + +## Initializing integration {#sec-initializing-integration} +As will be described in more detail [later on](#sec-integration-methods), many integration methods exist and therefore there is no single integration method which is executed by default.
However, there are common tasks which are run before integration, either because they provide required input for many downstream integration methods or because they are popular steps that would otherwise be done manually. These operations _are_ executed by default when using the "full pipeline" as part of the [initialize_integration](../components/workflows/integration/initialize_integration/initialize_integration.qmd) subworkflow. + +[PCA](../components/modules/dimred/pca.qmd) is used to reduce the dimensionality of the dataset [as described previously](#sec-dimensionality-reduction). [Find Neighbors](../components/modules/neighbors/find_neighbors.qmd) and [Leiden Clustering](../components/modules/cluster/leiden.qmd) are useful for the identification of cell types or states in the data. A popular method to accomplish this is to first calculate a neighborhood graph on a [low-dimensional representation](#sec-dimensionality-reduction) of the data and then cluster the data based on the similarity between data points. Finally, [UMAP](../components/modules/dimred/umap.qmd) allows us to visualise the clusters by reducing the dimensionality of the data while still providing an accurate representation of the underlying cell population structure. + +~~~{.d2 layout=elk pad=0} +direction: right + +input: "Input" { + shape: page +} + +output: "Output" { + shape: page +} + +pca: "PCA" { + shape: parallelogram +} + +find_neighbors: "Find\nNeighbors" { + shape: parallelogram +} + +umap: "UMAP" { + shape: parallelogram +} + +input -> pca -> find_neighbors -> umap -> output + +style: { + fill: "#FCFCFC" +} + +~~~ + +## Integration Methods {#sec-integration-methods} +Integration is the alignment of cell types across samples. There exist three different types of integration methods, based on the degree of integration across modalities: + +1. Unimodal integration across batches. For example: [scVI](../components/modules/integrate/scvi.qmd), [scanorama](../components/modules/integrate/scanorama.qmd), [harmony](../components/modules/integrate/harmonypy.qmd) +2. Multimodal integration across batches and modalities. Can be used to integrate joint-profiling data where multiple modalities are measured. For example: [totalVI](../components/modules/integrate/totalvi.qmd) +3. Mosaic integration: data integration across batches and modalities where not all cells are profiled in all modalities and it may be the case that no cells contain profiles in all integrated modalities. Mosaic integration methods have not been made available in OpenPipeline yet. An example of a tool that performs mosaic integration is StabMap. + +In all three cases, concatenated samples are required, and merged modalities are preferred. A plethora of integration methods exist, which in turn interact with other functionality (like clustering and dimensionality reduction methods) to generate a large number of possible use cases which one pipeline cannot easily cover. Therefore, there is no single integration step that is part of a global pipeline which is executed by default. Instead, a user can choose from the integration workflows provided, and 'stack' integration methods by adding the outputs to different output slots of the MuData object. The following sections describe the integration workflows that are available in OpenPipeline.
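The 'stacking' happens purely at the data level: each integration workflow reads the shared MuData file and writes its embedding (and any derived neighborhood graph or clustering) to its own output slot, so the results of several methods can coexist side by side. The snippet below is a hedged illustration of what such a stacked object might look like; the slot names are examples only, as the actual keys are configurable through the workflows' output arguments.

```python
# Illustration only: inspect an object on which several integration workflows
# have been run, each writing to its own slot. Slot and column names below are
# assumed examples; the real keys depend on the arguments used for each workflow.
import mudata as md

mdata = md.read_h5mu("integrated.h5mu")          # hypothetical output after stacking several workflows
rna = mdata.mod["rna"]

print(list(rna.obsm.keys()))                     # e.g. ['X_pca', 'X_scvi_integrated', 'X_pca_harmony', 'X_umap']
print([c for c in rna.obs.columns if "leiden" in c])  # e.g. one clustering column per integration run

# Downstream analyses can then pick whichever representation they need:
embedding = rna.obsm["X_scvi_integrated"]        # assumed key name
```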
+ +### Unimodal integration +For unimodal integration, [scVI](../components/modules/integrate/scvi.qmd), [scanorama](../components/modules/integrate/scanorama.qmd) and [harmony](../components/modules/integrate/harmonypy.qmd) have been added to the [scvi_leiden](../components/workflows/integration/scvi_leiden.qmd), [scanorama_leiden](../components/workflows/integration/scanorama_leiden.qmd), and [harmony_leiden](../components/workflows/integration/harmony_leiden.qmd) workflows respectively. After executing the integration methods themselves, [Find Neighbors](../components/modules/neighbors/find_neighbors.qmd) and [Leiden Clustering](../components/modules/cluster/leiden.qmd) are run on the results of the integration, as well as [UMAP](../components/modules/dimred/umap.qmd) in order to be able to visualise the results. The functioning of these components has already been described [here](#sec-initializing-integration). + +~~~{.d2 layout=elk pad=0} +direction: right + +input: "Input" { + shape: page +} + +output: "Output" { + shape: page +} + +integration: "Integration" { + shape: parallelogram +} + +integration.scvi: "scVI" { + shape: parallelogram +} + +integration.scanorama: "Scanorama" { + shape: parallelogram +} + +integration.harmony: "Harmony" { + shape: parallelogram +} + +find_neighbors: "Find\nNeighbors" { + shape: parallelogram +} + +leiden_clustering: "Leiden\nClustering" { + shape: parallelogram +} + +umap: "UMAP" { + shape: parallelogram +} + +input -> integration -> find_neighbors -> leiden_clustering -> umap -> output + +style: { + fill: "#FCFCFC" +} + +~~~ + +### Multimodal Integration +A single multimodal integration method is currently available in OpenPipeline: [totalVI](../components/modules/integrate/totalvi.qmd). It allows using information from both the gene-expression data and the antibody-capture data together to integrate the cell types. As with the other integration workflows, after running totalVI, [Find Neighbors](../components/modules/neighbors/find_neighbors.qmd), [Leiden Clustering](../components/modules/cluster/leiden.qmd) and [UMAP](../components/modules/dimred/umap.qmd) are run on the result. However, in this case the three components are executed on both of the integrated modalities. + +~~~{.d2 layout=elk pad=0} +direction: right + +input: "Input" { + shape: page +} + +output: "Output" { + shape: page +} + +integration: "Integration" { + shape: parallelogram +} + +integration.totalvi: "TotalVI" { + shape: parallelogram +} + +find_neighbors: "Find\nNeighbors" { + shape: parallelogram +} + +leiden_clustering: "Leiden\nClustering" { + shape: parallelogram +} + +umap: "UMAP" { + shape: parallelogram +} + +input -> integration -> find_neighbors -> leiden_clustering -> umap -> output + +style: { + fill: "#FCFCFC" +} + +~~~ + + +# Putting it all together: the "Full Pipeline" {#sec-full-pipeline} :::{.column-screen-inset-shaded} ```{mermaid} %%| label: fig-full-pipeline-detailed %%| fig-cap: Overview of the single-cell processing steps in OpenPipeline. Rectangles are data objects, parallelograms are Viash modules or subworkflows.
+ + +flowchart TB + Raw1[/Sample 1/]:::file --> Split + Raw2[/Sample 2/]:::file --> Split2 + subgraph FullPipeline [Full Pipeline] + NoIntegration -.-> MultimodalFile[/Multisample\nMultimodal File/]:::file -.-> MultiSample + Split([Split\nmodalities]):::component --gex modality--> ProcGEX1 + Split([Split\nmodalities]):::component --prot modality--> ProcADT1 + Split([Split\nmodalities]):::component -- other--> ConcatVDJ + + Split2([Split\nmodalities]):::component --gex modality--> ProcGEX1 + Split2([Split\nmodalities]):::component --prot modality--> ProcADT1 + Split2([Split\nmodalities]):::component -- other--> ConcatVDJ + + + subgraph MultiSample [Multisample] + subgraph MultisampleRNA [Multisample RNA] + end + MultisampleRNA:::workflow + subgraph MultisampleADT [Multisample ADT] + end + MultisampleADT:::workflow + subgraph Unknown["Untreated modality (e.g. VDJ)"] + end + Unknown:::logicalgrouping + end + ConcatVDJ([Concatenate]):::component + NoIntegration[Multisample Multimodality] + + ConcatVDJ([Concatenate]):::component --> Unknown + ConcatGEX([Concatenate]):::component --> MultisampleRNA + ConcatADT([Concatenate]):::component --> MultisampleADT + + MultisampleRNA & MultisampleADT & Unknown--> Merge([Merge\nmodalities]):::component --> NoIntegration:::workflow + + end + + subgraph Wrapper + subgraph Integration[Integration] + subgraph IntegrationRNA[Integration RNA] + direction LR + hamony_leiden_rna[Harmony + Leiden]:::workflow + scvi_rna[scVI + Leiden]:::workflow + scanorama[Scanorama + Leiden]:::workflow + other[...]:::workflow + end + subgraph IntegrationProt[Integration ADT] + direction LR + hamony_leiden_prot[Harmony + Leiden]:::workflow + otherprot[...]:::workflow + end + subgraph multimodal_integration[Multimodal integration] + totalVI([totalVI]):::component + end + multimodal_integration:::logicalgrouping + IntegrationRNA:::logicalgrouping --choose from--> IntegrationProt:::logicalgrouping + NoIntegration ---> totalVI + end + Integration:::logicalgrouping + subgraph LegendBox[Legend] + direction LR + component([component]):::component + multiple_component((Multiple Components)):::component + workflow["(Sub)workflow"]:::workflow + file[/file/]:::file + Logicalgrouping[Logical grouping]:::logicalgrouping + end + LegendBox:::legend + end + Wrapper:::hide + + + NoIntegration --choose from--> IntegrationRNA + %% NoIntegration ~~~ LegendBox + + ProcGEX1[Process GEX\nSingle Sample]:::workflow --> ConcatGEX + ProcADT1[Process ADT\nSingle Sample]:::workflow --> ConcatADT + -flowchart LR - Raw1[Sample 1] --> Split1[/Split\nmodalities/] --> ProcGEX1 & ProcADT1 - Raw2[Sample 2] --> Split2[/Split\nmodalities/] --> ProcGEX2 & ProcADT2 - ProcGEX1[/Process GEX\nprofile/] & ProcGEX2[/Process GEX\nprofile/] --> ConcatGEX[/Concatenate\nprofiles/] --> ProcGEX[/Process GEX\nprofiles/] - ProcADT1[/Process ADT\nprofile/] & ProcADT2[/Process ADT\nprofile/] --> ConcatADT[/Concatenate\nprofiles/] --> ProcADT[/Process ADT\nprofiles/] - ProcGEX & ProcADT --> Merge[/Merge\nmodalities/] --> Integration[/Integration/] + style FullPipeline fill: #5cc,font-size:1.4em,color:#000; + classDef hide fill:transparent,color:transparent,stroke:transparent; + classDef legend fill:transparent; + classDef file fill: #5c5c5c,color:#fff,stroke-dasharray: 5 5; + classDef logicalgrouping fill:transparent,stroke-dasharray: 5 5; + classDef workflow fill:#ffffde,color:#000; + classDef component fill:#ececff,color:#000; ``` diff --git a/fundamentals/concepts.qmd b/fundamentals/concepts.qmd index 30436fb7..c72d3e3e 100644 --- 
a/fundamentals/concepts.qmd +++ b/fundamentals/concepts.qmd @@ -1,4 +1,76 @@ --- title: Concepts order: 20 ---- \ No newline at end of file +--- + +# Goals + +OpenPipelines strives to provide easy ways to interact with the pipeline and/or codebase for three types of users: + +* Pipeline executor: runs the pipeline from a GUI +* Pipeline editor: adapts pipelines with existing components for specific projects +* Component developer: develops novel components and/or pipelines + +This means that openpipelines must be: + +* Usable by non-experts +* Easy to deploy +* Provide reproducible results +* Scalable +* Easy to maintain and adapt + +# Requirements + +To meet these demands, the following concepts have been implemented at the core of Openpipeline: + +* 🌍 A language independent framework +* 💾 [A versatile storage solution](#sec-common-file-format) +* 🔳 Modularity +* 🔀 [A best-practice pipeline layout](architecture.qmd) +* ⌛ [Versioning](../contributing/project_structure.qmd#sec-versioning) +* ✅ [Automatic testing](../contributing/project_structure.qmd) +* 💬 [Community input](../contributing/index.qmd) +* 📺 A graphical interface + +# A common file format: AnnData and MuData 💾 {#sec-common-file-format} + +One of the core principles of OpenPipelines is to use [MuData](https://mudata.readthedocs.io/) as a common data format throughout the whole pipeline. This means that the input and output for most components and workflows will be a MuData file, and converters from and to other common data formats are provided to improve compatibility with up- and downstream applications. Choosing a common data format greatly diminishes the development complexity because it facilitates interfacing between different tools in a pipeline without needing to convert multiple times. + +MuData is a format to store annotated multimodal data. It is derived from the [AnnData](https://anndata.readthedocs.io/en/latest/) format. If you are unfamiliar with AnnData or MuData, it is recommended to read up on AnnData first as it is the unimodal counterpart of MuData. MuData can be roughly described as a collection of several AnnData objects (stored as an associative array in the `.mod` attribute). MuData provides a hierarchical way to store the data: + +``` +MuData +├─ .mod +│ ├─ modality_1 (AnnData Object) +│ ├─ .X +│ ├─ .layers +│ ├─ layer_1 +│ ├─ ... +│ ├─ .var +│ ├─ .obs +│ ├─ .obsm +│ ├─ .varm +│ ├─ .uns +│ ├─ modality_2 (AnnData Object) +├─ .var +├─ .obs +├─ .obsm +├─ .varm +├─ .uns +``` + +* `.mod`: an associative array of AnnData objects. Used in OpenPipelines to store the different modalities (CITE-seq, RNA abundance, ...) +* `.X` and `.layers`: matrices storing the measurements with the columns being the variables measured and the rows being the observations (cells in most cases). +* `.var`: metadata for the variables (i.e. annotation for the columns of `.X` or any matrix in `.layers`). The number of rows in the .var dataframe (or the length of each entry in the dictionary) is equal to the number of columns in the measurement matrices. +* `.obs`: metadata for the observations (i.e. annotation for the rows of `.X` or any matrix in `.layers`). The number of rows in the .obs dataframe (or the length of each entry in the dictionary) is equal to the number of rows in the measurement matrices. +* `.varm`: multi-dimensional variable annotation. A key-dataframe mapping where the number of rows in each dataframe is equal to the number of columns in the measurement matrices.
+* `.obsm`: multi-dimensional observation annotation. A key-dataframe mapping where the number of rows in each dataframe is equal to the number of rows in the measurement matrices. +* `.uns`: A mapping where no restrictions are enforced on the dimensions of the data. + +# Modularity and a language independent framework 🔳 + +TODO + +# A graphical interface 📺 + +TODO \ No newline at end of file diff --git a/fundamentals/philosophy.qmd b/fundamentals/philosophy.qmd index da0a8209..32f3154c 100644 --- a/fundamentals/philosophy.qmd +++ b/fundamentals/philosophy.qmd @@ -1,4 +1,17 @@ --- title: Philosophy order: 10 ---- \ No newline at end of file +--- + +# Mission + +OpenPipelines are best-practice living workflows for single-cell uni- and multi-omics data. Building a best-practice pipeline requires knowledge and time that no single person can provide, but rather requires input from a community. Additionally, a best-practice pipeline needs constant maintenance to keep up to date with the latest standards, ideally sourcing input from a 'living' benchmark. Continuous improvement necessitates a robust system for sourcing and applying community input, both from a technical and an organisational standpoint. + +```{mermaid} +graph TB + ben["🌱📈 Living benchmarks"] + pra["🌱📖 Living best practices"] + pip["🌱⚙️ Living reproducible pipelines"] + ben --> pra --> pip +``` + diff --git a/fundamentals/roadmap.qmd b/fundamentals/roadmap.qmd index 8676347f..76e13930 100644 --- a/fundamentals/roadmap.qmd +++ b/fundamentals/roadmap.qmd @@ -9,20 +9,20 @@ order: 40 ```{mermaid} %%| label: fig-status -%%| fig-cap: "Status of implemented components. Green: implemented, orange: work in progress, \nGEX: Gene-expression. RNAV: RNA Velocity. ADT: Antibody-Derived Tags. ATAC: Assay for Transposase-Accessible Chromatin." +%%| fig-cap: "Status of implemented components. Green: implemented, orange: work in progress, purple: modality included in output but unprocessed.\nGEX: Gene-expression. RNAV: RNA Velocity. ADT: Antibody-Derived Tags. ATAC: Assay for Transposase-Accessible Chromatin."
flowchart LR classDef done fill:#a3f6cf,stroke:#000000; classDef wip fill:#f4cb93,stroke:#000000; - classDef todo fill:#f0aeb2,stroke:#000000; + classDef unprocessed fill:#afadff,stroke:#000000; Raw1[Sample 1] --> Split1[/Split\nmodalities/]:::done --> ProcGEX1 & ProcRNAV1 & ProcADT1 & ProcATAC1 & ProcVDJ1 ProcGEX1[/Process GEX\nprofile/]:::done --> ConcatGEX[/Concatenate\nprofiles/]:::done --> ProcGEX[/Process GEX\nprofiles/]:::done ProcRNAV1[/Process RNAV\nprofile/]:::wip --> ConcatRNAV[/Concatenate\nprofiles/]:::done --> ProcRNAV[/Process RNAV\nprofiles/]:::wip - ProcADT1[/Process ADT\nprofile/]:::done --> ConcatADT[/Concatenate\nprofiles/]:::done --> ProcADT[/Process ADT\nprofiles/]:::todo - ProcATAC1[/Process ATAC\nprofile/]:::todo --> ConcatATAC[/Concatenate\nprofiles/]:::done --> ProcATAC[/Process ATAC\nprofiles/]:::todo - ProcVDJ1[/Process VDJ\nprofile/]:::todo --> ConcatVDJ[/Concatenate\nprofiles/]:::done --> ProcVDJ[/Process VDJ\nprofiles/]:::todo - ProcGEX & ProcRNAV & ProcADT & ProcATAC & ProcVDJ --> Merge[/Merge\nmodalities/]:::done --> Integration[/Integration/]:::done + ProcADT1[/Process ADT\nprofile/]:::done --> ConcatADT[/Concatenate\nprofiles/]:::done --> ProcADT[/Process ADT\nprofiles/]:::done + ProcATAC1[/Process ATAC\nprofile/]:::unprocessed --> ConcatATAC[/Concatenate\nprofiles/]:::done --> ProcATAC[/Process ATAC\nprofiles/]:::unprocessed + ProcVDJ1[/Process VDJ\nprofile/]:::unprocessed --> ConcatVDJ[/Concatenate\nprofiles/]:::done --> ProcVDJ[/Process VDJ\nprofiles/]:::unprocessed + ProcGEX & ProcRNAV & ProcADT & ProcATAC & ProcVDJ --> Merge[/Merge\nmodalities/]:::done --> SetupIntegration[/Setup\nintegration/]:::done --> Integration[/Integration/]:::done ``` ::: \ No newline at end of file diff --git a/images/anndata_schema.svg b/images/anndata_schema.svg new file mode 100644 index 00000000..e871b98d --- /dev/null +++ b/images/anndata_schema.svg @@ -0,0 +1,175 @@ [SVG markup for images/anndata_schema.svg (175 added lines) not shown]