18460000105  3 months ago
Parent commit: fd56c5b40f
100 changed files with 14763 additions and 0 deletions
  1. +25 -0    node_modules/json-parse-even-better-errors/LICENSE.md
  2. +96 -0    node_modules/json-parse-even-better-errors/README.md
  3. +121 -0   node_modules/json-parse-even-better-errors/index.js
  4. +33 -0    node_modules/json-parse-even-better-errors/package.json
  5. +27 -0    node_modules/json-schema-traverse/.eslintrc.yml
  6. +21 -0    node_modules/json-schema-traverse/LICENSE
  7. +95 -0    node_modules/json-schema-traverse/README.md
  8. +40 -0    node_modules/json-schema-traverse/index.d.ts
  9. +93 -0    node_modules/json-schema-traverse/index.js
  10. +43 -0   node_modules/json-schema-traverse/package.json
  11. +0 -0    node_modules/json-schema-traverse/spec/.eslintrc.yml
  12. +125 -0  node_modules/json-schema-traverse/spec/fixtures/schema.js
  13. +171 -0  node_modules/json-schema-traverse/spec/index.spec.js
  14. +303 -0  node_modules/json5/dist/index.js
  15. +0 -0    node_modules/json5/dist/index.min.js
  16. +152 -0  node_modules/json5/lib/cli.js
  17. +9 -0    node_modules/json5/lib/index.js
  18. +1114 -0 node_modules/json5/lib/parse.js
  19. +13 -0   node_modules/json5/lib/register.js
  20. +4 -0    node_modules/json5/lib/require.js
  21. +185 -0  node_modules/jsonc-parser/lib/esm/impl/edit.js
  22. +261 -0  node_modules/jsonc-parser/lib/esm/impl/format.js
  23. +178 -0  node_modules/jsonc-parser/lib/esm/main.js
  24. +201 -0  node_modules/jsonc-parser/lib/umd/impl/edit.js
  25. +275 -0  node_modules/jsonc-parser/lib/umd/impl/format.js
  26. +171 -0  node_modules/jsonfile/CHANGELOG.md
  27. +15 -0   node_modules/jsonfile/LICENSE
  28. +230 -0  node_modules/jsonfile/README.md
  29. +88 -0   node_modules/jsonfile/index.js
  30. +40 -0   node_modules/jsonfile/package.json
  31. +14 -0   node_modules/jsonfile/utils.js
  32. +1 -0    node_modules/keycharm/.npmignore
  33. +176 -0  node_modules/keycharm/LICENSE-APACHE-2.0
  34. +22 -0   node_modules/keycharm/LICENSE-MIT
  35. +50 -0   node_modules/keycharm/README.md
  36. +193 -0  node_modules/keycharm/keycharm.js
  37. +8 -0    node_modules/keycharm/package.json
  38. +51 -0   node_modules/keycharm/test/test.html
  39. +21 -0   node_modules/lines-and-columns/LICENSE
  40. +33 -0   node_modules/lines-and-columns/README.md
  41. +13 -0   node_modules/lines-and-columns/build/index.d.ts
  42. +62 -0   node_modules/lines-and-columns/build/index.js
  43. +49 -0   node_modules/lines-and-columns/package.json
  44. +47 -0   node_modules/lodash.debounce/LICENSE
  45. +18 -0   node_modules/lodash.debounce/README.md
  46. +377 -0  node_modules/lodash.debounce/index.js
  47. +17 -0   node_modules/lodash.debounce/package.json
  48. +47 -0   node_modules/lodash/LICENSE
  49. +21 -0   node_modules/lodash/_apply.js
  50. +22 -0   node_modules/lodash/_arrayAggregator.js
  51. +22 -0   node_modules/lodash/_arrayEach.js
  52. +21 -0   node_modules/lodash/_arrayEachRight.js
  53. +23 -0   node_modules/lodash/_arrayEvery.js
  54. +25 -0   node_modules/lodash/_arrayFilter.js
  55. +17 -0   node_modules/lodash/_arrayIncludes.js
  56. +1 -0    node_modules/lodash/fp/__.js
  57. +8 -0    node_modules/log-symbols/browser.js
  58. +25 -0   node_modules/log-symbols/index.d.ts
  59. +19 -0   node_modules/log-symbols/index.js
  60. +9 -0    node_modules/log-symbols/license
  61. +52 -0   node_modules/log-symbols/package.json
  62. +51 -0   node_modules/log-symbols/readme.md
  63. +3 -0    node_modules/loglevel-plugin-prefix/.travis.yml
  64. +21 -0   node_modules/loglevel-plugin-prefix/LICENSE
  65. +218 -0  node_modules/loglevel-plugin-prefix/README.md
  66. BIN      node_modules/loglevel-plugin-prefix/colored.png
  67. +1 -0    node_modules/loglevel-plugin-prefix/dist/loglevel-plugin-prefix.min.js
  68. +7 -0    node_modules/loglevel-plugin-prefix/examples/.eslintrc.json
  69. +14 -0   node_modules/loglevel-plugin-prefix/index.d.ts
  70. +50 -0   node_modules/loglevel-plugin-prefix/package.json
  71. +27 -0   node_modules/loglevel/.editorconfig
  72. +165 -0  node_modules/loglevel/Gruntfile.js
  73. +22 -0   node_modules/loglevel/LICENSE-MIT
  74. +139 -0  node_modules/loglevel/demo/index.html
  75. +107 -0  node_modules/loglevel/demo/styles.css
  76. +352 -0  node_modules/loglevel/dist/loglevel.js
  77. +2 -0    node_modules/loglevel/dist/loglevel.min.js
  78. +357 -0  node_modules/loglevel/lib/loglevel.js
  79. +15 -0   node_modules/lru-cache/LICENSE
  80. +166 -0  node_modules/lru-cache/README.md
  81. +334 -0  node_modules/lru-cache/index.js
  82. +32 -0   node_modules/lru-cache/package.json
  83. +3 -0    node_modules/lunr/.npmignore
  84. +19 -0   node_modules/lunr/LICENSE
  85. +78 -0   node_modules/lunr/README.md
  86. +11 -0   node_modules/lunr/build/bower.json.template
  87. +3475 -0 node_modules/lunr/lunr.js
  88. +5 -0    node_modules/lunr/lunr.min.js
  89. +30 -0   node_modules/lunr/package.json
  90. +40 -0   node_modules/macos-release/index.d.ts
  91. +37 -0   node_modules/macos-release/index.js
  92. +9 -0    node_modules/macos-release/license
  93. +40 -0   node_modules/macos-release/package.json
  94. +63 -0   node_modules/macos-release/readme.md
  95. +7 -0    node_modules/magic-string/LICENSE
  96. +1555 -0 node_modules/magic-string/dist/magic-string.cjs.js
  97. +0 -0    node_modules/magic-string/dist/magic-string.cjs.js.map
  98. +0 -0    node_modules/magic-string/dist/magic-string.es.mjs.map
  99. +1652 -0 node_modules/magic-string/dist/magic-string.umd.js
  100. +63 -0  node_modules/magic-string/package.json

+ 25 - 0
node_modules/json-parse-even-better-errors/LICENSE.md

@@ -0,0 +1,25 @@
+Copyright 2017 Kat Marchán
+Copyright npm, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+---
+
+This library is a fork of 'better-json-errors' by Kat Marchán, extended and
+distributed under the terms of the MIT license above.

+ 96 - 0
node_modules/json-parse-even-better-errors/README.md

@@ -0,0 +1,96 @@
+# json-parse-even-better-errors
+
+[`json-parse-even-better-errors`](https://github.com/npm/json-parse-even-better-errors)
+is a Node.js library for getting nicer errors out of `JSON.parse()`,
+including context and position of the parse errors.
+
+It also preserves the newline and indentation styles of the JSON data, by
+putting them in the object or array in the `Symbol.for('indent')` and
+`Symbol.for('newline')` properties.
+
+## Install
+
+`$ npm install --save json-parse-even-better-errors`
+
+## Table of Contents
+
+* [Example](#example)
+* [Features](#features)
+* [Contributing](#contributing)
+* [API](#api)
+  * [`parse`](#parse)
+
+### Example
+
+```javascript
+const parseJson = require('json-parse-even-better-errors')
+
+parseJson('"foo"') // returns the string 'foo'
+parseJson('garbage') // more useful error message
+parseJson.noExceptions('garbage') // returns undefined
+```
+
+### Features
+
+* Like JSON.parse, but the errors are better.
+* Strips a leading byte-order-mark that you sometimes get reading files.
+* Has a `noExceptions` method that returns undefined rather than throwing.
+* Attaches the newline character(s) used to the `Symbol.for('newline')`
+  property on objects and arrays.
+* Attaches the indentation character(s) used to the `Symbol.for('indent')`
+  property on objects and arrays.
+
+## Indentation
+
+To preserve indentation when the file is saved back to disk, use
+`data[Symbol.for('indent')]` as the third argument to `JSON.stringify`, and
+if you want to preserve windows `\r\n` newlines, replace the `\n` chars in
+the string with `data[Symbol.for('newline')]`.
+
+For example:
+
+```js
+const txt = await readFile('./package.json', 'utf8')
+const data = parseJsonEvenBetterErrors(txt)
+const indent = Symbol.for('indent')
+const newline = Symbol.for('newline')
+// .. do some stuff to the data ..
+const string = JSON.stringify(data, null, data[indent]) + '\n'
+const eolFixed = data[newline] === '\n' ? string
+  : string.replace(/\n/g, data[newline])
+await writeFile('./package.json', eolFixed)
+```
+
+Indentation is determined by looking at the whitespace between the initial
+`{` and `[` and the character that follows it.  If you have lots of weird
+inconsistent indentation, then it won't track that or give you any way to
+preserve it.  Whether this is a bug or a feature is debatable ;)
+
+### API
+
+#### <a name="parse"></a> `parse(txt, reviver = null, context = 20)`
+
+Works just like `JSON.parse`, but will include a bit more information when
+an error happens, and attaches a `Symbol.for('indent')` and
+`Symbol.for('newline')` on objects and arrays.  This throws a
+`JSONParseError`.
+
+#### <a name="parse"></a> `parse.noExceptions(txt, reviver = null)`
+
+Works just like `JSON.parse`, but will return `undefined` rather than
+throwing an error.
+
+#### <a name="jsonparseerror"></a> `class JSONParseError(er, text, context = 20, caller = null)`
+
+Extends the JavaScript `SyntaxError` class to parse the message and provide
+better metadata.
+
+Pass in the error thrown by the built-in `JSON.parse`, and the text being
+parsed, and it'll parse out the bits needed to be helpful.
+
+`context` defaults to 20.
+
+Set a `caller` function to trim internal implementation details out of the
+stack trace.  When calling `parseJson`, this is set to the `parseJson`
+function.  If not set, then the constructor defaults to itself, so the
+stack trace will point to the spot where you call `new JSONParseError`.
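
For quick reference, a minimal sketch of the API the README above describes (illustrative inputs, not part of this commit's diff):

```js
const parseJson = require('json-parse-even-better-errors')

// The parsed object carries the detected newline and indent styles.
const data = parseJson('{\r\n    "name": "demo"\r\n}')
console.log(data[Symbol.for('newline')]) // '\r\n'
console.log(data[Symbol.for('indent')])  // '    '

// A failed parse throws a JSONParseError with extra metadata.
try {
  parseJson('{"name": }')
} catch (err) {
  console.error(err.code)     // 'EJSONPARSE'
  console.error(err.position) // position extracted from the V8 message, 0 if unavailable
}

// Or skip exceptions entirely:
console.log(parseJson.noExceptions('{"name": }')) // undefined
```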

+ 121 - 0
node_modules/json-parse-even-better-errors/index.js

@@ -0,0 +1,121 @@
+'use strict'
+
+const hexify = char => {
+  const h = char.charCodeAt(0).toString(16).toUpperCase()
+  return '0x' + (h.length % 2 ? '0' : '') + h
+}
+
+const parseError = (e, txt, context) => {
+  if (!txt) {
+    return {
+      message: e.message + ' while parsing empty string',
+      position: 0,
+    }
+  }
+  const badToken = e.message.match(/^Unexpected token (.) .*position\s+(\d+)/i)
+  const errIdx = badToken ? +badToken[2]
+    : e.message.match(/^Unexpected end of JSON.*/i) ? txt.length - 1
+    : null
+
+  const msg = badToken ? e.message.replace(/^Unexpected token ./, `Unexpected token ${
+      JSON.stringify(badToken[1])
+    } (${hexify(badToken[1])})`)
+    : e.message
+
+  if (errIdx !== null && errIdx !== undefined) {
+    const start = errIdx <= context ? 0
+      : errIdx - context
+
+    const end = errIdx + context >= txt.length ? txt.length
+      : errIdx + context
+
+    const slice = (start === 0 ? '' : '...') +
+      txt.slice(start, end) +
+      (end === txt.length ? '' : '...')
+
+    const near = txt === slice ? '' : 'near '
+
+    return {
+      message: msg + ` while parsing ${near}${JSON.stringify(slice)}`,
+      position: errIdx,
+    }
+  } else {
+    return {
+      message: msg + ` while parsing '${txt.slice(0, context * 2)}'`,
+      position: 0,
+    }
+  }
+}
+
+class JSONParseError extends SyntaxError {
+  constructor (er, txt, context, caller) {
+    context = context || 20
+    const metadata = parseError(er, txt, context)
+    super(metadata.message)
+    Object.assign(this, metadata)
+    this.code = 'EJSONPARSE'
+    this.systemError = er
+    Error.captureStackTrace(this, caller || this.constructor)
+  }
+  get name () { return this.constructor.name }
+  set name (n) {}
+  get [Symbol.toStringTag] () { return this.constructor.name }
+}
+
+const kIndent = Symbol.for('indent')
+const kNewline = Symbol.for('newline')
+// only respect indentation if we got a line break, otherwise squash it
+// things other than objects and arrays aren't indented, so ignore those
+// Important: in both of these regexps, the $1 capture group is the newline
+// or undefined, and the $2 capture group is the indent, or undefined.
+const formatRE = /^\s*[{\[]((?:\r?\n)+)([\s\t]*)/
+const emptyRE = /^(?:\{\}|\[\])((?:\r?\n)+)?$/
+
+const parseJson = (txt, reviver, context) => {
+  const parseText = stripBOM(txt)
+  context = context || 20
+  try {
+    // get the indentation so that we can save it back nicely
+    // if the file starts with {" then we have an indent of '', ie, none
+    // otherwise, pick the indentation of the next line after the first \n
+    // If the pattern doesn't match, then it means no indentation.
+    // JSON.stringify ignores symbols, so this is reasonably safe.
+    // if the string is '{}' or '[]', then use the default 2-space indent.
+    const [, newline = '\n', indent = '  '] = parseText.match(emptyRE) ||
+      parseText.match(formatRE) ||
+      [, '', '']
+
+    const result = JSON.parse(parseText, reviver)
+    if (result && typeof result === 'object') {
+      result[kNewline] = newline
+      result[kIndent] = indent
+    }
+    return result
+  } catch (e) {
+    if (typeof txt !== 'string' && !Buffer.isBuffer(txt)) {
+      const isEmptyArray = Array.isArray(txt) && txt.length === 0
+      throw Object.assign(new TypeError(
+        `Cannot parse ${isEmptyArray ? 'an empty array' : String(txt)}`
+      ), {
+        code: 'EJSONPARSE',
+        systemError: e,
+      })
+    }
+
+    throw new JSONParseError(e, parseText, context, parseJson)
+  }
+}
+
+// Remove byte order marker. This catches EF BB BF (the UTF-8 BOM)
+// because the buffer-to-string conversion in `fs.readFileSync()`
+// translates it to FEFF, the UTF-16 BOM.
+const stripBOM = txt => String(txt).replace(/^\uFEFF/, '')
+
+module.exports = parseJson
+parseJson.JSONParseError = JSONParseError
+
+parseJson.noExceptions = (txt, reviver) => {
+  try {
+    return JSON.parse(stripBOM(txt), reviver)
+  } catch (e) {}
+}

+ 33 - 0
node_modules/json-parse-even-better-errors/package.json

@@ -0,0 +1,33 @@
+{
+  "name": "json-parse-even-better-errors",
+  "version": "2.3.1",
+  "description": "JSON.parse with context information on error",
+  "main": "index.js",
+  "files": [
+    "*.js"
+  ],
+  "scripts": {
+    "preversion": "npm t",
+    "postversion": "npm publish",
+    "prepublishOnly": "git push --follow-tags",
+    "test": "tap",
+    "snap": "tap"
+  },
+  "repository": "https://github.com/npm/json-parse-even-better-errors",
+  "keywords": [
+    "JSON",
+    "parser"
+  ],
+  "author": {
+    "name": "Kat Marchán",
+    "email": "kzm@zkat.tech",
+    "twitter": "maybekatz"
+  },
+  "license": "MIT",
+  "devDependencies": {
+    "tap": "^14.6.5"
+  },
+  "tap": {
+    "check-coverage": true
+  }
+}

+ 27 - 0
node_modules/json-schema-traverse/.eslintrc.yml

@@ -0,0 +1,27 @@
+extends: eslint:recommended
+env:
+  node: true
+  browser: true
+rules:
+  block-scoped-var: 2
+  complexity: [2, 15]
+  curly: [2, multi-or-nest, consistent]
+  dot-location: [2, property]
+  dot-notation: 2
+  indent: [2, 2, SwitchCase: 1]
+  linebreak-style: [2, unix]
+  new-cap: 2
+  no-console: [2, allow: [warn, error]]
+  no-else-return: 2
+  no-eq-null: 2
+  no-fallthrough: 2
+  no-invalid-this: 2
+  no-return-assign: 2
+  no-shadow: 1
+  no-trailing-spaces: 2
+  no-use-before-define: [2, nofunc]
+  quotes: [2, single, avoid-escape]
+  semi: [2, always]
+  strict: [2, global]
+  valid-jsdoc: [2, requireReturn: false]
+  no-control-regex: 0

+ 21 - 0
node_modules/json-schema-traverse/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Evgeny Poberezkin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 95 - 0
node_modules/json-schema-traverse/README.md

@@ -0,0 +1,95 @@
+# json-schema-traverse
+Traverse JSON Schema passing each schema object to callback
+
+[![build](https://github.com/epoberezkin/json-schema-traverse/workflows/build/badge.svg)](https://github.com/epoberezkin/json-schema-traverse/actions?query=workflow%3Abuild)
+[![npm](https://img.shields.io/npm/v/json-schema-traverse)](https://www.npmjs.com/package/json-schema-traverse)
+[![coverage](https://coveralls.io/repos/github/epoberezkin/json-schema-traverse/badge.svg?branch=master)](https://coveralls.io/github/epoberezkin/json-schema-traverse?branch=master)
+
+
+## Install
+
+```
+npm install json-schema-traverse
+```
+
+
+## Usage
+
+```javascript
+const traverse = require('json-schema-traverse');
+const schema = {
+  properties: {
+    foo: {type: 'string'},
+    bar: {type: 'integer'}
+  }
+};
+
+traverse(schema, {cb});
+// cb is called 3 times with:
+// 1. root schema
+// 2. {type: 'string'}
+// 3. {type: 'integer'}
+
+// Or:
+
+traverse(schema, {cb: {pre, post}});
+// pre is called 3 times with:
+// 1. root schema
+// 2. {type: 'string'}
+// 3. {type: 'integer'}
+//
+// post is called 3 times with:
+// 1. {type: 'string'}
+// 2. {type: 'integer'}
+// 3. root schema
+
+```
+
+Callback function `cb` is called for each schema object (not including draft-06 boolean schemas), including the root schema, in pre-order traversal. Schema references ($ref) are not resolved, they are passed as is.  Alternatively, you can pass a `{pre, post}` object as `cb`, and then `pre` will be called before traversing child elements, and `post` will be called after all child elements have been traversed.
+
+Callback is passed these parameters:
+
+- _schema_: the current schema object
+- _JSON pointer_: from the root schema to the current schema object
+- _root schema_: the schema passed to `traverse` object
+- _parent JSON pointer_: from the root schema to the parent schema object (see below)
+- _parent keyword_: the keyword inside which this schema appears (e.g. `properties`, `anyOf`, etc.)
+- _parent schema_: not necessarily parent object/array; in the example above the parent schema for `{type: 'string'}` is the root schema
+- _index/property_: index or property name in the array/object containing multiple schemas; in the example above for `{type: 'string'}` the property name is `'foo'`
+
+
+## Traverse objects in all unknown keywords
+
+```javascript
+const traverse = require('json-schema-traverse');
+const schema = {
+  mySchema: {
+    minimum: 1,
+    maximum: 2
+  }
+};
+
+traverse(schema, {allKeys: true, cb});
+// cb is called 2 times with:
+// 1. root schema
+// 2. mySchema
+```
+
+Without option `allKeys: true` callback will be called only with root schema.
+
+
+## Enterprise support
+
+json-schema-traverse package is a part of [Tidelift enterprise subscription](https://tidelift.com/subscription/pkg/npm-json-schema-traverse?utm_source=npm-json-schema-traverse&utm_medium=referral&utm_campaign=enterprise&utm_term=repo) - it provides a centralised commercial support to open-source software users, in addition to the support provided by software maintainers.
+
+
+## Security contact
+
+To report a security vulnerability, please use the
+[Tidelift security contact](https://tidelift.com/security).
+Tidelift will coordinate the fix and disclosure. Please do NOT report security vulnerability via GitHub issues.
+
+
+## License
+
+[MIT](https://github.com/epoberezkin/json-schema-traverse/blob/master/LICENSE)
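
A small sketch of the callback signature documented in the README above; the schema and parameter names are illustrative:

```js
const traverse = require('json-schema-traverse');

const schema = {
  properties: {
    foo: {type: 'string'}
  },
  items: [{type: 'number'}]
};

traverse(schema, {cb: (sch, jsonPtr, root, parentPtr, parentKeyword, parentSchema, keyIndex) => {
  console.log(JSON.stringify(jsonPtr), parentKeyword, keyIndex);
}});
// "" undefined undefined             <- root schema
// "/properties/foo" properties foo   <- index/property is the property name
// "/items/0" items 0                 <- index/property is the array index
```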

+ 40 - 0
node_modules/json-schema-traverse/index.d.ts

@@ -0,0 +1,40 @@
+declare function traverse(
+  schema: traverse.SchemaObject,
+  opts: traverse.Options,
+  cb?: traverse.Callback
+): void;
+
+declare function traverse(
+  schema: traverse.SchemaObject,
+  cb: traverse.Callback
+): void;
+
+declare namespace traverse {
+  interface SchemaObject {
+    $id?: string;
+    $schema?: string;
+    [x: string]: any;
+  }
+
+  type Callback = (
+    schema: SchemaObject,
+    jsonPtr: string,
+    rootSchema: SchemaObject,
+    parentJsonPtr?: string,
+    parentKeyword?: string,
+    parentSchema?: SchemaObject,
+    keyIndex?: string | number
+  ) => void;
+
+  interface Options {
+    allKeys?: boolean;
+    cb?:
+      | Callback
+      | {
+          pre?: Callback;
+          post?: Callback;
+        };
+  }
+}
+
+export = traverse;

+ 93 - 0
node_modules/json-schema-traverse/index.js

@@ -0,0 +1,93 @@
+'use strict';
+
+var traverse = module.exports = function (schema, opts, cb) {
+  // Legacy support for v0.3.1 and earlier.
+  if (typeof opts == 'function') {
+    cb = opts;
+    opts = {};
+  }
+
+  cb = opts.cb || cb;
+  var pre = (typeof cb == 'function') ? cb : cb.pre || function() {};
+  var post = cb.post || function() {};
+
+  _traverse(opts, pre, post, schema, '', schema);
+};
+
+
+traverse.keywords = {
+  additionalItems: true,
+  items: true,
+  contains: true,
+  additionalProperties: true,
+  propertyNames: true,
+  not: true,
+  if: true,
+  then: true,
+  else: true
+};
+
+traverse.arrayKeywords = {
+  items: true,
+  allOf: true,
+  anyOf: true,
+  oneOf: true
+};
+
+traverse.propsKeywords = {
+  $defs: true,
+  definitions: true,
+  properties: true,
+  patternProperties: true,
+  dependencies: true
+};
+
+traverse.skipKeywords = {
+  default: true,
+  enum: true,
+  const: true,
+  required: true,
+  maximum: true,
+  minimum: true,
+  exclusiveMaximum: true,
+  exclusiveMinimum: true,
+  multipleOf: true,
+  maxLength: true,
+  minLength: true,
+  pattern: true,
+  format: true,
+  maxItems: true,
+  minItems: true,
+  uniqueItems: true,
+  maxProperties: true,
+  minProperties: true
+};
+
+
+function _traverse(opts, pre, post, schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex) {
+  if (schema && typeof schema == 'object' && !Array.isArray(schema)) {
+    pre(schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex);
+    for (var key in schema) {
+      var sch = schema[key];
+      if (Array.isArray(sch)) {
+        if (key in traverse.arrayKeywords) {
+          for (var i=0; i<sch.length; i++)
+            _traverse(opts, pre, post, sch[i], jsonPtr + '/' + key + '/' + i, rootSchema, jsonPtr, key, schema, i);
+        }
+      } else if (key in traverse.propsKeywords) {
+        if (sch && typeof sch == 'object') {
+          for (var prop in sch)
+            _traverse(opts, pre, post, sch[prop], jsonPtr + '/' + key + '/' + escapeJsonPtr(prop), rootSchema, jsonPtr, key, schema, prop);
+        }
+      } else if (key in traverse.keywords || (opts.allKeys && !(key in traverse.skipKeywords))) {
+        _traverse(opts, pre, post, sch, jsonPtr + '/' + key, rootSchema, jsonPtr, key, schema);
+      }
+    }
+    post(schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex);
+  }
+}
+
+
+function escapeJsonPtr(str) {
+  return str.replace(/~/g, '~0').replace(/\//g, '~1');
+}
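
Illustrative note on the `escapeJsonPtr` helper above: property names containing `/` or `~` are escaped as `~1` and `~0` in the generated JSON Pointers (per RFC 6901). The property name below is made up for the example:

```js
const traverse = require('json-schema-traverse');

traverse({properties: {'a/~b': {type: 'string'}}}, {
  cb: (sch, jsonPtr) => { if (jsonPtr) console.log(jsonPtr); }
});
// -> /properties/a~1~0b
```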

+ 43 - 0
node_modules/json-schema-traverse/package.json

@@ -0,0 +1,43 @@
+{
+  "name": "json-schema-traverse",
+  "version": "1.0.0",
+  "description": "Traverse JSON Schema passing each schema object to callback",
+  "main": "index.js",
+  "types": "index.d.ts",
+  "scripts": {
+    "eslint": "eslint index.js spec",
+    "test-spec": "mocha spec -R spec",
+    "test": "npm run eslint && nyc npm run test-spec"
+  },
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/epoberezkin/json-schema-traverse.git"
+  },
+  "keywords": [
+    "JSON-Schema",
+    "traverse",
+    "iterate"
+  ],
+  "author": "Evgeny Poberezkin",
+  "license": "MIT",
+  "bugs": {
+    "url": "https://github.com/epoberezkin/json-schema-traverse/issues"
+  },
+  "homepage": "https://github.com/epoberezkin/json-schema-traverse#readme",
+  "devDependencies": {
+    "eslint": "^7.3.1",
+    "mocha": "^8.0.1",
+    "nyc": "^15.0.0",
+    "pre-commit": "^1.2.2"
+  },
+  "nyc": {
+    "exclude": [
+      "**/spec/**",
+      "node_modules"
+    ],
+    "reporter": [
+      "lcov",
+      "text-summary"
+    ]
+  }
+}

+ 0 - 0
node_modules/json-schema-traverse/spec/.eslintrc.yml


+ 125 - 0
node_modules/json-schema-traverse/spec/fixtures/schema.js

@@ -0,0 +1,125 @@
+'use strict';
+
+var schema = {
+  additionalItems: subschema('additionalItems'),
+  items: subschema('items'),
+  contains: subschema('contains'),
+  additionalProperties: subschema('additionalProperties'),
+  propertyNames: subschema('propertyNames'),
+  not: subschema('not'),
+  allOf: [
+    subschema('allOf_0'),
+    subschema('allOf_1'),
+    {
+      items: [
+        subschema('items_0'),
+        subschema('items_1'),
+      ]
+    }
+  ],
+  anyOf: [
+    subschema('anyOf_0'),
+    subschema('anyOf_1'),
+  ],
+  oneOf: [
+    subschema('oneOf_0'),
+    subschema('oneOf_1'),
+  ],
+  definitions: {
+    foo: subschema('definitions_foo'),
+    bar: subschema('definitions_bar'),
+  },
+  properties: {
+    foo: subschema('properties_foo'),
+    bar: subschema('properties_bar'),
+  },
+  patternProperties: {
+    foo: subschema('patternProperties_foo'),
+    bar: subschema('patternProperties_bar'),
+  },
+  dependencies: {
+    foo: subschema('dependencies_foo'),
+    bar: subschema('dependencies_bar'),
+  },
+  required: ['foo', 'bar']
+};
+
+
+function subschema(keyword) {
+  var sch = {
+    properties: {},
+    additionalProperties: false,
+    additionalItems: false,
+    anyOf: [
+      {format: 'email'},
+      {format: 'hostname'}
+    ]
+  };
+  sch.properties['foo_' + keyword] = {title: 'foo'};
+  sch.properties['bar_' + keyword] = {title: 'bar'};
+  return sch;
+}
+
+
+module.exports = {
+  schema: schema,
+
+  // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
+  expectedCalls: [[schema, '', schema, undefined, undefined, undefined, undefined]]
+    .concat(expectedCalls('additionalItems'))
+    .concat(expectedCalls('items'))
+    .concat(expectedCalls('contains'))
+    .concat(expectedCalls('additionalProperties'))
+    .concat(expectedCalls('propertyNames'))
+    .concat(expectedCalls('not'))
+    .concat(expectedCallsChild('allOf', 0))
+    .concat(expectedCallsChild('allOf', 1))
+    .concat([
+      [schema.allOf[2], '/allOf/2', schema, '', 'allOf', schema, 2],
+      [schema.allOf[2].items[0], '/allOf/2/items/0', schema, '/allOf/2', 'items', schema.allOf[2], 0],
+      [schema.allOf[2].items[0].properties.foo_items_0, '/allOf/2/items/0/properties/foo_items_0', schema, '/allOf/2/items/0', 'properties', schema.allOf[2].items[0], 'foo_items_0'],
+      [schema.allOf[2].items[0].properties.bar_items_0, '/allOf/2/items/0/properties/bar_items_0', schema, '/allOf/2/items/0', 'properties', schema.allOf[2].items[0], 'bar_items_0'],
+      [schema.allOf[2].items[0].anyOf[0], '/allOf/2/items/0/anyOf/0', schema, '/allOf/2/items/0', 'anyOf', schema.allOf[2].items[0], 0],
+      [schema.allOf[2].items[0].anyOf[1], '/allOf/2/items/0/anyOf/1', schema, '/allOf/2/items/0', 'anyOf', schema.allOf[2].items[0], 1],
+
+      [schema.allOf[2].items[1], '/allOf/2/items/1', schema, '/allOf/2', 'items', schema.allOf[2], 1],
+      [schema.allOf[2].items[1].properties.foo_items_1, '/allOf/2/items/1/properties/foo_items_1', schema, '/allOf/2/items/1', 'properties', schema.allOf[2].items[1], 'foo_items_1'],
+      [schema.allOf[2].items[1].properties.bar_items_1, '/allOf/2/items/1/properties/bar_items_1', schema, '/allOf/2/items/1', 'properties', schema.allOf[2].items[1], 'bar_items_1'],
+      [schema.allOf[2].items[1].anyOf[0], '/allOf/2/items/1/anyOf/0', schema, '/allOf/2/items/1', 'anyOf', schema.allOf[2].items[1], 0],
+      [schema.allOf[2].items[1].anyOf[1], '/allOf/2/items/1/anyOf/1', schema, '/allOf/2/items/1', 'anyOf', schema.allOf[2].items[1], 1]
+    ])
+    .concat(expectedCallsChild('anyOf', 0))
+    .concat(expectedCallsChild('anyOf', 1))
+    .concat(expectedCallsChild('oneOf', 0))
+    .concat(expectedCallsChild('oneOf', 1))
+    .concat(expectedCallsChild('definitions', 'foo'))
+    .concat(expectedCallsChild('definitions', 'bar'))
+    .concat(expectedCallsChild('properties', 'foo'))
+    .concat(expectedCallsChild('properties', 'bar'))
+    .concat(expectedCallsChild('patternProperties', 'foo'))
+    .concat(expectedCallsChild('patternProperties', 'bar'))
+    .concat(expectedCallsChild('dependencies', 'foo'))
+    .concat(expectedCallsChild('dependencies', 'bar'))
+};
+
+
+function expectedCalls(keyword) {
+  return [
+    [schema[keyword], `/${keyword}`, schema, '', keyword, schema, undefined],
+    [schema[keyword].properties[`foo_${keyword}`], `/${keyword}/properties/foo_${keyword}`, schema, `/${keyword}`, 'properties', schema[keyword], `foo_${keyword}`],
+    [schema[keyword].properties[`bar_${keyword}`], `/${keyword}/properties/bar_${keyword}`, schema, `/${keyword}`, 'properties', schema[keyword], `bar_${keyword}`],
+    [schema[keyword].anyOf[0], `/${keyword}/anyOf/0`, schema, `/${keyword}`, 'anyOf', schema[keyword], 0],
+    [schema[keyword].anyOf[1], `/${keyword}/anyOf/1`, schema, `/${keyword}`, 'anyOf', schema[keyword], 1]
+  ];
+}
+
+
+function expectedCallsChild(keyword, i) {
+  return [
+    [schema[keyword][i], `/${keyword}/${i}`, schema, '', keyword, schema, i],
+    [schema[keyword][i].properties[`foo_${keyword}_${i}`], `/${keyword}/${i}/properties/foo_${keyword}_${i}`, schema, `/${keyword}/${i}`, 'properties', schema[keyword][i], `foo_${keyword}_${i}`],
+    [schema[keyword][i].properties[`bar_${keyword}_${i}`], `/${keyword}/${i}/properties/bar_${keyword}_${i}`, schema, `/${keyword}/${i}`, 'properties', schema[keyword][i], `bar_${keyword}_${i}`],
+    [schema[keyword][i].anyOf[0], `/${keyword}/${i}/anyOf/0`, schema, `/${keyword}/${i}`, 'anyOf', schema[keyword][i], 0],
+    [schema[keyword][i].anyOf[1], `/${keyword}/${i}/anyOf/1`, schema, `/${keyword}/${i}`, 'anyOf', schema[keyword][i], 1]
+  ];
+}

+ 171 - 0
node_modules/json-schema-traverse/spec/index.spec.js

@@ -0,0 +1,171 @@
+'use strict';
+
+var traverse = require('../index');
+var assert = require('assert');
+
+describe('json-schema-traverse', function() {
+  var calls;
+
+  beforeEach(function() {
+    calls = [];
+  });
+
+  it('should traverse all keywords containing schemas recursively', function() {
+    var schema = require('./fixtures/schema').schema;
+    var expectedCalls = require('./fixtures/schema').expectedCalls;
+
+    traverse(schema, {cb: callback});
+    assert.deepStrictEqual(calls, expectedCalls);
+  });
+
+  describe('Legacy v0.3.1 API', function() {
+    it('should traverse all keywords containing schemas recursively', function() {
+      var schema = require('./fixtures/schema').schema;
+      var expectedCalls = require('./fixtures/schema').expectedCalls;
+
+      traverse(schema, callback);
+      assert.deepStrictEqual(calls, expectedCalls);
+    });
+
+    it('should work when an options object is provided', function() {
+      // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
+      var schema = require('./fixtures/schema').schema;
+      var expectedCalls = require('./fixtures/schema').expectedCalls;
+
+      traverse(schema, {}, callback);
+      assert.deepStrictEqual(calls, expectedCalls);
+    });
+  });
+
+
+  describe('allKeys option', function() {
+    var schema = {
+      someObject: {
+        minimum: 1,
+        maximum: 2
+      }
+    };
+
+    it('should traverse objects with allKeys: true option', function() {
+      // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
+      var expectedCalls = [
+        [schema, '', schema, undefined, undefined, undefined, undefined],
+        [schema.someObject, '/someObject', schema, '', 'someObject', schema, undefined]
+      ];
+
+      traverse(schema, {allKeys: true, cb: callback});
+      assert.deepStrictEqual(calls, expectedCalls);
+    });
+
+
+    it('should NOT traverse objects with allKeys: false option', function() {
+      // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
+      var expectedCalls = [
+        [schema, '', schema, undefined, undefined, undefined, undefined]
+      ];
+
+      traverse(schema, {allKeys: false, cb: callback});
+      assert.deepStrictEqual(calls, expectedCalls);
+    });
+
+
+    it('should NOT traverse objects without allKeys option', function() {
+      // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
+      var expectedCalls = [
+        [schema, '', schema, undefined, undefined, undefined, undefined]
+      ];
+
+      traverse(schema, {cb: callback});
+      assert.deepStrictEqual(calls, expectedCalls);
+    });
+
+
+    it('should NOT travers objects in standard keywords which value is not a schema', function() {
+      var schema2 = {
+        const: {foo: 'bar'},
+        enum: ['a', 'b'],
+        required: ['foo'],
+        another: {
+
+        },
+        patternProperties: {}, // will not traverse - no properties
+        dependencies: true, // will not traverse - invalid
+        properties: {
+          smaller: {
+            type: 'number'
+          },
+          larger: {
+            type: 'number',
+            minimum: {$data: '1/smaller'}
+          }
+        }
+      };
+
+      // schema, jsonPtr, rootSchema, parentJsonPtr, parentKeyword, parentSchema, keyIndex
+      var expectedCalls = [
+        [schema2, '', schema2, undefined, undefined, undefined, undefined],
+        [schema2.another, '/another', schema2, '', 'another', schema2, undefined],
+        [schema2.properties.smaller, '/properties/smaller', schema2, '', 'properties', schema2, 'smaller'],
+        [schema2.properties.larger, '/properties/larger', schema2, '', 'properties', schema2, 'larger'],
+      ];
+
+      traverse(schema2, {allKeys: true, cb: callback});
+      assert.deepStrictEqual(calls, expectedCalls);
+    });
+  });
+
+  describe('pre and post', function() {
+    var schema = {
+      type: 'object',
+      properties: {
+        name: {type: 'string'},
+        age: {type: 'number'}
+      }
+    };
+
+    it('should traverse schema in pre-order', function() {
+      traverse(schema, {cb: {pre}});
+      var expectedCalls = [
+        ['pre', schema, '', schema, undefined, undefined, undefined, undefined],
+        ['pre', schema.properties.name, '/properties/name', schema, '', 'properties', schema, 'name'],
+        ['pre', schema.properties.age, '/properties/age', schema, '', 'properties', schema, 'age'],
+      ];
+      assert.deepStrictEqual(calls, expectedCalls);
+    });
+
+    it('should traverse schema in post-order', function() {
+      traverse(schema, {cb: {post}});
+      var expectedCalls = [
+        ['post', schema.properties.name, '/properties/name', schema, '', 'properties', schema, 'name'],
+        ['post', schema.properties.age, '/properties/age', schema, '', 'properties', schema, 'age'],
+        ['post', schema, '', schema, undefined, undefined, undefined, undefined],
+      ];
+      assert.deepStrictEqual(calls, expectedCalls);
+    });
+
+    it('should traverse schema in pre- and post-order at the same time', function() {
+      traverse(schema, {cb: {pre, post}});
+      var expectedCalls = [
+        ['pre', schema, '', schema, undefined, undefined, undefined, undefined],
+        ['pre', schema.properties.name, '/properties/name', schema, '', 'properties', schema, 'name'],
+        ['post', schema.properties.name, '/properties/name', schema, '', 'properties', schema, 'name'],
+        ['pre', schema.properties.age, '/properties/age', schema, '', 'properties', schema, 'age'],
+        ['post', schema.properties.age, '/properties/age', schema, '', 'properties', schema, 'age'],
+        ['post', schema, '', schema, undefined, undefined, undefined, undefined],
+      ];
+      assert.deepStrictEqual(calls, expectedCalls);
+    });
+  });
+
+  function callback() {
+    calls.push(Array.prototype.slice.call(arguments));
+  }
+
+  function pre() {
+    calls.push(['pre'].concat(Array.prototype.slice.call(arguments)));
+  }
+
+  function post() {
+    calls.push(['post'].concat(Array.prototype.slice.call(arguments)));
+  }
+});


Changes are not shown because the file is too large.
+ 303 - 0
node_modules/json5/dist/index.js


Changes are not shown because the file is too large.
+ 0 - 0
node_modules/json5/dist/index.min.js


+ 152 - 0
node_modules/json5/lib/cli.js

@@ -0,0 +1,152 @@
+#!/usr/bin/env node
+
+const fs = require('fs')
+const path = require('path')
+const pkg = require('../package.json')
+const JSON5 = require('./')
+
+const argv = parseArgs()
+
+if (argv.version) {
+    version()
+} else if (argv.help) {
+    usage()
+} else {
+    const inFilename = argv.defaults[0]
+
+    let readStream
+    if (inFilename) {
+        readStream = fs.createReadStream(inFilename)
+    } else {
+        readStream = process.stdin
+    }
+
+    let json5 = ''
+    readStream.on('data', data => {
+        json5 += data
+    })
+
+    readStream.on('end', () => {
+        let space
+        if (argv.space === 't' || argv.space === 'tab') {
+            space = '\t'
+        } else {
+            space = Number(argv.space)
+        }
+
+        let value
+        try {
+            value = JSON5.parse(json5)
+            if (!argv.validate) {
+                const json = JSON.stringify(value, null, space)
+
+                let writeStream
+
+                // --convert is for backward compatibility with v0.5.1. If
+                // specified with <file> and not --out-file, then a file with
+                // the same name but with a .json extension will be written.
+                if (argv.convert && inFilename && !argv.outFile) {
+                    const parsedFilename = path.parse(inFilename)
+                    const outFilename = path.format(
+                        Object.assign(
+                            parsedFilename,
+                            {base: path.basename(parsedFilename.base, parsedFilename.ext) + '.json'}
+                        )
+                    )
+
+                    writeStream = fs.createWriteStream(outFilename)
+                } else if (argv.outFile) {
+                    writeStream = fs.createWriteStream(argv.outFile)
+                } else {
+                    writeStream = process.stdout
+                }
+
+                writeStream.write(json)
+            }
+        } catch (err) {
+            console.error(err.message)
+            process.exit(1)
+        }
+    })
+}
+
+function parseArgs () {
+    let convert
+    let space
+    let validate
+    let outFile
+    let version
+    let help
+    const defaults = []
+
+    const args = process.argv.slice(2)
+    for (let i = 0; i < args.length; i++) {
+        const arg = args[i]
+        switch (arg) {
+        case '--convert':
+        case '-c':
+            convert = true
+            break
+
+        case '--space':
+        case '-s':
+            space = args[++i]
+            break
+
+        case '--validate':
+        case '-v':
+            validate = true
+            break
+
+        case '--out-file':
+        case '-o':
+            outFile = args[++i]
+            break
+
+        case '--version':
+        case '-V':
+            version = true
+            break
+
+        case '--help':
+        case '-h':
+            help = true
+            break
+
+        default:
+            defaults.push(arg)
+            break
+        }
+    }
+
+    return {
+        convert,
+        space,
+        validate,
+        outFile,
+        version,
+        help,
+        defaults,
+    }
+}
+
+function version () {
+    console.log(pkg.version)
+}
+
+function usage () {
+    console.log(
+        `
+  Usage: json5 [options] <file>
+
+  If <file> is not provided, then STDIN is used.
+
+  Options:
+
+    -s, --space              The number of spaces to indent or 't' for tabs
+    -o, --out-file [file]    Output to the specified file, otherwise STDOUT
+    -v, --validate           Validate JSON5 but do not output JSON
+    -V, --version            Output the version number
+    -h, --help               Output usage information`
+    )
+}
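
Illustrative check of the `--convert` output-filename logic in cli.js above, using the same `path.parse`/`path.format` calls; the filename is made up:

```js
const path = require('path')

const parsedFilename = path.parse('settings.json5')
const outFilename = path.format(Object.assign(parsedFilename, {
  base: path.basename(parsedFilename.base, parsedFilename.ext) + '.json',
}))

console.log(outFilename) // settings.json
```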

+ 9 - 0
node_modules/json5/lib/index.js

@@ -0,0 +1,9 @@
+const parse = require('./parse')
+const stringify = require('./stringify')
+
+const JSON5 = {
+    parse,
+    stringify,
+}
+
+module.exports = JSON5
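
A small sketch of the exported `parse`/`stringify` API above; the input text is illustrative:

```js
const JSON5 = require('json5')

const config = JSON5.parse(`{
  // JSON5 allows comments, unquoted keys, single quotes and trailing commas
  port: 8080,
  hosts: ['a', 'b',],
}`)

console.log(config.port)                // 8080
console.log(JSON5.stringify(config, null, 2)) // serialize back to JSON5 text
```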

+ 1114 - 0
node_modules/json5/lib/parse.js

@@ -0,0 +1,1114 @@
+const util = require('./util')
+
+let source
+let parseState
+let stack
+let pos
+let line
+let column
+let token
+let key
+let root
+
+module.exports = function parse (text, reviver) {
+    source = String(text)
+    parseState = 'start'
+    stack = []
+    pos = 0
+    line = 1
+    column = 0
+    token = undefined
+    key = undefined
+    root = undefined
+
+    do {
+        token = lex()
+
+        // This code is unreachable.
+        // if (!parseStates[parseState]) {
+        //     throw invalidParseState()
+        // }
+
+        parseStates[parseState]()
+    } while (token.type !== 'eof')
+
+    if (typeof reviver === 'function') {
+        return internalize({'': root}, '', reviver)
+    }
+
+    return root
+}
+
+function internalize (holder, name, reviver) {
+    const value = holder[name]
+    if (value != null && typeof value === 'object') {
+        if (Array.isArray(value)) {
+            for (let i = 0; i < value.length; i++) {
+                const key = String(i)
+                const replacement = internalize(value, key, reviver)
+                if (replacement === undefined) {
+                    delete value[key]
+                } else {
+                    Object.defineProperty(value, key, {
+                        value: replacement,
+                        writable: true,
+                        enumerable: true,
+                        configurable: true,
+                    })
+                }
+            }
+        } else {
+            for (const key in value) {
+                const replacement = internalize(value, key, reviver)
+                if (replacement === undefined) {
+                    delete value[key]
+                } else {
+                    Object.defineProperty(value, key, {
+                        value: replacement,
+                        writable: true,
+                        enumerable: true,
+                        configurable: true,
+                    })
+                }
+            }
+        }
+    }
+
+    return reviver.call(holder, name, value)
+}
+
+let lexState
+let buffer
+let doubleQuote
+let sign
+let c
+
+function lex () {
+    lexState = 'default'
+    buffer = ''
+    doubleQuote = false
+    sign = 1
+
+    for (;;) {
+        c = peek()
+
+        // This code is unreachable.
+        // if (!lexStates[lexState]) {
+        //     throw invalidLexState(lexState)
+        // }
+
+        const token = lexStates[lexState]()
+        if (token) {
+            return token
+        }
+    }
+}
+
+function peek () {
+    if (source[pos]) {
+        return String.fromCodePoint(source.codePointAt(pos))
+    }
+}
+
+function read () {
+    const c = peek()
+
+    if (c === '\n') {
+        line++
+        column = 0
+    } else if (c) {
+        column += c.length
+    } else {
+        column++
+    }
+
+    if (c) {
+        pos += c.length
+    }
+
+    return c
+}
+
+const lexStates = {
+    default () {
+        switch (c) {
+        case '\t':
+        case '\v':
+        case '\f':
+        case ' ':
+        case '\u00A0':
+        case '\uFEFF':
+        case '\n':
+        case '\r':
+        case '\u2028':
+        case '\u2029':
+            read()
+            return
+
+        case '/':
+            read()
+            lexState = 'comment'
+            return
+
+        case undefined:
+            read()
+            return newToken('eof')
+        }
+
+        if (util.isSpaceSeparator(c)) {
+            read()
+            return
+        }
+
+        // This code is unreachable.
+        // if (!lexStates[parseState]) {
+        //     throw invalidLexState(parseState)
+        // }
+
+        return lexStates[parseState]()
+    },
+
+    comment () {
+        switch (c) {
+        case '*':
+            read()
+            lexState = 'multiLineComment'
+            return
+
+        case '/':
+            read()
+            lexState = 'singleLineComment'
+            return
+        }
+
+        throw invalidChar(read())
+    },
+
+    multiLineComment () {
+        switch (c) {
+        case '*':
+            read()
+            lexState = 'multiLineCommentAsterisk'
+            return
+
+        case undefined:
+            throw invalidChar(read())
+        }
+
+        read()
+    },
+
+    multiLineCommentAsterisk () {
+        switch (c) {
+        case '*':
+            read()
+            return
+
+        case '/':
+            read()
+            lexState = 'default'
+            return
+
+        case undefined:
+            throw invalidChar(read())
+        }
+
+        read()
+        lexState = 'multiLineComment'
+    },
+
+    singleLineComment () {
+        switch (c) {
+        case '\n':
+        case '\r':
+        case '\u2028':
+        case '\u2029':
+            read()
+            lexState = 'default'
+            return
+
+        case undefined:
+            read()
+            return newToken('eof')
+        }
+
+        read()
+    },
+
+    value () {
+        switch (c) {
+        case '{':
+        case '[':
+            return newToken('punctuator', read())
+
+        case 'n':
+            read()
+            literal('ull')
+            return newToken('null', null)
+
+        case 't':
+            read()
+            literal('rue')
+            return newToken('boolean', true)
+
+        case 'f':
+            read()
+            literal('alse')
+            return newToken('boolean', false)
+
+        case '-':
+        case '+':
+            if (read() === '-') {
+                sign = -1
+            }
+
+            lexState = 'sign'
+            return
+
+        case '.':
+            buffer = read()
+            lexState = 'decimalPointLeading'
+            return
+
+        case '0':
+            buffer = read()
+            lexState = 'zero'
+            return
+
+        case '1':
+        case '2':
+        case '3':
+        case '4':
+        case '5':
+        case '6':
+        case '7':
+        case '8':
+        case '9':
+            buffer = read()
+            lexState = 'decimalInteger'
+            return
+
+        case 'I':
+            read()
+            literal('nfinity')
+            return newToken('numeric', Infinity)
+
+        case 'N':
+            read()
+            literal('aN')
+            return newToken('numeric', NaN)
+
+        case '"':
+        case "'":
+            doubleQuote = (read() === '"')
+            buffer = ''
+            lexState = 'string'
+            return
+        }
+
+        throw invalidChar(read())
+    },
+
+    identifierNameStartEscape () {
+        if (c !== 'u') {
+            throw invalidChar(read())
+        }
+
+        read()
+        const u = unicodeEscape()
+        switch (u) {
+        case '$':
+        case '_':
+            break
+
+        default:
+            if (!util.isIdStartChar(u)) {
+                throw invalidIdentifier()
+            }
+
+            break
+        }
+
+        buffer += u
+        lexState = 'identifierName'
+    },
+
+    identifierName () {
+        switch (c) {
+        case '$':
+        case '_':
+        case '\u200C':
+        case '\u200D':
+            buffer += read()
+            return
+
+        case '\\':
+            read()
+            lexState = 'identifierNameEscape'
+            return
+        }
+
+        if (util.isIdContinueChar(c)) {
+            buffer += read()
+            return
+        }
+
+        return newToken('identifier', buffer)
+    },
+
+    identifierNameEscape () {
+        if (c !== 'u') {
+            throw invalidChar(read())
+        }
+
+        read()
+        const u = unicodeEscape()
+        switch (u) {
+        case '$':
+        case '_':
+        case '\u200C':
+        case '\u200D':
+            break
+
+        default:
+            if (!util.isIdContinueChar(u)) {
+                throw invalidIdentifier()
+            }
+
+            break
+        }
+
+        buffer += u
+        lexState = 'identifierName'
+    },
+
+    sign () {
+        switch (c) {
+        case '.':
+            buffer = read()
+            lexState = 'decimalPointLeading'
+            return
+
+        case '0':
+            buffer = read()
+            lexState = 'zero'
+            return
+
+        case '1':
+        case '2':
+        case '3':
+        case '4':
+        case '5':
+        case '6':
+        case '7':
+        case '8':
+        case '9':
+            buffer = read()
+            lexState = 'decimalInteger'
+            return
+
+        case 'I':
+            read()
+            literal('nfinity')
+            return newToken('numeric', sign * Infinity)
+
+        case 'N':
+            read()
+            literal('aN')
+            return newToken('numeric', NaN)
+        }
+
+        throw invalidChar(read())
+    },
+
+    zero () {
+        switch (c) {
+        case '.':
+            buffer += read()
+            lexState = 'decimalPoint'
+            return
+
+        case 'e':
+        case 'E':
+            buffer += read()
+            lexState = 'decimalExponent'
+            return
+
+        case 'x':
+        case 'X':
+            buffer += read()
+            lexState = 'hexadecimal'
+            return
+        }
+
+        return newToken('numeric', sign * 0)
+    },
+
+    decimalInteger () {
+        switch (c) {
+        case '.':
+            buffer += read()
+            lexState = 'decimalPoint'
+            return
+
+        case 'e':
+        case 'E':
+            buffer += read()
+            lexState = 'decimalExponent'
+            return
+        }
+
+        if (util.isDigit(c)) {
+            buffer += read()
+            return
+        }
+
+        return newToken('numeric', sign * Number(buffer))
+    },
+
+    decimalPointLeading () {
+        if (util.isDigit(c)) {
+            buffer += read()
+            lexState = 'decimalFraction'
+            return
+        }
+
+        throw invalidChar(read())
+    },
+
+    decimalPoint () {
+        switch (c) {
+        case 'e':
+        case 'E':
+            buffer += read()
+            lexState = 'decimalExponent'
+            return
+        }
+
+        if (util.isDigit(c)) {
+            buffer += read()
+            lexState = 'decimalFraction'
+            return
+        }
+
+        return newToken('numeric', sign * Number(buffer))
+    },
+
+    decimalFraction () {
+        switch (c) {
+        case 'e':
+        case 'E':
+            buffer += read()
+            lexState = 'decimalExponent'
+            return
+        }
+
+        if (util.isDigit(c)) {
+            buffer += read()
+            return
+        }
+
+        return newToken('numeric', sign * Number(buffer))
+    },
+
+    decimalExponent () {
+        switch (c) {
+        case '+':
+        case '-':
+            buffer += read()
+            lexState = 'decimalExponentSign'
+            return
+        }
+
+        if (util.isDigit(c)) {
+            buffer += read()
+            lexState = 'decimalExponentInteger'
+            return
+        }
+
+        throw invalidChar(read())
+    },
+
+    decimalExponentSign () {
+        if (util.isDigit(c)) {
+            buffer += read()
+            lexState = 'decimalExponentInteger'
+            return
+        }
+
+        throw invalidChar(read())
+    },
+
+    decimalExponentInteger () {
+        if (util.isDigit(c)) {
+            buffer += read()
+            return
+        }
+
+        return newToken('numeric', sign * Number(buffer))
+    },
+
+    hexadecimal () {
+        if (util.isHexDigit(c)) {
+            buffer += read()
+            lexState = 'hexadecimalInteger'
+            return
+        }
+
+        throw invalidChar(read())
+    },
+
+    hexadecimalInteger () {
+        if (util.isHexDigit(c)) {
+            buffer += read()
+            return
+        }
+
+        return newToken('numeric', sign * Number(buffer))
+    },
+
+    string () {
+        switch (c) {
+        case '\\':
+            read()
+            buffer += escape()
+            return
+
+        case '"':
+            if (doubleQuote) {
+                read()
+                return newToken('string', buffer)
+            }
+
+            buffer += read()
+            return
+
+        case "'":
+            if (!doubleQuote) {
+                read()
+                return newToken('string', buffer)
+            }
+
+            buffer += read()
+            return
+
+        case '\n':
+        case '\r':
+            throw invalidChar(read())
+
+        case '\u2028':
+        case '\u2029':
+            separatorChar(c)
+            break
+
+        case undefined:
+            throw invalidChar(read())
+        }
+
+        buffer += read()
+    },
+
+    start () {
+        switch (c) {
+        case '{':
+        case '[':
+            return newToken('punctuator', read())
+
+        // This code is unreachable since the default lexState handles eof.
+        // case undefined:
+        //     return newToken('eof')
+        }
+
+        lexState = 'value'
+    },
+
+    beforePropertyName () {
+        switch (c) {
+        case '$':
+        case '_':
+            buffer = read()
+            lexState = 'identifierName'
+            return
+
+        case '\\':
+            read()
+            lexState = 'identifierNameStartEscape'
+            return
+
+        case '}':
+            return newToken('punctuator', read())
+
+        case '"':
+        case "'":
+            doubleQuote = (read() === '"')
+            lexState = 'string'
+            return
+        }
+
+        if (util.isIdStartChar(c)) {
+            buffer += read()
+            lexState = 'identifierName'
+            return
+        }
+
+        throw invalidChar(read())
+    },
+
+    afterPropertyName () {
+        if (c === ':') {
+            return newToken('punctuator', read())
+        }
+
+        throw invalidChar(read())
+    },
+
+    beforePropertyValue () {
+        lexState = 'value'
+    },
+
+    afterPropertyValue () {
+        switch (c) {
+        case ',':
+        case '}':
+            return newToken('punctuator', read())
+        }
+
+        throw invalidChar(read())
+    },
+
+    beforeArrayValue () {
+        if (c === ']') {
+            return newToken('punctuator', read())
+        }
+
+        lexState = 'value'
+    },
+
+    afterArrayValue () {
+        switch (c) {
+        case ',':
+        case ']':
+            return newToken('punctuator', read())
+        }
+
+        throw invalidChar(read())
+    },
+
+    end () {
+        // This code is unreachable since it's handled by the default lexState.
+        // if (c === undefined) {
+        //     read()
+        //     return newToken('eof')
+        // }
+
+        throw invalidChar(read())
+    },
+}
+
+function newToken (type, value) {
+    return {
+        type,
+        value,
+        line,
+        column,
+    }
+}
+
+function literal (s) {
+    for (const c of s) {
+        const p = peek()
+
+        if (p !== c) {
+            throw invalidChar(read())
+        }
+
+        read()
+    }
+}
+
+function escape () {
+    const c = peek()
+    switch (c) {
+    case 'b':
+        read()
+        return '\b'
+
+    case 'f':
+        read()
+        return '\f'
+
+    case 'n':
+        read()
+        return '\n'
+
+    case 'r':
+        read()
+        return '\r'
+
+    case 't':
+        read()
+        return '\t'
+
+    case 'v':
+        read()
+        return '\v'
+
+    case '0':
+        read()
+        if (util.isDigit(peek())) {
+            throw invalidChar(read())
+        }
+
+        return '\0'
+
+    case 'x':
+        read()
+        return hexEscape()
+
+    case 'u':
+        read()
+        return unicodeEscape()
+
+    case '\n':
+    case '\u2028':
+    case '\u2029':
+        read()
+        return ''
+
+    case '\r':
+        read()
+        if (peek() === '\n') {
+            read()
+        }
+
+        return ''
+
+    case '1':
+    case '2':
+    case '3':
+    case '4':
+    case '5':
+    case '6':
+    case '7':
+    case '8':
+    case '9':
+        throw invalidChar(read())
+
+    case undefined:
+        throw invalidChar(read())
+    }
+
+    return read()
+}
+
+function hexEscape () {
+    let buffer = ''
+    let c = peek()
+
+    if (!util.isHexDigit(c)) {
+        throw invalidChar(read())
+    }
+
+    buffer += read()
+
+    c = peek()
+    if (!util.isHexDigit(c)) {
+        throw invalidChar(read())
+    }
+
+    buffer += read()
+
+    return String.fromCodePoint(parseInt(buffer, 16))
+}
+
+function unicodeEscape () {
+    let buffer = ''
+    let count = 4
+
+    while (count-- > 0) {
+        const c = peek()
+        if (!util.isHexDigit(c)) {
+            throw invalidChar(read())
+        }
+
+        buffer += read()
+    }
+
+    return String.fromCodePoint(parseInt(buffer, 16))
+}
+
+const parseStates = {
+    start () {
+        if (token.type === 'eof') {
+            throw invalidEOF()
+        }
+
+        push()
+    },
+
+    beforePropertyName () {
+        switch (token.type) {
+        case 'identifier':
+        case 'string':
+            key = token.value
+            parseState = 'afterPropertyName'
+            return
+
+        case 'punctuator':
+            // This code is unreachable since it's handled by the lexState.
+            // if (token.value !== '}') {
+            //     throw invalidToken()
+            // }
+
+            pop()
+            return
+
+        case 'eof':
+            throw invalidEOF()
+        }
+
+        // This code is unreachable since it's handled by the lexState.
+        // throw invalidToken()
+    },
+
+    afterPropertyName () {
+        // This code is unreachable since it's handled by the lexState.
+        // if (token.type !== 'punctuator' || token.value !== ':') {
+        //     throw invalidToken()
+        // }
+
+        if (token.type === 'eof') {
+            throw invalidEOF()
+        }
+
+        parseState = 'beforePropertyValue'
+    },
+
+    beforePropertyValue () {
+        if (token.type === 'eof') {
+            throw invalidEOF()
+        }
+
+        push()
+    },
+
+    beforeArrayValue () {
+        if (token.type === 'eof') {
+            throw invalidEOF()
+        }
+
+        if (token.type === 'punctuator' && token.value === ']') {
+            pop()
+            return
+        }
+
+        push()
+    },
+
+    afterPropertyValue () {
+        // This code is unreachable since it's handled by the lexState.
+        // if (token.type !== 'punctuator') {
+        //     throw invalidToken()
+        // }
+
+        if (token.type === 'eof') {
+            throw invalidEOF()
+        }
+
+        switch (token.value) {
+        case ',':
+            parseState = 'beforePropertyName'
+            return
+
+        case '}':
+            pop()
+        }
+
+        // This code is unreachable since it's handled by the lexState.
+        // throw invalidToken()
+    },
+
+    afterArrayValue () {
+        // This code is unreachable since it's handled by the lexState.
+        // if (token.type !== 'punctuator') {
+        //     throw invalidToken()
+        // }
+
+        if (token.type === 'eof') {
+            throw invalidEOF()
+        }
+
+        switch (token.value) {
+        case ',':
+            parseState = 'beforeArrayValue'
+            return
+
+        case ']':
+            pop()
+        }
+
+        // This code is unreachable since it's handled by the lexState.
+        // throw invalidToken()
+    },
+
+    end () {
+        // This code is unreachable since it's handled by the lexState.
+        // if (token.type !== 'eof') {
+        //     throw invalidToken()
+        // }
+    },
+}
+
+function push () {
+    let value
+
+    switch (token.type) {
+    case 'punctuator':
+        switch (token.value) {
+        case '{':
+            value = {}
+            break
+
+        case '[':
+            value = []
+            break
+        }
+
+        break
+
+    case 'null':
+    case 'boolean':
+    case 'numeric':
+    case 'string':
+        value = token.value
+        break
+
+    // This code is unreachable.
+    // default:
+    //     throw invalidToken()
+    }
+
+    if (root === undefined) {
+        root = value
+    } else {
+        const parent = stack[stack.length - 1]
+        if (Array.isArray(parent)) {
+            parent.push(value)
+        } else {
+            Object.defineProperty(parent, key, {
+                value,
+                writable: true,
+                enumerable: true,
+                configurable: true,
+            })
+        }
+    }
+
+    if (value !== null && typeof value === 'object') {
+        stack.push(value)
+
+        if (Array.isArray(value)) {
+            parseState = 'beforeArrayValue'
+        } else {
+            parseState = 'beforePropertyName'
+        }
+    } else {
+        const current = stack[stack.length - 1]
+        if (current == null) {
+            parseState = 'end'
+        } else if (Array.isArray(current)) {
+            parseState = 'afterArrayValue'
+        } else {
+            parseState = 'afterPropertyValue'
+        }
+    }
+}
+
+function pop () {
+    stack.pop()
+
+    const current = stack[stack.length - 1]
+    if (current == null) {
+        parseState = 'end'
+    } else if (Array.isArray(current)) {
+        parseState = 'afterArrayValue'
+    } else {
+        parseState = 'afterPropertyValue'
+    }
+}
+
+// This code is unreachable.
+// function invalidParseState () {
+//     return new Error(`JSON5: invalid parse state '${parseState}'`)
+// }
+
+// This code is unreachable.
+// function invalidLexState (state) {
+//     return new Error(`JSON5: invalid lex state '${state}'`)
+// }
+
+function invalidChar (c) {
+    if (c === undefined) {
+        return syntaxError(`JSON5: invalid end of input at ${line}:${column}`)
+    }
+
+    return syntaxError(`JSON5: invalid character '${formatChar(c)}' at ${line}:${column}`)
+}
+
+function invalidEOF () {
+    return syntaxError(`JSON5: invalid end of input at ${line}:${column}`)
+}
+
+// This code is unreachable.
+// function invalidToken () {
+//     if (token.type === 'eof') {
+//         return syntaxError(`JSON5: invalid end of input at ${line}:${column}`)
+//     }
+
+//     const c = String.fromCodePoint(token.value.codePointAt(0))
+//     return syntaxError(`JSON5: invalid character '${formatChar(c)}' at ${line}:${column}`)
+// }
+
+function invalidIdentifier () {
+    column -= 5
+    return syntaxError(`JSON5: invalid identifier character at ${line}:${column}`)
+}
+
+function separatorChar (c) {
+    console.warn(`JSON5: '${formatChar(c)}' in strings is not valid ECMAScript; consider escaping`)
+}
+
+function formatChar (c) {
+    const replacements = {
+        "'": "\\'",
+        '"': '\\"',
+        '\\': '\\\\',
+        '\b': '\\b',
+        '\f': '\\f',
+        '\n': '\\n',
+        '\r': '\\r',
+        '\t': '\\t',
+        '\v': '\\v',
+        '\0': '\\0',
+        '\u2028': '\\u2028',
+        '\u2029': '\\u2029',
+    }
+
+    if (replacements[c]) {
+        return replacements[c]
+    }
+
+    if (c < ' ') {
+        const hexString = c.charCodeAt(0).toString(16)
+        return '\\x' + ('00' + hexString).substring(hexString.length)
+    }
+
+    return c
+}
+
+function syntaxError (message) {
+    const err = new SyntaxError(message)
+    err.lineNumber = line
+    err.columnNumber = column
+    return err
+}
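The lexer states above cover JSON5's extensions to plain JSON: hexadecimal numbers, single- or double-quoted strings, unquoted identifier property names, comments and trailing commas. A minimal usage sketch of the public API, assuming the vendored package resolves as 'json5':

const JSON5 = require('json5')

// hexadecimal/hexadecimalInteger lex 0x1F into 31; the string state accepts
// single quotes; beforePropertyName accepts bare identifier keys, comments
// and a trailing comma before the closing brace.
const config = JSON5.parse("{ flags: 0x1F, name: 'demo', /* trailing comma ok */ }")
console.log(config.flags, config.name) // 31 'demo'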

+ 13 - 0
node_modules/json5/lib/register.js

@@ -0,0 +1,13 @@
+const fs = require('fs')
+const JSON5 = require('./')
+
+// eslint-disable-next-line node/no-deprecated-api
+require.extensions['.json5'] = function (module, filename) {
+    const content = fs.readFileSync(filename, 'utf8')
+    try {
+        module.exports = JSON5.parse(content)
+    } catch (err) {
+        err.message = filename + ': ' + err.message
+        throw err
+    }
+}
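With this hook installed, require() can load .json5 files directly; parse errors are re-thrown with the file name prepended. A small sketch — './config.json5' is a hypothetical file used only for illustration:

// Registering the '.json5' extension makes require() run the file through
// JSON5.parse, the same way '.json' files are handled natively.
require('json5/register')

const config = require('./config.json5') // hypothetical file
console.log(config)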

+ 4 - 0
node_modules/json5/lib/require.js

@@ -0,0 +1,4 @@
+// This file is for backward compatibility with v0.5.1.
+require('./register')
+
+console.warn("'json5/require' is deprecated. Please use 'json5/register' instead.")

+ 185 - 0
node_modules/jsonc-parser/lib/esm/impl/edit.js

@@ -0,0 +1,185 @@
+/*---------------------------------------------------------------------------------------------
+ *  Copyright (c) Microsoft Corporation. All rights reserved.
+ *  Licensed under the MIT License. See License.txt in the project root for license information.
+ *--------------------------------------------------------------------------------------------*/
+'use strict';
+import { format, isEOL } from './format';
+import { parseTree, findNodeAtLocation } from './parser';
+export function removeProperty(text, path, options) {
+    return setProperty(text, path, void 0, options);
+}
+export function setProperty(text, originalPath, value, options) {
+    const path = originalPath.slice();
+    const errors = [];
+    const root = parseTree(text, errors);
+    let parent = void 0;
+    let lastSegment = void 0;
+    while (path.length > 0) {
+        lastSegment = path.pop();
+        parent = findNodeAtLocation(root, path);
+        if (parent === void 0 && value !== void 0) {
+            if (typeof lastSegment === 'string') {
+                value = { [lastSegment]: value };
+            }
+            else {
+                value = [value];
+            }
+        }
+        else {
+            break;
+        }
+    }
+    if (!parent) {
+        // empty document
+        if (value === void 0) { // delete
+            throw new Error('Can not delete in empty document');
+        }
+        return withFormatting(text, { offset: root ? root.offset : 0, length: root ? root.length : 0, content: JSON.stringify(value) }, options);
+    }
+    else if (parent.type === 'object' && typeof lastSegment === 'string' && Array.isArray(parent.children)) {
+        const existing = findNodeAtLocation(parent, [lastSegment]);
+        if (existing !== void 0) {
+            if (value === void 0) { // delete
+                if (!existing.parent) {
+                    throw new Error('Malformed AST');
+                }
+                const propertyIndex = parent.children.indexOf(existing.parent);
+                let removeBegin;
+                let removeEnd = existing.parent.offset + existing.parent.length;
+                if (propertyIndex > 0) {
+                    // remove the comma of the previous node
+                    let previous = parent.children[propertyIndex - 1];
+                    removeBegin = previous.offset + previous.length;
+                }
+                else {
+                    removeBegin = parent.offset + 1;
+                    if (parent.children.length > 1) {
+                        // remove the comma of the next node
+                        let next = parent.children[1];
+                        removeEnd = next.offset;
+                    }
+                }
+                return withFormatting(text, { offset: removeBegin, length: removeEnd - removeBegin, content: '' }, options);
+            }
+            else {
+                // set value of existing property
+                return withFormatting(text, { offset: existing.offset, length: existing.length, content: JSON.stringify(value) }, options);
+            }
+        }
+        else {
+            if (value === void 0) { // delete
+                return []; // property does not exist, nothing to do
+            }
+            const newProperty = `${JSON.stringify(lastSegment)}: ${JSON.stringify(value)}`;
+            const index = options.getInsertionIndex ? options.getInsertionIndex(parent.children.map(p => p.children[0].value)) : parent.children.length;
+            let edit;
+            if (index > 0) {
+                let previous = parent.children[index - 1];
+                edit = { offset: previous.offset + previous.length, length: 0, content: ',' + newProperty };
+            }
+            else if (parent.children.length === 0) {
+                edit = { offset: parent.offset + 1, length: 0, content: newProperty };
+            }
+            else {
+                edit = { offset: parent.offset + 1, length: 0, content: newProperty + ',' };
+            }
+            return withFormatting(text, edit, options);
+        }
+    }
+    else if (parent.type === 'array' && typeof lastSegment === 'number' && Array.isArray(parent.children)) {
+        const insertIndex = lastSegment;
+        if (insertIndex === -1) {
+            // Insert
+            const newProperty = `${JSON.stringify(value)}`;
+            let edit;
+            if (parent.children.length === 0) {
+                edit = { offset: parent.offset + 1, length: 0, content: newProperty };
+            }
+            else {
+                const previous = parent.children[parent.children.length - 1];
+                edit = { offset: previous.offset + previous.length, length: 0, content: ',' + newProperty };
+            }
+            return withFormatting(text, edit, options);
+        }
+        else if (value === void 0 && parent.children.length >= 0) {
+            // Removal
+            const removalIndex = lastSegment;
+            const toRemove = parent.children[removalIndex];
+            let edit;
+            if (parent.children.length === 1) {
+                // only item
+                edit = { offset: parent.offset + 1, length: parent.length - 2, content: '' };
+            }
+            else if (parent.children.length - 1 === removalIndex) {
+                // last item
+                let previous = parent.children[removalIndex - 1];
+                let offset = previous.offset + previous.length;
+                let parentEndOffset = parent.offset + parent.length;
+                edit = { offset, length: parentEndOffset - 2 - offset, content: '' };
+            }
+            else {
+                edit = { offset: toRemove.offset, length: parent.children[removalIndex + 1].offset - toRemove.offset, content: '' };
+            }
+            return withFormatting(text, edit, options);
+        }
+        else if (value !== void 0) {
+            let edit;
+            const newProperty = `${JSON.stringify(value)}`;
+            if (!options.isArrayInsertion && parent.children.length > lastSegment) {
+                const toModify = parent.children[lastSegment];
+                edit = { offset: toModify.offset, length: toModify.length, content: newProperty };
+            }
+            else if (parent.children.length === 0 || lastSegment === 0) {
+                edit = { offset: parent.offset + 1, length: 0, content: parent.children.length === 0 ? newProperty : newProperty + ',' };
+            }
+            else {
+                const index = lastSegment > parent.children.length ? parent.children.length : lastSegment;
+                const previous = parent.children[index - 1];
+                edit = { offset: previous.offset + previous.length, length: 0, content: ',' + newProperty };
+            }
+            return withFormatting(text, edit, options);
+        }
+        else {
+            throw new Error(`Can not ${value === void 0 ? 'remove' : (options.isArrayInsertion ? 'insert' : 'modify')} Array index ${insertIndex} as length is not sufficient`);
+        }
+    }
+    else {
+        throw new Error(`Can not add ${typeof lastSegment !== 'number' ? 'index' : 'property'} to parent of type ${parent.type}`);
+    }
+}
+function withFormatting(text, edit, options) {
+    if (!options.formattingOptions) {
+        return [edit];
+    }
+    // apply the edit
+    let newText = applyEdit(text, edit);
+    // format the new text
+    let begin = edit.offset;
+    let end = edit.offset + edit.content.length;
+    if (edit.length === 0 || edit.content.length === 0) { // insert or remove
+        while (begin > 0 && !isEOL(newText, begin - 1)) {
+            begin--;
+        }
+        while (end < newText.length && !isEOL(newText, end)) {
+            end++;
+        }
+    }
+    const edits = format(newText, { offset: begin, length: end - begin }, { ...options.formattingOptions, keepLines: false });
+    // apply the formatting edits and track the begin and end offsets of the changes
+    for (let i = edits.length - 1; i >= 0; i--) {
+        const edit = edits[i];
+        newText = applyEdit(newText, edit);
+        begin = Math.min(begin, edit.offset);
+        end = Math.max(end, edit.offset + edit.length);
+        end += edit.content.length - edit.length;
+    }
+    // create a single edit with all changes
+    const editLength = text.length - (newText.length - end) - begin;
+    return [{ offset: begin, length: editLength, content: newText.substring(begin, end) }];
+}
+export function applyEdit(text, edit) {
+    return text.substring(0, edit.offset) + edit.content + text.substring(edit.offset + edit.length);
+}
+export function isWS(text, offset) {
+    return '\r\n \t'.indexOf(text.charAt(offset)) !== -1;
+}
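setProperty() returns edit operations ({ offset, length, content }) rather than a rewritten document, and withFormatting() re-formats only the touched range, so untouched comments and layout survive. A minimal sketch through the package's public wrappers, modify() and applyEdits():

import { modify, applyEdits } from 'jsonc-parser';

const text = '{\n  "name": "demo"\n}';

// modify() delegates to setProperty() above; the result is a list of edits,
// not a new string, which is what keeps comments and formatting intact.
const edits = modify(text, ['version'], '1.0.0', {
    formattingOptions: { insertSpaces: true, tabSize: 2 }
});

// applyEdits() splices the edits into the original text.
console.log(applyEdits(text, edits)); // adds "version": "1.0.0" after "name"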

+ 261 - 0
node_modules/jsonc-parser/lib/esm/impl/format.js

@@ -0,0 +1,261 @@
+/*---------------------------------------------------------------------------------------------
+ *  Copyright (c) Microsoft Corporation. All rights reserved.
+ *  Licensed under the MIT License. See License.txt in the project root for license information.
+ *--------------------------------------------------------------------------------------------*/
+'use strict';
+import { createScanner } from './scanner';
+import { cachedSpaces, cachedBreakLinesWithSpaces, supportedEols } from './string-intern';
+export function format(documentText, range, options) {
+    let initialIndentLevel;
+    let formatText;
+    let formatTextStart;
+    let rangeStart;
+    let rangeEnd;
+    if (range) {
+        rangeStart = range.offset;
+        rangeEnd = rangeStart + range.length;
+        formatTextStart = rangeStart;
+        while (formatTextStart > 0 && !isEOL(documentText, formatTextStart - 1)) {
+            formatTextStart--;
+        }
+        let endOffset = rangeEnd;
+        while (endOffset < documentText.length && !isEOL(documentText, endOffset)) {
+            endOffset++;
+        }
+        formatText = documentText.substring(formatTextStart, endOffset);
+        initialIndentLevel = computeIndentLevel(formatText, options);
+    }
+    else {
+        formatText = documentText;
+        initialIndentLevel = 0;
+        formatTextStart = 0;
+        rangeStart = 0;
+        rangeEnd = documentText.length;
+    }
+    const eol = getEOL(options, documentText);
+    const eolFastPathSupported = supportedEols.includes(eol);
+    let numberLineBreaks = 0;
+    let indentLevel = 0;
+    let indentValue;
+    if (options.insertSpaces) {
+        indentValue = cachedSpaces[options.tabSize || 4] ?? repeat(cachedSpaces[1], options.tabSize || 4);
+    }
+    else {
+        indentValue = '\t';
+    }
+    const indentType = indentValue === '\t' ? '\t' : ' ';
+    let scanner = createScanner(formatText, false);
+    let hasError = false;
+    function newLinesAndIndent() {
+        if (numberLineBreaks > 1) {
+            return repeat(eol, numberLineBreaks) + repeat(indentValue, initialIndentLevel + indentLevel);
+        }
+        const amountOfSpaces = indentValue.length * (initialIndentLevel + indentLevel);
+        if (!eolFastPathSupported || amountOfSpaces > cachedBreakLinesWithSpaces[indentType][eol].length) {
+            return eol + repeat(indentValue, initialIndentLevel + indentLevel);
+        }
+        if (amountOfSpaces <= 0) {
+            return eol;
+        }
+        return cachedBreakLinesWithSpaces[indentType][eol][amountOfSpaces];
+    }
+    function scanNext() {
+        let token = scanner.scan();
+        numberLineBreaks = 0;
+        while (token === 15 /* SyntaxKind.Trivia */ || token === 14 /* SyntaxKind.LineBreakTrivia */) {
+            if (token === 14 /* SyntaxKind.LineBreakTrivia */ && options.keepLines) {
+                numberLineBreaks += 1;
+            }
+            else if (token === 14 /* SyntaxKind.LineBreakTrivia */) {
+                numberLineBreaks = 1;
+            }
+            token = scanner.scan();
+        }
+        hasError = token === 16 /* SyntaxKind.Unknown */ || scanner.getTokenError() !== 0 /* ScanError.None */;
+        return token;
+    }
+    const editOperations = [];
+    function addEdit(text, startOffset, endOffset) {
+        if (!hasError && (!range || (startOffset < rangeEnd && endOffset > rangeStart)) && documentText.substring(startOffset, endOffset) !== text) {
+            editOperations.push({ offset: startOffset, length: endOffset - startOffset, content: text });
+        }
+    }
+    let firstToken = scanNext();
+    if (options.keepLines && numberLineBreaks > 0) {
+        addEdit(repeat(eol, numberLineBreaks), 0, 0);
+    }
+    if (firstToken !== 17 /* SyntaxKind.EOF */) {
+        let firstTokenStart = scanner.getTokenOffset() + formatTextStart;
+        let initialIndent = (indentValue.length * initialIndentLevel < 20) && options.insertSpaces
+            ? cachedSpaces[indentValue.length * initialIndentLevel]
+            : repeat(indentValue, initialIndentLevel);
+        addEdit(initialIndent, formatTextStart, firstTokenStart);
+    }
+    while (firstToken !== 17 /* SyntaxKind.EOF */) {
+        let firstTokenEnd = scanner.getTokenOffset() + scanner.getTokenLength() + formatTextStart;
+        let secondToken = scanNext();
+        let replaceContent = '';
+        let needsLineBreak = false;
+        while (numberLineBreaks === 0 && (secondToken === 12 /* SyntaxKind.LineCommentTrivia */ || secondToken === 13 /* SyntaxKind.BlockCommentTrivia */)) {
+            let commentTokenStart = scanner.getTokenOffset() + formatTextStart;
+            addEdit(cachedSpaces[1], firstTokenEnd, commentTokenStart);
+            firstTokenEnd = scanner.getTokenOffset() + scanner.getTokenLength() + formatTextStart;
+            needsLineBreak = secondToken === 12 /* SyntaxKind.LineCommentTrivia */;
+            replaceContent = needsLineBreak ? newLinesAndIndent() : '';
+            secondToken = scanNext();
+        }
+        if (secondToken === 2 /* SyntaxKind.CloseBraceToken */) {
+            if (firstToken !== 1 /* SyntaxKind.OpenBraceToken */) {
+                indentLevel--;
+            }
+            ;
+            if (options.keepLines && numberLineBreaks > 0 || !options.keepLines && firstToken !== 1 /* SyntaxKind.OpenBraceToken */) {
+                replaceContent = newLinesAndIndent();
+            }
+            else if (options.keepLines) {
+                replaceContent = cachedSpaces[1];
+            }
+        }
+        else if (secondToken === 4 /* SyntaxKind.CloseBracketToken */) {
+            if (firstToken !== 3 /* SyntaxKind.OpenBracketToken */) {
+                indentLevel--;
+            }
+            ;
+            if (options.keepLines && numberLineBreaks > 0 || !options.keepLines && firstToken !== 3 /* SyntaxKind.OpenBracketToken */) {
+                replaceContent = newLinesAndIndent();
+            }
+            else if (options.keepLines) {
+                replaceContent = cachedSpaces[1];
+            }
+        }
+        else {
+            switch (firstToken) {
+                case 3 /* SyntaxKind.OpenBracketToken */:
+                case 1 /* SyntaxKind.OpenBraceToken */:
+                    indentLevel++;
+                    if (options.keepLines && numberLineBreaks > 0 || !options.keepLines) {
+                        replaceContent = newLinesAndIndent();
+                    }
+                    else {
+                        replaceContent = cachedSpaces[1];
+                    }
+                    break;
+                case 5 /* SyntaxKind.CommaToken */:
+                    if (options.keepLines && numberLineBreaks > 0 || !options.keepLines) {
+                        replaceContent = newLinesAndIndent();
+                    }
+                    else {
+                        replaceContent = cachedSpaces[1];
+                    }
+                    break;
+                case 12 /* SyntaxKind.LineCommentTrivia */:
+                    replaceContent = newLinesAndIndent();
+                    break;
+                case 13 /* SyntaxKind.BlockCommentTrivia */:
+                    if (numberLineBreaks > 0) {
+                        replaceContent = newLinesAndIndent();
+                    }
+                    else if (!needsLineBreak) {
+                        replaceContent = cachedSpaces[1];
+                    }
+                    break;
+                case 6 /* SyntaxKind.ColonToken */:
+                    if (options.keepLines && numberLineBreaks > 0) {
+                        replaceContent = newLinesAndIndent();
+                    }
+                    else if (!needsLineBreak) {
+                        replaceContent = cachedSpaces[1];
+                    }
+                    break;
+                case 10 /* SyntaxKind.StringLiteral */:
+                    if (options.keepLines && numberLineBreaks > 0) {
+                        replaceContent = newLinesAndIndent();
+                    }
+                    else if (secondToken === 6 /* SyntaxKind.ColonToken */ && !needsLineBreak) {
+                        replaceContent = '';
+                    }
+                    break;
+                case 7 /* SyntaxKind.NullKeyword */:
+                case 8 /* SyntaxKind.TrueKeyword */:
+                case 9 /* SyntaxKind.FalseKeyword */:
+                case 11 /* SyntaxKind.NumericLiteral */:
+                case 2 /* SyntaxKind.CloseBraceToken */:
+                case 4 /* SyntaxKind.CloseBracketToken */:
+                    if (options.keepLines && numberLineBreaks > 0) {
+                        replaceContent = newLinesAndIndent();
+                    }
+                    else {
+                        if ((secondToken === 12 /* SyntaxKind.LineCommentTrivia */ || secondToken === 13 /* SyntaxKind.BlockCommentTrivia */) && !needsLineBreak) {
+                            replaceContent = cachedSpaces[1];
+                        }
+                        else if (secondToken !== 5 /* SyntaxKind.CommaToken */ && secondToken !== 17 /* SyntaxKind.EOF */) {
+                            hasError = true;
+                        }
+                    }
+                    break;
+                case 16 /* SyntaxKind.Unknown */:
+                    hasError = true;
+                    break;
+            }
+            if (numberLineBreaks > 0 && (secondToken === 12 /* SyntaxKind.LineCommentTrivia */ || secondToken === 13 /* SyntaxKind.BlockCommentTrivia */)) {
+                replaceContent = newLinesAndIndent();
+            }
+        }
+        if (secondToken === 17 /* SyntaxKind.EOF */) {
+            if (options.keepLines && numberLineBreaks > 0) {
+                replaceContent = newLinesAndIndent();
+            }
+            else {
+                replaceContent = options.insertFinalNewline ? eol : '';
+            }
+        }
+        const secondTokenStart = scanner.getTokenOffset() + formatTextStart;
+        addEdit(replaceContent, firstTokenEnd, secondTokenStart);
+        firstToken = secondToken;
+    }
+    return editOperations;
+}
+function repeat(s, count) {
+    let result = '';
+    for (let i = 0; i < count; i++) {
+        result += s;
+    }
+    return result;
+}
+function computeIndentLevel(content, options) {
+    let i = 0;
+    let nChars = 0;
+    const tabSize = options.tabSize || 4;
+    while (i < content.length) {
+        let ch = content.charAt(i);
+        if (ch === cachedSpaces[1]) {
+            nChars++;
+        }
+        else if (ch === '\t') {
+            nChars += tabSize;
+        }
+        else {
+            break;
+        }
+        i++;
+    }
+    return Math.floor(nChars / tabSize);
+}
+function getEOL(options, text) {
+    for (let i = 0; i < text.length; i++) {
+        const ch = text.charAt(i);
+        if (ch === '\r') {
+            if (i + 1 < text.length && text.charAt(i + 1) === '\n') {
+                return '\r\n';
+            }
+            return '\r';
+        }
+        else if (ch === '\n') {
+            return '\n';
+        }
+    }
+    return (options && options.eol) || '\n';
+}
+export function isEOL(text, offset) {
+    return '\r\n'.indexOf(text.charAt(offset)) !== -1;
+}
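format() likewise only computes edit operations; applying them is left to the caller. A short sketch via the public entry point — passing undefined as the range formats the whole document:

import { format, applyEdits } from 'jsonc-parser';

const text = '{"compilerOptions":{"strict":true}, // comment\n"files":[]}';

// The third argument mirrors the FormattingOptions read by format() above.
const edits = format(text, undefined, { tabSize: 2, insertSpaces: true, eol: '\n' });

console.log(applyEdits(text, edits));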

+ 178 - 0
node_modules/jsonc-parser/lib/esm/main.js

@@ -0,0 +1,178 @@
+/*---------------------------------------------------------------------------------------------
+ *  Copyright (c) Microsoft Corporation. All rights reserved.
+ *  Licensed under the MIT License. See License.txt in the project root for license information.
+ *--------------------------------------------------------------------------------------------*/
+'use strict';
+import * as formatter from './impl/format';
+import * as edit from './impl/edit';
+import * as scanner from './impl/scanner';
+import * as parser from './impl/parser';
+/**
+ * Creates a JSON scanner on the given text.
+ * If ignoreTrivia is set, whitespace and comments are ignored.
+ */
+export const createScanner = scanner.createScanner;
+export var ScanError;
+(function (ScanError) {
+    ScanError[ScanError["None"] = 0] = "None";
+    ScanError[ScanError["UnexpectedEndOfComment"] = 1] = "UnexpectedEndOfComment";
+    ScanError[ScanError["UnexpectedEndOfString"] = 2] = "UnexpectedEndOfString";
+    ScanError[ScanError["UnexpectedEndOfNumber"] = 3] = "UnexpectedEndOfNumber";
+    ScanError[ScanError["InvalidUnicode"] = 4] = "InvalidUnicode";
+    ScanError[ScanError["InvalidEscapeCharacter"] = 5] = "InvalidEscapeCharacter";
+    ScanError[ScanError["InvalidCharacter"] = 6] = "InvalidCharacter";
+})(ScanError || (ScanError = {}));
+export var SyntaxKind;
+(function (SyntaxKind) {
+    SyntaxKind[SyntaxKind["OpenBraceToken"] = 1] = "OpenBraceToken";
+    SyntaxKind[SyntaxKind["CloseBraceToken"] = 2] = "CloseBraceToken";
+    SyntaxKind[SyntaxKind["OpenBracketToken"] = 3] = "OpenBracketToken";
+    SyntaxKind[SyntaxKind["CloseBracketToken"] = 4] = "CloseBracketToken";
+    SyntaxKind[SyntaxKind["CommaToken"] = 5] = "CommaToken";
+    SyntaxKind[SyntaxKind["ColonToken"] = 6] = "ColonToken";
+    SyntaxKind[SyntaxKind["NullKeyword"] = 7] = "NullKeyword";
+    SyntaxKind[SyntaxKind["TrueKeyword"] = 8] = "TrueKeyword";
+    SyntaxKind[SyntaxKind["FalseKeyword"] = 9] = "FalseKeyword";
+    SyntaxKind[SyntaxKind["StringLiteral"] = 10] = "StringLiteral";
+    SyntaxKind[SyntaxKind["NumericLiteral"] = 11] = "NumericLiteral";
+    SyntaxKind[SyntaxKind["LineCommentTrivia"] = 12] = "LineCommentTrivia";
+    SyntaxKind[SyntaxKind["BlockCommentTrivia"] = 13] = "BlockCommentTrivia";
+    SyntaxKind[SyntaxKind["LineBreakTrivia"] = 14] = "LineBreakTrivia";
+    SyntaxKind[SyntaxKind["Trivia"] = 15] = "Trivia";
+    SyntaxKind[SyntaxKind["Unknown"] = 16] = "Unknown";
+    SyntaxKind[SyntaxKind["EOF"] = 17] = "EOF";
+})(SyntaxKind || (SyntaxKind = {}));
+/**
+ * For a given offset, evaluate the location in the JSON document. Each segment in the location path is either a property name or an array index.
+ */
+export const getLocation = parser.getLocation;
+/**
+ * Parses the given text and returns the object the JSON content represents. On invalid input, the parser tries to be as fault tolerant as possible and still returns a result.
+ * Therefore, always check the errors list to find out if the input was valid.
+ */
+export const parse = parser.parse;
+/**
+ * Parses the given text and returns a tree representation of the JSON content. On invalid input, the parser tries to be as fault tolerant as possible and still returns a result.
+ */
+export const parseTree = parser.parseTree;
+/**
+ * Finds the node at the given path in a JSON DOM.
+ */
+export const findNodeAtLocation = parser.findNodeAtLocation;
+/**
+ * Finds the innermost node at the given offset. If includeRightBound is set, also finds nodes that end at the given offset.
+ */
+export const findNodeAtOffset = parser.findNodeAtOffset;
+/**
+ * Gets the JSON path of the given JSON DOM node
+ */
+export const getNodePath = parser.getNodePath;
+/**
+ * Evaluates the JavaScript object of the given JSON DOM node
+ */
+export const getNodeValue = parser.getNodeValue;
+/**
+ * Parses the given text and invokes the visitor functions for each object, array and literal reached.
+ */
+export const visit = parser.visit;
+/**
+ * Takes JSON with JavaScript-style comments and removes
+ * them. Optionally replaces every non-newline character
+ * of comments with a replaceCharacter.
+ */
+export const stripComments = parser.stripComments;
+export var ParseErrorCode;
+(function (ParseErrorCode) {
+    ParseErrorCode[ParseErrorCode["InvalidSymbol"] = 1] = "InvalidSymbol";
+    ParseErrorCode[ParseErrorCode["InvalidNumberFormat"] = 2] = "InvalidNumberFormat";
+    ParseErrorCode[ParseErrorCode["PropertyNameExpected"] = 3] = "PropertyNameExpected";
+    ParseErrorCode[ParseErrorCode["ValueExpected"] = 4] = "ValueExpected";
+    ParseErrorCode[ParseErrorCode["ColonExpected"] = 5] = "ColonExpected";
+    ParseErrorCode[ParseErrorCode["CommaExpected"] = 6] = "CommaExpected";
+    ParseErrorCode[ParseErrorCode["CloseBraceExpected"] = 7] = "CloseBraceExpected";
+    ParseErrorCode[ParseErrorCode["CloseBracketExpected"] = 8] = "CloseBracketExpected";
+    ParseErrorCode[ParseErrorCode["EndOfFileExpected"] = 9] = "EndOfFileExpected";
+    ParseErrorCode[ParseErrorCode["InvalidCommentToken"] = 10] = "InvalidCommentToken";
+    ParseErrorCode[ParseErrorCode["UnexpectedEndOfComment"] = 11] = "UnexpectedEndOfComment";
+    ParseErrorCode[ParseErrorCode["UnexpectedEndOfString"] = 12] = "UnexpectedEndOfString";
+    ParseErrorCode[ParseErrorCode["UnexpectedEndOfNumber"] = 13] = "UnexpectedEndOfNumber";
+    ParseErrorCode[ParseErrorCode["InvalidUnicode"] = 14] = "InvalidUnicode";
+    ParseErrorCode[ParseErrorCode["InvalidEscapeCharacter"] = 15] = "InvalidEscapeCharacter";
+    ParseErrorCode[ParseErrorCode["InvalidCharacter"] = 16] = "InvalidCharacter";
+})(ParseErrorCode || (ParseErrorCode = {}));
+export function printParseErrorCode(code) {
+    switch (code) {
+        case 1 /* ParseErrorCode.InvalidSymbol */: return 'InvalidSymbol';
+        case 2 /* ParseErrorCode.InvalidNumberFormat */: return 'InvalidNumberFormat';
+        case 3 /* ParseErrorCode.PropertyNameExpected */: return 'PropertyNameExpected';
+        case 4 /* ParseErrorCode.ValueExpected */: return 'ValueExpected';
+        case 5 /* ParseErrorCode.ColonExpected */: return 'ColonExpected';
+        case 6 /* ParseErrorCode.CommaExpected */: return 'CommaExpected';
+        case 7 /* ParseErrorCode.CloseBraceExpected */: return 'CloseBraceExpected';
+        case 8 /* ParseErrorCode.CloseBracketExpected */: return 'CloseBracketExpected';
+        case 9 /* ParseErrorCode.EndOfFileExpected */: return 'EndOfFileExpected';
+        case 10 /* ParseErrorCode.InvalidCommentToken */: return 'InvalidCommentToken';
+        case 11 /* ParseErrorCode.UnexpectedEndOfComment */: return 'UnexpectedEndOfComment';
+        case 12 /* ParseErrorCode.UnexpectedEndOfString */: return 'UnexpectedEndOfString';
+        case 13 /* ParseErrorCode.UnexpectedEndOfNumber */: return 'UnexpectedEndOfNumber';
+        case 14 /* ParseErrorCode.InvalidUnicode */: return 'InvalidUnicode';
+        case 15 /* ParseErrorCode.InvalidEscapeCharacter */: return 'InvalidEscapeCharacter';
+        case 16 /* ParseErrorCode.InvalidCharacter */: return 'InvalidCharacter';
+    }
+    return '<unknown ParseErrorCode>';
+}
+/**
+ * Computes the edit operations needed to format a JSON document.
+ *
+ * @param documentText The input text
+ * @param range The range to format or `undefined` to format the full content
+ * @param options The formatting options
+ * @returns The edit operations describing the formatting changes to the original document following the format described in {@linkcode EditResult}.
+ * To apply the edit operations to the input, use {@linkcode applyEdits}.
+ */
+export function format(documentText, range, options) {
+    return formatter.format(documentText, range, options);
+}
+/**
+ * Computes the edit operations needed to modify a value in the JSON document.
+ *
+ * @param documentText The input text
+ * @param path The path of the value to change. The path represents either the document root, a property or an array item.
+ * If the path points to a non-existent property or item, it will be created.
+ * @param value The new value for the specified property or item. If the value is undefined,
+ * the property or item will be removed.
+ * @param options Options
+ * @returns The edit operations describing the changes to the original document, following the format described in {@linkcode EditResult}.
+ * To apply the edit operations to the input, use {@linkcode applyEdits}.
+ */
+export function modify(text, path, value, options) {
+    return edit.setProperty(text, path, value, options);
+}
+/**
+ * Applies edits to an input string.
+ * @param text The input text
+ * @param edits Edit operations following the format described in {@linkcode EditResult}.
+ * @returns The text with the applied edits.
+ * @throws An error if the edit operations are not well-formed as described in {@linkcode EditResult}.
+ */
+export function applyEdits(text, edits) {
+    let sortedEdits = edits.slice(0).sort((a, b) => {
+        const diff = a.offset - b.offset;
+        if (diff === 0) {
+            return a.length - b.length;
+        }
+        return diff;
+    });
+    let lastModifiedOffset = text.length;
+    for (let i = sortedEdits.length - 1; i >= 0; i--) {
+        let e = sortedEdits[i];
+        if (e.offset + e.length <= lastModifiedOffset) {
+            text = edit.applyEdit(text, e);
+        }
+        else {
+            throw new Error('Overlapping edit');
+        }
+        lastModifiedOffset = e.offset;
+    }
+    return text;
+}
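Taken together, the exported helpers cover a typical read path: parse() for a plain value, parseTree() and findNodeAtLocation() when offsets are needed (diagnostics, edits). A minimal sketch:

import { parse, parseTree, findNodeAtLocation, printParseErrorCode } from 'jsonc-parser';

const text = '{ "servers": [ { "port": 8080 } ] /* comment */ }';
const errors = [];

// parse() is fault tolerant: problems are reported through the errors array
// instead of being thrown, so always inspect it on untrusted input.
const value = parse(text, errors, { allowTrailingComma: true });
errors.forEach(e => console.log(printParseErrorCode(e.error), 'at offset', e.offset));

// The tree view exposes the same data with offsets and lengths attached.
const root = parseTree(text);
const portNode = findNodeAtLocation(root, ['servers', 0, 'port']);
console.log(value.servers[0].port, portNode && portNode.value); // 8080 8080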

+ 201 - 0
node_modules/jsonc-parser/lib/umd/impl/edit.js

@@ -0,0 +1,201 @@
+(function (factory) {
+    if (typeof module === "object" && typeof module.exports === "object") {
+        var v = factory(require, exports);
+        if (v !== undefined) module.exports = v;
+    }
+    else if (typeof define === "function" && define.amd) {
+        define(["require", "exports", "./format", "./parser"], factory);
+    }
+})(function (require, exports) {
+    /*---------------------------------------------------------------------------------------------
+     *  Copyright (c) Microsoft Corporation. All rights reserved.
+     *  Licensed under the MIT License. See License.txt in the project root for license information.
+     *--------------------------------------------------------------------------------------------*/
+    'use strict';
+    Object.defineProperty(exports, "__esModule", { value: true });
+    exports.isWS = exports.applyEdit = exports.setProperty = exports.removeProperty = void 0;
+    const format_1 = require("./format");
+    const parser_1 = require("./parser");
+    function removeProperty(text, path, options) {
+        return setProperty(text, path, void 0, options);
+    }
+    exports.removeProperty = removeProperty;
+    function setProperty(text, originalPath, value, options) {
+        const path = originalPath.slice();
+        const errors = [];
+        const root = (0, parser_1.parseTree)(text, errors);
+        let parent = void 0;
+        let lastSegment = void 0;
+        while (path.length > 0) {
+            lastSegment = path.pop();
+            parent = (0, parser_1.findNodeAtLocation)(root, path);
+            if (parent === void 0 && value !== void 0) {
+                if (typeof lastSegment === 'string') {
+                    value = { [lastSegment]: value };
+                }
+                else {
+                    value = [value];
+                }
+            }
+            else {
+                break;
+            }
+        }
+        if (!parent) {
+            // empty document
+            if (value === void 0) { // delete
+                throw new Error('Can not delete in empty document');
+            }
+            return withFormatting(text, { offset: root ? root.offset : 0, length: root ? root.length : 0, content: JSON.stringify(value) }, options);
+        }
+        else if (parent.type === 'object' && typeof lastSegment === 'string' && Array.isArray(parent.children)) {
+            const existing = (0, parser_1.findNodeAtLocation)(parent, [lastSegment]);
+            if (existing !== void 0) {
+                if (value === void 0) { // delete
+                    if (!existing.parent) {
+                        throw new Error('Malformed AST');
+                    }
+                    const propertyIndex = parent.children.indexOf(existing.parent);
+                    let removeBegin;
+                    let removeEnd = existing.parent.offset + existing.parent.length;
+                    if (propertyIndex > 0) {
+                        // remove the comma of the previous node
+                        let previous = parent.children[propertyIndex - 1];
+                        removeBegin = previous.offset + previous.length;
+                    }
+                    else {
+                        removeBegin = parent.offset + 1;
+                        if (parent.children.length > 1) {
+                            // remove the comma of the next node
+                            let next = parent.children[1];
+                            removeEnd = next.offset;
+                        }
+                    }
+                    return withFormatting(text, { offset: removeBegin, length: removeEnd - removeBegin, content: '' }, options);
+                }
+                else {
+                    // set value of existing property
+                    return withFormatting(text, { offset: existing.offset, length: existing.length, content: JSON.stringify(value) }, options);
+                }
+            }
+            else {
+                if (value === void 0) { // delete
+                    return []; // property does not exist, nothing to do
+                }
+                const newProperty = `${JSON.stringify(lastSegment)}: ${JSON.stringify(value)}`;
+                const index = options.getInsertionIndex ? options.getInsertionIndex(parent.children.map(p => p.children[0].value)) : parent.children.length;
+                let edit;
+                if (index > 0) {
+                    let previous = parent.children[index - 1];
+                    edit = { offset: previous.offset + previous.length, length: 0, content: ',' + newProperty };
+                }
+                else if (parent.children.length === 0) {
+                    edit = { offset: parent.offset + 1, length: 0, content: newProperty };
+                }
+                else {
+                    edit = { offset: parent.offset + 1, length: 0, content: newProperty + ',' };
+                }
+                return withFormatting(text, edit, options);
+            }
+        }
+        else if (parent.type === 'array' && typeof lastSegment === 'number' && Array.isArray(parent.children)) {
+            const insertIndex = lastSegment;
+            if (insertIndex === -1) {
+                // Insert
+                const newProperty = `${JSON.stringify(value)}`;
+                let edit;
+                if (parent.children.length === 0) {
+                    edit = { offset: parent.offset + 1, length: 0, content: newProperty };
+                }
+                else {
+                    const previous = parent.children[parent.children.length - 1];
+                    edit = { offset: previous.offset + previous.length, length: 0, content: ',' + newProperty };
+                }
+                return withFormatting(text, edit, options);
+            }
+            else if (value === void 0 && parent.children.length >= 0) {
+                // Removal
+                const removalIndex = lastSegment;
+                const toRemove = parent.children[removalIndex];
+                let edit;
+                if (parent.children.length === 1) {
+                    // only item
+                    edit = { offset: parent.offset + 1, length: parent.length - 2, content: '' };
+                }
+                else if (parent.children.length - 1 === removalIndex) {
+                    // last item
+                    let previous = parent.children[removalIndex - 1];
+                    let offset = previous.offset + previous.length;
+                    let parentEndOffset = parent.offset + parent.length;
+                    edit = { offset, length: parentEndOffset - 2 - offset, content: '' };
+                }
+                else {
+                    edit = { offset: toRemove.offset, length: parent.children[removalIndex + 1].offset - toRemove.offset, content: '' };
+                }
+                return withFormatting(text, edit, options);
+            }
+            else if (value !== void 0) {
+                let edit;
+                const newProperty = `${JSON.stringify(value)}`;
+                if (!options.isArrayInsertion && parent.children.length > lastSegment) {
+                    const toModify = parent.children[lastSegment];
+                    edit = { offset: toModify.offset, length: toModify.length, content: newProperty };
+                }
+                else if (parent.children.length === 0 || lastSegment === 0) {
+                    edit = { offset: parent.offset + 1, length: 0, content: parent.children.length === 0 ? newProperty : newProperty + ',' };
+                }
+                else {
+                    const index = lastSegment > parent.children.length ? parent.children.length : lastSegment;
+                    const previous = parent.children[index - 1];
+                    edit = { offset: previous.offset + previous.length, length: 0, content: ',' + newProperty };
+                }
+                return withFormatting(text, edit, options);
+            }
+            else {
+                throw new Error(`Can not ${value === void 0 ? 'remove' : (options.isArrayInsertion ? 'insert' : 'modify')} Array index ${insertIndex} as length is not sufficient`);
+            }
+        }
+        else {
+            throw new Error(`Can not add ${typeof lastSegment !== 'number' ? 'index' : 'property'} to parent of type ${parent.type}`);
+        }
+    }
+    exports.setProperty = setProperty;
+    function withFormatting(text, edit, options) {
+        if (!options.formattingOptions) {
+            return [edit];
+        }
+        // apply the edit
+        let newText = applyEdit(text, edit);
+        // format the new text
+        let begin = edit.offset;
+        let end = edit.offset + edit.content.length;
+        if (edit.length === 0 || edit.content.length === 0) { // insert or remove
+            while (begin > 0 && !(0, format_1.isEOL)(newText, begin - 1)) {
+                begin--;
+            }
+            while (end < newText.length && !(0, format_1.isEOL)(newText, end)) {
+                end++;
+            }
+        }
+        const edits = (0, format_1.format)(newText, { offset: begin, length: end - begin }, { ...options.formattingOptions, keepLines: false });
+        // apply the formatting edits and track the begin and end offsets of the changes
+        for (let i = edits.length - 1; i >= 0; i--) {
+            const edit = edits[i];
+            newText = applyEdit(newText, edit);
+            begin = Math.min(begin, edit.offset);
+            end = Math.max(end, edit.offset + edit.length);
+            end += edit.content.length - edit.length;
+        }
+        // create a single edit with all changes
+        const editLength = text.length - (newText.length - end) - begin;
+        return [{ offset: begin, length: editLength, content: newText.substring(begin, end) }];
+    }
+    function applyEdit(text, edit) {
+        return text.substring(0, edit.offset) + edit.content + text.substring(edit.offset + edit.length);
+    }
+    exports.applyEdit = applyEdit;
+    function isWS(text, offset) {
+        return '\r\n \t'.indexOf(text.charAt(offset)) !== -1;
+    }
+    exports.isWS = isWS;
+});
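The UMD wrapper mirrors the ESM edit module above for CommonJS and AMD loaders; under Node the package's main entry typically resolves to the UMD build, so a plain require() is enough. A small sketch using modify() with an undefined value to remove a property:

// CommonJS consumers get the same API surface as the ESM build.
const jsonc = require('jsonc-parser');

const text = '{ "a": 1, "b": 2 }';

// Passing undefined as the value routes setProperty() into its delete branch.
const edits = jsonc.modify(text, ['a'], undefined, {
    formattingOptions: { insertSpaces: true, tabSize: 2 }
});
console.log(jsonc.applyEdits(text, edits)); // document with "a" removed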

+ 275 - 0
node_modules/jsonc-parser/lib/umd/impl/format.js

@@ -0,0 +1,275 @@
+(function (factory) {
+    if (typeof module === "object" && typeof module.exports === "object") {
+        var v = factory(require, exports);
+        if (v !== undefined) module.exports = v;
+    }
+    else if (typeof define === "function" && define.amd) {
+        define(["require", "exports", "./scanner", "./string-intern"], factory);
+    }
+})(function (require, exports) {
+    /*---------------------------------------------------------------------------------------------
+     *  Copyright (c) Microsoft Corporation. All rights reserved.
+     *  Licensed under the MIT License. See License.txt in the project root for license information.
+     *--------------------------------------------------------------------------------------------*/
+    'use strict';
+    Object.defineProperty(exports, "__esModule", { value: true });
+    exports.isEOL = exports.format = void 0;
+    const scanner_1 = require("./scanner");
+    const string_intern_1 = require("./string-intern");
+    function format(documentText, range, options) {
+        let initialIndentLevel;
+        let formatText;
+        let formatTextStart;
+        let rangeStart;
+        let rangeEnd;
+        if (range) {
+            rangeStart = range.offset;
+            rangeEnd = rangeStart + range.length;
+            formatTextStart = rangeStart;
+            while (formatTextStart > 0 && !isEOL(documentText, formatTextStart - 1)) {
+                formatTextStart--;
+            }
+            let endOffset = rangeEnd;
+            while (endOffset < documentText.length && !isEOL(documentText, endOffset)) {
+                endOffset++;
+            }
+            formatText = documentText.substring(formatTextStart, endOffset);
+            initialIndentLevel = computeIndentLevel(formatText, options);
+        }
+        else {
+            formatText = documentText;
+            initialIndentLevel = 0;
+            formatTextStart = 0;
+            rangeStart = 0;
+            rangeEnd = documentText.length;
+        }
+        const eol = getEOL(options, documentText);
+        const eolFastPathSupported = string_intern_1.supportedEols.includes(eol);
+        let numberLineBreaks = 0;
+        let indentLevel = 0;
+        let indentValue;
+        if (options.insertSpaces) {
+            indentValue = string_intern_1.cachedSpaces[options.tabSize || 4] ?? repeat(string_intern_1.cachedSpaces[1], options.tabSize || 4);
+        }
+        else {
+            indentValue = '\t';
+        }
+        const indentType = indentValue === '\t' ? '\t' : ' ';
+        let scanner = (0, scanner_1.createScanner)(formatText, false);
+        let hasError = false;
+        function newLinesAndIndent() {
+            if (numberLineBreaks > 1) {
+                return repeat(eol, numberLineBreaks) + repeat(indentValue, initialIndentLevel + indentLevel);
+            }
+            const amountOfSpaces = indentValue.length * (initialIndentLevel + indentLevel);
+            if (!eolFastPathSupported || amountOfSpaces > string_intern_1.cachedBreakLinesWithSpaces[indentType][eol].length) {
+                return eol + repeat(indentValue, initialIndentLevel + indentLevel);
+            }
+            if (amountOfSpaces <= 0) {
+                return eol;
+            }
+            return string_intern_1.cachedBreakLinesWithSpaces[indentType][eol][amountOfSpaces];
+        }
+        function scanNext() {
+            let token = scanner.scan();
+            numberLineBreaks = 0;
+            while (token === 15 /* SyntaxKind.Trivia */ || token === 14 /* SyntaxKind.LineBreakTrivia */) {
+                if (token === 14 /* SyntaxKind.LineBreakTrivia */ && options.keepLines) {
+                    numberLineBreaks += 1;
+                }
+                else if (token === 14 /* SyntaxKind.LineBreakTrivia */) {
+                    numberLineBreaks = 1;
+                }
+                token = scanner.scan();
+            }
+            hasError = token === 16 /* SyntaxKind.Unknown */ || scanner.getTokenError() !== 0 /* ScanError.None */;
+            return token;
+        }
+        const editOperations = [];
+        function addEdit(text, startOffset, endOffset) {
+            if (!hasError && (!range || (startOffset < rangeEnd && endOffset > rangeStart)) && documentText.substring(startOffset, endOffset) !== text) {
+                editOperations.push({ offset: startOffset, length: endOffset - startOffset, content: text });
+            }
+        }
+        let firstToken = scanNext();
+        if (options.keepLines && numberLineBreaks > 0) {
+            addEdit(repeat(eol, numberLineBreaks), 0, 0);
+        }
+        if (firstToken !== 17 /* SyntaxKind.EOF */) {
+            let firstTokenStart = scanner.getTokenOffset() + formatTextStart;
+            let initialIndent = (indentValue.length * initialIndentLevel < 20) && options.insertSpaces
+                ? string_intern_1.cachedSpaces[indentValue.length * initialIndentLevel]
+                : repeat(indentValue, initialIndentLevel);
+            addEdit(initialIndent, formatTextStart, firstTokenStart);
+        }
+        while (firstToken !== 17 /* SyntaxKind.EOF */) {
+            let firstTokenEnd = scanner.getTokenOffset() + scanner.getTokenLength() + formatTextStart;
+            let secondToken = scanNext();
+            let replaceContent = '';
+            let needsLineBreak = false;
+            while (numberLineBreaks === 0 && (secondToken === 12 /* SyntaxKind.LineCommentTrivia */ || secondToken === 13 /* SyntaxKind.BlockCommentTrivia */)) {
+                let commentTokenStart = scanner.getTokenOffset() + formatTextStart;
+                addEdit(string_intern_1.cachedSpaces[1], firstTokenEnd, commentTokenStart);
+                firstTokenEnd = scanner.getTokenOffset() + scanner.getTokenLength() + formatTextStart;
+                needsLineBreak = secondToken === 12 /* SyntaxKind.LineCommentTrivia */;
+                replaceContent = needsLineBreak ? newLinesAndIndent() : '';
+                secondToken = scanNext();
+            }
+            if (secondToken === 2 /* SyntaxKind.CloseBraceToken */) {
+                if (firstToken !== 1 /* SyntaxKind.OpenBraceToken */) {
+                    indentLevel--;
+                }
+                ;
+                if (options.keepLines && numberLineBreaks > 0 || !options.keepLines && firstToken !== 1 /* SyntaxKind.OpenBraceToken */) {
+                    replaceContent = newLinesAndIndent();
+                }
+                else if (options.keepLines) {
+                    replaceContent = string_intern_1.cachedSpaces[1];
+                }
+            }
+            else if (secondToken === 4 /* SyntaxKind.CloseBracketToken */) {
+                if (firstToken !== 3 /* SyntaxKind.OpenBracketToken */) {
+                    indentLevel--;
+                }
+                ;
+                if (options.keepLines && numberLineBreaks > 0 || !options.keepLines && firstToken !== 3 /* SyntaxKind.OpenBracketToken */) {
+                    replaceContent = newLinesAndIndent();
+                }
+                else if (options.keepLines) {
+                    replaceContent = string_intern_1.cachedSpaces[1];
+                }
+            }
+            else {
+                switch (firstToken) {
+                    case 3 /* SyntaxKind.OpenBracketToken */:
+                    case 1 /* SyntaxKind.OpenBraceToken */:
+                        indentLevel++;
+                        if (options.keepLines && numberLineBreaks > 0 || !options.keepLines) {
+                            replaceContent = newLinesAndIndent();
+                        }
+                        else {
+                            replaceContent = string_intern_1.cachedSpaces[1];
+                        }
+                        break;
+                    case 5 /* SyntaxKind.CommaToken */:
+                        if (options.keepLines && numberLineBreaks > 0 || !options.keepLines) {
+                            replaceContent = newLinesAndIndent();
+                        }
+                        else {
+                            replaceContent = string_intern_1.cachedSpaces[1];
+                        }
+                        break;
+                    case 12 /* SyntaxKind.LineCommentTrivia */:
+                        replaceContent = newLinesAndIndent();
+                        break;
+                    case 13 /* SyntaxKind.BlockCommentTrivia */:
+                        if (numberLineBreaks > 0) {
+                            replaceContent = newLinesAndIndent();
+                        }
+                        else if (!needsLineBreak) {
+                            replaceContent = string_intern_1.cachedSpaces[1];
+                        }
+                        break;
+                    case 6 /* SyntaxKind.ColonToken */:
+                        if (options.keepLines && numberLineBreaks > 0) {
+                            replaceContent = newLinesAndIndent();
+                        }
+                        else if (!needsLineBreak) {
+                            replaceContent = string_intern_1.cachedSpaces[1];
+                        }
+                        break;
+                    case 10 /* SyntaxKind.StringLiteral */:
+                        if (options.keepLines && numberLineBreaks > 0) {
+                            replaceContent = newLinesAndIndent();
+                        }
+                        else if (secondToken === 6 /* SyntaxKind.ColonToken */ && !needsLineBreak) {
+                            replaceContent = '';
+                        }
+                        break;
+                    case 7 /* SyntaxKind.NullKeyword */:
+                    case 8 /* SyntaxKind.TrueKeyword */:
+                    case 9 /* SyntaxKind.FalseKeyword */:
+                    case 11 /* SyntaxKind.NumericLiteral */:
+                    case 2 /* SyntaxKind.CloseBraceToken */:
+                    case 4 /* SyntaxKind.CloseBracketToken */:
+                        if (options.keepLines && numberLineBreaks > 0) {
+                            replaceContent = newLinesAndIndent();
+                        }
+                        else {
+                            if ((secondToken === 12 /* SyntaxKind.LineCommentTrivia */ || secondToken === 13 /* SyntaxKind.BlockCommentTrivia */) && !needsLineBreak) {
+                                replaceContent = string_intern_1.cachedSpaces[1];
+                            }
+                            else if (secondToken !== 5 /* SyntaxKind.CommaToken */ && secondToken !== 17 /* SyntaxKind.EOF */) {
+                                hasError = true;
+                            }
+                        }
+                        break;
+                    case 16 /* SyntaxKind.Unknown */:
+                        hasError = true;
+                        break;
+                }
+                if (numberLineBreaks > 0 && (secondToken === 12 /* SyntaxKind.LineCommentTrivia */ || secondToken === 13 /* SyntaxKind.BlockCommentTrivia */)) {
+                    replaceContent = newLinesAndIndent();
+                }
+            }
+            if (secondToken === 17 /* SyntaxKind.EOF */) {
+                if (options.keepLines && numberLineBreaks > 0) {
+                    replaceContent = newLinesAndIndent();
+                }
+                else {
+                    replaceContent = options.insertFinalNewline ? eol : '';
+                }
+            }
+            const secondTokenStart = scanner.getTokenOffset() + formatTextStart;
+            addEdit(replaceContent, firstTokenEnd, secondTokenStart);
+            firstToken = secondToken;
+        }
+        return editOperations;
+    }
+    exports.format = format;
+    function repeat(s, count) {
+        let result = '';
+        for (let i = 0; i < count; i++) {
+            result += s;
+        }
+        return result;
+    }
+    function computeIndentLevel(content, options) {
+        let i = 0;
+        let nChars = 0;
+        const tabSize = options.tabSize || 4;
+        while (i < content.length) {
+            let ch = content.charAt(i);
+            if (ch === string_intern_1.cachedSpaces[1]) {
+                nChars++;
+            }
+            else if (ch === '\t') {
+                nChars += tabSize;
+            }
+            else {
+                break;
+            }
+            i++;
+        }
+        return Math.floor(nChars / tabSize);
+    }
+    function getEOL(options, text) {
+        for (let i = 0; i < text.length; i++) {
+            const ch = text.charAt(i);
+            if (ch === '\r') {
+                if (i + 1 < text.length && text.charAt(i + 1) === '\n') {
+                    return '\r\n';
+                }
+                return '\r';
+            }
+            else if (ch === '\n') {
+                return '\n';
+            }
+        }
+        return (options && options.eol) || '\n';
+    }
+    function isEOL(text, offset) {
+        return '\r\n'.indexOf(text.charAt(offset)) !== -1;
+    }
+    exports.isEOL = isEOL;
+});

+ 171 - 0
node_modules/jsonfile/CHANGELOG.md

@@ -0,0 +1,171 @@
+6.1.0 / 2020-10-31
+------------------
+
+- Add `finalEOL` option to disable writing final EOL ([#115](https://github.com/jprichardson/node-jsonfile/issues/115), [#137](https://github.com/jprichardson/node-jsonfile/pull/137))
+- Update dependency ([#138](https://github.com/jprichardson/node-jsonfile/pull/138))
+
+6.0.1 / 2020-03-07
+------------------
+
+- Update dependency ([#130](https://github.com/jprichardson/node-jsonfile/pull/130))
+- Fix code style ([#129](https://github.com/jprichardson/node-jsonfile/pull/129))
+
+6.0.0 / 2020-02-24
+------------------
+
+- **BREAKING:** Drop support for Node 6 & 8 ([#128](https://github.com/jprichardson/node-jsonfile/pull/128))
+- **BREAKING:** Do not allow passing `null` as options to `readFile()` or `writeFile()` ([#128](https://github.com/jprichardson/node-jsonfile/pull/128))
+- Refactor internals ([#128](https://github.com/jprichardson/node-jsonfile/pull/128))
+
+5.0.0 / 2018-09-08
+------------------
+
+- **BREAKING:** Drop Node 4 support
+- **BREAKING:** If no callback is passed to an asynchronous method, a promise is now returned ([#109](https://github.com/jprichardson/node-jsonfile/pull/109))
+- Cleanup docs
+
+4.0.0 / 2017-07-12
+------------------
+
+- **BREAKING:** Remove global `spaces` option.
+- **BREAKING:** Drop support for Node 0.10, 0.12, and io.js.
+- Remove undocumented `passParsingErrors` option.
+- Added `EOL` override option to `writeFile` when using `spaces`. [#89]
+
+3.0.1 / 2017-07-05
+------------------
+
+- Fixed bug in `writeFile` when there was a serialization error & no callback was passed. In previous versions, an empty file would be written; now no file is written.
+
+3.0.0 / 2017-04-25
+------------------
+
+- Changed behavior of `throws` option for `readFileSync`; now does not throw filesystem errors when `throws` is `false`
+
+2.4.0 / 2016-09-15
+------------------
+### Changed
+- added optional support for `graceful-fs` [#62]
+
+2.3.1 / 2016-05-13
+------------------
+- fix to support BOM. [#45][#45]
+
+2.3.0 / 2016-04-16
+------------------
+- add `throws` to `readFile()`. See [#39][#39]
+- add support for any arbitrary `fs` module. Useful with [mock-fs](https://www.npmjs.com/package/mock-fs)
+
+2.2.3 / 2015-10-14
+------------------
+- include file name in parse error. See: https://github.com/jprichardson/node-jsonfile/pull/34
+
+2.2.2 / 2015-09-16
+------------------
+- split out tests into separate files
+- fixed `throws` when set to `true` in `readFileSync()`. See: https://github.com/jprichardson/node-jsonfile/pull/33
+
+2.2.1 / 2015-06-25
+------------------
+- fixed regression when passing in string as encoding for options in `writeFile()` and `writeFileSync()`. See: https://github.com/jprichardson/node-jsonfile/issues/28
+
+2.2.0 / 2015-06-25
+------------------
+- added `options.spaces` to `writeFile()` and `writeFileSync()`
+
+2.1.2 / 2015-06-22
+------------------
+- fixed if passed `readFileSync(file, 'utf8')`. See: https://github.com/jprichardson/node-jsonfile/issues/25
+
+2.1.1 / 2015-06-19
+------------------
+- fixed regressions if `null` is passed for options. See: https://github.com/jprichardson/node-jsonfile/issues/24
+
+2.1.0 / 2015-06-19
+------------------
+- cleanup: JavaScript Standard Style, rename files, dropped terst for assert
+- methods now support JSON revivers/replacers
+
+2.0.1 / 2015-05-24
+------------------
+- update license attribute https://github.com/jprichardson/node-jsonfile/pull/21
+
+2.0.0 / 2014-07-28
+------------------
+* added `\n` to end of file on write. [#14](https://github.com/jprichardson/node-jsonfile/pull/14)
+* added `options.throws` to `readFileSync()`
+* dropped support for Node v0.8
+
+1.2.0 / 2014-06-29
+------------------
+* removed semicolons
+* bugfix: passed `options` to `fs.readFile` and `fs.readFileSync`. This technically changes behavior, but
+changes it according to docs. [#12][#12]
+
+1.1.1 / 2013-11-11
+------------------
+* fixed catching of callback bug (ffissore / #5)
+
+1.1.0 / 2013-10-11
+------------------
+* added `options` param to methods, (seanodell / #4)
+
+1.0.1 / 2013-09-05
+------------------
+* removed `homepage` field from package.json to remove NPM warning
+
+1.0.0 / 2013-06-28
+------------------
+* added `.npmignore`, #1
+* changed spacing default from `4` to `2` to follow Node conventions
+
+0.0.1 / 2012-09-10
+------------------
+* Initial release.
+
+[#89]: https://github.com/jprichardson/node-jsonfile/pull/89
+[#45]: https://github.com/jprichardson/node-jsonfile/issues/45    "Reading of UTF8-encoded (w/ BOM) files fails"
+[#44]: https://github.com/jprichardson/node-jsonfile/issues/44    "Extra characters in written file"
+[#43]: https://github.com/jprichardson/node-jsonfile/issues/43    "Prettyfy json when written to file"
+[#42]: https://github.com/jprichardson/node-jsonfile/pull/42      "Moved fs.readFileSync within the try/catch"
+[#41]: https://github.com/jprichardson/node-jsonfile/issues/41    "Linux: Hidden file not working"
+[#40]: https://github.com/jprichardson/node-jsonfile/issues/40    "autocreate folder doesn't work from Path-value"
+[#39]: https://github.com/jprichardson/node-jsonfile/pull/39      "Add `throws` option for readFile (async)"
+[#38]: https://github.com/jprichardson/node-jsonfile/pull/38      "Update README.md writeFile[Sync] signature"
+[#37]: https://github.com/jprichardson/node-jsonfile/pull/37      "support append file"
+[#36]: https://github.com/jprichardson/node-jsonfile/pull/36      "Add typescript definition file."
+[#35]: https://github.com/jprichardson/node-jsonfile/pull/35      "Add typescript definition file."
+[#34]: https://github.com/jprichardson/node-jsonfile/pull/34      "readFile JSON parse error includes filename"
+[#33]: https://github.com/jprichardson/node-jsonfile/pull/33      "fix throw->throws typo in readFileSync()"
+[#32]: https://github.com/jprichardson/node-jsonfile/issues/32    "readFile & readFileSync can possible have strip-comments as an option?"
+[#31]: https://github.com/jprichardson/node-jsonfile/pull/31      "[Modify] Support string include is unicode escape string"
+[#30]: https://github.com/jprichardson/node-jsonfile/issues/30    "How to use Jsonfile package in Meteor.js App?"
+[#29]: https://github.com/jprichardson/node-jsonfile/issues/29    "writefile callback if no error?"
+[#28]: https://github.com/jprichardson/node-jsonfile/issues/28    "writeFile options argument broken "
+[#27]: https://github.com/jprichardson/node-jsonfile/pull/27      "Use svg instead of png to get better image quality"
+[#26]: https://github.com/jprichardson/node-jsonfile/issues/26    "Breaking change to fs-extra"
+[#25]: https://github.com/jprichardson/node-jsonfile/issues/25    "support string encoding param for read methods"
+[#24]: https://github.com/jprichardson/node-jsonfile/issues/24    "readFile: Passing in null options with a callback throws an error"
+[#23]: https://github.com/jprichardson/node-jsonfile/pull/23      "Add appendFile and appendFileSync"
+[#22]: https://github.com/jprichardson/node-jsonfile/issues/22    "Default value for spaces in readme.md is outdated"
+[#21]: https://github.com/jprichardson/node-jsonfile/pull/21      "Update license attribute"
+[#20]: https://github.com/jprichardson/node-jsonfile/issues/20    "Add simple caching functionallity"
+[#19]: https://github.com/jprichardson/node-jsonfile/pull/19      "Add appendFileSync method"
+[#18]: https://github.com/jprichardson/node-jsonfile/issues/18    "Add updateFile and updateFileSync methods"
+[#17]: https://github.com/jprichardson/node-jsonfile/issues/17    "seem read & write sync has sequentially problem"
+[#16]: https://github.com/jprichardson/node-jsonfile/pull/16      "export spaces defaulted to null"
+[#15]: https://github.com/jprichardson/node-jsonfile/issues/15    "`jsonfile.spaces` should default to `null`"
+[#14]: https://github.com/jprichardson/node-jsonfile/pull/14      "Add EOL at EOF"
+[#13]: https://github.com/jprichardson/node-jsonfile/issues/13    "Add a final newline"
+[#12]: https://github.com/jprichardson/node-jsonfile/issues/12    "readFile doesn't accept options"
+[#11]: https://github.com/jprichardson/node-jsonfile/pull/11      "Added try,catch to readFileSync"
+[#10]: https://github.com/jprichardson/node-jsonfile/issues/10    "No output or error from writeFile"
+[#9]: https://github.com/jprichardson/node-jsonfile/pull/9        "Change 'js' to 'jf' in example."
+[#8]: https://github.com/jprichardson/node-jsonfile/pull/8        "Updated forgotten module.exports to me."
+[#7]: https://github.com/jprichardson/node-jsonfile/pull/7        "Add file name in error message"
+[#6]: https://github.com/jprichardson/node-jsonfile/pull/6        "Use graceful-fs when possible"
+[#5]: https://github.com/jprichardson/node-jsonfile/pull/5        "Jsonfile doesn't behave nicely when used inside a test suite."
+[#4]: https://github.com/jprichardson/node-jsonfile/pull/4        "Added options parameter to writeFile and writeFileSync"
+[#3]: https://github.com/jprichardson/node-jsonfile/issues/3      "test2"
+[#2]: https://github.com/jprichardson/node-jsonfile/issues/2      "homepage field must be a string url. Deleted."
+[#1]: https://github.com/jprichardson/node-jsonfile/pull/1        "adding an `.npmignore` file"

+ 15 - 0
node_modules/jsonfile/LICENSE

@@ -0,0 +1,15 @@
+(The MIT License)
+
+Copyright (c) 2012-2015, JP Richardson <jprichardson@gmail.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
+(the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify,
+ merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
+OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 230 - 0
node_modules/jsonfile/README.md

@@ -0,0 +1,230 @@
+Node.js - jsonfile
+================
+
+Easily read/write JSON files in Node.js. _Note: this module cannot be used in the browser._
+
+[![npm Package](https://img.shields.io/npm/v/jsonfile.svg?style=flat-square)](https://www.npmjs.org/package/jsonfile)
+[![build status](https://secure.travis-ci.org/jprichardson/node-jsonfile.svg)](http://travis-ci.org/jprichardson/node-jsonfile)
+[![windows Build status](https://img.shields.io/appveyor/ci/jprichardson/node-jsonfile/master.svg?label=windows%20build)](https://ci.appveyor.com/project/jprichardson/node-jsonfile/branch/master)
+
+<a href="https://github.com/feross/standard"><img src="https://cdn.rawgit.com/feross/standard/master/sticker.svg" alt="Standard JavaScript" width="100"></a>
+
+Why?
+----
+
+Writing `JSON.stringify()` and then `fs.writeFile()` and `JSON.parse()` with `fs.readFile()` enclosed in `try/catch` blocks became annoying.
+
+
+
+Installation
+------------
+
+    npm install --save jsonfile
+
+
+
+API
+---
+
+* [`readFile(filename, [options], callback)`](#readfilefilename-options-callback)
+* [`readFileSync(filename, [options])`](#readfilesyncfilename-options)
+* [`writeFile(filename, obj, [options], callback)`](#writefilefilename-obj-options-callback)
+* [`writeFileSync(filename, obj, [options])`](#writefilesyncfilename-obj-options)
+
+----
+
+### readFile(filename, [options], callback)
+
+`options` (`object`, default `undefined`): Pass in any [`fs.readFile`](https://nodejs.org/api/fs.html#fs_fs_readfile_path_options_callback) options or set `reviver` for a [JSON reviver](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/parse).
+  - `throws` (`boolean`, default: `true`). If `JSON.parse` throws an error, pass this error to the callback.
+  If `false`, `null` is returned for the object instead (see the sketch after the examples below).
+
+
+```js
+const jsonfile = require('jsonfile')
+const file = '/tmp/data.json'
+jsonfile.readFile(file, function (err, obj) {
+  if (err) console.error(err)
+  console.dir(obj)
+})
+```
+
+You can also use this method with promises. The `readFile` method will return a promise if you do not pass a callback function.
+
+```js
+const jsonfile = require('jsonfile')
+const file = '/tmp/data.json'
+jsonfile.readFile(file)
+  .then(obj => console.dir(obj))
+  .catch(error => console.error(error))
+```
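+
+If a parse failure should not surface as an error, the `throws` option described above can be set to `false`; the parsed object then comes back as `null` (errors from `fs` itself are still reported). A minimal sketch, using a hypothetical file path:
+
+```js
+const jsonfile = require('jsonfile')
+const file = '/tmp/maybe-invalid.json' // hypothetical path for illustration
+
+jsonfile.readFile(file, { throws: false }, function (err, obj) {
+  // If the file exists but does not contain valid JSON, err stays unset and obj is null.
+  if (obj === null) console.log('file did not contain valid JSON')
+  else console.dir(obj)
+})
+```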
+
+----
+
+### readFileSync(filename, [options])
+
+`options` (`object`, default `undefined`): Pass in any [`fs.readFileSync`](https://nodejs.org/api/fs.html#fs_fs_readfilesync_path_options) options or set `reviver` for a [JSON reviver](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/parse).
+- `throws` (`boolean`, default: `true`). If an error is encountered reading or parsing the file, throw the error. If `false`, returns `null` for the object.
+
+```js
+const jsonfile = require('jsonfile')
+const file = '/tmp/data.json'
+
+console.dir(jsonfile.readFileSync(file))
+```
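+
+The `reviver` option mentioned above is forwarded to `JSON.parse`, so values can be transformed while they are read. A small sketch (the `createdAt` field and its handling are hypothetical):
+
+```js
+const jsonfile = require('jsonfile')
+const file = '/tmp/data.json'
+
+// Hypothetical reviver: turn a stored ISO date string back into a Date object.
+const obj = jsonfile.readFileSync(file, {
+  reviver: (key, value) => (key === 'createdAt' ? new Date(value) : value)
+})
+console.dir(obj)
+```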
+
+----
+
+### writeFile(filename, obj, [options], callback)
+
+`options`: Pass in any [`fs.writeFile`](https://nodejs.org/api/fs.html#fs_fs_writefile_file_data_options_callback) options or set `replacer` for a [JSON replacer](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify). You can also pass in `spaces` to control indentation, override the `EOL` string, or set the `finalEOL` flag to `false` so the file is not saved with an `EOL` at the end.
+
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/data.json'
+const obj = { name: 'JP' }
+
+jsonfile.writeFile(file, obj, function (err) {
+  if (err) console.error(err)
+})
+```
+Or use with promises as follows:
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/data.json'
+const obj = { name: 'JP' }
+
+jsonfile.writeFile(file, obj)
+  .then(res => {
+    console.log('Write complete')
+  })
+  .catch(error => console.error(error))
+```
+
+
+**formatting with spaces:**
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/data.json'
+const obj = { name: 'JP' }
+
+jsonfile.writeFile(file, obj, { spaces: 2 }, function (err) {
+  if (err) console.error(err)
+})
+```
+
+**overriding EOL:**
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/data.json'
+const obj = { name: 'JP' }
+
+jsonfile.writeFile(file, obj, { spaces: 2, EOL: '\r\n' }, function (err) {
+  if (err) console.error(err)
+})
+```
+
+
+**disabling the EOL at the end of file:**
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/data.json'
+const obj = { name: 'JP' }
+
+jsonfile.writeFile(file, obj, { spaces: 2, finalEOL: false }, function (err) {
+  if (err) console.log(err)
+})
+```
+
+**appending to an existing JSON file:**
+
+You can use `fs.writeFile` option `{ flag: 'a' }` to achieve this.
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/mayAlreadyExistedData.json'
+const obj = { name: 'JP' }
+
+jsonfile.writeFile(file, obj, { flag: 'a' }, function (err) {
+  if (err) console.error(err)
+})
+```
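+
+The `replacer` option mentioned above is passed straight to `JSON.stringify`, which makes it easy to drop or transform fields on the way out. A minimal sketch (the `password` field is purely illustrative):
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/data.json'
+const obj = { name: 'JP', password: 'secret' } // hypothetical field for illustration
+
+// Omit the password field when serializing.
+jsonfile.writeFile(file, obj, {
+  spaces: 2,
+  replacer: (key, value) => (key === 'password' ? undefined : value)
+}, function (err) {
+  if (err) console.error(err)
+})
+```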
+
+----
+
+### writeFileSync(filename, obj, [options])
+
+`options`: Pass in any [`fs.writeFileSync`](https://nodejs.org/api/fs.html#fs_fs_writefilesync_file_data_options) options or set `replacer` for a [JSON replacer](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify). You can also pass in `spaces` to control indentation, override the `EOL` string, or set the `finalEOL` flag to `false` so the file is not saved with an `EOL` at the end.
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/data.json'
+const obj = { name: 'JP' }
+
+jsonfile.writeFileSync(file, obj)
+```
+
+**formatting with spaces:**
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/data.json'
+const obj = { name: 'JP' }
+
+jsonfile.writeFileSync(file, obj, { spaces: 2 })
+```
+
+**overriding EOL:**
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/data.json'
+const obj = { name: 'JP' }
+
+jsonfile.writeFileSync(file, obj, { spaces: 2, EOL: '\r\n' })
+```
+
+**disabling the EOL at the end of file:**
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/data.json'
+const obj = { name: 'JP' }
+
+jsonfile.writeFileSync(file, obj, { spaces: 2, finalEOL: false })
+```
+
+**appending to an existing JSON file:**
+
+You can use `fs.writeFileSync` option `{ flag: 'a' }` to achieve this.
+
+```js
+const jsonfile = require('jsonfile')
+
+const file = '/tmp/mayAlreadyExistedData.json'
+const obj = { name: 'JP' }
+
+jsonfile.writeFileSync(file, obj, { flag: 'a' })
+```
+
+License
+-------
+
+(MIT License)
+
+Copyright 2012-2016, JP Richardson  <jprichardson@gmail.com>

+ 88 - 0
node_modules/jsonfile/index.js

@@ -0,0 +1,88 @@
+let _fs
+try {
+  _fs = require('graceful-fs')
+} catch (_) {
+  _fs = require('fs')
+}
+const universalify = require('universalify')
+const { stringify, stripBom } = require('./utils')
+
+async function _readFile (file, options = {}) {
+  if (typeof options === 'string') {
+    options = { encoding: options }
+  }
+
+  const fs = options.fs || _fs
+
+  const shouldThrow = 'throws' in options ? options.throws : true
+
+  let data = await universalify.fromCallback(fs.readFile)(file, options)
+
+  data = stripBom(data)
+
+  let obj
+  try {
+    obj = JSON.parse(data, options ? options.reviver : null)
+  } catch (err) {
+    if (shouldThrow) {
+      err.message = `${file}: ${err.message}`
+      throw err
+    } else {
+      return null
+    }
+  }
+
+  return obj
+}
+
+const readFile = universalify.fromPromise(_readFile)
+
+function readFileSync (file, options = {}) {
+  if (typeof options === 'string') {
+    options = { encoding: options }
+  }
+
+  const fs = options.fs || _fs
+
+  const shouldThrow = 'throws' in options ? options.throws : true
+
+  try {
+    let content = fs.readFileSync(file, options)
+    content = stripBom(content)
+    return JSON.parse(content, options.reviver)
+  } catch (err) {
+    if (shouldThrow) {
+      err.message = `${file}: ${err.message}`
+      throw err
+    } else {
+      return null
+    }
+  }
+}
+
+async function _writeFile (file, obj, options = {}) {
+  const fs = options.fs || _fs
+
+  const str = stringify(obj, options)
+
+  await universalify.fromCallback(fs.writeFile)(file, str, options)
+}
+
+const writeFile = universalify.fromPromise(_writeFile)
+
+function writeFileSync (file, obj, options = {}) {
+  const fs = options.fs || _fs
+
+  const str = stringify(obj, options)
+  // not sure if fs.writeFileSync returns anything, but just in case
+  return fs.writeFileSync(file, str, options)
+}
+
+const jsonfile = {
+  readFile,
+  readFileSync,
+  writeFile,
+  writeFileSync
+}
+
+module.exports = jsonfile

+ 40 - 0
node_modules/jsonfile/package.json

@@ -0,0 +1,40 @@
+{
+  "name": "jsonfile",
+  "version": "6.1.0",
+  "description": "Easily read/write JSON files.",
+  "repository": {
+    "type": "git",
+    "url": "git@github.com:jprichardson/node-jsonfile.git"
+  },
+  "keywords": [
+    "read",
+    "write",
+    "file",
+    "json",
+    "fs",
+    "fs-extra"
+  ],
+  "author": "JP Richardson <jprichardson@gmail.com>",
+  "license": "MIT",
+  "dependencies": {
+    "universalify": "^2.0.0"
+  },
+  "optionalDependencies": {
+    "graceful-fs": "^4.1.6"
+  },
+  "devDependencies": {
+    "mocha": "^8.2.0",
+    "rimraf": "^2.4.0",
+    "standard": "^16.0.1"
+  },
+  "main": "index.js",
+  "files": [
+    "index.js",
+    "utils.js"
+  ],
+  "scripts": {
+    "lint": "standard",
+    "test": "npm run lint && npm run unit",
+    "unit": "mocha"
+  }
+}

+ 14 - 0
node_modules/jsonfile/utils.js

@@ -0,0 +1,14 @@
+function stringify (obj, { EOL = '\n', finalEOL = true, replacer = null, spaces } = {}) {
+  const EOF = finalEOL ? EOL : ''
+  const str = JSON.stringify(obj, replacer, spaces)
+
+  return str.replace(/\n/g, EOL) + EOF
+}
+
+function stripBom (content) {
+  // we do this because JSON.parse would convert it to a utf8 string if encoding wasn't specified
+  if (Buffer.isBuffer(content)) content = content.toString('utf8')
+  return content.replace(/^\uFEFF/, '')
+}
+
+module.exports = { stringify, stripBom }

+ 1 - 0
node_modules/keycharm/.npmignore

@@ -0,0 +1 @@
+.idea/

+ 176 - 0
node_modules/keycharm/LICENSE-APACHE-2.0

@@ -0,0 +1,176 @@
+                               Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS

+ 22 - 0
node_modules/keycharm/LICENSE-MIT

@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Almende B.V.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

+ 50 - 0
node_modules/keycharm/README.md

@@ -0,0 +1,50 @@
+keycharm
+========
+
+Easy and free library for binding keys.
+
+Keycharm is on npm so you can install it with:
+```
+npm install keycharm
+```
+
+
+Example:
+
+```
+var keys = keycharm(options);
+
+keys.bind("a", function () {}, 'keydown'); // key, callback function, 'keydown' or 'keyup'
+```
+
+Available options (all are optional):
+```
+{
+    container: document.getElementById("element"), // optional div to bind keycharm to. It will NEED a tabindex. When not supplied, this defaults to window.
+    preventDefault: true/false // default value: false;
+}
+```
+
+Supported keys:
+
+```
+'a'-'z', 'A'-'Z', '0'-'9', 'F1'-'F12', 'space', 'enter', 'ctrl', 'alt', 'tab', 'shift', 
+'delete', 'esc', 'backspace', '-','=', '[', ']', 'left', 'up', 'right', 'down', 'pageup', 'pagedown'
+
+numpad: 'num0'-'num9', 'num/', 'num*', 'num-', 'num+', 'num.'
+```
+
+Each instantiation of keycharm has its own bindings to the key events.
+
+Available methods:
+
+```
+.bind(key, callback, [type]);               // bind key, type = 'keydown' or 'keyup', default type = keydown.
+.unbind(key, [callback], [type]);           // unbind key,  type = 'keydown' or 'keyup', default type = keydown. No callback deletes all bound callbacks from key
+.reset();                                   // remove all bound keys
+.destroy();                                 // remove all bound keys and the event listeners of keycharm
+.getKey(event);                             // get the key label of the event
+.bindAll(function, 'keydown' or 'keyup');   // bind all keys to this function, could be used for testing or demos.
+```
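+
+For instance, the methods above can be combined like this (a minimal sketch; the callback bodies are placeholders):
+
+```
+var keys = keycharm({ preventDefault: false });
+
+var onEsc = function (event) { console.log('escape pressed'); };
+
+keys.bind('esc', onEsc);            // fires on keydown (the default type)
+keys.bind('esc', onEsc, 'keyup');   // fires on keyup as well
+keys.unbind('esc', onEsc, 'keyup'); // removes only the keyup binding
+
+keys.bindAll(function (event) {     // e.g. for a demo: log every supported key
+  console.log('key: ' + keys.getKey(event));
+});
+
+keys.destroy();                     // remove all bindings and the event listeners
+```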
+
+Keycharm is dual-licensed under both the Apache 2.0 license and the MIT license. I'll leave it up to the user to pick which one they prefer.

+ 193 - 0
node_modules/keycharm/keycharm.js

@@ -0,0 +1,193 @@
+"use strict";
+/**
+ * Created by Alex on 11/6/2014.
+ */
+
+// https://github.com/umdjs/umd/blob/master/returnExports.js#L40-L60
+// if the module has no dependencies, the above pattern can be simplified to
+(function (root, factory) {
+  if (typeof define === 'function' && define.amd) {
+    // AMD. Register as an anonymous module.
+    define([], factory);
+  } else if (typeof exports === 'object') {
+    // Node. Does not work with strict CommonJS, but
+    // only CommonJS-like environments that support module.exports,
+    // like Node.
+    module.exports = factory();
+  } else {
+    // Browser globals (root is window)
+    root.keycharm = factory();
+  }
+}(this, function () {
+
+  function keycharm(options) {
+    var preventDefault = options && options.preventDefault || false;
+
+    var container = options && options.container || window;
+
+    var _exportFunctions = {};
+    var _bound = {keydown:{}, keyup:{}};
+    var _keys = {};
+    var i;
+
+    // a - z
+    for (i = 97; i <= 122; i++) {_keys[String.fromCharCode(i)] = {code:65 + (i - 97), shift: false};}
+    // A - Z
+    for (i = 65; i <= 90; i++) {_keys[String.fromCharCode(i)] = {code:i, shift: true};}
+    // 0 - 9
+    for (i = 0;  i <= 9;   i++) {_keys['' + i] = {code:48 + i, shift: false};}
+    // F1 - F12
+    for (i = 1;  i <= 12;   i++) {_keys['F' + i] = {code:111 + i, shift: false};}
+    // num0 - num9
+    for (i = 0;  i <= 9;   i++) {_keys['num' + i] = {code:96 + i, shift: false};}
+
+    // numpad misc
+    _keys['num*'] = {code:106, shift: false};
+    _keys['num+'] = {code:107, shift: false};
+    _keys['num-'] = {code:109, shift: false};
+    _keys['num/'] = {code:111, shift: false};
+    _keys['num.'] = {code:110, shift: false};
+    // arrows
+    _keys['left']  = {code:37, shift: false};
+    _keys['up']    = {code:38, shift: false};
+    _keys['right'] = {code:39, shift: false};
+    _keys['down']  = {code:40, shift: false};
+    // extra keys
+    _keys['space'] = {code:32, shift: false};
+    _keys['enter'] = {code:13, shift: false};
+    _keys['shift'] = {code:16, shift: undefined};
+    _keys['esc']   = {code:27, shift: false};
+    _keys['backspace'] = {code:8, shift: false};
+    _keys['tab']       = {code:9, shift: false};
+    _keys['ctrl']      = {code:17, shift: false};
+    _keys['alt']       = {code:18, shift: false};
+    _keys['delete']    = {code:46, shift: false};
+    _keys['pageup']    = {code:33, shift: false};
+    _keys['pagedown']  = {code:34, shift: false};
+    // symbols
+    _keys['=']     = {code:187, shift: false};
+    _keys['-']     = {code:189, shift: false};
+    _keys[']']     = {code:221, shift: false};
+    _keys['[']     = {code:219, shift: false};
+
+
+
+    var down = function(event) {handleEvent(event,'keydown');};
+    var up = function(event) {handleEvent(event,'keyup');};
+
+    // handle the actually bound key with the event
+    var handleEvent = function(event,type) {
+      if (_bound[type][event.keyCode] !== undefined) {
+        var bound = _bound[type][event.keyCode];
+        for (var i = 0; i < bound.length; i++) {
+          if (bound[i].shift === undefined) {
+            bound[i].fn(event);
+          }
+          else if (bound[i].shift == true && event.shiftKey == true) {
+            bound[i].fn(event);
+          }
+          else if (bound[i].shift == false && event.shiftKey == false) {
+            bound[i].fn(event);
+          }
+        }
+
+        if (preventDefault == true) {
+          event.preventDefault();
+        }
+      }
+    };
+
+    // bind a key to a callback
+    _exportFunctions.bind = function(key, callback, type) {
+      if (type === undefined) {
+        type = 'keydown';
+      }
+      if (_keys[key] === undefined) {
+        throw new Error("unsupported key: " + key);
+      }
+      if (_bound[type][_keys[key].code] === undefined) {
+        _bound[type][_keys[key].code] = [];
+      }
+      _bound[type][_keys[key].code].push({fn:callback, shift:_keys[key].shift});
+    };
+
+
+    // bind all keys to a call back (demo purposes)
+    _exportFunctions.bindAll = function(callback, type) {
+      if (type === undefined) {
+        type = 'keydown';
+      }
+      for (var key in _keys) {
+        if (_keys.hasOwnProperty(key)) {
+          _exportFunctions.bind(key,callback,type);
+        }
+      }
+    };
+
+    // get the key label from an event
+    _exportFunctions.getKey = function(event) {
+      for (var key in _keys) {
+        if (_keys.hasOwnProperty(key)) {
+          if (event.shiftKey == true && _keys[key].shift == true && event.keyCode == _keys[key].code) {
+            return key;
+          }
+          else if (event.shiftKey == false && _keys[key].shift == false && event.keyCode == _keys[key].code) {
+            return key;
+          }
+          else if (event.keyCode == _keys[key].code && key == 'shift') {
+            return key;
+          }
+        }
+      }
+      return "unknown key, currently not supported";
+    };
+
+    // unbind either a specific callback from a key or all of them (by leaving callback undefined)
+    _exportFunctions.unbind = function(key, callback, type) {
+      if (type === undefined) {
+        type = 'keydown';
+      }
+      if (_keys[key] === undefined) {
+        throw new Error("unsupported key: " + key);
+      }
+      if (callback !== undefined) {
+        var newBindings = [];
+        var bound = _bound[type][_keys[key].code];
+        if (bound !== undefined) {
+          for (var i = 0; i < bound.length; i++) {
+            if (!(bound[i].fn == callback && bound[i].shift == _keys[key].shift)) {
+              newBindings.push(_bound[type][_keys[key].code][i]);
+            }
+          }
+        }
+        _bound[type][_keys[key].code] = newBindings;
+      }
+      else {
+        _bound[type][_keys[key].code] = [];
+      }
+    };
+
+    // reset all bound variables.
+    _exportFunctions.reset = function() {
+      _bound = {keydown:{}, keyup:{}};
+    };
+
+    // unbind all listeners and reset all variables.
+    _exportFunctions.destroy = function() {
+      _bound = {keydown:{}, keyup:{}};
+      container.removeEventListener('keydown', down, true);
+      container.removeEventListener('keyup', up, true);
+    };
+
+    // create listeners.
+    container.addEventListener('keydown',down,true);
+    container.addEventListener('keyup',up,true);
+
+    // return the public functions.
+    return _exportFunctions;
+  }
+
+  return keycharm;
+}));
+
+

+ 8 - 0
node_modules/keycharm/package.json

@@ -0,0 +1,8 @@
+{
+  "name": "keycharm",
+  "author": "Alex de Mulder <alex almende org>",
+  "description": "Simple, lightweight key-binding lib",
+  "version": "0.2.0",
+  "main": "keycharm.js",
+  "repository": "https://github.com/AlexDM0/keycharm"
+}

+ 51 - 0
node_modules/keycharm/test/test.html

@@ -0,0 +1,51 @@
+<!DOCTYPE html>
+<html>
+<head lang="en">
+    <meta charset="UTF-8">
+    <title>Keycharm example</title>
+    <style>
+        div.keyfield {
+            display:inline-block;
+            width:190px;
+            height:25px;
+            background-color:#eeeeee;
+            vertical-align:middle;
+        }
+
+        #container {
+            background-color:#dddddd;
+        }
+    </style>
+    <script src="../keycharm.js"></script>
+</head>
+<body>
+
+When using a DIV as a container, it NEEDS a tabindex. Select the DIV to start listening to events.
+<h2>Press a button!</h2> <br>
+<div id="container" tabindex="1">
+<table>
+    <tr height="30px">
+        <td>Keydown event:</td><td><div id="keydown" class="keyfield">See the key here</div></td>
+    </tr>
+    <tr height="30px">
+        <td>Keyup event:</td><td><div id="keyup" class="keyfield">See the key here</div></td>
+    </tr>
+</table>
+</div>
+
+<script language="JavaScript">
+    var keys = keycharm({container:document.getElementById("container"),preventDefault: false});
+
+    keys.bindAll(function(event) {
+        document.getElementById('keydown').innerHTML = "Key:" + keys.getKey(event);
+    }, 'keydown');
+    keys.bindAll(function(event) {
+        document.getElementById('keyup').innerHTML = "Key:" + keys.getKey(event);
+    }, 'keyup');
+
+    var alertFunction = function() {alert("a!");};
+    keys.bind("a", alertFunction);
+    keys.unbind("a", alertFunction);
+</script>
+</body>
+</html>

+ 21 - 0
node_modules/lines-and-columns/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Brian Donovan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 33 - 0
node_modules/lines-and-columns/README.md

@@ -0,0 +1,33 @@
+# lines-and-columns
+
+Maps lines and columns to character offsets and back. This is useful for parsers
+and other text processors that deal in character ranges but process text with
+meaningful lines and columns.
+
+## Install
+
+```
+$ npm install [--save] lines-and-columns
+```
+
+## Usage
+
+```js
+import { LinesAndColumns } from 'lines-and-columns'
+
+const lines = new LinesAndColumns(
+  `table {
+  border: 0
+}`
+)
+
+lines.locationForIndex(9)
+// { line: 1, column: 1 }
+
+lines.indexForLocation({ line: 1, column: 2 })
+// 10
+```
+
+## License
+
+MIT

+ 13 - 0
node_modules/lines-and-columns/build/index.d.ts

@@ -0,0 +1,13 @@
+export declare type SourceLocation = {
+    line: number;
+    column: number;
+};
+export declare class LinesAndColumns {
+    private string;
+    private offsets;
+    constructor(string: string);
+    locationForIndex(index: number): SourceLocation | null;
+    indexForLocation(location: SourceLocation): number | null;
+    private lengthOfLine;
+}
+export default LinesAndColumns;

+ 62 - 0
node_modules/lines-and-columns/build/index.js

@@ -0,0 +1,62 @@
+"use strict";
+exports.__esModule = true;
+exports.LinesAndColumns = void 0;
+var LF = '\n';
+var CR = '\r';
+var LinesAndColumns = /** @class */ (function () {
+    function LinesAndColumns(string) {
+        this.string = string;
+        var offsets = [0];
+        for (var offset = 0; offset < string.length;) {
+            switch (string[offset]) {
+                case LF:
+                    offset += LF.length;
+                    offsets.push(offset);
+                    break;
+                case CR:
+                    offset += CR.length;
+                    if (string[offset] === LF) {
+                        offset += LF.length;
+                    }
+                    offsets.push(offset);
+                    break;
+                default:
+                    offset++;
+                    break;
+            }
+        }
+        this.offsets = offsets;
+    }
+    LinesAndColumns.prototype.locationForIndex = function (index) {
+        if (index < 0 || index > this.string.length) {
+            return null;
+        }
+        var line = 0;
+        var offsets = this.offsets;
+        while (offsets[line + 1] <= index) {
+            line++;
+        }
+        var column = index - offsets[line];
+        return { line: line, column: column };
+    };
+    LinesAndColumns.prototype.indexForLocation = function (location) {
+        var line = location.line, column = location.column;
+        if (line < 0 || line >= this.offsets.length) {
+            return null;
+        }
+        if (column < 0 || column > this.lengthOfLine(line)) {
+            return null;
+        }
+        return this.offsets[line] + column;
+    };
+    LinesAndColumns.prototype.lengthOfLine = function (line) {
+        var offset = this.offsets[line];
+        var nextOffset = line === this.offsets.length - 1
+            ? this.string.length
+            : this.offsets[line + 1];
+        return nextOffset - offset;
+    };
+    return LinesAndColumns;
+}());
+exports.LinesAndColumns = LinesAndColumns;
+exports["default"] = LinesAndColumns;

+ 49 - 0
node_modules/lines-and-columns/package.json

@@ -0,0 +1,49 @@
+{
+  "name": "lines-and-columns",
+  "version": "1.2.4",
+  "description": "Maps lines and columns to character offsets and back.",
+  "keywords": [
+    "lines",
+    "columns",
+    "parser"
+  ],
+  "homepage": "https://github.com/eventualbuddha/lines-and-columns#readme",
+  "bugs": {
+    "url": "https://github.com/eventualbuddha/lines-and-columns/issues"
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/eventualbuddha/lines-and-columns.git"
+  },
+  "license": "MIT",
+  "author": "Brian Donovan <brian@donovans.cc>",
+  "main": "./build/index.js",
+  "types": "./build/index.d.ts",
+  "files": [
+    "build"
+  ],
+  "scripts": {
+    "build:watch": "tsc --build tsconfig.build.json --watch",
+    "lint": "eslint .",
+    "lint:fix": "eslint . --fix",
+    "test": "is-ci test:coverage test:watch",
+    "test:coverage": "jest --coverage",
+    "test:watch": "jest --watch"
+  },
+  "devDependencies": {
+    "@types/jest": "^27.0.3",
+    "@types/node": "^16.11.9",
+    "@typescript-eslint/eslint-plugin": "^5.4.0",
+    "@typescript-eslint/parser": "^5.4.0",
+    "esbuild": "^0.13.15",
+    "esbuild-runner": "^2.2.1",
+    "eslint": "^8.2.0",
+    "eslint-config-prettier": "^8.3.0",
+    "eslint-plugin-prettier": "^4.0.0",
+    "is-ci-cli": "^2.2.0",
+    "jest": "^27.3.1",
+    "prettier": "^2.4.1",
+    "semantic-release": "^18.0.0",
+    "typescript": "^4.5.2"
+  }
+}

+ 47 - 0
node_modules/lodash.debounce/LICENSE

@@ -0,0 +1,47 @@
+Copyright jQuery Foundation and other contributors <https://jquery.org/>
+
+Based on Underscore.js, copyright Jeremy Ashkenas,
+DocumentCloud and Investigative Reporters & Editors <http://underscorejs.org/>
+
+This software consists of voluntary contributions made by many
+individuals. For exact contribution history, see the revision history
+available at https://github.com/lodash/lodash
+
+The following license applies to all parts of this software except as
+documented below:
+
+====
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+====
+
+Copyright and related rights for sample code are waived via CC0. Sample
+code is defined as all source code displayed within the prose of the
+documentation.
+
+CC0: http://creativecommons.org/publicdomain/zero/1.0/
+
+====
+
+Files located in the node_modules and vendor directories are externally
+maintained libraries used by this software which have their own
+licenses; we recommend you read them, as their terms may differ from the
+terms above.

+ 18 - 0
node_modules/lodash.debounce/README.md

@@ -0,0 +1,18 @@
+# lodash.debounce v4.0.8
+
+The [lodash](https://lodash.com/) method `_.debounce` exported as a [Node.js](https://nodejs.org/) module.
+
+## Installation
+
+Using npm:
+```bash
+$ {sudo -H} npm i -g npm
+$ npm i --save lodash.debounce
+```
+
+In Node.js:
+```js
+var debounce = require('lodash.debounce');
+```
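+
+As a small illustration (a sketch only; `saveDraft` is a hypothetical function), the debounced wrapper also exposes the `cancel` and `flush` methods documented by lodash:
+
+```js
+var debounce = require('lodash.debounce');
+
+function saveDraft () { console.log('saved'); } // hypothetical work to debounce
+
+// Invoke saveDraft once things have been quiet for a second.
+var debouncedSave = debounce(saveDraft, 1000);
+
+debouncedSave();
+debouncedSave();        // collapsed into a single trailing-edge invocation
+debouncedSave.flush();  // invoke the pending call immediately
+debouncedSave.cancel(); // or drop any still-pending call
+```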
+
+See the [documentation](https://lodash.com/docs#debounce) or [package source](https://github.com/lodash/lodash/blob/4.0.8-npm-packages/lodash.debounce) for more details.

+ 377 - 0
node_modules/lodash.debounce/index.js

@@ -0,0 +1,377 @@
+/**
+ * lodash (Custom Build) <https://lodash.com/>
+ * Build: `lodash modularize exports="npm" -o ./`
+ * Copyright jQuery Foundation and other contributors <https://jquery.org/>
+ * Released under MIT license <https://lodash.com/license>
+ * Based on Underscore.js 1.8.3 <http://underscorejs.org/LICENSE>
+ * Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
+ */
+
+/** Used as the `TypeError` message for "Functions" methods. */
+var FUNC_ERROR_TEXT = 'Expected a function';
+
+/** Used as references for various `Number` constants. */
+var NAN = 0 / 0;
+
+/** `Object#toString` result references. */
+var symbolTag = '[object Symbol]';
+
+/** Used to match leading and trailing whitespace. */
+var reTrim = /^\s+|\s+$/g;
+
+/** Used to detect bad signed hexadecimal string values. */
+var reIsBadHex = /^[-+]0x[0-9a-f]+$/i;
+
+/** Used to detect binary string values. */
+var reIsBinary = /^0b[01]+$/i;
+
+/** Used to detect octal string values. */
+var reIsOctal = /^0o[0-7]+$/i;
+
+/** Built-in method references without a dependency on `root`. */
+var freeParseInt = parseInt;
+
+/** Detect free variable `global` from Node.js. */
+var freeGlobal = typeof global == 'object' && global && global.Object === Object && global;
+
+/** Detect free variable `self`. */
+var freeSelf = typeof self == 'object' && self && self.Object === Object && self;
+
+/** Used as a reference to the global object. */
+var root = freeGlobal || freeSelf || Function('return this')();
+
+/** Used for built-in method references. */
+var objectProto = Object.prototype;
+
+/**
+ * Used to resolve the
+ * [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring)
+ * of values.
+ */
+var objectToString = objectProto.toString;
+
+/* Built-in method references for those with the same name as other `lodash` methods. */
+var nativeMax = Math.max,
+    nativeMin = Math.min;
+
+/**
+ * Gets the timestamp of the number of milliseconds that have elapsed since
+ * the Unix epoch (1 January 1970 00:00:00 UTC).
+ *
+ * @static
+ * @memberOf _
+ * @since 2.4.0
+ * @category Date
+ * @returns {number} Returns the timestamp.
+ * @example
+ *
+ * _.defer(function(stamp) {
+ *   console.log(_.now() - stamp);
+ * }, _.now());
+ * // => Logs the number of milliseconds it took for the deferred invocation.
+ */
+var now = function() {
+  return root.Date.now();
+};
+
+/**
+ * Creates a debounced function that delays invoking `func` until after `wait`
+ * milliseconds have elapsed since the last time the debounced function was
+ * invoked. The debounced function comes with a `cancel` method to cancel
+ * delayed `func` invocations and a `flush` method to immediately invoke them.
+ * Provide `options` to indicate whether `func` should be invoked on the
+ * leading and/or trailing edge of the `wait` timeout. The `func` is invoked
+ * with the last arguments provided to the debounced function. Subsequent
+ * calls to the debounced function return the result of the last `func`
+ * invocation.
+ *
+ * **Note:** If `leading` and `trailing` options are `true`, `func` is
+ * invoked on the trailing edge of the timeout only if the debounced function
+ * is invoked more than once during the `wait` timeout.
+ *
+ * If `wait` is `0` and `leading` is `false`, `func` invocation is deferred
+ * until the next tick, similar to `setTimeout` with a timeout of `0`.
+ *
+ * See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/)
+ * for details on the differences between `_.debounce` and `_.throttle`.
+ *
+ * @static
+ * @memberOf _
+ * @since 0.1.0
+ * @category Function
+ * @param {Function} func The function to debounce.
+ * @param {number} [wait=0] The number of milliseconds to delay.
+ * @param {Object} [options={}] The options object.
+ * @param {boolean} [options.leading=false]
+ *  Specify invoking on the leading edge of the timeout.
+ * @param {number} [options.maxWait]
+ *  The maximum time `func` is allowed to be delayed before it's invoked.
+ * @param {boolean} [options.trailing=true]
+ *  Specify invoking on the trailing edge of the timeout.
+ * @returns {Function} Returns the new debounced function.
+ * @example
+ *
+ * // Avoid costly calculations while the window size is in flux.
+ * jQuery(window).on('resize', _.debounce(calculateLayout, 150));
+ *
+ * // Invoke `sendMail` when clicked, debouncing subsequent calls.
+ * jQuery(element).on('click', _.debounce(sendMail, 300, {
+ *   'leading': true,
+ *   'trailing': false
+ * }));
+ *
+ * // Ensure `batchLog` is invoked once after 1 second of debounced calls.
+ * var debounced = _.debounce(batchLog, 250, { 'maxWait': 1000 });
+ * var source = new EventSource('/stream');
+ * jQuery(source).on('message', debounced);
+ *
+ * // Cancel the trailing debounced invocation.
+ * jQuery(window).on('popstate', debounced.cancel);
+ */
+function debounce(func, wait, options) {
+  var lastArgs,
+      lastThis,
+      maxWait,
+      result,
+      timerId,
+      lastCallTime,
+      lastInvokeTime = 0,
+      leading = false,
+      maxing = false,
+      trailing = true;
+
+  if (typeof func != 'function') {
+    throw new TypeError(FUNC_ERROR_TEXT);
+  }
+  wait = toNumber(wait) || 0;
+  if (isObject(options)) {
+    leading = !!options.leading;
+    maxing = 'maxWait' in options;
+    maxWait = maxing ? nativeMax(toNumber(options.maxWait) || 0, wait) : maxWait;
+    trailing = 'trailing' in options ? !!options.trailing : trailing;
+  }
+
+  function invokeFunc(time) {
+    var args = lastArgs,
+        thisArg = lastThis;
+
+    lastArgs = lastThis = undefined;
+    lastInvokeTime = time;
+    result = func.apply(thisArg, args);
+    return result;
+  }
+
+  function leadingEdge(time) {
+    // Reset any `maxWait` timer.
+    lastInvokeTime = time;
+    // Start the timer for the trailing edge.
+    timerId = setTimeout(timerExpired, wait);
+    // Invoke the leading edge.
+    return leading ? invokeFunc(time) : result;
+  }
+
+  function remainingWait(time) {
+    var timeSinceLastCall = time - lastCallTime,
+        timeSinceLastInvoke = time - lastInvokeTime,
+        result = wait - timeSinceLastCall;
+
+    return maxing ? nativeMin(result, maxWait - timeSinceLastInvoke) : result;
+  }
+
+  function shouldInvoke(time) {
+    var timeSinceLastCall = time - lastCallTime,
+        timeSinceLastInvoke = time - lastInvokeTime;
+
+    // Either this is the first call, activity has stopped and we're at the
+    // trailing edge, the system time has gone backwards and we're treating
+    // it as the trailing edge, or we've hit the `maxWait` limit.
+    return (lastCallTime === undefined || (timeSinceLastCall >= wait) ||
+      (timeSinceLastCall < 0) || (maxing && timeSinceLastInvoke >= maxWait));
+  }
+
+  function timerExpired() {
+    var time = now();
+    if (shouldInvoke(time)) {
+      return trailingEdge(time);
+    }
+    // Restart the timer.
+    timerId = setTimeout(timerExpired, remainingWait(time));
+  }
+
+  function trailingEdge(time) {
+    timerId = undefined;
+
+    // Only invoke if we have `lastArgs` which means `func` has been
+    // debounced at least once.
+    if (trailing && lastArgs) {
+      return invokeFunc(time);
+    }
+    lastArgs = lastThis = undefined;
+    return result;
+  }
+
+  function cancel() {
+    if (timerId !== undefined) {
+      clearTimeout(timerId);
+    }
+    lastInvokeTime = 0;
+    lastArgs = lastCallTime = lastThis = timerId = undefined;
+  }
+
+  function flush() {
+    return timerId === undefined ? result : trailingEdge(now());
+  }
+
+  function debounced() {
+    var time = now(),
+        isInvoking = shouldInvoke(time);
+
+    lastArgs = arguments;
+    lastThis = this;
+    lastCallTime = time;
+
+    if (isInvoking) {
+      if (timerId === undefined) {
+        return leadingEdge(lastCallTime);
+      }
+      if (maxing) {
+        // Handle invocations in a tight loop.
+        timerId = setTimeout(timerExpired, wait);
+        return invokeFunc(lastCallTime);
+      }
+    }
+    if (timerId === undefined) {
+      timerId = setTimeout(timerExpired, wait);
+    }
+    return result;
+  }
+  debounced.cancel = cancel;
+  debounced.flush = flush;
+  return debounced;
+}
+
+/**
+ * Checks if `value` is the
+ * [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types)
+ * of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`)
+ *
+ * @static
+ * @memberOf _
+ * @since 0.1.0
+ * @category Lang
+ * @param {*} value The value to check.
+ * @returns {boolean} Returns `true` if `value` is an object, else `false`.
+ * @example
+ *
+ * _.isObject({});
+ * // => true
+ *
+ * _.isObject([1, 2, 3]);
+ * // => true
+ *
+ * _.isObject(_.noop);
+ * // => true
+ *
+ * _.isObject(null);
+ * // => false
+ */
+function isObject(value) {
+  var type = typeof value;
+  return !!value && (type == 'object' || type == 'function');
+}
+
+/**
+ * Checks if `value` is object-like. A value is object-like if it's not `null`
+ * and has a `typeof` result of "object".
+ *
+ * @static
+ * @memberOf _
+ * @since 4.0.0
+ * @category Lang
+ * @param {*} value The value to check.
+ * @returns {boolean} Returns `true` if `value` is object-like, else `false`.
+ * @example
+ *
+ * _.isObjectLike({});
+ * // => true
+ *
+ * _.isObjectLike([1, 2, 3]);
+ * // => true
+ *
+ * _.isObjectLike(_.noop);
+ * // => false
+ *
+ * _.isObjectLike(null);
+ * // => false
+ */
+function isObjectLike(value) {
+  return !!value && typeof value == 'object';
+}
+
+/**
+ * Checks if `value` is classified as a `Symbol` primitive or object.
+ *
+ * @static
+ * @memberOf _
+ * @since 4.0.0
+ * @category Lang
+ * @param {*} value The value to check.
+ * @returns {boolean} Returns `true` if `value` is a symbol, else `false`.
+ * @example
+ *
+ * _.isSymbol(Symbol.iterator);
+ * // => true
+ *
+ * _.isSymbol('abc');
+ * // => false
+ */
+function isSymbol(value) {
+  return typeof value == 'symbol' ||
+    (isObjectLike(value) && objectToString.call(value) == symbolTag);
+}
+
+/**
+ * Converts `value` to a number.
+ *
+ * @static
+ * @memberOf _
+ * @since 4.0.0
+ * @category Lang
+ * @param {*} value The value to process.
+ * @returns {number} Returns the number.
+ * @example
+ *
+ * _.toNumber(3.2);
+ * // => 3.2
+ *
+ * _.toNumber(Number.MIN_VALUE);
+ * // => 5e-324
+ *
+ * _.toNumber(Infinity);
+ * // => Infinity
+ *
+ * _.toNumber('3.2');
+ * // => 3.2
+ */
+function toNumber(value) {
+  if (typeof value == 'number') {
+    return value;
+  }
+  if (isSymbol(value)) {
+    return NAN;
+  }
+  if (isObject(value)) {
+    var other = typeof value.valueOf == 'function' ? value.valueOf() : value;
+    value = isObject(other) ? (other + '') : other;
+  }
+  if (typeof value != 'string') {
+    return value === 0 ? value : +value;
+  }
+  value = value.replace(reTrim, '');
+  var isBinary = reIsBinary.test(value);
+  return (isBinary || reIsOctal.test(value))
+    ? freeParseInt(value.slice(2), isBinary ? 2 : 8)
+    : (reIsBadHex.test(value) ? NAN : +value);
+}
+
+module.exports = debounce;

+ 17 - 0
node_modules/lodash.debounce/package.json

@@ -0,0 +1,17 @@
+{
+  "name": "lodash.debounce",
+  "version": "4.0.8",
+  "description": "The lodash method `_.debounce` exported as a module.",
+  "homepage": "https://lodash.com/",
+  "icon": "https://lodash.com/icon.svg",
+  "license": "MIT",
+  "keywords": "lodash-modularized, debounce",
+  "author": "John-David Dalton <john.david.dalton@gmail.com> (http://allyoucanleet.com/)",
+  "contributors": [
+    "John-David Dalton <john.david.dalton@gmail.com> (http://allyoucanleet.com/)",
+    "Blaine Bublitz <blaine.bublitz@gmail.com> (https://github.com/phated)",
+    "Mathias Bynens <mathias@qiwi.be> (https://mathiasbynens.be/)"
+  ],
+  "repository": "lodash/lodash",
+  "scripts": { "test": "echo \"See https://travis-ci.org/lodash/lodash-cli for testing details.\"" }
+}

+ 47 - 0
node_modules/lodash/LICENSE

@@ -0,0 +1,47 @@
+Copyright OpenJS Foundation and other contributors <https://openjsf.org/>
+
+Based on Underscore.js, copyright Jeremy Ashkenas,
+DocumentCloud and Investigative Reporters & Editors <http://underscorejs.org/>
+
+This software consists of voluntary contributions made by many
+individuals. For exact contribution history, see the revision history
+available at https://github.com/lodash/lodash
+
+The following license applies to all parts of this software except as
+documented below:
+
+====
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+====
+
+Copyright and related rights for sample code are waived via CC0. Sample
+code is defined as all source code displayed within the prose of the
+documentation.
+
+CC0: http://creativecommons.org/publicdomain/zero/1.0/
+
+====
+
+Files located in the node_modules and vendor directories are externally
+maintained libraries used by this software which have their own
+licenses; we recommend you read them, as their terms may differ from the
+terms above.

+ 21 - 0
node_modules/lodash/_apply.js

@@ -0,0 +1,21 @@
+/**
+ * A faster alternative to `Function#apply`, this function invokes `func`
+ * with the `this` binding of `thisArg` and the arguments of `args`.
+ *
+ * @private
+ * @param {Function} func The function to invoke.
+ * @param {*} thisArg The `this` binding of `func`.
+ * @param {Array} args The arguments to invoke `func` with.
+ * @returns {*} Returns the result of `func`.
+ */
+function apply(func, thisArg, args) {
+  switch (args.length) {
+    case 0: return func.call(thisArg);
+    case 1: return func.call(thisArg, args[0]);
+    case 2: return func.call(thisArg, args[0], args[1]);
+    case 3: return func.call(thisArg, args[0], args[1], args[2]);
+  }
+  return func.apply(thisArg, args);
+}
+
+module.exports = apply;

+ 22 - 0
node_modules/lodash/_arrayAggregator.js

@@ -0,0 +1,22 @@
+/**
+ * A specialized version of `baseAggregator` for arrays.
+ *
+ * @private
+ * @param {Array} [array] The array to iterate over.
+ * @param {Function} setter The function to set `accumulator` values.
+ * @param {Function} iteratee The iteratee to transform keys.
+ * @param {Object} accumulator The initial aggregated object.
+ * @returns {Function} Returns `accumulator`.
+ */
+function arrayAggregator(array, setter, iteratee, accumulator) {
+  var index = -1,
+      length = array == null ? 0 : array.length;
+
+  while (++index < length) {
+    var value = array[index];
+    setter(accumulator, value, iteratee(value), array);
+  }
+  return accumulator;
+}
+
+module.exports = arrayAggregator;

+ 22 - 0
node_modules/lodash/_arrayEach.js

@@ -0,0 +1,22 @@
+/**
+ * A specialized version of `_.forEach` for arrays without support for
+ * iteratee shorthands.
+ *
+ * @private
+ * @param {Array} [array] The array to iterate over.
+ * @param {Function} iteratee The function invoked per iteration.
+ * @returns {Array} Returns `array`.
+ */
+function arrayEach(array, iteratee) {
+  var index = -1,
+      length = array == null ? 0 : array.length;
+
+  while (++index < length) {
+    if (iteratee(array[index], index, array) === false) {
+      break;
+    }
+  }
+  return array;
+}
+
+module.exports = arrayEach;

+ 21 - 0
node_modules/lodash/_arrayEachRight.js

@@ -0,0 +1,21 @@
+/**
+ * A specialized version of `_.forEachRight` for arrays without support for
+ * iteratee shorthands.
+ *
+ * @private
+ * @param {Array} [array] The array to iterate over.
+ * @param {Function} iteratee The function invoked per iteration.
+ * @returns {Array} Returns `array`.
+ */
+function arrayEachRight(array, iteratee) {
+  var length = array == null ? 0 : array.length;
+
+  while (length--) {
+    if (iteratee(array[length], length, array) === false) {
+      break;
+    }
+  }
+  return array;
+}
+
+module.exports = arrayEachRight;

+ 23 - 0
node_modules/lodash/_arrayEvery.js

@@ -0,0 +1,23 @@
+/**
+ * A specialized version of `_.every` for arrays without support for
+ * iteratee shorthands.
+ *
+ * @private
+ * @param {Array} [array] The array to iterate over.
+ * @param {Function} predicate The function invoked per iteration.
+ * @returns {boolean} Returns `true` if all elements pass the predicate check,
+ *  else `false`.
+ */
+function arrayEvery(array, predicate) {
+  var index = -1,
+      length = array == null ? 0 : array.length;
+
+  while (++index < length) {
+    if (!predicate(array[index], index, array)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+module.exports = arrayEvery;

+ 25 - 0
node_modules/lodash/_arrayFilter.js

@@ -0,0 +1,25 @@
+/**
+ * A specialized version of `_.filter` for arrays without support for
+ * iteratee shorthands.
+ *
+ * @private
+ * @param {Array} [array] The array to iterate over.
+ * @param {Function} predicate The function invoked per iteration.
+ * @returns {Array} Returns the new filtered array.
+ */
+function arrayFilter(array, predicate) {
+  var index = -1,
+      length = array == null ? 0 : array.length,
+      resIndex = 0,
+      result = [];
+
+  while (++index < length) {
+    var value = array[index];
+    if (predicate(value, index, array)) {
+      result[resIndex++] = value;
+    }
+  }
+  return result;
+}
+
+module.exports = arrayFilter;

+ 17 - 0
node_modules/lodash/_arrayIncludes.js

@@ -0,0 +1,17 @@
+var baseIndexOf = require('./_baseIndexOf');
+
+/**
+ * A specialized version of `_.includes` for arrays without support for
+ * specifying an index to search from.
+ *
+ * @private
+ * @param {Array} [array] The array to inspect.
+ * @param {*} target The value to search for.
+ * @returns {boolean} Returns `true` if `target` is found, else `false`.
+ */
+function arrayIncludes(array, value) {
+  var length = array == null ? 0 : array.length;
+  return !!length && baseIndexOf(array, value, 0) > -1;
+}
+
+module.exports = arrayIncludes;

+ 1 - 0
node_modules/lodash/fp/__.js

@@ -0,0 +1 @@
+module.exports = require('./placeholder');

+ 8 - 0
node_modules/log-symbols/browser.js

@@ -0,0 +1,8 @@
+'use strict';
+
+module.exports = {
+	info: 'ℹ️',
+	success: '✅',
+	warning: '⚠️',
+	error: '❌️'
+};

+ 25 - 0
node_modules/log-symbols/index.d.ts

@@ -0,0 +1,25 @@
+/**
+Colored symbols for various log levels.
+
+Includes fallbacks for Windows CMD which only supports a [limited character set](https://en.wikipedia.org/wiki/Code_page_437).
+
+@example
+```
+import logSymbols = require('log-symbols');
+
+console.log(logSymbols.success, 'Finished successfully!');
+// Terminals with Unicode support:     ✔ Finished successfully!
+// Terminals without Unicode support:  √ Finished successfully!
+```
+*/
+declare const logSymbols: {
+	readonly info: string;
+
+	readonly success: string;
+
+	readonly warning: string;
+
+	readonly error: string;
+};
+
+export = logSymbols;

+ 19 - 0
node_modules/log-symbols/index.js

@@ -0,0 +1,19 @@
+'use strict';
+const chalk = require('chalk');
+const isUnicodeSupported = require('is-unicode-supported');
+
+const main = {
+	info: chalk.blue('ℹ'),
+	success: chalk.green('✔'),
+	warning: chalk.yellow('⚠'),
+	error: chalk.red('✖')
+};
+
+const fallback = {
+	info: chalk.blue('i'),
+	success: chalk.green('√'),
+	warning: chalk.yellow('‼'),
+	error: chalk.red('×')
+};
+
+module.exports = isUnicodeSupported() ? main : fallback;

+ 9 - 0
node_modules/log-symbols/license

@@ -0,0 +1,9 @@
+MIT License
+
+Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (https://sindresorhus.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 52 - 0
node_modules/log-symbols/package.json

@@ -0,0 +1,52 @@
+{
+	"name": "log-symbols",
+	"version": "4.1.0",
+	"description": "Colored symbols for various log levels. Example: `✔︎ Success`",
+	"license": "MIT",
+	"repository": "sindresorhus/log-symbols",
+	"funding": "https://github.com/sponsors/sindresorhus",
+	"author": {
+		"name": "Sindre Sorhus",
+		"email": "sindresorhus@gmail.com",
+		"url": "https://sindresorhus.com"
+	},
+	"engines": {
+		"node": ">=10"
+	},
+	"scripts": {
+		"test": "xo && ava && tsd"
+	},
+	"files": [
+		"index.js",
+		"index.d.ts",
+		"browser.js"
+	],
+	"keywords": [
+		"unicode",
+		"cli",
+		"cmd",
+		"command-line",
+		"characters",
+		"symbol",
+		"symbols",
+		"figure",
+		"figures",
+		"fallback",
+		"windows",
+		"log",
+		"logging",
+		"terminal",
+		"stdout"
+	],
+	"dependencies": {
+		"chalk": "^4.1.0",
+		"is-unicode-supported": "^0.1.0"
+	},
+	"devDependencies": {
+		"ava": "^2.4.0",
+		"strip-ansi": "^6.0.0",
+		"tsd": "^0.14.0",
+		"xo": "^0.38.2"
+	},
+	"browser": "browser.js"
+}

+ 51 - 0
node_modules/log-symbols/readme.md

@@ -0,0 +1,51 @@
+# log-symbols
+
+<img src="screenshot.png" width="226" height="192" align="right">
+
+> Colored symbols for various log levels
+
+Includes fallbacks for Windows CMD which only supports a [limited character set](https://en.wikipedia.org/wiki/Code_page_437).
+
+## Install
+
+```
+$ npm install log-symbols
+```
+
+## Usage
+
+```js
+const logSymbols = require('log-symbols');
+
+console.log(logSymbols.success, 'Finished successfully!');
+// Terminals with Unicode support:     ✔ Finished successfully!
+// Terminals without Unicode support:  √ Finished successfully!
+```
+
+## API
+
+### logSymbols
+
+#### info
+#### success
+#### warning
+#### error
+
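+Each symbol is a chalk-colored string, so it composes freely with other console output. A minimal sketch (the messages are illustrative):
+
+```js
+const logSymbols = require('log-symbols');
+
+console.log(logSymbols.info, 'Starting up...');
+console.log(logSymbols.warning, 'Config file missing, using defaults');
+console.log(logSymbols.error, 'Connection failed');
+// Unicode terminals: ℹ / ⚠ / ✖ (colored); fallback terminals: i / ‼ / ×.
+```
+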
+## Related
+
+- [figures](https://github.com/sindresorhus/figures) - Unicode symbols with Windows CMD fallbacks
+- [py-log-symbols](https://github.com/ManrajGrover/py-log-symbols) - Python port
+- [log-symbols](https://github.com/palash25/log-symbols) - Ruby port
+- [guumaster/logsymbols](https://github.com/guumaster/logsymbols) - Golang port
+
+---
+
+<div align="center">
+	<b>
+		<a href="https://tidelift.com/subscription/pkg/npm-log-symbols?utm_source=npm-log-symbols&utm_medium=referral&utm_campaign=readme">Get professional support for this package with a Tidelift subscription</a>
+	</b>
+	<br>
+	<sub>
+		Tidelift helps make open source sustainable for maintainers while giving companies<br>assurances about security, maintenance, and licensing for their dependencies.
+	</sub>
+</div>

+ 3 - 0
node_modules/loglevel-plugin-prefix/.travis.yml

@@ -0,0 +1,3 @@
+language: node_js
+node_js:
+  - "node"

+ 21 - 0
node_modules/loglevel-plugin-prefix/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Evgeniy Pavlov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 218 - 0
node_modules/loglevel-plugin-prefix/README.md

@@ -0,0 +1,218 @@
+# loglevel-plugin-prefix
+
+Plugin for [loglevel](https://github.com/pimterry/loglevel) message prefixing.
+
+[![NPM version](https://img.shields.io/npm/v/loglevel-plugin-prefix.svg?style=flat-square)](https://www.npmjs.com/package/loglevel-plugin-prefix)[![Build Status](https://img.shields.io/travis/kutuluk/loglevel-plugin-prefix/master.svg?style=flat-square)](https://travis-ci.org/kutuluk/loglevel-plugin-prefix)
+
+## Installation
+
+```sh
+npm install loglevel-plugin-prefix
+```
+
+## API
+
+**This plugin is under active development and should be considered unstable. No guarantees regarding API stability are made. Backward compatibility is guaranteed only by patch releases.**
+
+#### `reg(loglevel)`
+
+This method registers the plugin with loglevel. It must be called at least once before any call to the `apply` method. Repeated calls to this method are ignored.
+
+#### Parameters
+
+`loglevel` - the root logger, imported from the loglevel module
+
+#### `apply(logger, options)`
+
+This method applies the plugin to the logger. The `reg` method must be called before using this method; otherwise a warning will be logged. **From the next release, calling `apply` before `reg` will throw an error.**
+
+#### Parameters
+
+`logger` - any loglevel logger
+
+`options` - an optional configuration object
+
+```javascript
+var defaults = {
+  template: '[%t] %l:',
+  levelFormatter: function (level) {
+    return level.toUpperCase();
+  },
+  nameFormatter: function (name) {
+    return name || 'root';
+  },
+  timestampFormatter: function (date) {
+    return date.toTimeString().replace(/.*(\d{2}:\d{2}:\d{2}).*/, '$1');
+  },
+  format: undefined
+};
+```
+
+The plugin formats the prefix using the `template` option as a printf-like format string. The `template` is a string containing zero or more placeholder tokens. Each placeholder token is replaced with the corresponding value from the loglevel message parameters. Supported placeholders are:
+
+- `%l` - level of message
+- `%n` - name of logger
+- `%t` - timestamp of message
+
+The `levelFormatter`, `nameFormatter`, and `timestampFormatter` options are functions for formatting the corresponding values.
+
+Alternatively, you can use the `format` option. This is a function that receives the formatted values (level, name, timestamp) and should return a prefix string.
+
+If both `format` and `template` are present in the configuration, the `template` parameter is ignored. When both parameters are missing from the configuration, the inherited behavior is used.
+
+## Usage
+
+### Browser directly
+```html
+<script src="https://unpkg.com/loglevel/dist/loglevel.min.js"></script>
+<script src="https://unpkg.com/loglevel-plugin-prefix@^0.8/dist/loglevel-plugin-prefix.min.js"></script>
+
+<script>
+  var logger = log.noConflict();
+  var prefixer = prefix.noConflict();
+  prefixer.reg(logger);
+  prefixer.apply(logger);
+  logger.warn('prefixed message');
+</script>
+```
+
+Output
+```
+[16:53:46] WARN: prefixed message
+```
+
+### Node
+```javascript
+const chalk = require('chalk');
+const log = require('loglevel');
+const prefix = require('loglevel-plugin-prefix');
+
+const colors = {
+  TRACE: chalk.magenta,
+  DEBUG: chalk.cyan,
+  INFO: chalk.blue,
+  WARN: chalk.yellow,
+  ERROR: chalk.red,
+};
+
+prefix.reg(log);
+log.enableAll();
+
+prefix.apply(log, {
+  format(level, name, timestamp) {
+    return `${chalk.gray(`[${timestamp}]`)} ${colors[level.toUpperCase()](level)} ${chalk.green(`${name}:`)}`;
+  },
+});
+
+prefix.apply(log.getLogger('critical'), {
+  format(level, name, timestamp) {
+    return chalk.red.bold(`[${timestamp}] ${level} ${name}:`);
+  },
+});
+
+log.trace('trace');
+log.debug('debug');
+log.getLogger('critical').info('Something significant happened');
+log.log('log');
+log.info('info');
+log.warn('warn');
+log.error('error');
+```
+
+Output
+
+![output](https://raw.githubusercontent.com/kutuluk/loglevel-plugin-prefix/master/colored.png "output")
+
+## Custom options
+
+```javascript
+const log = require('loglevel');
+const prefix = require('loglevel-plugin-prefix');
+
+prefix.reg(log);
+log.enableAll();
+
+prefix.apply(log, {
+  template: '[%t] %l (%n) static text:',
+  levelFormatter(level) {
+    return level.toUpperCase();
+  },
+  nameFormatter(name) {
+    return name || 'global';
+  },
+  timestampFormatter(date) {
+    return date.toISOString();
+  },
+});
+
+log.info('%s prefix', 'template');
+
+const fn = (level, name, timestamp) => `[${timestamp}] ${level} (${name}) static text:`;
+
+prefix.apply(log, { format: fn });
+
+log.info('%s prefix', 'functional');
+
+prefix.apply(log, { template: '[%t] %l (%n) static text:' });
+
+log.info('again %s prefix', 'template');
+```
+
+Output
+```
+[2017-05-29T12:53:46.000Z] INFO (global) static text: template prefix
+[2017-05-29T12:53:46.000Z] INFO (global) static text: functional prefix
+[2017-05-29T12:53:46.000Z] INFO (global) static text: again template prefix
+```
+
+## Option inheritance
+
+```javascript
+const log = require('loglevel');
+const prefix = require('loglevel-plugin-prefix');
+
+prefix.reg(log);
+log.enableAll();
+
+log.info('root');
+
+const chicken = log.getLogger('chicken');
+chicken.info('chicken');
+
+prefix.apply(chicken, { template: '%l (%n):' });
+chicken.info('chicken');
+
+prefix.apply(log);
+log.info('root');
+
+const egg = log.getLogger('egg');
+egg.info('egg');
+
+const fn = (level, name) => `${level} (${name}):`;
+
+prefix.apply(egg, { format: fn });
+egg.info('egg');
+
+prefix.apply(egg, {
+  levelFormatter(level) {
+    return level.toLowerCase();
+  },
+});
+egg.info('egg');
+
+chicken.info('chicken');
+log.info('root');
+```
+
+Output
+```
+root
+chicken
+INFO (chicken): chicken
+[16:53:46] INFO: root
+[16:53:46] INFO: egg
+INFO (egg): egg
+info (egg): egg
+INFO (chicken): chicken
+[16:53:46] INFO: root
+```

BIN
node_modules/loglevel-plugin-prefix/colored.png


+ 1 - 0
node_modules/loglevel-plugin-prefix/dist/loglevel-plugin-prefix.min.js

@@ -0,0 +1 @@
+!function(e,t){"function"==typeof define&&define.amd?define(t):"object"==typeof module&&module.exports?module.exports=t():e.prefix=t(e)}(this,function(e){"use strict";var n,t,a={template:"[%t] %l:",levelFormatter:function(e){return e.toUpperCase()},nameFormatter:function(e){return e||"root"},timestampFormatter:function(e){return e.toTimeString().replace(/.*(\d{2}:\d{2}:\d{2}).*/,"$1")},format:void 0},d={},r={reg:function(e){if(!e||!e.getLogger)throw new TypeError("Argument is not a root logger");n=e},apply:function(e,t){if(!e||!e.setLevel)throw new TypeError("Argument is not a logger");var r=e.methodFactory,s=e.name||"",o=d[s]||d[""]||a;return d[s]||(e.methodFactory=function(i,e,f){var p=r(i,e,f),m=d[f]||d[""],u=-1!==m.template.indexOf("%t"),c=-1!==m.template.indexOf("%l"),g=-1!==m.template.indexOf("%n");return function(){for(var e="",t=arguments.length,r=Array(t),o=0;o<t;o++)r[o]=arguments[o];if(s||!d[f]){var n=m.timestampFormatter(new Date),a=m.levelFormatter(i),l=m.nameFormatter(f);m.format?e+=m.format(a,l,n):(e+=m.template,u&&(e=e.replace(/%t/,n)),c&&(e=e.replace(/%l/,a)),g&&(e=e.replace(/%n/,l))),r.length&&"string"==typeof r[0]?r[0]=e+" "+r[0]:r.unshift(e)}p.apply(void 0,r)}}),(t=t||{}).template&&(t.format=void 0),d[s]=function(e){for(var t,r=1,o=arguments.length;r<o;r++)for(t in arguments[r])Object.prototype.hasOwnProperty.call(arguments[r],t)&&(e[t]=arguments[r][t]);return e}({},o,t),e.setLevel(e.getLevel()),n||e.warn("It is necessary to call the function reg() of loglevel-plugin-prefix before calling apply. From the next release, it will throw an error. See more: https://github.com/kutuluk/loglevel-plugin-prefix/blob/master/README.md"),e}};return e&&(t=e.prefix,r.noConflict=function(){return e.prefix===r&&(e.prefix=t),r}),r});

+ 7 - 0
node_modules/loglevel-plugin-prefix/examples/.eslintrc.json

@@ -0,0 +1,7 @@
+{
+    "extends": "airbnb-base",
+    "env": {
+        "es6": true
+    },
+    "rules": {}
+}

+ 14 - 0
node_modules/loglevel-plugin-prefix/index.d.ts

@@ -0,0 +1,14 @@
+declare module 'loglevel-plugin-prefix' {
+    import { Logger } from 'loglevel';
+
+    interface LoglevelPluginPrefixOptions {
+        template?: string;
+        levelFormatter?: (level: string) => string;
+        nameFormatter?: (name: string | undefined) => string;
+        timestampFormatter?: (date: Date) => string;
+        format?: (level: string, name: string | undefined, timestamp: Date) => string | undefined;
+    }
+
+    function reg(loglevel: Logger): void;
+    function apply(logger: Logger, options?: LoglevelPluginPrefixOptions): Logger;
+}

+ 50 - 0
node_modules/loglevel-plugin-prefix/package.json

@@ -0,0 +1,50 @@
+{
+  "name": "loglevel-plugin-prefix",
+  "description": "Plugin for loglevel message prefixing",
+  "author": {
+    "name": "kutuluk"
+  },
+  "keywords": [
+    "log",
+    "logger",
+    "logging",
+    "browser",
+    "node",
+    "prefix",
+    "colored",
+    "loglevel",
+    "plugin"
+  ],
+  "homepage": "https://github.com/kutuluk/loglevel-plugin-prefix",
+  "bugs": {
+    "url": "https://github.com/kutuluk/loglevel-plugin-prefix/issues"
+  },
+  "repository": {
+    "type": "git",
+    "url": "git@github.com:kutuluk/loglevel-plugin-prefix.git"
+  },
+  "license": "MIT",
+  "version": "0.8.4",
+  "main": "./lib/loglevel-plugin-prefix",
+  "types": "index.d.ts",
+  "scripts": {
+    "eslint": "eslint ./lib/loglevel-plugin-prefix.js",
+    "test": "mocha",
+    "example": "node ./example/example.js",
+    "uglifyjs": "uglifyjs ./lib/loglevel-plugin-prefix.js -o ./dist/loglevel-plugin-prefix.min.js -c -m",
+    "build": "npm install && npm run eslint && npm run test && npm run uglifyjs"
+  },
+  "devDependencies": {
+    "chai": "^4.1.2",
+    "chalk": "^2.4.1",
+    "eslint": "^3.19.0",
+    "eslint-config-airbnb-base": "^11.3.2",
+    "eslint-plugin-chai-friendly": "^0.3.6",
+    "eslint-plugin-import": "^2.12.0",
+    "loglevel": "^1.6.1",
+    "loglevel-plugin-mock": "^0.1.0",
+    "mocha": "^3.5.3",
+    "sinon": "^2.4.1",
+    "uglify-js": "^3.4.0"
+  }
+}

+ 27 - 0
node_modules/loglevel/.editorconfig

@@ -0,0 +1,27 @@
+
+# EditorConfig defines and maintains consistent coding styles between different
+# editors and IDEs: http://EditorConfig.org/
+# Top-most EditorConfig file
+root = true
+
+# All files
+[*.*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 4
+trim_trailing_whitespace = true
+max_line_length = 80
+
+[*.md]
+indent_size = 2
+
+[*.json]
+indent_size = 2
+
+[*.{yaml,yml}]
+indent_size = 2
+
+[vendor/grunt-template-jasmine-requirejs/**/*]
+indent_size = 2

+ 165 - 0
node_modules/loglevel/Gruntfile.js

@@ -0,0 +1,165 @@
+'use strict';
+
+var Jasmine = require('jasmine');
+
+module.exports = function (grunt) {
+    var jasmineRequireJsOptions = {
+        specs: 'test/*-test.js',
+        helpers: 'test/*-helper.js',
+    };
+
+    // Project configuration.
+    grunt.initConfig({
+        // Metadata.
+        pkg: grunt.file.readJSON('package.json'),
+        banner: '/*! <%= pkg.name %> - v<%= pkg.version %>' +
+                ' - <%= pkg.homepage %>' +
+                ' - (c) <%= grunt.template.today("yyyy") %> <%= pkg.author.name %>' +
+                ' - licensed <%= pkg.license %> */\n',
+        // Task configuration.
+        concat: {
+            options: {
+                banner: '<%= banner %>',
+                stripBanners: true
+            },
+            dist: {
+                src: ['lib/<%= pkg.name %>.js'],
+                dest: 'dist/<%= pkg.name %>.js'
+            }
+        },
+        uglify: {
+            options: {
+                banner: '<%= banner %>'
+            },
+            dist: {
+                src: '<%= concat.dist.dest %>',
+                dest: 'dist/<%= pkg.name %>.min.js'
+            }
+        },
+        jasmine: {
+            requirejs: {
+                src: [],
+                options: {
+                    specs: jasmineRequireJsOptions.specs,
+                    helpers: jasmineRequireJsOptions.helpers,
+                    template: require('./vendor/grunt-template-jasmine-requirejs')
+                }
+            },
+            global: {
+                src: 'lib/**/*.js',
+                options: {
+                    specs: 'test/global-integration.js',
+                }
+            },
+            context: {
+                src: 'test/test-context-using-apply.generated.js',
+                options: {
+                    specs: 'test/global-integration-with-new-context.js',
+                }
+            }
+        },
+        jasmine_node: {
+            options: {
+                specs: ['test/node-integration.js']
+            }
+        },
+        open: {
+            jasmine: {
+                path: 'http://127.0.0.1:8000/_SpecRunner.html'
+            }
+        },
+        connect: {
+            test: {
+                port: 8000,
+                keepalive: true
+            }
+        },
+        jshint: {
+            options: {
+                jshintrc: '.jshintrc'
+            },
+            gruntfile: {
+                src: 'Gruntfile.js'
+            },
+            lib: {
+                options: {
+                    jshintrc: 'lib/.jshintrc'
+                },
+                src: ['lib/**/*.js']
+            },
+            test: {
+                options: {
+                    jshintrc: 'test/.jshintrc'
+                },
+                src: ['test/*.js', '!test/*.generated.js']
+            }
+        },
+        watch: {
+            gruntfile: {
+                files: '<%= jshint.gruntfile.src %>',
+                tasks: ['jshint:gruntfile']
+            },
+            lib: {
+                files: '<%= jshint.lib.src %>',
+                tasks: ['jshint:lib', 'test']
+            },
+            test: {
+                files: '<%= jshint.test.src %>',
+                tasks: ['jshint:test', 'test']
+            }
+        },
+        preprocess: {
+            "test-context-using-apply": {
+                src: 'test/test-context-using-apply.js',
+                dest: 'test/test-context-using-apply.generated.js'
+            }
+        },
+        clean:{
+            test:['test/test-context-using-apply.generated.js']
+        }
+    });
+
+    // These plugins provide necessary tasks.
+    grunt.loadNpmTasks('grunt-contrib-concat');
+    grunt.loadNpmTasks('grunt-contrib-uglify');
+    grunt.loadNpmTasks('grunt-contrib-jasmine');
+    grunt.loadNpmTasks('grunt-contrib-jshint');
+    grunt.loadNpmTasks('grunt-contrib-watch');
+
+    grunt.loadNpmTasks('grunt-contrib-connect');
+    grunt.loadNpmTasks('grunt-open');
+    grunt.loadNpmTasks('grunt-preprocess');
+    grunt.loadNpmTasks('grunt-contrib-clean');
+
+    // Run Jasmine with Node.js tests (as opposed to browser tests).
+    //
+    // NOTE: This is designed for Jasmine 2.4, which matches the version used
+    // in `grunt-contrib-jasmine`. If that package is updated, this should also
+    // be updated to match.
+    grunt.registerTask('jasmine_node', 'Run Jasmine in Node.js', function() {
+        var done = this.async();
+
+        var jasmine = new Jasmine({ projectBaseDir: __dirname });
+        jasmine.onComplete(function(success) {
+            done(success);
+        });
+
+        jasmine.execute(this.options().specs);
+    });
+
+    // Build a distributable release
+    grunt.registerTask('dist', ['test', 'dist-build']);
+    grunt.registerTask('dist-build', ['concat', 'uglify']);
+
+    // Check everything is good
+    grunt.registerTask('test', ['jshint', 'test-browser', 'test-node']);
+    grunt.registerTask('test-browser', ['jasmine:global', 'test-browser-context', 'jasmine:requirejs']);
+    grunt.registerTask('test-browser-context', ['preprocess', 'jasmine:context', 'clean:test']);
+    grunt.registerTask('test-node', ['jasmine_node']);
+
+    // Test with a live server and an actual browser
+    grunt.registerTask('integration-test', ['jasmine:requirejs:src:build', 'open:jasmine', 'connect:test:keepalive']);
+
+    // Default task.
+    grunt.registerTask('default', 'test');
+};

+ 22 - 0
node_modules/loglevel/LICENSE-MIT

@@ -0,0 +1,22 @@
+Copyright (c) 2013 Tim Perry
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.

+ 139 - 0
node_modules/loglevel/demo/index.html

@@ -0,0 +1,139 @@
+<!DOCTYPE html>
+<html lang="en-GB">
+    <head>
+        <meta charset="utf-8" />
+        <title>loglevel Demo</title>
+        <meta
+            name="description"
+            content="A demo to show the features of loglevel."
+        />
+        <meta name="viewport" content="width=device-width, initial-scale=1" />
+        <link rel="stylesheet" href="styles.css" />
+        <link
+            rel="stylesheet"
+            href="https://fonts.googleapis.com/css2?family=Material+Symbols+Rounded:opsz,wght,FILL,GRAD@48,600,0,0"
+        />
+    </head>
+    <main>
+        <h1>loglevel Demo</h1>
+        <form id="LogForm" class="code-container">
+            <code>
+                log.
+                <select name="logLevel" aria-label="Log type" required>
+                    <option value="trace">trace</option>
+                    <option value="debug">debug</option>
+                    <option value="info">info</option>
+                    <option value="warn">warn</option>
+                    <option value="error">error</option>
+                </select>
+                ("
+                <input
+                    name="debugMessage"
+                    type="text"
+                    placeholder="Log text"
+                    aria-label="Log text"
+                    required
+                />
+                ")
+            </code>
+            <button type="submit">Run</button>
+            <details>
+                <summary>More information...</summary>
+                Choose your level of logging and enter some text to output it to the console using logLevel.
+                <a href="https://github.com/pimterry/loglevel#logging-methods">Documentation for logging methods.</a>
+            </details>
+        </form>
+        <form id="SetLevel" class="code-container">
+            <code>
+                log.setLevel("
+                <select name="level" aria-label="Log type" required>
+                    <option value="0">trace</option>
+                    <option value="1">debug</option>
+                    <option value="2">info</option>
+                    <option value="3">warn</option>
+                    <option value="4">error</option>
+                    <option value="5">silent</option>
+                </select>
+                ",
+                <select name="persist" aria-label="Log type" required>
+                    <option value="true">true</option>
+                    <option value="false">false</option>
+                </select>
+                )
+            </code>
+            <button type="submit">Run</button>
+            <details>
+                <summary>More information...</summary>
+                Disable all logging below the given level.
+                <a href="https://github.com/pimterry/loglevel#logsetlevellevel-persist">Documentation for setLevel().</a>
+            </details>
+        </form>
+        <form id="SetDefaultLevel" class="code-container">
+            <code>
+                log.setDefaultLevel("
+                <select name="level" aria-label="Log type" required>
+                    <option value="0">trace</option>
+                    <option value="1">debug</option>
+                    <option value="2">info</option>
+                    <option value="3">warn</option>
+                    <option value="4">error</option>
+                    <option value="5">silent</option>
+                </select>
+                ")
+            </code>
+            <button type="submit">Run</button>
+            <details>
+                <summary>More information...</summary>
+                Select a level and run to set the default logging level.
+                <a href="https://github.com/pimterry/loglevel#logsetdefaultlevellevel">Documentation for setDefaultLevel().</a>
+            </details>
+        </form>
+        <div class="code-container">
+            <code>
+                log.resetLevel()
+            </code>
+            <button id="ResetLevelButton" type="button">Run</button>
+            <details>
+                <summary>More information...</summary>
+                Reset the current logging level to default.
+                <a href="https://github.com/pimterry/loglevel#logresetlevel">Documentation for resetLevel().</a>
+            </details>
+        </div>
+        <div class="code-container">
+            <code>
+                log.enableAll()
+            </code>
+            <button id="EnableAllButton" type="button">Run</button>
+            <details>
+                <summary>More information...</summary>
+                Enables all logs - equivalent of <code>setLevel('trace')</code>.
+                <a href="https://github.com/pimterry/loglevel##logenableall-and-logdisableall">Documentation for enableAll().</a>
+            </details>
+        </div>
+        <div class="code-container">
+            <code>
+                log.disableAll()
+            </code>
+            <button id="DisableAllButton" type="button">Run</button>
+            <details>
+                <summary>More information...</summary>
+                Disables all logs - equivalent of <code>setLevel('silent')</code>.
+                <a href="https://github.com/pimterry/loglevel##logenableall-and-logdisableall">Documentation for disableAll().</a>
+            </details>
+        </div>
+        <h2>Log State</h2>
+        <div id="LogState" class="code-container">
+            <label>
+                Current log level
+                <input name="currentLevel" aria-label="Current log level" readonly>
+            </label>
+            <details>
+                <summary>More information...</summary>
+                Uses the <code>getLevel()</code> method to display the current log level.
+                <a href="https://github.com/pimterry/loglevel##logenableall-and-logdisableall">Documentation for disableAll().</a>
+            </details>
+        </div>
+    </main>
+    <script src="../dist/loglevel.js"></script>
+    <script src="script.js"></script>
+</html>

+ 107 - 0
node_modules/loglevel/demo/styles.css

@@ -0,0 +1,107 @@
+* {
+    box-sizing: border-box;
+    font-family:system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
+    color: #161230;
+}
+
+body {
+    font-size: 18px;
+    background-color: #fafafa;
+}
+
+main {
+    display: flex;
+    flex-direction: column;
+    justify-content: center;
+    align-items: center;
+    gap: 1rem;
+}
+
+form {
+}
+
+label {
+    display: flex;
+    gap: 0.5rem;
+    flex-wrap: wrap;
+    align-items: center;
+    justify-content: space-between;
+}
+
+input,
+select,
+button {
+    /* flex-grow: 1; */
+    font-size: 1rem;
+    padding: 0.25rem;
+    border: 1px solid;
+    border-radius: 4px;
+}
+
+input[type="checkbox"] {
+    width: 1.5rem;
+    height: 1.5rem;
+}
+
+button {
+    cursor: pointer;
+    align-self: center;
+    padding: 0.5rem 1.5rem;
+    background-color: #161230;
+    color: #fafafa;
+    font-size: 1.5rem;
+}
+
+summary {
+    cursor: pointer;
+}
+
+code,
+code > input,
+code > select {
+    font-family: 'Courier New', Courier, monospace;
+    font-size: 1.5rem;
+}
+
+code {
+    display: flex;
+    align-items: center;
+}
+
+code input,
+code select {
+    border-color: #a9b7c9;
+}
+
+code button {
+    margin-left: 2rem;
+}
+
+details {
+    width: 100%;
+}
+
+summary {
+    margin-bottom: 0.5rem;
+}
+
+details code {
+    display: inline-block;
+    font-size: 1.1rem;
+    margin-left: 0.2rem;
+    font-weight: 600;
+}
+
+.code-container {
+    width: 80vw;
+    max-width: 800px;
+    display: flex;
+    justify-content: space-between;
+    align-items: center;
+    gap: 1rem;
+    border: 1px solid;
+    border-radius: 4px;
+    background-color: #eaf3ff;
+    padding: 1rem;
+    flex-wrap: wrap;
+}

+ 352 - 0
node_modules/loglevel/dist/loglevel.js

@@ -0,0 +1,352 @@
+/*! loglevel - v1.9.2 - https://github.com/pimterry/loglevel - (c) 2024 Tim Perry - licensed MIT */
+(function (root, definition) {
+    "use strict";
+    if (typeof define === 'function' && define.amd) {
+        define(definition);
+    } else if (typeof module === 'object' && module.exports) {
+        module.exports = definition();
+    } else {
+        root.log = definition();
+    }
+}(this, function () {
+    "use strict";
+
+    // Slightly dubious tricks to cut down minimized file size
+    var noop = function() {};
+    var undefinedType = "undefined";
+    var isIE = (typeof window !== undefinedType) && (typeof window.navigator !== undefinedType) && (
+        /Trident\/|MSIE /.test(window.navigator.userAgent)
+    );
+
+    var logMethods = [
+        "trace",
+        "debug",
+        "info",
+        "warn",
+        "error"
+    ];
+
+    var _loggersByName = {};
+    var defaultLogger = null;
+
+    // Cross-browser bind equivalent that works at least back to IE6
+    function bindMethod(obj, methodName) {
+        var method = obj[methodName];
+        if (typeof method.bind === 'function') {
+            return method.bind(obj);
+        } else {
+            try {
+                return Function.prototype.bind.call(method, obj);
+            } catch (e) {
+                // Missing bind shim or IE8 + Modernizr, fallback to wrapping
+                return function() {
+                    return Function.prototype.apply.apply(method, [obj, arguments]);
+                };
+            }
+        }
+    }
+
+    // Trace() doesn't print the message in IE, so for that case we need to wrap it
+    function traceForIE() {
+        if (console.log) {
+            if (console.log.apply) {
+                console.log.apply(console, arguments);
+            } else {
+                // In old IE, native console methods themselves don't have apply().
+                Function.prototype.apply.apply(console.log, [console, arguments]);
+            }
+        }
+        if (console.trace) console.trace();
+    }
+
+    // Build the best logging method possible for this env
+    // Wherever possible we want to bind, not wrap, to preserve stack traces
+    function realMethod(methodName) {
+        if (methodName === 'debug') {
+            methodName = 'log';
+        }
+
+        if (typeof console === undefinedType) {
+            return false; // No method possible, for now - fixed later by enableLoggingWhenConsoleArrives
+        } else if (methodName === 'trace' && isIE) {
+            return traceForIE;
+        } else if (console[methodName] !== undefined) {
+            return bindMethod(console, methodName);
+        } else if (console.log !== undefined) {
+            return bindMethod(console, 'log');
+        } else {
+            return noop;
+        }
+    }
+
+    // These private functions always need `this` to be set properly
+
+    function replaceLoggingMethods() {
+        /*jshint validthis:true */
+        var level = this.getLevel();
+
+        // Replace the actual methods.
+        for (var i = 0; i < logMethods.length; i++) {
+            var methodName = logMethods[i];
+            this[methodName] = (i < level) ?
+                noop :
+                this.methodFactory(methodName, level, this.name);
+        }
+
+        // Define log.log as an alias for log.debug
+        this.log = this.debug;
+
+        // Return any important warnings.
+        if (typeof console === undefinedType && level < this.levels.SILENT) {
+            return "No console available for logging";
+        }
+    }
+
+    // In old IE versions, the console isn't present until you first open it.
+    // We build realMethod() replacements here that regenerate logging methods
+    function enableLoggingWhenConsoleArrives(methodName) {
+        return function () {
+            if (typeof console !== undefinedType) {
+                replaceLoggingMethods.call(this);
+                this[methodName].apply(this, arguments);
+            }
+        };
+    }
+
+    // By default, we use closely bound real methods wherever possible, and
+    // otherwise we wait for a console to appear, and then try again.
+    function defaultMethodFactory(methodName, _level, _loggerName) {
+        /*jshint validthis:true */
+        return realMethod(methodName) ||
+               enableLoggingWhenConsoleArrives.apply(this, arguments);
+    }
+
+    function Logger(name, factory) {
+      // Private instance variables.
+      var self = this;
+      /**
+       * The level inherited from a parent logger (or a global default). We
+       * cache this here rather than delegating to the parent so that it stays
+       * in sync with the actual logging methods that we have installed (the
+       * parent could change levels but we might not have rebuilt the loggers
+       * in this child yet).
+       * @type {number}
+       */
+      var inheritedLevel;
+      /**
+       * The default level for this logger, if any. If set, this overrides
+       * `inheritedLevel`.
+       * @type {number|null}
+       */
+      var defaultLevel;
+      /**
+       * A user-specific level for this logger. If set, this overrides
+       * `defaultLevel`.
+       * @type {number|null}
+       */
+      var userLevel;
+
+      var storageKey = "loglevel";
+      if (typeof name === "string") {
+        storageKey += ":" + name;
+      } else if (typeof name === "symbol") {
+        storageKey = undefined;
+      }
+
+      function persistLevelIfPossible(levelNum) {
+          var levelName = (logMethods[levelNum] || 'silent').toUpperCase();
+
+          if (typeof window === undefinedType || !storageKey) return;
+
+          // Use localStorage if available
+          try {
+              window.localStorage[storageKey] = levelName;
+              return;
+          } catch (ignore) {}
+
+          // Use session cookie as fallback
+          try {
+              window.document.cookie =
+                encodeURIComponent(storageKey) + "=" + levelName + ";";
+          } catch (ignore) {}
+      }
+
+      function getPersistedLevel() {
+          var storedLevel;
+
+          if (typeof window === undefinedType || !storageKey) return;
+
+          try {
+              storedLevel = window.localStorage[storageKey];
+          } catch (ignore) {}
+
+          // Fallback to cookies if local storage gives us nothing
+          if (typeof storedLevel === undefinedType) {
+              try {
+                  var cookie = window.document.cookie;
+                  var cookieName = encodeURIComponent(storageKey);
+                  var location = cookie.indexOf(cookieName + "=");
+                  if (location !== -1) {
+                      storedLevel = /^([^;]+)/.exec(
+                          cookie.slice(location + cookieName.length + 1)
+                      )[1];
+                  }
+              } catch (ignore) {}
+          }
+
+          // If the stored level is not valid, treat it as if nothing was stored.
+          if (self.levels[storedLevel] === undefined) {
+              storedLevel = undefined;
+          }
+
+          return storedLevel;
+      }
+
+      function clearPersistedLevel() {
+          if (typeof window === undefinedType || !storageKey) return;
+
+          // Use localStorage if available
+          try {
+              window.localStorage.removeItem(storageKey);
+          } catch (ignore) {}
+
+          // Use session cookie as fallback
+          try {
+              window.document.cookie =
+                encodeURIComponent(storageKey) + "=; expires=Thu, 01 Jan 1970 00:00:00 UTC";
+          } catch (ignore) {}
+      }
+
+      function normalizeLevel(input) {
+          var level = input;
+          if (typeof level === "string" && self.levels[level.toUpperCase()] !== undefined) {
+              level = self.levels[level.toUpperCase()];
+          }
+          if (typeof level === "number" && level >= 0 && level <= self.levels.SILENT) {
+              return level;
+          } else {
+              throw new TypeError("log.setLevel() called with invalid level: " + input);
+          }
+      }
+
+      /*
+       *
+       * Public logger API - see https://github.com/pimterry/loglevel for details
+       *
+       */
+
+      self.name = name;
+
+      self.levels = { "TRACE": 0, "DEBUG": 1, "INFO": 2, "WARN": 3,
+          "ERROR": 4, "SILENT": 5};
+
+      self.methodFactory = factory || defaultMethodFactory;
+
+      self.getLevel = function () {
+          if (userLevel != null) {
+            return userLevel;
+          } else if (defaultLevel != null) {
+            return defaultLevel;
+          } else {
+            return inheritedLevel;
+          }
+      };
+
+      self.setLevel = function (level, persist) {
+          userLevel = normalizeLevel(level);
+          if (persist !== false) {  // defaults to true
+              persistLevelIfPossible(userLevel);
+          }
+
+          // NOTE: in v2, this should call rebuild(), which updates children.
+          return replaceLoggingMethods.call(self);
+      };
+
+      self.setDefaultLevel = function (level) {
+          defaultLevel = normalizeLevel(level);
+          if (!getPersistedLevel()) {
+              self.setLevel(level, false);
+          }
+      };
+
+      self.resetLevel = function () {
+          userLevel = null;
+          clearPersistedLevel();
+          replaceLoggingMethods.call(self);
+      };
+
+      self.enableAll = function(persist) {
+          self.setLevel(self.levels.TRACE, persist);
+      };
+
+      self.disableAll = function(persist) {
+          self.setLevel(self.levels.SILENT, persist);
+      };
+
+      self.rebuild = function () {
+          if (defaultLogger !== self) {
+              inheritedLevel = normalizeLevel(defaultLogger.getLevel());
+          }
+          replaceLoggingMethods.call(self);
+
+          if (defaultLogger === self) {
+              for (var childName in _loggersByName) {
+                _loggersByName[childName].rebuild();
+              }
+          }
+      };
+
+      // Initialize all the internal levels.
+      inheritedLevel = normalizeLevel(
+          defaultLogger ? defaultLogger.getLevel() : "WARN"
+      );
+      var initialLevel = getPersistedLevel();
+      if (initialLevel != null) {
+          userLevel = normalizeLevel(initialLevel);
+      }
+      replaceLoggingMethods.call(self);
+    }
+
+    /*
+     *
+     * Top-level API
+     *
+     */
+
+    defaultLogger = new Logger();
+
+    defaultLogger.getLogger = function getLogger(name) {
+        if ((typeof name !== "symbol" && typeof name !== "string") || name === "") {
+            throw new TypeError("You must supply a name when creating a logger.");
+        }
+
+        var logger = _loggersByName[name];
+        if (!logger) {
+            logger = _loggersByName[name] = new Logger(
+                name,
+                defaultLogger.methodFactory
+            );
+        }
+        return logger;
+    };
+
+    // Grab the current global log variable in case of overwrite
+    var _log = (typeof window !== undefinedType) ? window.log : undefined;
+    defaultLogger.noConflict = function() {
+        if (typeof window !== undefinedType &&
+               window.log === defaultLogger) {
+            window.log = _log;
+        }
+
+        return defaultLogger;
+    };
+
+    defaultLogger.getLoggers = function getLoggers() {
+        return _loggersByName;
+    };
+
+    // ES6 default export, for compatibility
+    defaultLogger['default'] = defaultLogger;
+
+    return defaultLogger;
+}));
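
The `methodFactory` hook above is this library's extension point: every logging method is built by calling `methodFactory(methodName, level, loggerName)`. A minimal sketch of wrapping it to prefix each message (the prefix text and the single-argument wrapper are illustrative only, not part of the API):

```javascript
var log = require('loglevel');

var originalFactory = log.methodFactory;
log.methodFactory = function (methodName, level, loggerName) {
    // Build the underlying console-bound method first...
    var rawMethod = originalFactory(methodName, level, loggerName);
    // ...then wrap it. Note: this sketch only forwards a single argument.
    return function (message) {
        rawMethod("[" + (loggerName ? String(loggerName) : "root") + "] " + message);
    };
};

// Calling setLevel() re-runs replaceLoggingMethods(), so the new factory takes effect.
log.setLevel(log.getLevel());
```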

Changes are not shown because this file is too large.
+ 2 - 0
node_modules/loglevel/dist/loglevel.min.js


+ 357 - 0
node_modules/loglevel/lib/loglevel.js

@@ -0,0 +1,357 @@
+/*
+* loglevel - https://github.com/pimterry/loglevel
+*
+* Copyright (c) 2013 Tim Perry
+* Licensed under the MIT license.
+*/
+(function (root, definition) {
+    "use strict";
+    if (typeof define === 'function' && define.amd) {
+        define(definition);
+    } else if (typeof module === 'object' && module.exports) {
+        module.exports = definition();
+    } else {
+        root.log = definition();
+    }
+}(this, function () {
+    "use strict";
+
+    // Slightly dubious tricks to cut down minimized file size
+    var noop = function() {};
+    var undefinedType = "undefined";
+    var isIE = (typeof window !== undefinedType) && (typeof window.navigator !== undefinedType) && (
+        /Trident\/|MSIE /.test(window.navigator.userAgent)
+    );
+
+    var logMethods = [
+        "trace",
+        "debug",
+        "info",
+        "warn",
+        "error"
+    ];
+
+    var _loggersByName = {};
+    var defaultLogger = null;
+
+    // Cross-browser bind equivalent that works at least back to IE6
+    function bindMethod(obj, methodName) {
+        var method = obj[methodName];
+        if (typeof method.bind === 'function') {
+            return method.bind(obj);
+        } else {
+            try {
+                return Function.prototype.bind.call(method, obj);
+            } catch (e) {
+                // Missing bind shim or IE8 + Modernizr, fallback to wrapping
+                return function() {
+                    return Function.prototype.apply.apply(method, [obj, arguments]);
+                };
+            }
+        }
+    }
+
+    // Trace() doesn't print the message in IE, so for that case we need to wrap it
+    function traceForIE() {
+        if (console.log) {
+            if (console.log.apply) {
+                console.log.apply(console, arguments);
+            } else {
+                // In old IE, native console methods themselves don't have apply().
+                Function.prototype.apply.apply(console.log, [console, arguments]);
+            }
+        }
+        if (console.trace) console.trace();
+    }
+
+    // Build the best logging method possible for this env
+    // Wherever possible we want to bind, not wrap, to preserve stack traces
+    function realMethod(methodName) {
+        if (methodName === 'debug') {
+            methodName = 'log';
+        }
+
+        if (typeof console === undefinedType) {
+            return false; // No method possible, for now - fixed later by enableLoggingWhenConsoleArrives
+        } else if (methodName === 'trace' && isIE) {
+            return traceForIE;
+        } else if (console[methodName] !== undefined) {
+            return bindMethod(console, methodName);
+        } else if (console.log !== undefined) {
+            return bindMethod(console, 'log');
+        } else {
+            return noop;
+        }
+    }
+
+    // These private functions always need `this` to be set properly
+
+    function replaceLoggingMethods() {
+        /*jshint validthis:true */
+        var level = this.getLevel();
+
+        // Replace the actual methods.
+        for (var i = 0; i < logMethods.length; i++) {
+            var methodName = logMethods[i];
+            this[methodName] = (i < level) ?
+                noop :
+                this.methodFactory(methodName, level, this.name);
+        }
+
+        // Define log.log as an alias for log.debug
+        this.log = this.debug;
+
+        // Return any important warnings.
+        if (typeof console === undefinedType && level < this.levels.SILENT) {
+            return "No console available for logging";
+        }
+    }
+
+    // In old IE versions, the console isn't present until you first open it.
+    // We build realMethod() replacements here that regenerate logging methods
+    function enableLoggingWhenConsoleArrives(methodName) {
+        return function () {
+            if (typeof console !== undefinedType) {
+                replaceLoggingMethods.call(this);
+                this[methodName].apply(this, arguments);
+            }
+        };
+    }
+
+    // By default, we use closely bound real methods wherever possible, and
+    // otherwise we wait for a console to appear, and then try again.
+    function defaultMethodFactory(methodName, _level, _loggerName) {
+        /*jshint validthis:true */
+        return realMethod(methodName) ||
+               enableLoggingWhenConsoleArrives.apply(this, arguments);
+    }
+
+    function Logger(name, factory) {
+      // Private instance variables.
+      var self = this;
+      /**
+       * The level inherited from a parent logger (or a global default). We
+       * cache this here rather than delegating to the parent so that it stays
+       * in sync with the actual logging methods that we have installed (the
+       * parent could change levels but we might not have rebuilt the loggers
+       * in this child yet).
+       * @type {number}
+       */
+      var inheritedLevel;
+      /**
+       * The default level for this logger, if any. If set, this overrides
+       * `inheritedLevel`.
+       * @type {number|null}
+       */
+      var defaultLevel;
+      /**
+       * A user-specific level for this logger. If set, this overrides
+       * `defaultLevel`.
+       * @type {number|null}
+       */
+      var userLevel;
+
+      var storageKey = "loglevel";
+      if (typeof name === "string") {
+        storageKey += ":" + name;
+      } else if (typeof name === "symbol") {
+        storageKey = undefined;
+      }
+
+      function persistLevelIfPossible(levelNum) {
+          var levelName = (logMethods[levelNum] || 'silent').toUpperCase();
+
+          if (typeof window === undefinedType || !storageKey) return;
+
+          // Use localStorage if available
+          try {
+              window.localStorage[storageKey] = levelName;
+              return;
+          } catch (ignore) {}
+
+          // Use session cookie as fallback
+          try {
+              window.document.cookie =
+                encodeURIComponent(storageKey) + "=" + levelName + ";";
+          } catch (ignore) {}
+      }
+
+      function getPersistedLevel() {
+          var storedLevel;
+
+          if (typeof window === undefinedType || !storageKey) return;
+
+          try {
+              storedLevel = window.localStorage[storageKey];
+          } catch (ignore) {}
+
+          // Fallback to cookies if local storage gives us nothing
+          if (typeof storedLevel === undefinedType) {
+              try {
+                  var cookie = window.document.cookie;
+                  var cookieName = encodeURIComponent(storageKey);
+                  var location = cookie.indexOf(cookieName + "=");
+                  if (location !== -1) {
+                      storedLevel = /^([^;]+)/.exec(
+                          cookie.slice(location + cookieName.length + 1)
+                      )[1];
+                  }
+              } catch (ignore) {}
+          }
+
+          // If the stored level is not valid, treat it as if nothing was stored.
+          if (self.levels[storedLevel] === undefined) {
+              storedLevel = undefined;
+          }
+
+          return storedLevel;
+      }
+
+      function clearPersistedLevel() {
+          if (typeof window === undefinedType || !storageKey) return;
+
+          // Use localStorage if available
+          try {
+              window.localStorage.removeItem(storageKey);
+          } catch (ignore) {}
+
+          // Use session cookie as fallback
+          try {
+              window.document.cookie =
+                encodeURIComponent(storageKey) + "=; expires=Thu, 01 Jan 1970 00:00:00 UTC";
+          } catch (ignore) {}
+      }
+
+      function normalizeLevel(input) {
+          var level = input;
+          if (typeof level === "string" && self.levels[level.toUpperCase()] !== undefined) {
+              level = self.levels[level.toUpperCase()];
+          }
+          if (typeof level === "number" && level >= 0 && level <= self.levels.SILENT) {
+              return level;
+          } else {
+              throw new TypeError("log.setLevel() called with invalid level: " + input);
+          }
+      }
+
+      /*
+       *
+       * Public logger API - see https://github.com/pimterry/loglevel for details
+       *
+       */
+
+      self.name = name;
+
+      self.levels = { "TRACE": 0, "DEBUG": 1, "INFO": 2, "WARN": 3,
+          "ERROR": 4, "SILENT": 5};
+
+      self.methodFactory = factory || defaultMethodFactory;
+
+      self.getLevel = function () {
+          if (userLevel != null) {
+            return userLevel;
+          } else if (defaultLevel != null) {
+            return defaultLevel;
+          } else {
+            return inheritedLevel;
+          }
+      };
+
+      self.setLevel = function (level, persist) {
+          userLevel = normalizeLevel(level);
+          if (persist !== false) {  // defaults to true
+              persistLevelIfPossible(userLevel);
+          }
+
+          // NOTE: in v2, this should call rebuild(), which updates children.
+          return replaceLoggingMethods.call(self);
+      };
+
+      self.setDefaultLevel = function (level) {
+          defaultLevel = normalizeLevel(level);
+          if (!getPersistedLevel()) {
+              self.setLevel(level, false);
+          }
+      };
+
+      self.resetLevel = function () {
+          userLevel = null;
+          clearPersistedLevel();
+          replaceLoggingMethods.call(self);
+      };
+
+      self.enableAll = function(persist) {
+          self.setLevel(self.levels.TRACE, persist);
+      };
+
+      self.disableAll = function(persist) {
+          self.setLevel(self.levels.SILENT, persist);
+      };
+
+      self.rebuild = function () {
+          if (defaultLogger !== self) {
+              inheritedLevel = normalizeLevel(defaultLogger.getLevel());
+          }
+          replaceLoggingMethods.call(self);
+
+          if (defaultLogger === self) {
+              for (var childName in _loggersByName) {
+                _loggersByName[childName].rebuild();
+              }
+          }
+      };
+
+      // Initialize all the internal levels.
+      inheritedLevel = normalizeLevel(
+          defaultLogger ? defaultLogger.getLevel() : "WARN"
+      );
+      var initialLevel = getPersistedLevel();
+      if (initialLevel != null) {
+          userLevel = normalizeLevel(initialLevel);
+      }
+      replaceLoggingMethods.call(self);
+    }
+
+    /*
+     *
+     * Top-level API
+     *
+     */
+
+    defaultLogger = new Logger();
+
+    defaultLogger.getLogger = function getLogger(name) {
+        if ((typeof name !== "symbol" && typeof name !== "string") || name === "") {
+            throw new TypeError("You must supply a name when creating a logger.");
+        }
+
+        var logger = _loggersByName[name];
+        if (!logger) {
+            logger = _loggersByName[name] = new Logger(
+                name,
+                defaultLogger.methodFactory
+            );
+        }
+        return logger;
+    };
+
+    // Grab the current global log variable in case of overwrite
+    var _log = (typeof window !== undefinedType) ? window.log : undefined;
+    defaultLogger.noConflict = function() {
+        if (typeof window !== undefinedType &&
+               window.log === defaultLogger) {
+            window.log = _log;
+        }
+
+        return defaultLogger;
+    };
+
+    defaultLogger.getLoggers = function getLoggers() {
+        return _loggersByName;
+    };
+
+    // ES6 default export, for compatibility
+    defaultLogger['default'] = defaultLogger;
+
+    return defaultLogger;
+}));
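
For reference, a minimal usage sketch of the public API defined in this file (shown with CommonJS `require`; the AMD and browser-global paths set up by the UMD wrapper expose the same object):

```javascript
var log = require('loglevel');

log.setLevel('warn');             // persisted to localStorage under the key "loglevel"
log.info('not shown');            // below WARN, so this call is a no-op
log.error('shown');               // bound directly to console.error, preserving call sites

var net = log.getLogger('net');   // child logger, persisted under "loglevel:net"
net.setLevel('trace', false);     // passing false skips persistence
net.log('very verbose output');   // log.log is an alias for log.debug
```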

+ 15 - 0
node_modules/lru-cache/LICENSE

@@ -0,0 +1,15 @@
+The ISC License
+
+Copyright (c) Isaac Z. Schlueter and Contributors
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
+IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ 166 - 0
node_modules/lru-cache/README.md

@@ -0,0 +1,166 @@
+# lru cache
+
+A cache object that deletes the least-recently-used items.
+
+[![Build Status](https://travis-ci.org/isaacs/node-lru-cache.svg?branch=master)](https://travis-ci.org/isaacs/node-lru-cache) [![Coverage Status](https://coveralls.io/repos/isaacs/node-lru-cache/badge.svg?service=github)](https://coveralls.io/github/isaacs/node-lru-cache)
+
+## Installation:
+
+```sh
+npm install lru-cache --save
+```
+
+## Usage:
+
+```javascript
+var LRU = require("lru-cache")
+  , options = { max: 500
+              , length: function (n, key) { return n * 2 + key.length }
+              , dispose: function (key, n) { n.close() }
+              , maxAge: 1000 * 60 * 60 }
+  , cache = new LRU(options)
+  , otherCache = new LRU(50) // sets just the max size
+
+cache.set("key", "value")
+cache.get("key") // "value"
+
+// non-string keys ARE fully supported
+// but note that it must be THE SAME object, not
+// just a JSON-equivalent object.
+var someObject = { a: 1 }
+cache.set(someObject, 'a value')
+// Object keys are not toString()-ed
+cache.set('[object Object]', 'a different value')
+assert.equal(cache.get(someObject), 'a value')
+// A similar object with same keys/values won't work,
+// because it's a different object identity
+assert.equal(cache.get({ a: 1 }), undefined)
+
+cache.reset()    // empty the cache
+```
+
+If you put more stuff in it, then items will fall out.
+
+If you try to put an oversized thing in it, then it'll fall out right
+away.
+
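+A quick illustration of both points, using the `length` option described below
+(return values follow the behaviour of `set()` in this package's index.js):
+
+```javascript
+var LRU = require("lru-cache")
+
+var byCount = new LRU(2)          // room for 2 like-sized items
+byCount.set("a", 1)
+byCount.set("b", 2)
+byCount.set("c", 3)               // "a" (least recently used) falls out
+byCount.has("a")                  // false
+
+var bySize = new LRU({ max: 5, length: function (n) { return n.length } })
+bySize.set("key", "0123456789")   // false – length 10 exceeds max 5, never stored
+```
+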
+## Options
+
+* `max` The maximum size of the cache, checked by applying the length
+  function to all values in the cache.  Not setting this is kind of
+  silly, since that's the whole purpose of this lib, but it defaults
+  to `Infinity`.  Setting it to a non-number or negative number will
+  throw a `TypeError`.  Setting it to 0 makes it be `Infinity`.
+* `maxAge` Maximum age in ms.  Items are not pro-actively pruned out
+  as they age, but if you try to get an item that is too old, it'll
+  drop it and return undefined instead of giving it to you.
+  Setting this to a negative value will make everything seem old!
+  Setting it to a non-number will throw a `TypeError`.
+* `length` Function that is used to calculate the length of stored
+  items.  If you're storing strings or buffers, then you probably want
+  to do something like `function(n, key){return n.length}`.  The default is
+  `function(){return 1}`, which is fine if you want to store `max`
+  like-sized things.  The item is passed as the first argument, and
+  the key is passed as the second argument.
+* `dispose` Function that is called on items when they are dropped
+  from the cache.  This can be handy if you want to close file
+  descriptors or do other cleanup tasks when items are no longer
+  accessible.  Called with `key, value`.  It's called *before*
+  actually removing the item from the internal cache, so if you want
+  to immediately put it back in, you'll have to do that in a
+  `nextTick` or `setTimeout` callback or it won't do anything.
+* `stale` By default, if you set a `maxAge`, it'll only actually pull
+  stale items out of the cache when you `get(key)`.  (That is, it's
+  not pre-emptively doing a `setTimeout` or anything.)  If you set
+  `stale:true`, it'll return the stale value before deleting it.  If
+  you don't set this, then it'll return `undefined` when you try to
+  get a stale entry, as if it had already been deleted.  (See the sketch
+  after this list.)
+* `noDisposeOnSet` By default, if you set a `dispose()` method, then
+  it'll be called whenever a `set()` operation overwrites an existing
+  key.  If you set this option, `dispose()` will only be called when a
+  key falls out of the cache, not when it is overwritten.
+* `updateAgeOnGet` When using time-expiring entries with `maxAge`,
+  setting this to `true` will make each item's effective time update
+  to the current time whenever it is retrieved from cache, causing it
+  to not expire.  (It can still fall out of cache based on recency of
+  use, of course.)
+
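+A sketch of how `maxAge` and `stale` interact (timings are arbitrary; the behaviour
+follows the option descriptions above):
+
+```javascript
+var LRU = require("lru-cache")
+var cache = new LRU({ max: 10, maxAge: 100, stale: true })
+
+cache.set("a", 1)
+setTimeout(function () {
+  cache.get("a")   // 1 – stale:true returns the stale value once, then drops the entry
+  cache.get("a")   // undefined – the stale entry was deleted by the previous get
+}, 200)
+```
+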
+## API
+
+* `set(key, value, maxAge)`
+* `get(key) => value`
+
+    Both of these will update the "recently used"-ness of the key.
+    They do what you think. `maxAge` is optional and overrides the
+    cache `maxAge` option if provided.
+
+    If the key is not found, `get()` will return `undefined`.
+
+    The key and val can be any value.
+
+* `peek(key)`
+
+    Returns the key value (or `undefined` if not found) without
+    updating the "recently used"-ness of the key.
+
+    (If you find yourself using this a lot, you *might* be using the
+    wrong sort of data structure, but there are some use cases where
+    it's handy.)
+
+* `del(key)`
+
+    Deletes a key out of the cache.
+
+* `reset()`
+
+    Clear the cache entirely, throwing away all values.
+
+* `has(key)`
+
+    Check if a key is in the cache, without updating the recent-ness
+    or deleting it for being stale.
+
+* `forEach(function(value,key,cache), [thisp])`
+
+    Just like `Array.prototype.forEach`.  Iterates over all the keys
+    in the cache, in order of recent-ness.  (Ie, more recently used
+    items are iterated over first.)
+
+* `rforEach(function(value,key,cache), [thisp])`
+
+    The same as `cache.forEach(...)` but items are iterated over in
+    reverse order.  (ie, less recently used items are iterated over
+    first.)
+
+* `keys()`
+
+    Return an array of the keys in the cache.
+
+* `values()`
+
+    Return an array of the values in the cache.
+
+* `length`
+
+    Return the total length of objects in the cache, taking the `length`
+    option's function into account.
+
+* `itemCount`
+
+    Return total quantity of objects currently in cache. Note that
+    `stale` (see options) items are returned as part of this item
+    count.
+
+* `dump()`
+
+    Return an array of the cache entries ready for serialization and usage
+    with `destinationCache.load(arr)`, as shown in the sketch after this list.
+
+* `load(cacheEntriesArray)`
+
+    Loads another cache entries array, obtained with `sourceCache.dump()`,
+    into the cache. The destination cache is reset before the new entries are loaded.
+
+* `prune()`
+
+    Manually iterates over the entire cache proactively pruning old entries

+ 334 - 0
node_modules/lru-cache/index.js

@@ -0,0 +1,334 @@
+'use strict'
+
+// A linked list to keep track of recently-used-ness
+const Yallist = require('yallist')
+
+const MAX = Symbol('max')
+const LENGTH = Symbol('length')
+const LENGTH_CALCULATOR = Symbol('lengthCalculator')
+const ALLOW_STALE = Symbol('allowStale')
+const MAX_AGE = Symbol('maxAge')
+const DISPOSE = Symbol('dispose')
+const NO_DISPOSE_ON_SET = Symbol('noDisposeOnSet')
+const LRU_LIST = Symbol('lruList')
+const CACHE = Symbol('cache')
+const UPDATE_AGE_ON_GET = Symbol('updateAgeOnGet')
+
+const naiveLength = () => 1
+
+// lruList is a yallist where the head is the youngest
+// item, and the tail is the oldest.  the list contains the Hit
+// objects as the entries.
+// Each Hit object has a reference to its Yallist.Node.  This
+// never changes.
+//
+// cache is a Map (or PseudoMap) that matches the keys to
+// the Yallist.Node object.
+class LRUCache {
+  constructor (options) {
+    if (typeof options === 'number')
+      options = { max: options }
+
+    if (!options)
+      options = {}
+
+    if (options.max && (typeof options.max !== 'number' || options.max < 0))
+      throw new TypeError('max must be a non-negative number')
+    // Kind of weird to have a default max of Infinity, but oh well.
+    const max = this[MAX] = options.max || Infinity
+
+    const lc = options.length || naiveLength
+    this[LENGTH_CALCULATOR] = (typeof lc !== 'function') ? naiveLength : lc
+    this[ALLOW_STALE] = options.stale || false
+    if (options.maxAge && typeof options.maxAge !== 'number')
+      throw new TypeError('maxAge must be a number')
+    this[MAX_AGE] = options.maxAge || 0
+    this[DISPOSE] = options.dispose
+    this[NO_DISPOSE_ON_SET] = options.noDisposeOnSet || false
+    this[UPDATE_AGE_ON_GET] = options.updateAgeOnGet || false
+    this.reset()
+  }
+
+  // resize the cache when the max changes.
+  set max (mL) {
+    if (typeof mL !== 'number' || mL < 0)
+      throw new TypeError('max must be a non-negative number')
+
+    this[MAX] = mL || Infinity
+    trim(this)
+  }
+  get max () {
+    return this[MAX]
+  }
+
+  set allowStale (allowStale) {
+    this[ALLOW_STALE] = !!allowStale
+  }
+  get allowStale () {
+    return this[ALLOW_STALE]
+  }
+
+  set maxAge (mA) {
+    if (typeof mA !== 'number')
+      throw new TypeError('maxAge must be a non-negative number')
+
+    this[MAX_AGE] = mA
+    trim(this)
+  }
+  get maxAge () {
+    return this[MAX_AGE]
+  }
+
+  // resize the cache when the lengthCalculator changes.
+  set lengthCalculator (lC) {
+    if (typeof lC !== 'function')
+      lC = naiveLength
+
+    if (lC !== this[LENGTH_CALCULATOR]) {
+      this[LENGTH_CALCULATOR] = lC
+      this[LENGTH] = 0
+      this[LRU_LIST].forEach(hit => {
+        hit.length = this[LENGTH_CALCULATOR](hit.value, hit.key)
+        this[LENGTH] += hit.length
+      })
+    }
+    trim(this)
+  }
+  get lengthCalculator () { return this[LENGTH_CALCULATOR] }
+
+  get length () { return this[LENGTH] }
+  get itemCount () { return this[LRU_LIST].length }
+
+  rforEach (fn, thisp) {
+    thisp = thisp || this
+    for (let walker = this[LRU_LIST].tail; walker !== null;) {
+      const prev = walker.prev
+      forEachStep(this, fn, walker, thisp)
+      walker = prev
+    }
+  }
+
+  forEach (fn, thisp) {
+    thisp = thisp || this
+    for (let walker = this[LRU_LIST].head; walker !== null;) {
+      const next = walker.next
+      forEachStep(this, fn, walker, thisp)
+      walker = next
+    }
+  }
+
+  keys () {
+    return this[LRU_LIST].toArray().map(k => k.key)
+  }
+
+  values () {
+    return this[LRU_LIST].toArray().map(k => k.value)
+  }
+
+  reset () {
+    if (this[DISPOSE] &&
+        this[LRU_LIST] &&
+        this[LRU_LIST].length) {
+      this[LRU_LIST].forEach(hit => this[DISPOSE](hit.key, hit.value))
+    }
+
+    this[CACHE] = new Map() // hash of items by key
+    this[LRU_LIST] = new Yallist() // list of items in order of use recency
+    this[LENGTH] = 0 // length of items in the list
+  }
+
+  dump () {
+    return this[LRU_LIST].map(hit =>
+      isStale(this, hit) ? false : {
+        k: hit.key,
+        v: hit.value,
+        e: hit.now + (hit.maxAge || 0)
+      }).toArray().filter(h => h)
+  }
+
+  dumpLru () {
+    return this[LRU_LIST]
+  }
+
+  set (key, value, maxAge) {
+    maxAge = maxAge || this[MAX_AGE]
+
+    if (maxAge && typeof maxAge !== 'number')
+      throw new TypeError('maxAge must be a number')
+
+    const now = maxAge ? Date.now() : 0
+    const len = this[LENGTH_CALCULATOR](value, key)
+
+    if (this[CACHE].has(key)) {
+      if (len > this[MAX]) {
+        del(this, this[CACHE].get(key))
+        return false
+      }
+
+      const node = this[CACHE].get(key)
+      const item = node.value
+
+      // dispose of the old one before overwriting
+      // split out into 2 ifs for better coverage tracking
+      if (this[DISPOSE]) {
+        if (!this[NO_DISPOSE_ON_SET])
+          this[DISPOSE](key, item.value)
+      }
+
+      item.now = now
+      item.maxAge = maxAge
+      item.value = value
+      this[LENGTH] += len - item.length
+      item.length = len
+      this.get(key)
+      trim(this)
+      return true
+    }
+
+    const hit = new Entry(key, value, len, now, maxAge)
+
+    // oversized objects fall out of cache automatically.
+    if (hit.length > this[MAX]) {
+      if (this[DISPOSE])
+        this[DISPOSE](key, value)
+
+      return false
+    }
+
+    this[LENGTH] += hit.length
+    this[LRU_LIST].unshift(hit)
+    this[CACHE].set(key, this[LRU_LIST].head)
+    trim(this)
+    return true
+  }
+
+  has (key) {
+    if (!this[CACHE].has(key)) return false
+    const hit = this[CACHE].get(key).value
+    return !isStale(this, hit)
+  }
+
+  get (key) {
+    return get(this, key, true)
+  }
+
+  peek (key) {
+    return get(this, key, false)
+  }
+
+  pop () {
+    const node = this[LRU_LIST].tail
+    if (!node)
+      return null
+
+    del(this, node)
+    return node.value
+  }
+
+  del (key) {
+    del(this, this[CACHE].get(key))
+  }
+
+  load (arr) {
+    // reset the cache
+    this.reset()
+
+    const now = Date.now()
+    // A previous serialized cache has the most recent items first
+    for (let l = arr.length - 1; l >= 0; l--) {
+      const hit = arr[l]
+      const expiresAt = hit.e || 0
+      if (expiresAt === 0)
+        // the item was created without expiration in a non aged cache
+        this.set(hit.k, hit.v)
+      else {
+        const maxAge = expiresAt - now
+        // dont add already expired items
+        if (maxAge > 0) {
+          this.set(hit.k, hit.v, maxAge)
+        }
+      }
+    }
+  }
+
+  prune () {
+    this[CACHE].forEach((value, key) => get(this, key, false))
+  }
+}
+
+const get = (self, key, doUse) => {
+  const node = self[CACHE].get(key)
+  if (node) {
+    const hit = node.value
+    if (isStale(self, hit)) {
+      del(self, node)
+      if (!self[ALLOW_STALE])
+        return undefined
+    } else {
+      if (doUse) {
+        if (self[UPDATE_AGE_ON_GET])
+          node.value.now = Date.now()
+        self[LRU_LIST].unshiftNode(node)
+      }
+    }
+    return hit.value
+  }
+}
+
+const isStale = (self, hit) => {
+  if (!hit || (!hit.maxAge && !self[MAX_AGE]))
+    return false
+
+  const diff = Date.now() - hit.now
+  return hit.maxAge ? diff > hit.maxAge
+    : self[MAX_AGE] && (diff > self[MAX_AGE])
+}
+
+const trim = self => {
+  if (self[LENGTH] > self[MAX]) {
+    for (let walker = self[LRU_LIST].tail;
+      self[LENGTH] > self[MAX] && walker !== null;) {
+      // We know that we're about to delete this one, and also
+      // what the next least recently used key will be, so just
+      // go ahead and set it now.
+      const prev = walker.prev
+      del(self, walker)
+      walker = prev
+    }
+  }
+}
+
+const del = (self, node) => {
+  if (node) {
+    const hit = node.value
+    if (self[DISPOSE])
+      self[DISPOSE](hit.key, hit.value)
+
+    self[LENGTH] -= hit.length
+    self[CACHE].delete(hit.key)
+    self[LRU_LIST].removeNode(node)
+  }
+}
+
+class Entry {
+  constructor (key, value, length, now, maxAge) {
+    this.key = key
+    this.value = value
+    this.length = length
+    this.now = now
+    this.maxAge = maxAge || 0
+  }
+}
+
+const forEachStep = (self, fn, node, thisp) => {
+  let hit = node.value
+  if (isStale(self, hit)) {
+    del(self, node)
+    if (!self[ALLOW_STALE])
+      hit = undefined
+  }
+  if (hit)
+    fn.call(thisp, hit.value, hit.key, self)
+}
+
+module.exports = LRUCache

+ 32 - 0
node_modules/lru-cache/package.json

@@ -0,0 +1,32 @@
+{
+  "name": "lru-cache",
+  "description": "A cache object that deletes the least-recently-used items.",
+  "version": "5.1.1",
+  "author": "Isaac Z. Schlueter <i@izs.me>",
+  "keywords": [
+    "mru",
+    "lru",
+    "cache"
+  ],
+  "scripts": {
+    "test": "tap test/*.js --100 -J",
+    "snap": "TAP_SNAPSHOT=1 tap test/*.js -J",
+    "coveragerport": "tap --coverage-report=html",
+    "preversion": "npm test",
+    "postversion": "npm publish",
+    "postpublish": "git push origin --all; git push origin --tags"
+  },
+  "main": "index.js",
+  "repository": "git://github.com/isaacs/node-lru-cache.git",
+  "devDependencies": {
+    "benchmark": "^2.1.4",
+    "tap": "^12.1.0"
+  },
+  "license": "ISC",
+  "dependencies": {
+    "yallist": "^3.0.2"
+  },
+  "files": [
+    "index.js"
+  ]
+}

+ 3 - 0
node_modules/lunr/.npmignore

@@ -0,0 +1,3 @@
+/node_modules
+docs/
+test/env/file_list.json

+ 19 - 0
node_modules/lunr/LICENSE

@@ -0,0 +1,19 @@
+Copyright (C) 2013 by Oliver Nightingale
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 78 - 0
node_modules/lunr/README.md

@@ -0,0 +1,78 @@
+# Lunr.js
+
+[![Join the chat at https://gitter.im/olivernn/lunr.js](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/olivernn/lunr.js?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+[![Build Status](https://travis-ci.org/olivernn/lunr.js.svg?branch=master)](https://travis-ci.org/olivernn/lunr.js)
+
+A bit like Solr, but much smaller and not as bright.
+
+## Example
+
+A very simple search index can be created using the following:
+
+```javascript
+var idx = lunr(function () {
+  this.field('title')
+  this.field('body')
+
+  this.add({
+    "title": "Twelfth-Night",
+    "body": "If music be the food of love, play on: Give me excess of it…",
+    "author": "William Shakespeare",
+    "id": "1"
+  })
+})
+```
+
+Then searching is as simple as:
+
+```javascript
+idx.search("love")
+```
+
+This returns a list of matching documents with a score of how closely they match the search query as well as any associated metadata about the match:
+
+```javascript
+[
+  {
+    "ref": "1",
+    "score": 0.3535533905932737,
+    "matchData": {
+      "metadata": {
+        "love": {
+          "body": {}
+        }
+      }
+    }
+  }
+]
+```
+
+[API documentation](https://lunrjs.com/docs/index.html) is available, as well as a [full working example](https://olivernn.github.io/moonwalkers/).
+
+## Description
+
+Lunr.js is a small, full-text search library for use in the browser.  It indexes JSON documents and provides a simple search interface for retrieving documents that best match text queries.
+
+## Why
+
+For web applications with all their data already sitting in the client, it makes sense to be able to search that data on the client too.  It saves adding extra, complicated services on the server.  A local search index is quicker, has no network overhead, and remains available and usable even without a network connection.
+
+## Installation
+
+Simply include the lunr.js source file in the page on which you want to use it.  Lunr.js is supported in all modern browsers.
+
+Alternatively, an npm package is also available: `npm install lunr`.
+
+Browsers that do not support ES5 will require a JavaScript shim for Lunr to work. You can either use [Augment.js](https://github.com/olivernn/augment.js), [ES5-Shim](https://github.com/kriskowal/es5-shim) or any library that patches old browsers to provide an ES5 compatible JavaScript environment.
+
+## Features
+
+* Full text search support for 14 languages
+* Boost terms at query time or boost entire documents at index time
+* Scope searches to specific fields
+* Fuzzy term matching with wildcards or edit distance (see the query sketch below)
+
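+Query-time examples of these features, using the `idx` built in the example above
+(a sketch only; the full query syntax is documented in the lunr guides):
+
+```javascript
+idx.search("title:love")    // scope a term to the title field
+idx.search("love^10 play")  // boost matches on "love" when scoring
+idx.search("lo*")           // trailing wildcard
+idx.search("love~1")        // fuzzy match with an edit distance of 1
+```
+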
+## Contributing
+
+See the [`CONTRIBUTING.md` file](CONTRIBUTING.md).

+ 11 - 0
node_modules/lunr/build/bower.json.template

@@ -0,0 +1,11 @@
+{
+  "name": "lunr.js",
+  "version": "@VERSION",
+  "main": "lunr.js",
+  "ignore": [
+    "tests/",
+    "perf/",
+    "build/",
+    "docs/"
+  ]
+}

+ 3475 - 0
node_modules/lunr/lunr.js

@@ -0,0 +1,3475 @@
+/**
+ * lunr - http://lunrjs.com - A bit like Solr, but much smaller and not as bright - 2.3.9
+ * Copyright (C) 2020 Oliver Nightingale
+ * @license MIT
+ */
+
+;(function(){
+
+/**
+ * A convenience function for configuring and constructing
+ * a new lunr Index.
+ *
+ * A lunr.Builder instance is created and the pipeline setup
+ * with a trimmer, stop word filter and stemmer.
+ *
+ * This builder object is yielded to the configuration function
+ * that is passed as a parameter, allowing the list of fields
+ * and other builder parameters to be customised.
+ *
+ * All documents _must_ be added within the passed config function.
+ *
+ * @example
+ * var idx = lunr(function () {
+ *   this.field('title')
+ *   this.field('body')
+ *   this.ref('id')
+ *
+ *   documents.forEach(function (doc) {
+ *     this.add(doc)
+ *   }, this)
+ * })
+ *
+ * @see {@link lunr.Builder}
+ * @see {@link lunr.Pipeline}
+ * @see {@link lunr.trimmer}
+ * @see {@link lunr.stopWordFilter}
+ * @see {@link lunr.stemmer}
+ * @namespace {function} lunr
+ */
+var lunr = function (config) {
+  var builder = new lunr.Builder
+
+  builder.pipeline.add(
+    lunr.trimmer,
+    lunr.stopWordFilter,
+    lunr.stemmer
+  )
+
+  builder.searchPipeline.add(
+    lunr.stemmer
+  )
+
+  config.call(builder, builder)
+  return builder.build()
+}
+
+lunr.version = "2.3.9"
+/*!
+ * lunr.utils
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+/**
+ * A namespace containing utils for the rest of the lunr library
+ * @namespace lunr.utils
+ */
+lunr.utils = {}
+
+/**
+ * Print a warning message to the console.
+ *
+ * @param {String} message The message to be printed.
+ * @memberOf lunr.utils
+ * @function
+ */
+lunr.utils.warn = (function (global) {
+  /* eslint-disable no-console */
+  return function (message) {
+    if (global.console && console.warn) {
+      console.warn(message)
+    }
+  }
+  /* eslint-enable no-console */
+})(this)
+
+/**
+ * Convert an object to a string.
+ *
+ * In the case of `null` and `undefined` the function returns
+ * the empty string, in all other cases the result of calling
+ * `toString` on the passed object is returned.
+ *
+ * @param {Any} obj The object to convert to a string.
+ * @return {String} string representation of the passed object.
+ * @memberOf lunr.utils
+ */
+lunr.utils.asString = function (obj) {
+  if (obj === void 0 || obj === null) {
+    return ""
+  } else {
+    return obj.toString()
+  }
+}
+
+/**
+ * Clones an object.
+ *
+ * Will create a copy of an existing object such that any mutations
+ * on the copy cannot affect the original.
+ *
+ * Only shallow objects are supported, passing a nested object to this
+ * function will cause a TypeError.
+ *
+ * Objects with primitives, and arrays of primitives are supported.
+ *
+ * @param {Object} obj The object to clone.
+ * @return {Object} a clone of the passed object.
+ * @throws {TypeError} when a nested object is passed.
+ * @memberOf Utils
+ */
+lunr.utils.clone = function (obj) {
+  if (obj === null || obj === undefined) {
+    return obj
+  }
+
+  var clone = Object.create(null),
+      keys = Object.keys(obj)
+
+  for (var i = 0; i < keys.length; i++) {
+    var key = keys[i],
+        val = obj[key]
+
+    if (Array.isArray(val)) {
+      clone[key] = val.slice()
+      continue
+    }
+
+    if (typeof val === 'string' ||
+        typeof val === 'number' ||
+        typeof val === 'boolean') {
+      clone[key] = val
+      continue
+    }
+
+    throw new TypeError("clone is not deep and does not support nested objects")
+  }
+
+  return clone
+}
+lunr.FieldRef = function (docRef, fieldName, stringValue) {
+  this.docRef = docRef
+  this.fieldName = fieldName
+  this._stringValue = stringValue
+}
+
+lunr.FieldRef.joiner = "/"
+
+lunr.FieldRef.fromString = function (s) {
+  var n = s.indexOf(lunr.FieldRef.joiner)
+
+  if (n === -1) {
+    throw "malformed field ref string"
+  }
+
+  var fieldRef = s.slice(0, n),
+      docRef = s.slice(n + 1)
+
+  return new lunr.FieldRef (docRef, fieldRef, s)
+}
+
+lunr.FieldRef.prototype.toString = function () {
+  if (this._stringValue == undefined) {
+    this._stringValue = this.fieldName + lunr.FieldRef.joiner + this.docRef
+  }
+
+  return this._stringValue
+}
+/*!
+ * lunr.Set
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+/**
+ * A lunr set.
+ *
+ * @constructor
+ */
+lunr.Set = function (elements) {
+  this.elements = Object.create(null)
+
+  if (elements) {
+    this.length = elements.length
+
+    for (var i = 0; i < this.length; i++) {
+      this.elements[elements[i]] = true
+    }
+  } else {
+    this.length = 0
+  }
+}
+
+/**
+ * A complete set that contains all elements.
+ *
+ * @static
+ * @readonly
+ * @type {lunr.Set}
+ */
+lunr.Set.complete = {
+  intersect: function (other) {
+    return other
+  },
+
+  union: function () {
+    return this
+  },
+
+  contains: function () {
+    return true
+  }
+}
+
+/**
+ * An empty set that contains no elements.
+ *
+ * @static
+ * @readonly
+ * @type {lunr.Set}
+ */
+lunr.Set.empty = {
+  intersect: function () {
+    return this
+  },
+
+  union: function (other) {
+    return other
+  },
+
+  contains: function () {
+    return false
+  }
+}
+
+/**
+ * Returns true if this set contains the specified object.
+ *
+ * @param {object} object - Object whose presence in this set is to be tested.
+ * @returns {boolean} - True if this set contains the specified object.
+ */
+lunr.Set.prototype.contains = function (object) {
+  return !!this.elements[object]
+}
+
+/**
+ * Returns a new set containing only the elements that are present in both
+ * this set and the specified set.
+ *
+ * @param {lunr.Set} other - set to intersect with this set.
+ * @returns {lunr.Set} a new set that is the intersection of this and the specified set.
+ */
+
+lunr.Set.prototype.intersect = function (other) {
+  var a, b, elements, intersection = []
+
+  if (other === lunr.Set.complete) {
+    return this
+  }
+
+  if (other === lunr.Set.empty) {
+    return other
+  }
+
+  if (this.length < other.length) {
+    a = this
+    b = other
+  } else {
+    a = other
+    b = this
+  }
+
+  elements = Object.keys(a.elements)
+
+  for (var i = 0; i < elements.length; i++) {
+    var element = elements[i]
+    if (element in b.elements) {
+      intersection.push(element)
+    }
+  }
+
+  return new lunr.Set (intersection)
+}
+
+/**
+ * Returns a new set combining the elements of this and the specified set.
+ *
+ * @param {lunr.Set} other - set to union with this set.
+ * @return {lunr.Set} a new set that is the union of this and the specified set.
+ */
+
+lunr.Set.prototype.union = function (other) {
+  if (other === lunr.Set.complete) {
+    return lunr.Set.complete
+  }
+
+  if (other === lunr.Set.empty) {
+    return this
+  }
+
+  return new lunr.Set(Object.keys(this.elements).concat(Object.keys(other.elements)))
+}
+/**
+ * A function to calculate the inverse document frequency for
+ * a posting. This is shared between the builder and the index
+ *
+ * @private
+ * @param {object} posting - The posting for a given term
+ * @param {number} documentCount - The total number of documents.
+ */
+lunr.idf = function (posting, documentCount) {
+  var documentsWithTerm = 0
+
+  for (var fieldName in posting) {
+    if (fieldName == '_index') continue // Ignore the term index, it's not a field
+    documentsWithTerm += Object.keys(posting[fieldName]).length
+  }
+
+  var x = (documentCount - documentsWithTerm + 0.5) / (documentsWithTerm + 0.5)
+
+  return Math.log(1 + Math.abs(x))
+}
+
+/**
+ * A token wraps a string representation of a token
+ * as it is passed through the text processing pipeline.
+ *
+ * @constructor
+ * @param {string} [str=''] - The string token being wrapped.
+ * @param {object} [metadata={}] - Metadata associated with this token.
+ */
+lunr.Token = function (str, metadata) {
+  this.str = str || ""
+  this.metadata = metadata || {}
+}
+
+/**
+ * Returns the token string that is being wrapped by this object.
+ *
+ * @returns {string}
+ */
+lunr.Token.prototype.toString = function () {
+  return this.str
+}
+
+/**
+ * A token update function is used when updating or optionally
+ * when cloning a token.
+ *
+ * @callback lunr.Token~updateFunction
+ * @param {string} str - The string representation of the token.
+ * @param {Object} metadata - All metadata associated with this token.
+ */
+
+/**
+ * Applies the given function to the wrapped string token.
+ *
+ * @example
+ * token.update(function (str, metadata) {
+ *   return str.toUpperCase()
+ * })
+ *
+ * @param {lunr.Token~updateFunction} fn - A function to apply to the token string.
+ * @returns {lunr.Token}
+ */
+lunr.Token.prototype.update = function (fn) {
+  this.str = fn(this.str, this.metadata)
+  return this
+}
+
+/**
+ * Creates a clone of this token. Optionally a function can be
+ * applied to the cloned token.
+ *
+ * @param {lunr.Token~updateFunction} [fn] - An optional function to apply to the cloned token.
+ * @returns {lunr.Token}
+ */
+lunr.Token.prototype.clone = function (fn) {
+  fn = fn || function (s) { return s }
+  return new lunr.Token (fn(this.str, this.metadata), this.metadata)
+}
+/*!
+ * lunr.tokenizer
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+/**
+ * A function for splitting a string into tokens ready to be inserted into
+ * the search index. Uses `lunr.tokenizer.separator` to split strings, change
+ * the value of this property to change how strings are split into tokens.
+ *
+ * This tokenizer will convert its parameter to a string by calling `toString` and
+ * then will split this string on the character in `lunr.tokenizer.separator`.
+ * Arrays will have their elements converted to strings and wrapped in a lunr.Token.
+ *
+ * Optional metadata can be passed to the tokenizer, this metadata will be cloned and
+ * added as metadata to every token that is created from the object to be tokenized.
+ *
+ * @static
+ * @param {?(string|object|object[])} obj - The object to convert into tokens
+ * @param {?object} metadata - Optional metadata to associate with every token
+ * @returns {lunr.Token[]}
+ * @see {@link lunr.Pipeline}
+ */
+lunr.tokenizer = function (obj, metadata) {
+  if (obj == null || obj == undefined) {
+    return []
+  }
+
+  if (Array.isArray(obj)) {
+    return obj.map(function (t) {
+      return new lunr.Token(
+        lunr.utils.asString(t).toLowerCase(),
+        lunr.utils.clone(metadata)
+      )
+    })
+  }
+
+  var str = obj.toString().toLowerCase(),
+      len = str.length,
+      tokens = []
+
+  for (var sliceEnd = 0, sliceStart = 0; sliceEnd <= len; sliceEnd++) {
+    var char = str.charAt(sliceEnd),
+        sliceLength = sliceEnd - sliceStart
+
+    if ((char.match(lunr.tokenizer.separator) || sliceEnd == len)) {
+
+      if (sliceLength > 0) {
+        var tokenMetadata = lunr.utils.clone(metadata) || {}
+        tokenMetadata["position"] = [sliceStart, sliceLength]
+        tokenMetadata["index"] = tokens.length
+
+        tokens.push(
+          new lunr.Token (
+            str.slice(sliceStart, sliceEnd),
+            tokenMetadata
+          )
+        )
+      }
+
+      sliceStart = sliceEnd + 1
+    }
+
+  }
+
+  return tokens
+}
+
+/**
+ * The separator used to split a string into tokens. Override this property to change the behaviour of
+ * `lunr.tokenizer` when tokenizing strings. By default this splits on whitespace and hyphens.
+ *
+ * @static
+ * @see lunr.tokenizer
+ */
+lunr.tokenizer.separator = /[\s\-]+/
+/*!
+ * lunr.Pipeline
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+/**
+ * lunr.Pipelines maintain an ordered list of functions to be applied to all
+ * tokens in documents entering the search index and queries being run against
+ * the index.
+ *
+ * An instance of lunr.Index created with the lunr shortcut will contain a
+ * pipeline with a stop word filter and an English language stemmer. Extra
+ * functions can be added before or after either of these functions or these
+ * default functions can be removed.
+ *
+ * When run the pipeline will call each function in turn, passing a token, the
+ * index of that token in the original list of all tokens and finally a list of
+ * all the original tokens.
+ *
+ * The output of functions in the pipeline will be passed to the next function
+ * in the pipeline. To exclude a token from entering the index the function
+ * should return undefined, the rest of the pipeline will not be called with
+ * this token.
+ *
+ * For serialisation of pipelines to work, all functions used in an instance of
+ * a pipeline should be registered with lunr.Pipeline. Registered functions can
+ * then be loaded. If trying to load a serialised pipeline that uses functions
+ * that are not registered an error will be thrown.
+ *
+ * If not planning on serialising the pipeline then registering pipeline functions
+ * is not necessary.
+ *
+ * @constructor
+ */
+lunr.Pipeline = function () {
+  this._stack = []
+}
+
+lunr.Pipeline.registeredFunctions = Object.create(null)
+
+/**
+ * A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token
+ * string as well as all known metadata. A pipeline function can mutate the token string
+ * or mutate (or add) metadata for a given token.
+ *
+ * A pipeline function can indicate that the passed token should be discarded by returning
+ * null, undefined or an empty string. This token will not be passed to any downstream pipeline
+ * functions and will not be added to the index.
+ *
+ * Multiple tokens can be returned by returning an array of tokens. Each token will be passed
+ * to any downstream pipeline functions and all returned tokens will be added to the index.
+ *
+ * Any number of pipeline functions may be chained together using a lunr.Pipeline.
+ *
+ * @interface lunr.PipelineFunction
+ * @param {lunr.Token} token - A token from the document being processed.
+ * @param {number} i - The index of this token in the complete list of tokens for this document/field.
+ * @param {lunr.Token[]} tokens - All tokens for this document/field.
+ * @returns {(?lunr.Token|lunr.Token[])}
+ */
+
+/**
+ * Register a function with the pipeline.
+ *
+ * Functions that are used in the pipeline should be registered if the pipeline
+ * needs to be serialised, or a serialised pipeline needs to be loaded.
+ *
+ * Registering a function does not add it to a pipeline, functions must still be
+ * added to instances of the pipeline for them to be used when running a pipeline.
+ *
+ * @param {lunr.PipelineFunction} fn - The function to check for.
+ * @param {String} label - The label to register this function with
+ */
+lunr.Pipeline.registerFunction = function (fn, label) {
+  if (label in this.registeredFunctions) {
+    lunr.utils.warn('Overwriting existing registered function: ' + label)
+  }
+
+  fn.label = label
+  lunr.Pipeline.registeredFunctions[fn.label] = fn
+}
+
+/**
+ * Warns if the function is not registered as a Pipeline function.
+ *
+ * @param {lunr.PipelineFunction} fn - The function to check for.
+ * @private
+ */
+lunr.Pipeline.warnIfFunctionNotRegistered = function (fn) {
+  var isRegistered = fn.label && (fn.label in this.registeredFunctions)
+
+  if (!isRegistered) {
+    lunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\n', fn)
+  }
+}
+
+/**
+ * Loads a previously serialised pipeline.
+ *
+ * All functions to be loaded must already be registered with lunr.Pipeline.
+ * If any function from the serialised data has not been registered then an
+ * error will be thrown.
+ *
+ * @param {Object} serialised - The serialised pipeline to load.
+ * @returns {lunr.Pipeline}
+ */
+lunr.Pipeline.load = function (serialised) {
+  var pipeline = new lunr.Pipeline
+
+  serialised.forEach(function (fnName) {
+    var fn = lunr.Pipeline.registeredFunctions[fnName]
+
+    if (fn) {
+      pipeline.add(fn)
+    } else {
+      throw new Error('Cannot load unregistered function: ' + fnName)
+    }
+  })
+
+  return pipeline
+}
+
+/**
+ * Adds new functions to the end of the pipeline.
+ *
+ * Logs a warning if the function has not been registered.
+ *
+ * @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline.
+ */
+lunr.Pipeline.prototype.add = function () {
+  var fns = Array.prototype.slice.call(arguments)
+
+  fns.forEach(function (fn) {
+    lunr.Pipeline.warnIfFunctionNotRegistered(fn)
+    this._stack.push(fn)
+  }, this)
+}
+
+/**
+ * Adds a single function after a function that already exists in the
+ * pipeline.
+ *
+ * Logs a warning if the function has not been registered.
+ *
+ * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
+ * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
+ */
+lunr.Pipeline.prototype.after = function (existingFn, newFn) {
+  lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
+
+  var pos = this._stack.indexOf(existingFn)
+  if (pos == -1) {
+    throw new Error('Cannot find existingFn')
+  }
+
+  pos = pos + 1
+  this._stack.splice(pos, 0, newFn)
+}
+
+/**
+ * Adds a single function before a function that already exists in the
+ * pipeline.
+ *
+ * Logs a warning if the function has not been registered.
+ *
+ * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline.
+ * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline.
+ */
+lunr.Pipeline.prototype.before = function (existingFn, newFn) {
+  lunr.Pipeline.warnIfFunctionNotRegistered(newFn)
+
+  var pos = this._stack.indexOf(existingFn)
+  if (pos == -1) {
+    throw new Error('Cannot find existingFn')
+  }
+
+  this._stack.splice(pos, 0, newFn)
+}
+
+/**
+ * Removes a function from the pipeline.
+ *
+ * @param {lunr.PipelineFunction} fn The function to remove from the pipeline.
+ */
+lunr.Pipeline.prototype.remove = function (fn) {
+  var pos = this._stack.indexOf(fn)
+  if (pos == -1) {
+    return
+  }
+
+  this._stack.splice(pos, 1)
+}
+
+/**
+ * Runs the current list of functions that make up the pipeline against the
+ * passed tokens.
+ *
+ * @param {Array} tokens The tokens to run through the pipeline.
+ * @returns {Array}
+ */
+lunr.Pipeline.prototype.run = function (tokens) {
+  var stackLength = this._stack.length
+
+  for (var i = 0; i < stackLength; i++) {
+    var fn = this._stack[i]
+    var memo = []
+
+    for (var j = 0; j < tokens.length; j++) {
+      var result = fn(tokens[j], j, tokens)
+
+      if (result === null || result === void 0 || result === '') continue
+
+      if (Array.isArray(result)) {
+        for (var k = 0; k < result.length; k++) {
+          memo.push(result[k])
+        }
+      } else {
+        memo.push(result)
+      }
+    }
+
+    tokens = memo
+  }
+
+  return tokens
+}
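+
+/*
+ * A minimal sketch of a custom pipeline function, illustrating the contract run
+ * relies on above: returning undefined (or null / '') drops a token, returning an
+ * array expands it into several tokens. The function name is illustrative only;
+ * lunr.tokenizer and lunr.Pipeline.registerFunction are defined earlier in this file.
+ *
+ *   var shortWordFilter = function (token) {
+ *     if (token.toString().length > 2) return token
+ *   }
+ *
+ *   lunr.Pipeline.registerFunction(shortWordFilter, 'shortWordFilter')
+ *
+ *   var pipeline = new lunr.Pipeline
+ *   pipeline.add(shortWordFilter)
+ *   pipeline.run(lunr.tokenizer('to be or not to be')) // tokens with fewer than three characters are dropped
+ */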
+
+/**
+ * Convenience method for passing a string through a pipeline and getting
+ * strings out. This method takes care of wrapping the passed string in a
+ * token and mapping the resulting tokens back to strings.
+ *
+ * @param {string} str - The string to pass through the pipeline.
+ * @param {?object} metadata - Optional metadata to associate with the token
+ * passed to the pipeline.
+ * @returns {string[]}
+ */
+lunr.Pipeline.prototype.runString = function (str, metadata) {
+  var token = new lunr.Token (str, metadata)
+
+  return this.run([token]).map(function (t) {
+    return t.toString()
+  })
+}
+
+/**
+ * Resets the pipeline by removing any existing processors.
+ *
+ */
+lunr.Pipeline.prototype.reset = function () {
+  this._stack = []
+}
+
+/**
+ * Returns a representation of the pipeline ready for serialisation.
+ *
+ * Logs a warning if the function has not been registered.
+ *
+ * @returns {Array}
+ */
+lunr.Pipeline.prototype.toJSON = function () {
+  return this._stack.map(function (fn) {
+    lunr.Pipeline.warnIfFunctionNotRegistered(fn)
+
+    return fn.label
+  })
+}
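+
+/*
+ * Because toJSON stores only the labels of registered functions, a pipeline can be
+ * round-tripped through JSON as long as the same functions are registered when it
+ * is loaded again. A sketch, assuming 'pipeline' contains the built-in functions
+ * registered later in this file (trimmer, stopWordFilter, stemmer):
+ *
+ *   pipeline.toJSON()                                            // => ['trimmer', 'stopWordFilter', 'stemmer']
+ *   lunr.Pipeline.load(['trimmer', 'stopWordFilter', 'stemmer']) // rebuilds an equivalent pipeline
+ */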
+/*!
+ * lunr.Vector
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+/**
+ * A vector is used to construct the vector space of documents and queries. These
+ * vectors support operations to determine the similarity between two documents or
+ * a document and a query.
+ *
+ * Normally no parameters are required for initializing a vector, but in the case of
+ * loading a previously dumped vector the raw elements can be provided to the constructor.
+ *
+ * For performance reasons vectors are implemented with a flat array, where an element's
+ * index is immediately followed by its value, e.g. [index, value, index, value]. This
+ * allows the underlying array to be as sparse as possible and still offer decent
+ * performance when being used for vector calculations.
+ *
+ * @constructor
+ * @param {Number[]} [elements] - The flat list of element index and element value pairs.
+ */
+lunr.Vector = function (elements) {
+  this._magnitude = 0
+  this.elements = elements || []
+}
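+
+/*
+ * A sketch of the flat element layout described above: a vector with the value 3.2
+ * at index 4 and 5.1 at index 10 is stored as [4, 3.2, 10, 5.1].
+ *
+ *   var v = new lunr.Vector([4, 3.2, 10, 5.1])
+ *   v.positionForIndex(10) // => 2, the offset of the 10/5.1 pair within the flat array
+ */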
+
+
+/**
+ * Calculates the position within the vector to insert a given index.
+ *
+ * This is used internally by insert and upsert. If there are duplicate indexes then
+ * the position is returned as if the value for that index were to be updated, but it
+ * is the caller's responsibility to check whether there is a duplicate at that index.
+ *
+ * @param {Number} index - The index at which the element should be inserted.
+ * @returns {Number}
+ */
+lunr.Vector.prototype.positionForIndex = function (index) {
+  // For an empty vector the tuple can be inserted at the beginning
+  if (this.elements.length == 0) {
+    return 0
+  }
+
+  var start = 0,
+      end = this.elements.length / 2,
+      sliceLength = end - start,
+      pivotPoint = Math.floor(sliceLength / 2),
+      pivotIndex = this.elements[pivotPoint * 2]
+
+  while (sliceLength > 1) {
+    if (pivotIndex < index) {
+      start = pivotPoint
+    }
+
+    if (pivotIndex > index) {
+      end = pivotPoint
+    }
+
+    if (pivotIndex == index) {
+      break
+    }
+
+    sliceLength = end - start
+    pivotPoint = start + Math.floor(sliceLength / 2)
+    pivotIndex = this.elements[pivotPoint * 2]
+  }
+
+  if (pivotIndex == index) {
+    return pivotPoint * 2
+  }
+
+  if (pivotIndex > index) {
+    return pivotPoint * 2
+  }
+
+  if (pivotIndex < index) {
+    return (pivotPoint + 1) * 2
+  }
+}
+
+/**
+ * Inserts an element at an index within the vector.
+ *
+ * Does not allow duplicates; an error will be thrown if there is already an entry
+ * for this index.
+ *
+ * @param {Number} insertIdx - The index at which the element should be inserted.
+ * @param {Number} val - The value to be inserted into the vector.
+ */
+lunr.Vector.prototype.insert = function (insertIdx, val) {
+  this.upsert(insertIdx, val, function () {
+    throw "duplicate index"
+  })
+}
+
+/**
+ * Inserts or updates an existing index within the vector.
+ *
+ * @param {Number} insertIdx - The index at which the element should be inserted.
+ * @param {Number} val - The value to be inserted into the vector.
+ * @param {function} fn - A function that is called for updates, the existing value and the
+ * requested value are passed as arguments
+ */
+lunr.Vector.prototype.upsert = function (insertIdx, val, fn) {
+  this._magnitude = 0
+  var position = this.positionForIndex(insertIdx)
+
+  if (this.elements[position] == insertIdx) {
+    this.elements[position + 1] = fn(this.elements[position + 1], val)
+  } else {
+    this.elements.splice(position, 0, insertIdx, val)
+  }
+}
+
+/**
+ * Calculates the magnitude of this vector.
+ *
+ * @returns {Number}
+ */
+lunr.Vector.prototype.magnitude = function () {
+  if (this._magnitude) return this._magnitude
+
+  var sumOfSquares = 0,
+      elementsLength = this.elements.length
+
+  for (var i = 1; i < elementsLength; i += 2) {
+    var val = this.elements[i]
+    sumOfSquares += val * val
+  }
+
+  return this._magnitude = Math.sqrt(sumOfSquares)
+}
+
+/**
+ * Calculates the dot product of this vector and another vector.
+ *
+ * @param {lunr.Vector} otherVector - The vector to compute the dot product with.
+ * @returns {Number}
+ */
+lunr.Vector.prototype.dot = function (otherVector) {
+  var dotProduct = 0,
+      a = this.elements, b = otherVector.elements,
+      aLen = a.length, bLen = b.length,
+      aVal = 0, bVal = 0,
+      i = 0, j = 0
+
+  while (i < aLen && j < bLen) {
+    aVal = a[i], bVal = b[j]
+    if (aVal < bVal) {
+      i += 2
+    } else if (aVal > bVal) {
+      j += 2
+    } else if (aVal == bVal) {
+      dotProduct += a[i + 1] * b[j + 1]
+      i += 2
+      j += 2
+    }
+  }
+
+  return dotProduct
+}
+
+/**
+ * Calculates the similarity between this vector and another vector.
+ *
+ * @param {lunr.Vector} otherVector - The other vector to calculate the
+ * similarity with.
+ * @returns {Number}
+ */
+lunr.Vector.prototype.similarity = function (otherVector) {
+  return this.dot(otherVector) / this.magnitude() || 0
+}
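+
+/*
+ * A small worked example of dot and similarity. Note that, as implemented above,
+ * similarity divides only by this vector's magnitude rather than by both magnitudes.
+ *
+ *   var a = new lunr.Vector([0, 1, 1, 1]) // value 1 at indexes 0 and 1
+ *   var b = new lunr.Vector([0, 1, 1, 1])
+ *
+ *   a.dot(b)        // => 2
+ *   a.magnitude()   // => Math.sqrt(2)
+ *   a.similarity(b) // => 2 / Math.sqrt(2), roughly 1.414
+ */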
+
+/**
+ * Converts the vector to an array of the elements within the vector.
+ *
+ * @returns {Number[]}
+ */
+lunr.Vector.prototype.toArray = function () {
+  var output = new Array (this.elements.length / 2)
+
+  for (var i = 1, j = 0; i < this.elements.length; i += 2, j++) {
+    output[j] = this.elements[i]
+  }
+
+  return output
+}
+
+/**
+ * A JSON serializable representation of the vector.
+ *
+ * @returns {Number[]}
+ */
+lunr.Vector.prototype.toJSON = function () {
+  return this.elements
+}
+/* eslint-disable */
+/*!
+ * lunr.stemmer
+ * Copyright (C) 2020 Oliver Nightingale
+ * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt
+ */
+
+/**
+ * lunr.stemmer is an english language stemmer, this is a JavaScript
+ * implementation of the PorterStemmer taken from http://tartarus.org/~martin
+ *
+ * @static
+ * @implements {lunr.PipelineFunction}
+ * @param {lunr.Token} token - The token to stem
+ * @returns {lunr.Token}
+ * @see {@link lunr.Pipeline}
+ * @function
+ */
+lunr.stemmer = (function(){
+  var step2list = {
+      "ational" : "ate",
+      "tional" : "tion",
+      "enci" : "ence",
+      "anci" : "ance",
+      "izer" : "ize",
+      "bli" : "ble",
+      "alli" : "al",
+      "entli" : "ent",
+      "eli" : "e",
+      "ousli" : "ous",
+      "ization" : "ize",
+      "ation" : "ate",
+      "ator" : "ate",
+      "alism" : "al",
+      "iveness" : "ive",
+      "fulness" : "ful",
+      "ousness" : "ous",
+      "aliti" : "al",
+      "iviti" : "ive",
+      "biliti" : "ble",
+      "logi" : "log"
+    },
+
+    step3list = {
+      "icate" : "ic",
+      "ative" : "",
+      "alize" : "al",
+      "iciti" : "ic",
+      "ical" : "ic",
+      "ful" : "",
+      "ness" : ""
+    },
+
+    c = "[^aeiou]",          // consonant
+    v = "[aeiouy]",          // vowel
+    C = c + "[^aeiouy]*",    // consonant sequence
+    V = v + "[aeiou]*",      // vowel sequence
+
+    mgr0 = "^(" + C + ")?" + V + C,               // [C]VC... is m>0
+    meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$",  // [C]VC[V] is m=1
+    mgr1 = "^(" + C + ")?" + V + C + V + C,       // [C]VCVC... is m>1
+    s_v = "^(" + C + ")?" + v;                   // vowel in stem
+
+  var re_mgr0 = new RegExp(mgr0);
+  var re_mgr1 = new RegExp(mgr1);
+  var re_meq1 = new RegExp(meq1);
+  var re_s_v = new RegExp(s_v);
+
+  var re_1a = /^(.+?)(ss|i)es$/;
+  var re2_1a = /^(.+?)([^s])s$/;
+  var re_1b = /^(.+?)eed$/;
+  var re2_1b = /^(.+?)(ed|ing)$/;
+  var re_1b_2 = /.$/;
+  var re2_1b_2 = /(at|bl|iz)$/;
+  var re3_1b_2 = new RegExp("([^aeiouylsz])\\1$");
+  var re4_1b_2 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+
+  var re_1c = /^(.+?[^aeiou])y$/;
+  var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
+
+  var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
+
+  var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
+  var re2_4 = /^(.+?)(s|t)(ion)$/;
+
+  var re_5 = /^(.+?)e$/;
+  var re_5_1 = /ll$/;
+  var re3_5 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+
+  var porterStemmer = function porterStemmer(w) {
+    var stem,
+      suffix,
+      firstch,
+      re,
+      re2,
+      re3,
+      re4;
+
+    if (w.length < 3) { return w; }
+
+    firstch = w.substr(0,1);
+    if (firstch == "y") {
+      w = firstch.toUpperCase() + w.substr(1);
+    }
+
+    // Step 1a
+    re = re_1a
+    re2 = re2_1a;
+
+    if (re.test(w)) { w = w.replace(re,"$1$2"); }
+    else if (re2.test(w)) { w = w.replace(re2,"$1$2"); }
+
+    // Step 1b
+    re = re_1b;
+    re2 = re2_1b;
+    if (re.test(w)) {
+      var fp = re.exec(w);
+      re = re_mgr0;
+      if (re.test(fp[1])) {
+        re = re_1b_2;
+        w = w.replace(re,"");
+      }
+    } else if (re2.test(w)) {
+      var fp = re2.exec(w);
+      stem = fp[1];
+      re2 = re_s_v;
+      if (re2.test(stem)) {
+        w = stem;
+        re2 = re2_1b_2;
+        re3 = re3_1b_2;
+        re4 = re4_1b_2;
+        if (re2.test(w)) { w = w + "e"; }
+        else if (re3.test(w)) { re = re_1b_2; w = w.replace(re,""); }
+        else if (re4.test(w)) { w = w + "e"; }
+      }
+    }
+
+    // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say)
+    re = re_1c;
+    if (re.test(w)) {
+      var fp = re.exec(w);
+      stem = fp[1];
+      w = stem + "i";
+    }
+
+    // Step 2
+    re = re_2;
+    if (re.test(w)) {
+      var fp = re.exec(w);
+      stem = fp[1];
+      suffix = fp[2];
+      re = re_mgr0;
+      if (re.test(stem)) {
+        w = stem + step2list[suffix];
+      }
+    }
+
+    // Step 3
+    re = re_3;
+    if (re.test(w)) {
+      var fp = re.exec(w);
+      stem = fp[1];
+      suffix = fp[2];
+      re = re_mgr0;
+      if (re.test(stem)) {
+        w = stem + step3list[suffix];
+      }
+    }
+
+    // Step 4
+    re = re_4;
+    re2 = re2_4;
+    if (re.test(w)) {
+      var fp = re.exec(w);
+      stem = fp[1];
+      re = re_mgr1;
+      if (re.test(stem)) {
+        w = stem;
+      }
+    } else if (re2.test(w)) {
+      var fp = re2.exec(w);
+      stem = fp[1] + fp[2];
+      re2 = re_mgr1;
+      if (re2.test(stem)) {
+        w = stem;
+      }
+    }
+
+    // Step 5
+    re = re_5;
+    if (re.test(w)) {
+      var fp = re.exec(w);
+      stem = fp[1];
+      re = re_mgr1;
+      re2 = re_meq1;
+      re3 = re3_5;
+      if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) {
+        w = stem;
+      }
+    }
+
+    re = re_5_1;
+    re2 = re_mgr1;
+    if (re.test(w) && re2.test(w)) {
+      re = re_1b_2;
+      w = w.replace(re,"");
+    }
+
+    // and turn initial Y back to y
+
+    if (firstch == "y") {
+      w = firstch.toLowerCase() + w.substr(1);
+    }
+
+    return w;
+  };
+
+  return function (token) {
+    return token.update(porterStemmer);
+  }
+})();
+
+lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer')
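+
+/*
+ * lunr.stemmer operates on lunr.Token instances (defined earlier in this file), so
+ * the simplest way to try it on plain strings is through a pipeline:
+ *
+ *   var p = new lunr.Pipeline
+ *   p.add(lunr.stemmer)
+ *
+ *   p.runString('fishing')  // => ['fish']
+ *   p.runString('national') // => ['nation']
+ */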
+/*!
+ * lunr.stopWordFilter
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+/**
+ * lunr.generateStopWordFilter builds a stopWordFilter function from the provided
+ * list of stop words.
+ *
+ * The built-in lunr.stopWordFilter is built using this generator and can be used
+ * to generate custom stopWordFilters for applications or non-English languages.
+ *
+ * @function
+ * @param {Array} stopWords - The list of stop words to filter out.
+ * @returns {lunr.PipelineFunction}
+ * @see lunr.Pipeline
+ * @see lunr.stopWordFilter
+ */
+lunr.generateStopWordFilter = function (stopWords) {
+  var words = stopWords.reduce(function (memo, stopWord) {
+    memo[stopWord] = stopWord
+    return memo
+  }, {})
+
+  return function (token) {
+    if (token && words[token.toString()] !== token.toString()) return token
+  }
+}
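+
+/*
+ * A sketch of a domain specific stop word filter built with the generator above;
+ * the word list is purely illustrative. lunr.Token is defined earlier in this file.
+ *
+ *   var customStopWordFilter = lunr.generateStopWordFilter(['todo', 'fixme'])
+ *   lunr.Pipeline.registerFunction(customStopWordFilter, 'customStopWordFilter')
+ *
+ *   customStopWordFilter(new lunr.Token('fixme'))  // => undefined, the token is filtered out
+ *   customStopWordFilter(new lunr.Token('parser')) // => the token, unchanged
+ */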
+
+/**
+ * lunr.stopWordFilter is an English language stop word list filter; any words
+ * contained in the list will not be passed through the filter.
+ *
+ * This is intended to be used in the Pipeline. If the token does not pass the
+ * filter then undefined will be returned.
+ *
+ * @function
+ * @implements {lunr.PipelineFunction}
+ * @param {lunr.Token} token - A token to check for being a stop word.
+ * @returns {lunr.Token}
+ * @see {@link lunr.Pipeline}
+ */
+lunr.stopWordFilter = lunr.generateStopWordFilter([
+  'a',
+  'able',
+  'about',
+  'across',
+  'after',
+  'all',
+  'almost',
+  'also',
+  'am',
+  'among',
+  'an',
+  'and',
+  'any',
+  'are',
+  'as',
+  'at',
+  'be',
+  'because',
+  'been',
+  'but',
+  'by',
+  'can',
+  'cannot',
+  'could',
+  'dear',
+  'did',
+  'do',
+  'does',
+  'either',
+  'else',
+  'ever',
+  'every',
+  'for',
+  'from',
+  'get',
+  'got',
+  'had',
+  'has',
+  'have',
+  'he',
+  'her',
+  'hers',
+  'him',
+  'his',
+  'how',
+  'however',
+  'i',
+  'if',
+  'in',
+  'into',
+  'is',
+  'it',
+  'its',
+  'just',
+  'least',
+  'let',
+  'like',
+  'likely',
+  'may',
+  'me',
+  'might',
+  'most',
+  'must',
+  'my',
+  'neither',
+  'no',
+  'nor',
+  'not',
+  'of',
+  'off',
+  'often',
+  'on',
+  'only',
+  'or',
+  'other',
+  'our',
+  'own',
+  'rather',
+  'said',
+  'say',
+  'says',
+  'she',
+  'should',
+  'since',
+  'so',
+  'some',
+  'than',
+  'that',
+  'the',
+  'their',
+  'them',
+  'then',
+  'there',
+  'these',
+  'they',
+  'this',
+  'tis',
+  'to',
+  'too',
+  'twas',
+  'us',
+  'wants',
+  'was',
+  'we',
+  'were',
+  'what',
+  'when',
+  'where',
+  'which',
+  'while',
+  'who',
+  'whom',
+  'why',
+  'will',
+  'with',
+  'would',
+  'yet',
+  'you',
+  'your'
+])
+
+lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter')
+/*!
+ * lunr.trimmer
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+/**
+ * lunr.trimmer is a pipeline function for trimming non word
+ * characters from the beginning and end of tokens before they
+ * enter the index.
+ *
+ * This implementation may not work correctly for non-latin
+ * characters and should either be removed or adapted for use
+ * with languages with non-latin characters.
+ *
+ * @static
+ * @implements {lunr.PipelineFunction}
+ * @param {lunr.Token} token The token to pass through the filter
+ * @returns {lunr.Token}
+ * @see lunr.Pipeline
+ */
+lunr.trimmer = function (token) {
+  return token.update(function (s) {
+    return s.replace(/^\W+/, '').replace(/\W+$/, '')
+  })
+}
+
+lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer')
+/*!
+ * lunr.TokenSet
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+/**
+ * A token set is used to store the unique list of all tokens
+ * within an index. Token sets are also used to represent an
+ * incoming query to the index; this query token set and index
+ * token set are then intersected to find which tokens to look
+ * up in the inverted index.
+ *
+ * A token set can hold multiple tokens, as in the case of the
+ * index token set, or it can hold a single token as in the
+ * case of a simple query token set.
+ *
+ * Additionally token sets are used to perform wildcard matching.
+ * Leading, contained and trailing wildcards are supported, and
+ * from this edit distance matching can also be provided.
+ *
+ * Token sets are implemented as a minimal finite state automaton,
+ * where both common prefixes and suffixes are shared between tokens.
+ * This helps to reduce the space used for storing the token set.
+ *
+ * @constructor
+ */
+lunr.TokenSet = function () {
+  this.final = false
+  this.edges = {}
+  this.id = lunr.TokenSet._nextId
+  lunr.TokenSet._nextId += 1
+}
+
+/**
+ * Keeps track of the next auto-incrementing identifier to assign
+ * to a new tokenSet.
+ *
+ * TokenSets require a unique identifier to be correctly minimised.
+ *
+ * @private
+ */
+lunr.TokenSet._nextId = 1
+
+/**
+ * Creates a TokenSet instance from the given sorted array of words.
+ *
+ * @param {String[]} arr - A sorted array of strings to create the set from.
+ * @returns {lunr.TokenSet}
+ * @throws Will throw an error if the input array is not sorted.
+ */
+lunr.TokenSet.fromArray = function (arr) {
+  var builder = new lunr.TokenSet.Builder
+
+  for (var i = 0, len = arr.length; i < len; i++) {
+    builder.insert(arr[i])
+  }
+
+  builder.finish()
+  return builder.root
+}
+
+/**
+ * Creates a token set from a query clause.
+ *
+ * @private
+ * @param {Object} clause - A single clause from lunr.Query.
+ * @param {string} clause.term - The query clause term.
+ * @param {number} [clause.editDistance] - The optional edit distance for the term.
+ * @returns {lunr.TokenSet}
+ */
+lunr.TokenSet.fromClause = function (clause) {
+  if ('editDistance' in clause) {
+    return lunr.TokenSet.fromFuzzyString(clause.term, clause.editDistance)
+  } else {
+    return lunr.TokenSet.fromString(clause.term)
+  }
+}
+
+/**
+ * Creates a token set representing a single string with a specified
+ * edit distance.
+ *
+ * Insertions, deletions, substitutions and transpositions are each
+ * treated as an edit distance of 1.
+ *
+ * Increasing the allowed edit distance will have a dramatic impact
+ * on the performance of both creating and intersecting these TokenSets.
+ * It is advised to keep the edit distance less than 3.
+ *
+ * @param {string} str - The string to create the token set from.
+ * @param {number} editDistance - The allowed edit distance to match.
+ * @returns {lunr.TokenSet}
+ */
+lunr.TokenSet.fromFuzzyString = function (str, editDistance) {
+  var root = new lunr.TokenSet
+
+  var stack = [{
+    node: root,
+    editsRemaining: editDistance,
+    str: str
+  }]
+
+  while (stack.length) {
+    var frame = stack.pop()
+
+    // no edit
+    if (frame.str.length > 0) {
+      var char = frame.str.charAt(0),
+          noEditNode
+
+      if (char in frame.node.edges) {
+        noEditNode = frame.node.edges[char]
+      } else {
+        noEditNode = new lunr.TokenSet
+        frame.node.edges[char] = noEditNode
+      }
+
+      if (frame.str.length == 1) {
+        noEditNode.final = true
+      }
+
+      stack.push({
+        node: noEditNode,
+        editsRemaining: frame.editsRemaining,
+        str: frame.str.slice(1)
+      })
+    }
+
+    if (frame.editsRemaining == 0) {
+      continue
+    }
+
+    // insertion
+    if ("*" in frame.node.edges) {
+      var insertionNode = frame.node.edges["*"]
+    } else {
+      var insertionNode = new lunr.TokenSet
+      frame.node.edges["*"] = insertionNode
+    }
+
+    if (frame.str.length == 0) {
+      insertionNode.final = true
+    }
+
+    stack.push({
+      node: insertionNode,
+      editsRemaining: frame.editsRemaining - 1,
+      str: frame.str
+    })
+
+    // deletion
+    // can only do a deletion if we have enough edits remaining
+    // and if there are characters left to delete in the string
+    if (frame.str.length > 1) {
+      stack.push({
+        node: frame.node,
+        editsRemaining: frame.editsRemaining - 1,
+        str: frame.str.slice(1)
+      })
+    }
+
+    // deletion
+    // just removing the last character from the str
+    if (frame.str.length == 1) {
+      frame.node.final = true
+    }
+
+    // substitution
+    // can only do a substitution if we have enough edits remaining
+    // and if there are characters left to substitute
+    if (frame.str.length >= 1) {
+      if ("*" in frame.node.edges) {
+        var substitutionNode = frame.node.edges["*"]
+      } else {
+        var substitutionNode = new lunr.TokenSet
+        frame.node.edges["*"] = substitutionNode
+      }
+
+      if (frame.str.length == 1) {
+        substitutionNode.final = true
+      }
+
+      stack.push({
+        node: substitutionNode,
+        editsRemaining: frame.editsRemaining - 1,
+        str: frame.str.slice(1)
+      })
+    }
+
+    // transposition
+    // can only do a transposition if there are edits remaining
+    // and there are enough characters to transpose
+    if (frame.str.length > 1) {
+      var charA = frame.str.charAt(0),
+          charB = frame.str.charAt(1),
+          transposeNode
+
+      if (charB in frame.node.edges) {
+        transposeNode = frame.node.edges[charB]
+      } else {
+        transposeNode = new lunr.TokenSet
+        frame.node.edges[charB] = transposeNode
+      }
+
+      if (frame.str.length == 1) {
+        transposeNode.final = true
+      }
+
+      stack.push({
+        node: transposeNode,
+        editsRemaining: frame.editsRemaining - 1,
+        str: charA + frame.str.slice(2)
+      })
+    }
+  }
+
+  return root
+}
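+
+/*
+ * A sketch of fuzzy matching: intersecting a fuzzy token set with a token set built
+ * from a (sorted) list of corpus terms returns the terms within the allowed edit
+ * distance. The terms here are illustrative only.
+ *
+ *   var corpus = lunr.TokenSet.fromArray(['cart', 'cat', 'dog'])
+ *   var fuzzy = lunr.TokenSet.fromFuzzyString('cat', 1)
+ *
+ *   corpus.intersect(fuzzy).toArray() // => contains 'cat' and 'cart', but not 'dog'
+ */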
+
+/**
+ * Creates a TokenSet from a string.
+ *
+ * The string may contain one or more wildcard characters (*)
+ * that will allow wildcard matching when intersecting with
+ * another TokenSet.
+ *
+ * @param {string} str - The string to create a TokenSet from.
+ * @returns {lunr.TokenSet}
+ */
+lunr.TokenSet.fromString = function (str) {
+  var node = new lunr.TokenSet,
+      root = node
+
+  /*
+   * Iterates through all characters within the passed string
+   * appending a node for each character.
+   *
+   * When a wildcard character is found then a self
+   * referencing edge is introduced to continually match
+   * any number of any characters.
+   */
+  for (var i = 0, len = str.length; i < len; i++) {
+    var char = str[i],
+        final = (i == len - 1)
+
+    if (char == "*") {
+      node.edges[char] = node
+      node.final = final
+
+    } else {
+      var next = new lunr.TokenSet
+      next.final = final
+
+      node.edges[char] = next
+      node = next
+    }
+  }
+
+  return root
+}
+
+/**
+ * Converts this TokenSet into an array of strings
+ * contained within the TokenSet.
+ *
+ * This is not intended to be used on a TokenSet that
+ * contains wildcards; in these cases the results are
+ * undefined and are likely to cause an infinite loop.
+ *
+ * @returns {string[]}
+ */
+lunr.TokenSet.prototype.toArray = function () {
+  var words = []
+
+  var stack = [{
+    prefix: "",
+    node: this
+  }]
+
+  while (stack.length) {
+    var frame = stack.pop(),
+        edges = Object.keys(frame.node.edges),
+        len = edges.length
+
+    if (frame.node.final) {
+      /* In Safari, at this point the prefix is sometimes corrupted, see:
+       * https://github.com/olivernn/lunr.js/issues/279 Calling any
+       * String.prototype method forces Safari to "cast" this string to what
+       * it's supposed to be, fixing the bug. */
+      frame.prefix.charAt(0)
+      words.push(frame.prefix)
+    }
+
+    for (var i = 0; i < len; i++) {
+      var edge = edges[i]
+
+      stack.push({
+        prefix: frame.prefix.concat(edge),
+        node: frame.node.edges[edge]
+      })
+    }
+  }
+
+  return words
+}
+
+/**
+ * Generates a string representation of a TokenSet.
+ *
+ * This is intended to allow TokenSets to be used as keys
+ * in objects, largely to aid the construction and minimisation
+ * of a TokenSet. As such it is not designed to be a human
+ * friendly representation of the TokenSet.
+ *
+ * @returns {string}
+ */
+lunr.TokenSet.prototype.toString = function () {
+  // NOTE: Using Object.keys here as this.edges is very likely
+  // to enter 'hash-mode' with many keys being added
+  //
+  // avoiding a for-in loop here as it leads to the function
+  // being de-optimised (at least in V8). From some simple
+  // benchmarks the performance is comparable, but allowing
+  // V8 to optimize may mean easy performance wins in the future.
+
+  if (this._str) {
+    return this._str
+  }
+
+  var str = this.final ? '1' : '0',
+      labels = Object.keys(this.edges).sort(),
+      len = labels.length
+
+  for (var i = 0; i < len; i++) {
+    var label = labels[i],
+        node = this.edges[label]
+
+    str = str + label + node.id
+  }
+
+  return str
+}
+
+/**
+ * Returns a new TokenSet that is the intersection of
+ * this TokenSet and the passed TokenSet.
+ *
+ * This intersection will take into account any wildcards
+ * contained within the TokenSet.
+ *
+ * @param {lunr.TokenSet} b - Another TokenSet to intersect with.
+ * @returns {lunr.TokenSet}
+ */
+lunr.TokenSet.prototype.intersect = function (b) {
+  var output = new lunr.TokenSet,
+      frame = undefined
+
+  var stack = [{
+    qNode: b,
+    output: output,
+    node: this
+  }]
+
+  while (stack.length) {
+    frame = stack.pop()
+
+    // NOTE: As with the #toString method, we are using
+    // Object.keys and a for loop instead of a for-in loop
+    // as both of these objects enter 'hash' mode, causing
+    // the function to be de-optimised in V8
+    var qEdges = Object.keys(frame.qNode.edges),
+        qLen = qEdges.length,
+        nEdges = Object.keys(frame.node.edges),
+        nLen = nEdges.length
+
+    for (var q = 0; q < qLen; q++) {
+      var qEdge = qEdges[q]
+
+      for (var n = 0; n < nLen; n++) {
+        var nEdge = nEdges[n]
+
+        if (nEdge == qEdge || qEdge == '*') {
+          var node = frame.node.edges[nEdge],
+              qNode = frame.qNode.edges[qEdge],
+              final = node.final && qNode.final,
+              next = undefined
+
+          if (nEdge in frame.output.edges) {
+            // an edge already exists for this character
+            // no need to create a new node, just set the finality
+            // bit unless this node is already final
+            next = frame.output.edges[nEdge]
+            next.final = next.final || final
+
+          } else {
+            // no edge exists yet, must create one
+            // set the finality bit and insert it
+            // into the output
+            next = new lunr.TokenSet
+            next.final = final
+            frame.output.edges[nEdge] = next
+          }
+
+          stack.push({
+            qNode: qNode,
+            output: next,
+            node: node
+          })
+        }
+      }
+    }
+  }
+
+  return output
+}
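+
+/*
+ * A sketch of wildcard intersection using the methods above; the terms are
+ * illustrative only, and fromArray requires its input to be sorted.
+ *
+ *   var tokens = lunr.TokenSet.fromArray(['hell', 'hello', 'help', 'world'])
+ *   var query = lunr.TokenSet.fromString('hel*')
+ *
+ *   tokens.intersect(query).toArray() // => 'hell', 'hello' and 'help', in no particular order
+ */
+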
+lunr.TokenSet.Builder = function () {
+  this.previousWord = ""
+  this.root = new lunr.TokenSet
+  this.uncheckedNodes = []
+  this.minimizedNodes = {}
+}
+
+lunr.TokenSet.Builder.prototype.insert = function (word) {
+  var node,
+      commonPrefix = 0
+
+  if (word < this.previousWord) {
+    throw new Error ("Out of order word insertion")
+  }
+
+  for (var i = 0; i < word.length && i < this.previousWord.length; i++) {
+    if (word[i] != this.previousWord[i]) break
+    commonPrefix++
+  }
+
+  this.minimize(commonPrefix)
+
+  if (this.uncheckedNodes.length == 0) {
+    node = this.root
+  } else {
+    node = this.uncheckedNodes[this.uncheckedNodes.length - 1].child
+  }
+
+  for (var i = commonPrefix; i < word.length; i++) {
+    var nextNode = new lunr.TokenSet,
+        char = word[i]
+
+    node.edges[char] = nextNode
+
+    this.uncheckedNodes.push({
+      parent: node,
+      char: char,
+      child: nextNode
+    })
+
+    node = nextNode
+  }
+
+  node.final = true
+  this.previousWord = word
+}
+
+lunr.TokenSet.Builder.prototype.finish = function () {
+  this.minimize(0)
+}
+
+lunr.TokenSet.Builder.prototype.minimize = function (downTo) {
+  for (var i = this.uncheckedNodes.length - 1; i >= downTo; i--) {
+    var node = this.uncheckedNodes[i],
+        childKey = node.child.toString()
+
+    if (childKey in this.minimizedNodes) {
+      node.parent.edges[node.char] = this.minimizedNodes[childKey]
+    } else {
+      // Cache the key for this node since
+      // we know it can't change anymore
+      node.child._str = childKey
+
+      this.minimizedNodes[childKey] = node.child
+    }
+
+    this.uncheckedNodes.pop()
+  }
+}
+/*!
+ * lunr.Index
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+/**
+ * An index contains the built index of all documents and provides a query interface
+ * to the index.
+ *
+ * Usually instances of lunr.Index will not be created using this constructor; instead
+ * lunr.Builder should be used to construct new indexes, or lunr.Index.load should be
+ * used to load previously built and serialized indexes.
+ *
+ * @constructor
+ * @param {Object} attrs - The attributes of the built search index.
+ * @param {Object} attrs.invertedIndex - An index of term/field to document reference.
+ * @param {Object<string, lunr.Vector>} attrs.fieldVectors - Field vectors
+ * @param {lunr.TokenSet} attrs.tokenSet - A set of all corpus tokens.
+ * @param {string[]} attrs.fields - The names of indexed document fields.
+ * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms.
+ */
+lunr.Index = function (attrs) {
+  this.invertedIndex = attrs.invertedIndex
+  this.fieldVectors = attrs.fieldVectors
+  this.tokenSet = attrs.tokenSet
+  this.fields = attrs.fields
+  this.pipeline = attrs.pipeline
+}
+
+/**
+ * A result contains details of a document matching a search query.
+ * @typedef {Object} lunr.Index~Result
+ * @property {string} ref - The reference of the document this result represents.
+ * @property {number} score - A number between 0 and 1 representing how similar this document is to the query.
+ * @property {lunr.MatchData} matchData - Contains metadata about this match including which term(s) caused the match.
+ */
+
+/**
+ * Although lunr provides the ability to create queries using lunr.Query, it also provides a simple
+ * query language which itself is parsed into an instance of lunr.Query.
+ *
+ * For programmatically building queries it is advised to use lunr.Query directly; the query language
+ * is best used for human entered text rather than program generated text.
+ *
+ * At its simplest a query can be just a single term, e.g. `hello`. Multiple terms are also supported
+ * and will be combined with OR, e.g. `hello world` will match documents that contain either 'hello'
+ * or 'world', though those that contain both will rank higher in the results.
+ *
+ * Wildcards can be included in terms to match one or more unspecified characters, these wildcards can
+ * be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding
+ * wildcards will increase the number of documents that will be found but can also have a negative
+ * impact on query performance, especially with wildcards at the beginning of a term.
+ *
+ * Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term
+ * hello in the title field will match this query. Using a field not present in the index will lead
+ * to an error being thrown.
+ *
+ * Modifiers can also be added to terms; lunr supports edit distance and boost modifiers on terms. A term
+ * boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported
+ * to provide fuzzy matching, e.g. 'hello~2' will match documents containing 'hello' within an edit distance of 2.
+ * Avoid large values for edit distance to improve query performance.
+ *
+ * Each term also supports a presence modifier. By default a term's presence in a document is optional, however
+ * this can be changed to either required or prohibited. For a term's presence to be required in a document the
+ * term should be prefixed with a '+', e.g. `+foo bar` is a search for documents that must contain 'foo' and
+ * optionally contain 'bar'. Conversely a leading '-' sets the term's presence to prohibited, i.e. it must not
+ * appear in a document, e.g. `-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'.
+ *
+ * To escape special characters the backslash character '\' can be used, this allows searches to include
+ * characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead
+ * of attempting to apply a boost of 2 to the search term "foo".
+ *
+ * @typedef {string} lunr.Index~QueryString
+ * @example <caption>Simple single term query</caption>
+ * hello
+ * @example <caption>Multiple term query</caption>
+ * hello world
+ * @example <caption>term scoped to a field</caption>
+ * title:hello
+ * @example <caption>term with a boost of 10</caption>
+ * hello^10
+ * @example <caption>term with an edit distance of 2</caption>
+ * hello~2
+ * @example <caption>terms with presence modifiers</caption>
+ * -foo +bar baz
+ */
+
+/**
+ * Performs a search against the index using lunr query syntax.
+ *
+ * Results will be returned sorted by their score, with the most relevant results
+ * returned first. For details on how the score is calculated, please see
+ * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}.
+ *
+ * For more programmatic querying use lunr.Index#query.
+ *
+ * @param {lunr.Index~QueryString} queryString - A string containing a lunr query.
+ * @throws {lunr.QueryParseError} If the passed query string cannot be parsed.
+ * @returns {lunr.Index~Result[]}
+ */
+lunr.Index.prototype.search = function (queryString) {
+  return this.query(function (query) {
+    var parser = new lunr.QueryParser(queryString, query)
+    parser.parse()
+  })
+}
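+
+/*
+ * Assuming an index 'idx' built over illustrative 'title' and 'body' fields (see
+ * lunr.Builder below), the query language described above maps directly onto search:
+ *
+ *   idx.search('title:hello +world -spam') // field scoped, required and prohibited terms
+ *   idx.search('hel* foo~1 bar^10')        // wildcard, edit distance and boost
+ *
+ * Each result has the shape { ref: ..., score: ..., matchData: ... }.
+ */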
+
+/**
+ * A query builder callback provides a query object to be used to express
+ * the query to perform on the index.
+ *
+ * @callback lunr.Index~queryBuilder
+ * @param {lunr.Query} query - The query object to build up.
+ * @this lunr.Query
+ */
+
+/**
+ * Performs a query against the index using the yielded lunr.Query object.
+ *
+ * If performing programmatic queries against the index, this method is preferred
+ * over lunr.Index#search so as to avoid the additional query parsing overhead.
+ *
+ * A query object is yielded to the supplied function which should be used to
+ * express the query to be run against the index.
+ *
+ * Note that although this function takes a callback parameter it is _not_ an
+ * asynchronous operation; the callback is just yielded a query object to be
+ * customized.
+ *
+ * @param {lunr.Index~queryBuilder} fn - A function that is used to build the query.
+ * @returns {lunr.Index~Result[]}
+ */
+lunr.Index.prototype.query = function (fn) {
+  // for each query clause
+  // * process terms
+  // * expand terms from token set
+  // * find matching documents and metadata
+  // * get document vectors
+  // * score documents
+
+  var query = new lunr.Query(this.fields),
+      matchingFields = Object.create(null),
+      queryVectors = Object.create(null),
+      termFieldCache = Object.create(null),
+      requiredMatches = Object.create(null),
+      prohibitedMatches = Object.create(null)
+
+  /*
+   * To support field level boosts a query vector is created per
+   * field. An empty vector is eagerly created to support negated
+   * queries.
+   */
+  for (var i = 0; i < this.fields.length; i++) {
+    queryVectors[this.fields[i]] = new lunr.Vector
+  }
+
+  fn.call(query, query)
+
+  for (var i = 0; i < query.clauses.length; i++) {
+    /*
+     * Unless the pipeline has been disabled for this term, which is
+     * the case for terms with wildcards, we need to pass the clause
+     * term through the search pipeline. A pipeline returns an array
+     * of processed terms. Pipeline functions may expand the passed
+     * term, which means we may end up performing multiple index lookups
+     * for a single query term.
+     */
+    var clause = query.clauses[i],
+        terms = null,
+        clauseMatches = lunr.Set.empty
+
+    if (clause.usePipeline) {
+      terms = this.pipeline.runString(clause.term, {
+        fields: clause.fields
+      })
+    } else {
+      terms = [clause.term]
+    }
+
+    for (var m = 0; m < terms.length; m++) {
+      var term = terms[m]
+
+      /*
+       * Each term returned from the pipeline needs to use the same query
+       * clause object, e.g. the same boost and or edit distance. The
+       * simplest way to do this is to re-use the clause object but mutate
+       * its term property.
+       */
+      clause.term = term
+
+      /*
+       * From the term in the clause we create a token set which will then
+       * be used to intersect the index's token set to get a list of terms
+       * to lookup in the inverted index
+       */
+      var termTokenSet = lunr.TokenSet.fromClause(clause),
+          expandedTerms = this.tokenSet.intersect(termTokenSet).toArray()
+
+      /*
+       * If a term marked as required does not exist in the tokenSet it is
+       * impossible for the search to return any matches. We set all the field
+       * scoped required matches set to empty and stop examining any further
+       * clauses.
+       */
+      if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) {
+        for (var k = 0; k < clause.fields.length; k++) {
+          var field = clause.fields[k]
+          requiredMatches[field] = lunr.Set.empty
+        }
+
+        break
+      }
+
+      for (var j = 0; j < expandedTerms.length; j++) {
+        /*
+         * For each term get the posting and termIndex, this is required for
+         * building the query vector.
+         */
+        var expandedTerm = expandedTerms[j],
+            posting = this.invertedIndex[expandedTerm],
+            termIndex = posting._index
+
+        for (var k = 0; k < clause.fields.length; k++) {
+          /*
+           * For each field that this query term is scoped by (by default
+           * all fields are in scope) we need to get all the document refs
+           * that have this term in that field.
+           *
+           * The posting is the entry in the invertedIndex for the matching
+           * term from above.
+           */
+          var field = clause.fields[k],
+              fieldPosting = posting[field],
+              matchingDocumentRefs = Object.keys(fieldPosting),
+              termField = expandedTerm + "/" + field,
+              matchingDocumentsSet = new lunr.Set(matchingDocumentRefs)
+
+          /*
+           * if the presence of this term is required ensure that the matching
+           * documents are added to the set of required matches for this clause.
+           *
+           */
+          if (clause.presence == lunr.Query.presence.REQUIRED) {
+            clauseMatches = clauseMatches.union(matchingDocumentsSet)
+
+            if (requiredMatches[field] === undefined) {
+              requiredMatches[field] = lunr.Set.complete
+            }
+          }
+
+          /*
+           * if the presence of this term is prohibited ensure that the matching
+           * documents are added to the set of prohibited matches for this field,
+           * creating that set if it does not yet exist.
+           */
+          if (clause.presence == lunr.Query.presence.PROHIBITED) {
+            if (prohibitedMatches[field] === undefined) {
+              prohibitedMatches[field] = lunr.Set.empty
+            }
+
+            prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet)
+
+            /*
+             * Prohibited matches should not be part of the query vector used for
+             * similarity scoring and no metadata should be extracted so we continue
+             * to the next field
+             */
+            continue
+          }
+
+          /*
+           * The query field vector is populated using the termIndex found for
+           * the term and a unit value with the appropriate boost applied.
+           * Using upsert because there could already be an entry in the vector
+           * for the term we are working with. In that case we just add the scores
+           * together.
+           */
+          queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b })
+
+          /**
+           * If we've already seen this term, field combo then we've already collected
+           * the matching documents and metadata, no need to go through all that again
+           */
+          if (termFieldCache[termField]) {
+            continue
+          }
+
+          for (var l = 0; l < matchingDocumentRefs.length; l++) {
+            /*
+             * All metadata for this term/field/document triple
+             * are then extracted and collected into an instance
+             * of lunr.MatchData ready to be returned in the query
+             * results
+             */
+            var matchingDocumentRef = matchingDocumentRefs[l],
+                matchingFieldRef = new lunr.FieldRef (matchingDocumentRef, field),
+                metadata = fieldPosting[matchingDocumentRef],
+                fieldMatch
+
+            if ((fieldMatch = matchingFields[matchingFieldRef]) === undefined) {
+              matchingFields[matchingFieldRef] = new lunr.MatchData (expandedTerm, field, metadata)
+            } else {
+              fieldMatch.add(expandedTerm, field, metadata)
+            }
+
+          }
+
+          termFieldCache[termField] = true
+        }
+      }
+    }
+
+    /**
+     * If the presence was required we need to update the requiredMatches field sets.
+     * We do this after all fields for the term have collected their matches because
+     * the clause term's presence is required in _any_ of the fields, not _all_ of the
+     * fields.
+     */
+    if (clause.presence === lunr.Query.presence.REQUIRED) {
+      for (var k = 0; k < clause.fields.length; k++) {
+        var field = clause.fields[k]
+        requiredMatches[field] = requiredMatches[field].intersect(clauseMatches)
+      }
+    }
+  }
+
+  /**
+   * Need to combine the field scoped required and prohibited
+   * matching documents into a global set of required and prohibited
+   * matches
+   */
+  var allRequiredMatches = lunr.Set.complete,
+      allProhibitedMatches = lunr.Set.empty
+
+  for (var i = 0; i < this.fields.length; i++) {
+    var field = this.fields[i]
+
+    if (requiredMatches[field]) {
+      allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field])
+    }
+
+    if (prohibitedMatches[field]) {
+      allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field])
+    }
+  }
+
+  var matchingFieldRefs = Object.keys(matchingFields),
+      results = [],
+      matches = Object.create(null)
+
+  /*
+   * If the query is negated (contains only prohibited terms)
+   * we need to get _all_ fieldRefs currently existing in the
+   * index. This is only done when we know that the query is
+   * entirely prohibited terms to avoid any cost of getting all
+   * fieldRefs unnecessarily.
+   *
+   * Additionally, blank MatchData must be created to correctly
+   * populate the results.
+   */
+  if (query.isNegated()) {
+    matchingFieldRefs = Object.keys(this.fieldVectors)
+
+    for (var i = 0; i < matchingFieldRefs.length; i++) {
+      var matchingFieldRef = matchingFieldRefs[i]
+      var fieldRef = lunr.FieldRef.fromString(matchingFieldRef)
+      matchingFields[matchingFieldRef] = new lunr.MatchData
+    }
+  }
+
+  for (var i = 0; i < matchingFieldRefs.length; i++) {
+    /*
+     * Currently we have document fields that match the query, but we
+     * need to return documents. The matchData and scores are combined
+     * from multiple fields belonging to the same document.
+     *
+     * Scores are calculated by field, using the query vectors created
+     * above, and combined into a final document score using addition.
+     */
+    var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]),
+        docRef = fieldRef.docRef
+
+    if (!allRequiredMatches.contains(docRef)) {
+      continue
+    }
+
+    if (allProhibitedMatches.contains(docRef)) {
+      continue
+    }
+
+    var fieldVector = this.fieldVectors[fieldRef],
+        score = queryVectors[fieldRef.fieldName].similarity(fieldVector),
+        docMatch
+
+    if ((docMatch = matches[docRef]) !== undefined) {
+      docMatch.score += score
+      docMatch.matchData.combine(matchingFields[fieldRef])
+    } else {
+      var match = {
+        ref: docRef,
+        score: score,
+        matchData: matchingFields[fieldRef]
+      }
+      matches[docRef] = match
+      results.push(match)
+    }
+  }
+
+  /*
+   * Sort the results objects by score, highest first.
+   */
+  return results.sort(function (a, b) {
+    return b.score - a.score
+  })
+}
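+
+/*
+ * A programmatic sketch of the same kind of query, assuming lunr.Query#term and the
+ * lunr.Query.presence constants defined elsewhere in this file, and an illustrative
+ * index 'idx':
+ *
+ *   idx.query(function (q) {
+ *     q.term('hello', { fields: ['title'], boost: 10 })
+ *     q.term('world', { presence: lunr.Query.presence.REQUIRED })
+ *     q.term('spam', { presence: lunr.Query.presence.PROHIBITED })
+ *   })
+ */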
+
+/**
+ * Prepares the index for JSON serialization.
+ *
+ * The schema for this JSON blob will be described in a
+ * separate JSON schema file.
+ *
+ * @returns {Object}
+ */
+lunr.Index.prototype.toJSON = function () {
+  var invertedIndex = Object.keys(this.invertedIndex)
+    .sort()
+    .map(function (term) {
+      return [term, this.invertedIndex[term]]
+    }, this)
+
+  var fieldVectors = Object.keys(this.fieldVectors)
+    .map(function (ref) {
+      return [ref, this.fieldVectors[ref].toJSON()]
+    }, this)
+
+  return {
+    version: lunr.version,
+    fields: this.fields,
+    fieldVectors: fieldVectors,
+    invertedIndex: invertedIndex,
+    pipeline: this.pipeline.toJSON()
+  }
+}
+
+/**
+ * Loads a previously serialized lunr.Index
+ *
+ * @param {Object} serializedIndex - A previously serialized lunr.Index
+ * @returns {lunr.Index}
+ */
+lunr.Index.load = function (serializedIndex) {
+  var attrs = {},
+      fieldVectors = {},
+      serializedVectors = serializedIndex.fieldVectors,
+      invertedIndex = Object.create(null),
+      serializedInvertedIndex = serializedIndex.invertedIndex,
+      tokenSetBuilder = new lunr.TokenSet.Builder,
+      pipeline = lunr.Pipeline.load(serializedIndex.pipeline)
+
+  if (serializedIndex.version != lunr.version) {
+    lunr.utils.warn("Version mismatch when loading serialised index. Current version of lunr '" + lunr.version + "' does not match serialized index '" + serializedIndex.version + "'")
+  }
+
+  for (var i = 0; i < serializedVectors.length; i++) {
+    var tuple = serializedVectors[i],
+        ref = tuple[0],
+        elements = tuple[1]
+
+    fieldVectors[ref] = new lunr.Vector(elements)
+  }
+
+  for (var i = 0; i < serializedInvertedIndex.length; i++) {
+    var tuple = serializedInvertedIndex[i],
+        term = tuple[0],
+        posting = tuple[1]
+
+    tokenSetBuilder.insert(term)
+    invertedIndex[term] = posting
+  }
+
+  tokenSetBuilder.finish()
+
+  attrs.fields = serializedIndex.fields
+
+  attrs.fieldVectors = fieldVectors
+  attrs.invertedIndex = invertedIndex
+  attrs.tokenSet = tokenSetBuilder.root
+  attrs.pipeline = pipeline
+
+  return new lunr.Index(attrs)
+}
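+
+/*
+ * A sketch of round-tripping an index through JSON, e.g. to pre-build it at deploy
+ * time; 'idx' is an illustrative, already built index.
+ *
+ *   var serialized = JSON.stringify(idx) // JSON.stringify picks up lunr.Index#toJSON above
+ *   var reloaded = lunr.Index.load(JSON.parse(serialized))
+ *
+ *   reloaded.search('hello')
+ */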
+/*!
+ * lunr.Builder
+ * Copyright (C) 2020 Oliver Nightingale
+ */
+
+/**
+ * lunr.Builder performs indexing on a set of documents and
+ * returns instances of lunr.Index ready for querying.
+ *
+ * All configuration of the index is done via the builder: the
+ * fields to index, the document reference, the text processing
+ * pipeline and document scoring parameters are all set on the
+ * builder before indexing.
+ *
+ * @constructor
+ * @property {string} _ref - Internal reference to the document reference field.
+ * @property {string[]} _fields - Internal reference to the document fields to index.
+ * @property {object} invertedIndex - The inverted index maps terms to document fields.
+ * @property {object} documentTermFrequencies - Keeps track of document term frequencies.
+ * @property {object} documentLengths - Keeps track of the length of documents added to the index.
+ * @property {lunr.tokenizer} tokenizer - Function for splitting strings into tokens for indexing.
+ * @property {lunr.Pipeline} pipeline - The pipeline performs text processing on tokens before indexing.
+ * @property {lunr.Pipeline} searchPipeline - A pipeline for processing search terms before querying the index.
+ * @property {number} documentCount - Keeps track of the total number of documents indexed.
+ * @property {number} _b - A parameter to control field length normalization; setting this to 0 disables normalization, setting it to 1 fully normalizes field lengths, and the default value is 0.75.
+ * @property {number} _k1 - A parameter to control how quickly an increase in term frequency results in term frequency saturation; the default value is 1.2.
+ * @property {number} termIndex - A counter incremented for each unique term, used to identify a term's position in the vector space.
+ * @property {array} metadataWhitelist - A list of metadata keys that have been whitelisted for entry in the index.
+ */
+lunr.Builder = function () {
+  this._ref = "id"
+  this._fields = Object.create(null)
+  this._documents = Object.create(null)
+  this.invertedIndex = Object.create(null)
+  this.fieldTermFrequencies = {}
+  this.fieldLengths = {}
+  this.tokenizer = lunr.tokenizer
+  this.pipeline = new lunr.Pipeline
+  this.searchPipeline = new lunr.Pipeline
+  this.documentCount = 0
+  this._b = 0.75
+  this._k1 = 1.2
+  this.termIndex = 0
+  this.metadataWhitelist = []
+}
+
+/**
+ * Sets the document field used as the document reference. Every document must have this field.
+ * The type of this field in the document should be a string; if it is not a string it will be
+ * coerced into a string by calling toString.
+ *
+ * The default ref is 'id'.
+ *
+ * The ref should _not_ be changed during indexing; it should be set before any documents are
+ * added to the index. Changing it during indexing can lead to inconsistent results.
+ *
+ * @param {string} ref - The name of the reference field in the document.
+ */
+lunr.Builder.prototype.ref = function (ref) {
+  this._ref = ref
+}
+
+/**
+ * A function that is used to extract a field from a document.
+ *
+ * Lunr expects a field to be at the top level of a document, if however the field
+ * is deeply nested within a document an extractor function can be used to extract
+ * the right field for indexing.
+ *
+ * @callback fieldExtractor
+ * @param {object} doc - The document being added to the index.
+ * @returns {?(string|object|object[])} obj - The object that will be indexed for this field.
+ * @example <caption>Extracting a nested field</caption>
+ * function (doc) { return doc.nested.field }
+ */
+
+/**
+ * Adds a field to the list of document fields that will be indexed. Every document being
+ * indexed should have this field. Null values for this field in indexed documents will
+ * not cause errors but will limit the chance of that document being retrieved by searches.
+ *
+ * All fields should be added before adding documents to the index. Adding fields after
+ * a document has been indexed will have no effect on already indexed documents.
+ *
+ * Fields can be boosted at build time. This allows terms within that field to have more
+ * importance when ranking search results. Use a field boost to specify that matches within
+ * one field are more important than other fields.
+ *
+ * @param {string} fieldName - The name of a field to index in all documents.
+ * @param {object} attributes - Optional attributes associated with this field.
+ * @param {number} [attributes.boost=1] - Boost applied to all terms within this field.
+ * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document.
+ * @throws {RangeError} fieldName cannot contain unsupported characters '/'
+ */
+lunr.Builder.prototype.field = function (fieldName, attributes) {
+  if (/\//.test(fieldName)) {
+    throw new RangeError ("Field '" + fieldName + "' contains illegal character '/'")
+  }
+
+  this._fields[fieldName] = attributes || {}
+}
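+
+/*
+ * A sketch of field configuration; the field names and the nested extractor are
+ * illustrative only.
+ *
+ *   builder.field('title', { boost: 10 })
+ *   builder.field('body')
+ *   builder.field('tags', { extractor: function (doc) { return doc.meta.tags } })
+ */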
+
+/**
+ * A parameter to tune the amount of field length normalisation that is applied when
+ * calculating relevance scores. A value of 0 will completely disable any normalisation
+ * and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b
+ * will be clamped to the range 0 - 1.
+ *
+ * @param {number} number - The value to set for this tuning parameter.
+ */
+lunr.Builder.prototype.b = function (number) {
+  if (number < 0) {
+    this._b = 0
+  } else if (number > 1) {
+    this._b = 1
+  } else {
+    this._b = number
+  }
+}
+
+/**
+ * A parameter that controls the speed at which a rise in term frequency results in term
+ * frequency saturation. The default value is 1.2. Setting this to a higher value will give
+ * slower saturation levels, a lower value will result in quicker saturation.
+ *
+ * @param {number} number - The value to set for this tuning parameter.
+ */
+lunr.Builder.prototype.k1 = function (number) {
+  this._k1 = number
+}
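+
+/*
+ * Both parameters feed the BM25 style score computed in createFieldVectors below:
+ *
+ *   score = idf * ((k1 + 1) * tf) / (k1 * (1 - b + b * fieldLength / averageFieldLength) + tf)
+ *
+ * A quick tuning sketch:
+ *
+ *   var builder = new lunr.Builder
+ *   builder.b(0)    // disable field length normalisation entirely
+ *   builder.k1(2.0) // slower term frequency saturation
+ */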
+
+/**
+ * Adds a document to the index.
+ *
+ * Before adding documents to the index, the index should have been fully set up, with the document
+ * ref and all fields to index already having been specified.
+ *
+ * The document must have a field name as specified by the ref (by default this is 'id') and
+ * it should have all fields defined for indexing, though null or undefined values will not
+ * cause errors.
+ *
+ * Entire documents can be boosted at build time. Applying a boost to a document indicates that
+ * this document should rank higher in search results than other documents.
+ *
+ * @param {object} doc - The document to add to the index.
+ * @param {object} attributes - Optional attributes associated with this document.
+ * @param {number} [attributes.boost=1] - Boost applied to all terms within this document.
+ */
+lunr.Builder.prototype.add = function (doc, attributes) {
+  var docRef = doc[this._ref],
+      fields = Object.keys(this._fields)
+
+  this._documents[docRef] = attributes || {}
+  this.documentCount += 1
+
+  for (var i = 0; i < fields.length; i++) {
+    var fieldName = fields[i],
+        extractor = this._fields[fieldName].extractor,
+        field = extractor ? extractor(doc) : doc[fieldName],
+        tokens = this.tokenizer(field, {
+          fields: [fieldName]
+        }),
+        terms = this.pipeline.run(tokens),
+        fieldRef = new lunr.FieldRef (docRef, fieldName),
+        fieldTerms = Object.create(null)
+
+    this.fieldTermFrequencies[fieldRef] = fieldTerms
+    this.fieldLengths[fieldRef] = 0
+
+    // store the length of this field for this document
+    this.fieldLengths[fieldRef] += terms.length
+
+    // calculate term frequencies for this field
+    for (var j = 0; j < terms.length; j++) {
+      var term = terms[j]
+
+      if (fieldTerms[term] == undefined) {
+        fieldTerms[term] = 0
+      }
+
+      fieldTerms[term] += 1
+
+      // add to inverted index
+      // create an initial posting if one doesn't exist
+      if (this.invertedIndex[term] == undefined) {
+        var posting = Object.create(null)
+        posting["_index"] = this.termIndex
+        this.termIndex += 1
+
+        for (var k = 0; k < fields.length; k++) {
+          posting[fields[k]] = Object.create(null)
+        }
+
+        this.invertedIndex[term] = posting
+      }
+
+      // add an entry for this term/fieldName/docRef to the invertedIndex
+      if (this.invertedIndex[term][fieldName][docRef] == undefined) {
+        this.invertedIndex[term][fieldName][docRef] = Object.create(null)
+      }
+
+      // store all whitelisted metadata about this token in the
+      // inverted index
+      for (var l = 0; l < this.metadataWhitelist.length; l++) {
+        var metadataKey = this.metadataWhitelist[l],
+            metadata = term.metadata[metadataKey]
+
+        if (this.invertedIndex[term][fieldName][docRef][metadataKey] == undefined) {
+          this.invertedIndex[term][fieldName][docRef][metadataKey] = []
+        }
+
+        this.invertedIndex[term][fieldName][docRef][metadataKey].push(metadata)
+      }
+    }
+
+  }
+}
+
+/**
+ * Calculates the average field length for each field in this index
+ *
+ * @private
+ */
+lunr.Builder.prototype.calculateAverageFieldLengths = function () {
+
+  var fieldRefs = Object.keys(this.fieldLengths),
+      numberOfFields = fieldRefs.length,
+      accumulator = {},
+      documentsWithField = {}
+
+  for (var i = 0; i < numberOfFields; i++) {
+    var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
+        field = fieldRef.fieldName
+
+    documentsWithField[field] || (documentsWithField[field] = 0)
+    documentsWithField[field] += 1
+
+    accumulator[field] || (accumulator[field] = 0)
+    accumulator[field] += this.fieldLengths[fieldRef]
+  }
+
+  var fields = Object.keys(this._fields)
+
+  for (var i = 0; i < fields.length; i++) {
+    var fieldName = fields[i]
+    accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName]
+  }
+
+  this.averageFieldLength = accumulator
+}
+
+/**
+ * Builds a vector space model of every document using lunr.Vector
+ *
+ * @private
+ */
+lunr.Builder.prototype.createFieldVectors = function () {
+  var fieldVectors = {},
+      fieldRefs = Object.keys(this.fieldTermFrequencies),
+      fieldRefsLength = fieldRefs.length,
+      termIdfCache = Object.create(null)
+
+  for (var i = 0; i < fieldRefsLength; i++) {
+    var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]),
+        fieldName = fieldRef.fieldName,
+        fieldLength = this.fieldLengths[fieldRef],
+        fieldVector = new lunr.Vector,
+        termFrequencies = this.fieldTermFrequencies[fieldRef],
+        terms = Object.keys(termFrequencies),
+        termsLength = terms.length
+
+
+    var fieldBoost = this._fields[fieldName].boost || 1,
+        docBoost = this._documents[fieldRef.docRef].boost || 1
+
+    for (var j = 0; j < termsLength; j++) {
+      var term = terms[j],
+          tf = termFrequencies[term],
+          termIndex = this.invertedIndex[term]._index,
+          idf, score, scoreWithPrecision
+
+      if (termIdfCache[term] === undefined) {
+        idf = lunr.idf(this.invertedIndex[term], this.documentCount)
+        termIdfCache[term] = idf
+      } else {
+        idf = termIdfCache[term]
+      }
+
+      score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf)
+      score *= fieldBoost
+      score *= docBoost
+      scoreWithPrecision = Math.round(score * 1000) / 1000
+      // Converts 1.23456789 to 1.234.
+      // Reducing the precision so that the vectors take up less
+      // space when serialised. Doing it now so that they behave
+      // the same before and after serialisation. Also, this is
+      // the fastest approach to reducing a number's precision in
+      // JavaScript.
+
+      fieldVector.insert(termIndex, scoreWithPrecision)
+    }
+
+    fieldVectors[fieldRef] = fieldVector
+  }
+
+  this.fieldVectors = fieldVectors
+}
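+
+// A worked example of the score computed above, as a sketch. Assuming lunr's
+// usual defaults of _k1 = 1.2 and _b = 0.75, an idf of 1, a term frequency of
+// 2, and a field whose length equals the average field length (so the length
+// ratio is 1):
+//
+//   score = 1 * ((1.2 + 1) * 2) / (1.2 * (1 - 0.75 + 0.75 * 1) + 2)
+//         = 4.4 / 3.2
+//         = 1.375
+//
+// With field and document boosts of 1, 1.375 is the value inserted into the
+// field vector (it already has at most three decimal places, so rounding
+// leaves it unchanged).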
+
+/**
+ * Creates a token set of all tokens in the index using lunr.TokenSet
+ *
+ * @private
+ */
+lunr.Builder.prototype.createTokenSet = function () {
+  this.tokenSet = lunr.TokenSet.fromArray(
+    Object.keys(this.invertedIndex).sort()
+  )
+}
+
+/**
+ * Builds the index, creating an instance of lunr.Index.
+ *
+ * This completes the indexing process and should only be called
+ * once all documents have been added to the index.
+ *
+ * @returns {lunr.Index}
+ */
+lunr.Builder.prototype.build = function () {
+  this.calculateAverageFieldLengths()
+  this.createFieldVectors()
+  this.createTokenSet()
+
+  return new lunr.Index({
+    invertedIndex: this.invertedIndex,
+    fieldVectors: this.fieldVectors,
+    tokenSet: this.tokenSet,
+    fields: Object.keys(this._fields),
+    pipeline: this.searchPipeline
+  })
+}
+
+/**
+ * Applies a plugin to the index builder.
+ *
+ * A plugin is a function that is called with the index builder as its context.
+ * Plugins can be used to customise or extend the behaviour of the index
+ * in some way. A plugin is just a function that encapsulates the custom
+ * behaviour that should be applied when building the index.
+ *
+ * The plugin function will be called with the index builder as its first argument; any
+ * additional arguments passed to use will be forwarded to the plugin. The plugin is also
+ * invoked with the index builder as its context.
+ *
+ * @param {Function} plugin The plugin to apply.
+ */
+lunr.Builder.prototype.use = function (fn) {
+  var args = Array.prototype.slice.call(arguments, 1)
+  args.unshift(this)
+  fn.apply(this, args)
+}
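+
+// A minimal sketch of a plugin and how it is applied; the plugin name and
+// option are hypothetical:
+//
+//   var stopWordPlugin = function (builder, options) {
+//     // 'builder' is the lunr.Builder, forwarded as the first argument and
+//     // also used as 'this'; extra arguments to use() follow it.
+//   }
+//
+//   builder.use(stopWordPlugin, { language: 'en' })
+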
+/**
+ * Contains and collects metadata about a matching document.
+ * A single instance of lunr.MatchData is returned as part of every
+ * lunr.Index~Result.
+ *
+ * @constructor
+ * @param {string} term - The term this match data is associated with
+ * @param {string} field - The field in which the term was found
+ * @param {object} metadata - The metadata recorded about this term in this field
+ * @property {object} metadata - A cloned collection of metadata associated with this document.
+ * @see {@link lunr.Index~Result}
+ */
+lunr.MatchData = function (term, field, metadata) {
+  var clonedMetadata = Object.create(null),
+      metadataKeys = Object.keys(metadata || {})
+
+  // Cloning the metadata to prevent the original
+  // being mutated during match data combination.
+  // Metadata is kept in an array within the inverted
+  // index so cloning the data can be done with
+  // Array#slice
+  for (var i = 0; i < metadataKeys.length; i++) {
+    var key = metadataKeys[i]
+    clonedMetadata[key] = metadata[key].slice()
+  }
+
+  this.metadata = Object.create(null)
+
+  if (term !== undefined) {
+    this.metadata[term] = Object.create(null)
+    this.metadata[term][field] = clonedMetadata
+  }
+}
+
+/**
+ * An instance of lunr.MatchData will be created for every term that matches a
+ * document. However, only one instance is required in a lunr.Index~Result. This
+ * method combines metadata from another instance of lunr.MatchData with this
+ * object's metadata.
+ *
+ * @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one.
+ * @see {@link lunr.Index~Result}
+ */
+lunr.MatchData.prototype.combine = function (otherMatchData) {
+  var terms = Object.keys(otherMatchData.metadata)
+
+  for (var i = 0; i < terms.length; i++) {
+    var term = terms[i],
+        fields = Object.keys(otherMatchData.metadata[term])
+
+    if (this.metadata[term] == undefined) {
+      this.metadata[term] = Object.create(null)
+    }
+
+    for (var j = 0; j < fields.length; j++) {
+      var field = fields[j],
+          keys = Object.keys(otherMatchData.metadata[term][field])
+
+      if (this.metadata[term][field] == undefined) {
+        this.metadata[term][field] = Object.create(null)
+      }
+
+      for (var k = 0; k < keys.length; k++) {
+        var key = keys[k]
+
+        if (this.metadata[term][field][key] == undefined) {
+          this.metadata[term][field][key] = otherMatchData.metadata[term][field][key]
+        } else {
+          this.metadata[term][field][key] = this.metadata[term][field][key].concat(otherMatchData.metadata[term][field][key])
+        }
+
+      }
+    }
+  }
+}
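+
+// A sketch of combining two instances; the 'position' metadata key and the
+// field names are illustrative:
+//
+//   var a = new lunr.MatchData('foo', 'title', { position: [[0, 3]] })
+//   var b = new lunr.MatchData('foo', 'body', { position: [[10, 3]] })
+//   a.combine(b)
+//   // a.metadata.foo now has both a 'title' and a 'body' entry, each with
+//   // its own 'position' array.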
+
+/**
+ * Add metadata for a term/field pair to this instance of match data.
+ *
+ * @param {string} term - The term this match data is associated with
+ * @param {string} field - The field in which the term was found
+ * @param {object} metadata - The metadata recorded about this term in this field
+ */
+lunr.MatchData.prototype.add = function (term, field, metadata) {
+  if (!(term in this.metadata)) {
+    this.metadata[term] = Object.create(null)
+    this.metadata[term][field] = metadata
+    return
+  }
+
+  if (!(field in this.metadata[term])) {
+    this.metadata[term][field] = metadata
+    return
+  }
+
+  var metadataKeys = Object.keys(metadata)
+
+  for (var i = 0; i < metadataKeys.length; i++) {
+    var key = metadataKeys[i]
+
+    if (key in this.metadata[term][field]) {
+      this.metadata[term][field][key] = this.metadata[term][field][key].concat(metadata[key])
+    } else {
+      this.metadata[term][field][key] = metadata[key]
+    }
+  }
+}
+/**
+ * A lunr.Query provides a programmatic way of defining queries to be performed
+ * against a {@link lunr.Index}.
+ *
+ * Prefer constructing a lunr.Query using the {@link lunr.Index#query} method
+ * so the query object is pre-initialized with the right index fields.
+ *
+ * @constructor
+ * @property {lunr.Query~Clause[]} clauses - An array of query clauses.
+ * @property {string[]} allFields - An array of all available fields in a lunr.Index.
+ */
+lunr.Query = function (allFields) {
+  this.clauses = []
+  this.allFields = allFields
+}
+
+/**
+ * Constants for indicating what kind of automatic wildcard insertion will be used when constructing a query clause.
+ *
+ * This allows wildcards to be added to the beginning and end of a term without having to manually do any string
+ * concatenation.
+ *
+ * The wildcard constants can be bitwise combined to select both leading and trailing wildcards.
+ *
+ * @constant
+ * @default
+ * @property {number} wildcard.NONE - The term will have no wildcards inserted; this is the default behaviour
+ * @property {number} wildcard.LEADING - Prepend the term with a wildcard, unless a leading wildcard already exists
+ * @property {number} wildcard.TRAILING - Append a wildcard to the term, unless a trailing wildcard already exists
+ * @see lunr.Query~Clause
+ * @see lunr.Query#clause
+ * @see lunr.Query#term
+ * @example <caption>query term with trailing wildcard</caption>
+ * query.term('foo', { wildcard: lunr.Query.wildcard.TRAILING })
+ * @example <caption>query term with leading and trailing wildcard</caption>
+ * query.term('foo', {
+ *   wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING
+ * })
+ */
+
+lunr.Query.wildcard = new String ("*")
+lunr.Query.wildcard.NONE = 0
+lunr.Query.wildcard.LEADING = 1
+lunr.Query.wildcard.TRAILING = 2
+
+/**
+ * Constants for indicating what kind of presence a term must have in matching documents.
+ *
+ * @constant
+ * @enum {number}
+ * @see lunr.Query~Clause
+ * @see lunr.Query#clause
+ * @see lunr.Query#term
+ * @example <caption>query term with required presence</caption>
+ * query.term('foo', { presence: lunr.Query.presence.REQUIRED })
+ */
+lunr.Query.presence = {
+  /**
+   * Term's presence in a document is optional; this is the default value.
+   */
+  OPTIONAL: 1,
+
+  /**
+   * Term's presence in a document is required; documents that do not contain
+   * this term will not be returned.
+   */
+  REQUIRED: 2,
+
+  /**
+   * Term's presence in a document is prohibited; documents that do contain
+   * this term will not be returned.
+   */
+  PROHIBITED: 3
+}
+
+/**
+ * A single clause in a {@link lunr.Query} contains a term and details on how to
+ * match that term against a {@link lunr.Index}.
+ *
+ * @typedef {Object} lunr.Query~Clause
+ * @property {string[]} fields - The fields in an index this clause should be matched against.
+ * @property {number} [boost=1] - Any boost that should be applied when matching this clause.
+ * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be.
+ * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline.
+ * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended.
+ * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The term's presence in any matching documents.
+ */
+
+/**
+ * Adds a {@link lunr.Query~Clause} to this query.
+ *
+ * Unless the clause specifies the fields to be matched, all fields will be matched. In addition,
+ * a default boost of 1 is applied to the clause.
+ *
+ * @param {lunr.Query~Clause} clause - The clause to add to this query.
+ * @see lunr.Query~Clause
+ * @returns {lunr.Query}
+ */
+lunr.Query.prototype.clause = function (clause) {
+  if (!('fields' in clause)) {
+    clause.fields = this.allFields
+  }
+
+  if (!('boost' in clause)) {
+    clause.boost = 1
+  }
+
+  if (!('usePipeline' in clause)) {
+    clause.usePipeline = true
+  }
+
+  if (!('wildcard' in clause)) {
+    clause.wildcard = lunr.Query.wildcard.NONE
+  }
+
+  if ((clause.wildcard & lunr.Query.wildcard.LEADING) && (clause.term.charAt(0) != lunr.Query.wildcard)) {
+    clause.term = "*" + clause.term
+  }
+
+  if ((clause.wildcard & lunr.Query.wildcard.TRAILING) && (clause.term.slice(-1) != lunr.Query.wildcard)) {
+    clause.term = "" + clause.term + "*"
+  }
+
+  if (!('presence' in clause)) {
+    clause.presence = lunr.Query.presence.OPTIONAL
+  }
+
+  this.clauses.push(clause)
+
+  return this
+}
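+
+// For example, adding a minimal clause relies entirely on the defaults applied
+// above (a sketch; the field names are hypothetical):
+//
+//   var query = new lunr.Query(['title', 'body'])
+//   query.clause({ term: 'foo' })
+//   // the stored clause now has fields ['title', 'body'], boost 1,
+//   // usePipeline true, wildcard NONE and presence OPTIONAL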
+
+/**
+ * A negated query is one in which every clause has a presence of
+ * prohibited. These queries require some special processing to return
+ * the expected results.
+ *
+ * @returns {boolean}
+ */
+lunr.Query.prototype.isNegated = function () {
+  for (var i = 0; i < this.clauses.length; i++) {
+    if (this.clauses[i].presence != lunr.Query.presence.PROHIBITED) {
+      return false
+    }
+  }
+
+  return true
+}
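+
+// A sketch of a fully negated query (field name hypothetical):
+//
+//   var query = new lunr.Query(['title'])
+//   query.term('foo', { presence: lunr.Query.presence.PROHIBITED })
+//   query.isNegated() // => true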
+
+/**
+ * Adds a term to the current query; under the covers this will create a {@link lunr.Query~Clause}
+ * and add it to the list of clauses that make up this query.
+ *
+ * The term is used as is, i.e. no tokenization will be performed by this method. Instead conversion
+ * to a token or token-like string should be done before calling this method.
+ *
+ * The term will be converted to a string by calling `toString`. Multiple terms can be passed as an
+ * array, each term in the array will share the same options.
+ *
+ * @param {object|object[]} term - The term(s) to add to the query.
+ * @param {object} [options] - Any additional properties to add to the query clause.
+ * @returns {lunr.Query}
+ * @see lunr.Query#clause
+ * @see lunr.Query~Clause
+ * @example <caption>adding a single term to a query</caption>
+ * query.term("foo")
+ * @example <caption>adding a single term to a query and specifying search fields, term boost and automatic trailing wildcard</caption>
+ * query.term("foo", {
+ *   fields: ["title"],
+ *   boost: 10,
+ *   wildcard: lunr.Query.wildcard.TRAILING
+ * })
+ * @example <caption>using lunr.tokenizer to convert a string to tokens before using them as terms</caption>
+ * query.term(lunr.tokenizer("foo bar"))
+ */
+lunr.Query.prototype.term = function (term, options) {
+  if (Array.isArray(term)) {
+    term.forEach(function (t) { this.term(t, lunr.utils.clone(options)) }, this)
+    return this
+  }
+
+  var clause = options || {}
+  clause.term = term.toString()
+
+  this.clause(clause)
+
+  return this
+}
+lunr.QueryParseError = function (message, start, end) {
+  this.name = "QueryParseError"
+  this.message = message
+  this.start = start
+  this.end = end
+}
+
+lunr.QueryParseError.prototype = new Error
+lunr.QueryLexer = function (str) {
+  this.lexemes = []
+  this.str = str
+  this.length = str.length
+  this.pos = 0
+  this.start = 0
+  this.escapeCharPositions = []
+}
+
+lunr.QueryLexer.prototype.run = function () {
+  var state = lunr.QueryLexer.lexText
+
+  while (state) {
+    state = state(this)
+  }
+}
+
+lunr.QueryLexer.prototype.sliceString = function () {
+  var subSlices = [],
+      sliceStart = this.start,
+      sliceEnd = this.pos
+
+  for (var i = 0; i < this.escapeCharPositions.length; i++) {
+    sliceEnd = this.escapeCharPositions[i]
+    subSlices.push(this.str.slice(sliceStart, sliceEnd))
+    sliceStart = sliceEnd + 1
+  }
+
+  subSlices.push(this.str.slice(sliceStart, this.pos))
+  this.escapeCharPositions.length = 0
+
+  return subSlices.join('')
+}
+
+lunr.QueryLexer.prototype.emit = function (type) {
+  this.lexemes.push({
+    type: type,
+    str: this.sliceString(),
+    start: this.start,
+    end: this.pos
+  })
+
+  this.start = this.pos
+}
+
+lunr.QueryLexer.prototype.escapeCharacter = function () {
+  this.escapeCharPositions.push(this.pos - 1)
+  this.pos += 1
+}
+
+lunr.QueryLexer.prototype.next = function () {
+  if (this.pos >= this.length) {
+    return lunr.QueryLexer.EOS
+  }
+
+  var char = this.str.charAt(this.pos)
+  this.pos += 1
+  return char
+}
+
+lunr.QueryLexer.prototype.width = function () {
+  return this.pos - this.start
+}
+
+lunr.QueryLexer.prototype.ignore = function () {
+  if (this.start == this.pos) {
+    this.pos += 1
+  }
+
+  this.start = this.pos
+}
+
+lunr.QueryLexer.prototype.backup = function () {
+  this.pos -= 1
+}
+
+lunr.QueryLexer.prototype.acceptDigitRun = function () {
+  var char, charCode
+
+  do {
+    char = this.next()
+    charCode = char.charCodeAt(0)
+  } while (charCode > 47 && charCode < 58)
+
+  if (char != lunr.QueryLexer.EOS) {
+    this.backup()
+  }
+}
+
+lunr.QueryLexer.prototype.more = function () {
+  return this.pos < this.length
+}
+
+lunr.QueryLexer.EOS = 'EOS'
+lunr.QueryLexer.FIELD = 'FIELD'
+lunr.QueryLexer.TERM = 'TERM'
+lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE'
+lunr.QueryLexer.BOOST = 'BOOST'
+lunr.QueryLexer.PRESENCE = 'PRESENCE'
+
+lunr.QueryLexer.lexField = function (lexer) {
+  lexer.backup()
+  lexer.emit(lunr.QueryLexer.FIELD)
+  lexer.ignore()
+  return lunr.QueryLexer.lexText
+}
+
+lunr.QueryLexer.lexTerm = function (lexer) {
+  if (lexer.width() > 1) {
+    lexer.backup()
+    lexer.emit(lunr.QueryLexer.TERM)
+  }
+
+  lexer.ignore()
+
+  if (lexer.more()) {
+    return lunr.QueryLexer.lexText
+  }
+}
+
+lunr.QueryLexer.lexEditDistance = function (lexer) {
+  lexer.ignore()
+  lexer.acceptDigitRun()
+  lexer.emit(lunr.QueryLexer.EDIT_DISTANCE)
+  return lunr.QueryLexer.lexText
+}
+
+lunr.QueryLexer.lexBoost = function (lexer) {
+  lexer.ignore()
+  lexer.acceptDigitRun()
+  lexer.emit(lunr.QueryLexer.BOOST)
+  return lunr.QueryLexer.lexText
+}
+
+lunr.QueryLexer.lexEOS = function (lexer) {
+  if (lexer.width() > 0) {
+    lexer.emit(lunr.QueryLexer.TERM)
+  }
+}
+
+// This matches the separator used when tokenising fields
+// within a document. These should match, otherwise it is
+// not possible to search for some tokens within a document.
+//
+// It is possible for the user to change the separator on the
+// tokenizer, so it _might_ clash with any of the other special
+// characters already used within the search string, e.g. :.
+//
+// This means that it is possible to change the separator in
+// such a way that makes some words unsearchable using a search
+// string.
+lunr.QueryLexer.termSeparator = lunr.tokenizer.separator
+
+lunr.QueryLexer.lexText = function (lexer) {
+  while (true) {
+    var char = lexer.next()
+
+    if (char == lunr.QueryLexer.EOS) {
+      return lunr.QueryLexer.lexEOS
+    }
+
+    // Escape character is '\'
+    if (char.charCodeAt(0) == 92) {
+      lexer.escapeCharacter()
+      continue
+    }
+
+    if (char == ":") {
+      return lunr.QueryLexer.lexField
+    }
+
+    if (char == "~") {
+      lexer.backup()
+      if (lexer.width() > 0) {
+        lexer.emit(lunr.QueryLexer.TERM)
+      }
+      return lunr.QueryLexer.lexEditDistance
+    }
+
+    if (char == "^") {
+      lexer.backup()
+      if (lexer.width() > 0) {
+        lexer.emit(lunr.QueryLexer.TERM)
+      }
+      return lunr.QueryLexer.lexBoost
+    }
+
+    // "+" indicates term presence is required
+    // checking for length to ensure that only
+    // leading "+" are considered
+    if (char == "+" && lexer.width() === 1) {
+      lexer.emit(lunr.QueryLexer.PRESENCE)
+      return lunr.QueryLexer.lexText
+    }
+
+    // "-" indicates term presence is prohibited
+    // checking for length to ensure that only
+    // leading "-" are considered
+    if (char == "-" && lexer.width() === 1) {
+      lexer.emit(lunr.QueryLexer.PRESENCE)
+      return lunr.QueryLexer.lexText
+    }
+
+    if (char.match(lunr.QueryLexer.termSeparator)) {
+      return lunr.QueryLexer.lexTerm
+    }
+  }
+}
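+
+// A sketch of the lexemes produced for a simple query string:
+//
+//   var lexer = new lunr.QueryLexer('title:foo~2')
+//   lexer.run()
+//   lexer.lexemes
+//   // => [ { type: 'FIELD', str: 'title', ... },
+//   //      { type: 'TERM', str: 'foo', ... },
+//   //      { type: 'EDIT_DISTANCE', str: '2', ... } ]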
+
+lunr.QueryParser = function (str, query) {
+  this.lexer = new lunr.QueryLexer (str)
+  this.query = query
+  this.currentClause = {}
+  this.lexemeIdx = 0
+}
+
+lunr.QueryParser.prototype.parse = function () {
+  this.lexer.run()
+  this.lexemes = this.lexer.lexemes
+
+  var state = lunr.QueryParser.parseClause
+
+  while (state) {
+    state = state(this)
+  }
+
+  return this.query
+}
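+
+// A sketch of parsing a query string into clauses on a lunr.Query (field
+// names hypothetical):
+//
+//   var query = new lunr.Query(['title', 'body'])
+//   new lunr.QueryParser('+foo bar^2', query).parse()
+//   // query.clauses now holds two clauses: 'foo' with presence REQUIRED,
+//   // and 'bar' with a boost of 2.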
+
+lunr.QueryParser.prototype.peekLexeme = function () {
+  return this.lexemes[this.lexemeIdx]
+}
+
+lunr.QueryParser.prototype.consumeLexeme = function () {
+  var lexeme = this.peekLexeme()
+  this.lexemeIdx += 1
+  return lexeme
+}
+
+lunr.QueryParser.prototype.nextClause = function () {
+  var completedClause = this.currentClause
+  this.query.clause(completedClause)
+  this.currentClause = {}
+}
+
+lunr.QueryParser.parseClause = function (parser) {
+  var lexeme = parser.peekLexeme()
+
+  if (lexeme == undefined) {
+    return
+  }
+
+  switch (lexeme.type) {
+    case lunr.QueryLexer.PRESENCE:
+      return lunr.QueryParser.parsePresence
+    case lunr.QueryLexer.FIELD:
+      return lunr.QueryParser.parseField
+    case lunr.QueryLexer.TERM:
+      return lunr.QueryParser.parseTerm
+    default:
+      var errorMessage = "expected either a field or a term, found " + lexeme.type
+
+      if (lexeme.str.length >= 1) {
+        errorMessage += " with value '" + lexeme.str + "'"
+      }
+
+      throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
+  }
+}
+
+lunr.QueryParser.parsePresence = function (parser) {
+  var lexeme = parser.consumeLexeme()
+
+  if (lexeme == undefined) {
+    return
+  }
+
+  switch (lexeme.str) {
+    case "-":
+      parser.currentClause.presence = lunr.Query.presence.PROHIBITED
+      break
+    case "+":
+      parser.currentClause.presence = lunr.Query.presence.REQUIRED
+      break
+    default:
+      var errorMessage = "unrecognised presence operator'" + lexeme.str + "'"
+      throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
+  }
+
+  var nextLexeme = parser.peekLexeme()
+
+  if (nextLexeme == undefined) {
+    var errorMessage = "expecting term or field, found nothing"
+    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
+  }
+
+  switch (nextLexeme.type) {
+    case lunr.QueryLexer.FIELD:
+      return lunr.QueryParser.parseField
+    case lunr.QueryLexer.TERM:
+      return lunr.QueryParser.parseTerm
+    default:
+      var errorMessage = "expecting term or field, found '" + nextLexeme.type + "'"
+      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
+  }
+}
+
+lunr.QueryParser.parseField = function (parser) {
+  var lexeme = parser.consumeLexeme()
+
+  if (lexeme == undefined) {
+    return
+  }
+
+  if (parser.query.allFields.indexOf(lexeme.str) == -1) {
+    var possibleFields = parser.query.allFields.map(function (f) { return "'" + f + "'" }).join(', '),
+        errorMessage = "unrecognised field '" + lexeme.str + "', possible fields: " + possibleFields
+
+    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
+  }
+
+  parser.currentClause.fields = [lexeme.str]
+
+  var nextLexeme = parser.peekLexeme()
+
+  if (nextLexeme == undefined) {
+    var errorMessage = "expecting term, found nothing"
+    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
+  }
+
+  switch (nextLexeme.type) {
+    case lunr.QueryLexer.TERM:
+      return lunr.QueryParser.parseTerm
+    default:
+      var errorMessage = "expecting term, found '" + nextLexeme.type + "'"
+      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
+  }
+}
+
+lunr.QueryParser.parseTerm = function (parser) {
+  var lexeme = parser.consumeLexeme()
+
+  if (lexeme == undefined) {
+    return
+  }
+
+  parser.currentClause.term = lexeme.str.toLowerCase()
+
+  if (lexeme.str.indexOf("*") != -1) {
+    parser.currentClause.usePipeline = false
+  }
+
+  var nextLexeme = parser.peekLexeme()
+
+  if (nextLexeme == undefined) {
+    parser.nextClause()
+    return
+  }
+
+  switch (nextLexeme.type) {
+    case lunr.QueryLexer.TERM:
+      parser.nextClause()
+      return lunr.QueryParser.parseTerm
+    case lunr.QueryLexer.FIELD:
+      parser.nextClause()
+      return lunr.QueryParser.parseField
+    case lunr.QueryLexer.EDIT_DISTANCE:
+      return lunr.QueryParser.parseEditDistance
+    case lunr.QueryLexer.BOOST:
+      return lunr.QueryParser.parseBoost
+    case lunr.QueryLexer.PRESENCE:
+      parser.nextClause()
+      return lunr.QueryParser.parsePresence
+    default:
+      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
+      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
+  }
+}
+
+lunr.QueryParser.parseEditDistance = function (parser) {
+  var lexeme = parser.consumeLexeme()
+
+  if (lexeme == undefined) {
+    return
+  }
+
+  var editDistance = parseInt(lexeme.str, 10)
+
+  if (isNaN(editDistance)) {
+    var errorMessage = "edit distance must be numeric"
+    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
+  }
+
+  parser.currentClause.editDistance = editDistance
+
+  var nextLexeme = parser.peekLexeme()
+
+  if (nextLexeme == undefined) {
+    parser.nextClause()
+    return
+  }
+
+  switch (nextLexeme.type) {
+    case lunr.QueryLexer.TERM:
+      parser.nextClause()
+      return lunr.QueryParser.parseTerm
+    case lunr.QueryLexer.FIELD:
+      parser.nextClause()
+      return lunr.QueryParser.parseField
+    case lunr.QueryLexer.EDIT_DISTANCE:
+      return lunr.QueryParser.parseEditDistance
+    case lunr.QueryLexer.BOOST:
+      return lunr.QueryParser.parseBoost
+    case lunr.QueryLexer.PRESENCE:
+      parser.nextClause()
+      return lunr.QueryParser.parsePresence
+    default:
+      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
+      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
+  }
+}
+
+lunr.QueryParser.parseBoost = function (parser) {
+  var lexeme = parser.consumeLexeme()
+
+  if (lexeme == undefined) {
+    return
+  }
+
+  var boost = parseInt(lexeme.str, 10)
+
+  if (isNaN(boost)) {
+    var errorMessage = "boost must be numeric"
+    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
+  }
+
+  parser.currentClause.boost = boost
+
+  var nextLexeme = parser.peekLexeme()
+
+  if (nextLexeme == undefined) {
+    parser.nextClause()
+    return
+  }
+
+  switch (nextLexeme.type) {
+    case lunr.QueryLexer.TERM:
+      parser.nextClause()
+      return lunr.QueryParser.parseTerm
+    case lunr.QueryLexer.FIELD:
+      parser.nextClause()
+      return lunr.QueryParser.parseField
+    case lunr.QueryLexer.EDIT_DISTANCE:
+      return lunr.QueryParser.parseEditDistance
+    case lunr.QueryLexer.BOOST:
+      return lunr.QueryParser.parseBoost
+    case lunr.QueryLexer.PRESENCE:
+      parser.nextClause()
+      return lunr.QueryParser.parsePresence
+    default:
+      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
+      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
+  }
+}
+
+  /**
+   * export the module via AMD, CommonJS or as a browser global
+   * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js
+   */
+  ;(function (root, factory) {
+    if (typeof define === 'function' && define.amd) {
+      // AMD. Register as an anonymous module.
+      define(factory)
+    } else if (typeof exports === 'object') {
+      /**
+       * Node. Does not work with strict CommonJS, but
+       * only CommonJS-like environments that support module.exports,
+       * like Node.
+       */
+      module.exports = factory()
+    } else {
+      // Browser globals (root is window)
+      root.lunr = factory()
+    }
+  }(this, function () {
+    /**
+     * Just return a value to define the module export.
+     * This example returns an object, but the module
+     * can return a function as the exported value.
+     */
+    return lunr
+  }))
+})();

Changes are not shown because the file is too large.
+ 5 - 0
node_modules/lunr/lunr.min.js


+ 30 - 0
node_modules/lunr/package.json

@@ -0,0 +1,30 @@
+{
+  "name": "lunr",
+  "description": "Simple full-text search in your browser.",
+  "version": "2.3.9",
+  "author": "Oliver Nightingale",
+  "keywords": ["search"],
+  "homepage": "https://lunrjs.com",
+  "bugs": "https://github.com/olivernn/lunr.js/issues",
+  "main": "lunr.js",
+  "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/olivernn/lunr.js.git"
+  },
+  "devDependencies": {
+    "benchmark": "2.1.x",
+    "chai": "3.5.x",
+    "eslint-plugin-spellcheck": "0.0.8",
+    "eslint": "3.4.x",
+    "jsdoc": "3.5.x",
+    "mocha": "3.3.x",
+    "mustache": "2.2.x",
+    "node-static": "0.7.x",
+    "uglify-js": "2.6.x",
+    "word-list": "1.0.x"
+  },
+  "scripts": {
+    "test": "make test"
+  }
+}

+ 40 - 0
node_modules/macos-release/index.d.ts

@@ -0,0 +1,40 @@
+declare const macosRelease: {
+	/**
+	Get the name and version of a macOS release from the Darwin version.
+
+	@param release - By default, the current operating system is used, but you can supply a custom [Darwin kernel version](https://en.wikipedia.org/wiki/Darwin_%28operating_system%29#Release_history), which is the output of [`os.release()`](https://nodejs.org/api/os.html#os_os_release).
+
+	@example
+	```
+	import * as os from 'os';
+	import macosRelease = require('macos-release');
+
+	// On a macOS Sierra system
+
+	macosRelease();
+	//=> {name: 'Sierra', version: '10.12'}
+
+	os.release();
+	//=> 13.2.0
+	// This is the Darwin kernel version
+
+	macosRelease(os.release());
+	//=> {name: 'Sierra', version: '10.12'}
+
+	macosRelease('14.0.0');
+	//=> {name: 'Yosemite', version: '10.10'}
+
+	macosRelease('20.0.0');
+	//=> {name: 'Big Sur', version: '11'}
+	```
+	*/
+	(): {name: string, version: string}
+	(release: string): {name: string, version: string} | undefined;
+
+	// TODO: remove this in the next major version, refactor the whole definition to:
+	// declare function macosRelease(release?: string): {name: string, version: string};
+	// export = macosRelease;
+	default: typeof macosRelease;
+};
+
+export = macosRelease;

+ 37 - 0
node_modules/macos-release/index.js

@@ -0,0 +1,37 @@
+'use strict';
+const os = require('os');
+
+const nameMap = new Map([
+	[21, ['Monterey', '12']],
+	[20, ['Big Sur', '11']],
+	[19, ['Catalina', '10.15']],
+	[18, ['Mojave', '10.14']],
+	[17, ['High Sierra', '10.13']],
+	[16, ['Sierra', '10.12']],
+	[15, ['El Capitan', '10.11']],
+	[14, ['Yosemite', '10.10']],
+	[13, ['Mavericks', '10.9']],
+	[12, ['Mountain Lion', '10.8']],
+	[11, ['Lion', '10.7']],
+	[10, ['Snow Leopard', '10.6']],
+	[9, ['Leopard', '10.5']],
+	[8, ['Tiger', '10.4']],
+	[7, ['Panther', '10.3']],
+	[6, ['Jaguar', '10.2']],
+	[5, ['Puma', '10.1']]
+]);
+
+const macosRelease = release => {
+	release = Number((release || os.release()).split('.')[0]);
+
+	const [name, version] = nameMap.get(release) || ['Unknown', ''];
+
+	return {
+		name,
+		version
+	};
+};
+
+module.exports = macosRelease;
+// TODO: remove this in the next major version
+module.exports.default = macosRelease;

+ 9 - 0
node_modules/macos-release/license

@@ -0,0 +1,9 @@
+MIT License
+
+Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (https://sindresorhus.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 40 - 0
node_modules/macos-release/package.json

@@ -0,0 +1,40 @@
+{
+	"name": "macos-release",
+	"version": "2.5.1",
+	"description": "Get the name and version of a macOS release from the Darwin version",
+	"license": "MIT",
+	"repository": "sindresorhus/macos-release",
+	"funding": "https://github.com/sponsors/sindresorhus",
+	"author": {
+		"name": "Sindre Sorhus",
+		"email": "sindresorhus@gmail.com",
+		"url": "https://sindresorhus.com"
+	},
+	"engines": {
+		"node": ">=6"
+	},
+	"scripts": {
+		"test": "xo && ava && tsd"
+	},
+	"files": [
+		"index.js",
+		"index.d.ts"
+	],
+	"keywords": [
+		"macos",
+		"os",
+		"darwin",
+		"operating",
+		"system",
+		"platform",
+		"name",
+		"title",
+		"release",
+		"version"
+	],
+	"devDependencies": {
+		"ava": "^1.4.1",
+		"tsd": "^0.7.1",
+		"xo": "^0.24.0"
+	}
+}

+ 63 - 0
node_modules/macos-release/readme.md

@@ -0,0 +1,63 @@
+# macos-release
+
+> Get the name and version of a macOS release from the Darwin version\
+> Example: `13.2.0` → `{name: 'Mavericks', version: '10.9'}`
+
+## Install
+
+```
+$ npm install macos-release
+```
+
+## Usage
+
+```js
+const os = require('os');
+const macosRelease = require('macos-release');
+
+// On a macOS Sierra system
+
+macosRelease();
+//=> {name: 'Sierra', version: '10.12'}
+
+os.release();
+//=> 13.2.0
+// This is the Darwin kernel version
+
+macosRelease(os.release());
+//=> {name: 'Sierra', version: '10.12'}
+
+macosRelease('14.0.0');
+//=> {name: 'Yosemite', version: '10.10'}
+
+macosRelease('20.0.0');
+//=> {name: 'Big Sur', version: '11'}
+```
+
+## API
+
+### macosRelease(release?)
+
+#### release
+
+Type: `string`
+
+By default, the current operating system is used, but you can supply a custom [Darwin kernel version](https://en.wikipedia.org/wiki/Darwin_%28operating_system%29#Release_history), which is the output of [`os.release()`](https://nodejs.org/api/os.html#os_os_release).
+
+## Related
+
+- [os-name](https://github.com/sindresorhus/os-name) - Get the name of the current operating system. Example: `macOS Sierra`
+- [macos-version](https://github.com/sindresorhus/macos-version) - Get the macOS version of the current system. Example: `10.9.3`
+- [win-release](https://github.com/sindresorhus/win-release) - Get the name of a Windows version from the release number: `5.1.2600` → `XP`
+
+---
+
+<div align="center">
+	<b>
+		<a href="https://tidelift.com/subscription/pkg/npm-macos-release?utm_source=npm-macos-release&utm_medium=referral&utm_campaign=readme">Get professional support for this package with a Tidelift subscription</a>
+	</b>
+	<br>
+	<sub>
+		Tidelift helps make open source sustainable for maintainers while giving companies<br>assurances about security, maintenance, and licensing for their dependencies.
+	</sub>
+</div>

+ 7 - 0
node_modules/magic-string/LICENSE

@@ -0,0 +1,7 @@
+Copyright 2018 Rich Harris
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 1555 - 0
node_modules/magic-string/dist/magic-string.cjs.js

@@ -0,0 +1,1555 @@
+'use strict';
+
+var sourcemapCodec = require('@jridgewell/sourcemap-codec');
+
+class BitSet {
+	constructor(arg) {
+		this.bits = arg instanceof BitSet ? arg.bits.slice() : [];
+	}
+
+	add(n) {
+		this.bits[n >> 5] |= 1 << (n & 31);
+	}
+
+	has(n) {
+		return !!(this.bits[n >> 5] & (1 << (n & 31)));
+	}
+}
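+
+// A sketch of how the bit set packs values: n >> 5 selects a 32-bit word in
+// `bits` and n & 31 selects the bit within that word.
+//
+//   const set = new BitSet();
+//   set.add(33); // sets bit 1 of bits[1]
+//   set.has(33); // => true
+//   set.has(1);  // => false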
+
+class Chunk {
+	constructor(start, end, content) {
+		this.start = start;
+		this.end = end;
+		this.original = content;
+
+		this.intro = '';
+		this.outro = '';
+
+		this.content = content;
+		this.storeName = false;
+		this.edited = false;
+
+		{
+			this.previous = null;
+			this.next = null;
+		}
+	}
+
+	appendLeft(content) {
+		this.outro += content;
+	}
+
+	appendRight(content) {
+		this.intro = this.intro + content;
+	}
+
+	clone() {
+		const chunk = new Chunk(this.start, this.end, this.original);
+
+		chunk.intro = this.intro;
+		chunk.outro = this.outro;
+		chunk.content = this.content;
+		chunk.storeName = this.storeName;
+		chunk.edited = this.edited;
+
+		return chunk;
+	}
+
+	contains(index) {
+		return this.start < index && index < this.end;
+	}
+
+	eachNext(fn) {
+		let chunk = this;
+		while (chunk) {
+			fn(chunk);
+			chunk = chunk.next;
+		}
+	}
+
+	eachPrevious(fn) {
+		let chunk = this;
+		while (chunk) {
+			fn(chunk);
+			chunk = chunk.previous;
+		}
+	}
+
+	edit(content, storeName, contentOnly) {
+		this.content = content;
+		if (!contentOnly) {
+			this.intro = '';
+			this.outro = '';
+		}
+		this.storeName = storeName;
+
+		this.edited = true;
+
+		return this;
+	}
+
+	prependLeft(content) {
+		this.outro = content + this.outro;
+	}
+
+	prependRight(content) {
+		this.intro = content + this.intro;
+	}
+
+	reset() {
+		this.intro = '';
+		this.outro = '';
+		if (this.edited) {
+			this.content = this.original;
+			this.storeName = false;
+			this.edited = false;
+		}
+	}
+
+	split(index) {
+		const sliceIndex = index - this.start;
+
+		const originalBefore = this.original.slice(0, sliceIndex);
+		const originalAfter = this.original.slice(sliceIndex);
+
+		this.original = originalBefore;
+
+		const newChunk = new Chunk(index, this.end, originalAfter);
+		newChunk.outro = this.outro;
+		this.outro = '';
+
+		this.end = index;
+
+		if (this.edited) {
+			// after a split we should save the edited content record into the correct chunk
+			// to keep the sourcemap correct
+			// For example:
+			// '  test'.trim()
+			//     split   -> '  ' + 'test'
+			//   ✔️ edit    -> '' + 'test'
+			//   ✖️ edit    -> 'test' + '' 
+			// TODO is this block necessary?...
+			newChunk.edit('', false);
+			this.content = '';
+		} else {
+			this.content = originalBefore;
+		}
+
+		newChunk.next = this.next;
+		if (newChunk.next) newChunk.next.previous = newChunk;
+		newChunk.previous = this;
+		this.next = newChunk;
+
+		return newChunk;
+	}
+
+	toString() {
+		return this.intro + this.content + this.outro;
+	}
+
+	trimEnd(rx) {
+		this.outro = this.outro.replace(rx, '');
+		if (this.outro.length) return true;
+
+		const trimmed = this.content.replace(rx, '');
+
+		if (trimmed.length) {
+			if (trimmed !== this.content) {
+				this.split(this.start + trimmed.length).edit('', undefined, true);
+				if (this.edited) {
+					// save the change, if it has been edited
+					this.edit(trimmed, this.storeName, true);
+				}
+			}
+			return true;
+		} else {
+			this.edit('', undefined, true);
+
+			this.intro = this.intro.replace(rx, '');
+			if (this.intro.length) return true;
+		}
+	}
+
+	trimStart(rx) {
+		this.intro = this.intro.replace(rx, '');
+		if (this.intro.length) return true;
+
+		const trimmed = this.content.replace(rx, '');
+
+		if (trimmed.length) {
+			if (trimmed !== this.content) {
+				const newChunk = this.split(this.end - trimmed.length);
+				if (this.edited) {
+					// save the change, if it has been edited
+					newChunk.edit(trimmed, this.storeName, true);
+				}
+				this.edit('', undefined, true);
+			}
+			return true;
+		} else {
+			this.edit('', undefined, true);
+
+			this.outro = this.outro.replace(rx, '');
+			if (this.outro.length) return true;
+		}
+	}
+}
+
+function getBtoa() {
+	if (typeof globalThis !== 'undefined' && typeof globalThis.btoa === 'function') {
+		return (str) => globalThis.btoa(unescape(encodeURIComponent(str)));
+	} else if (typeof Buffer === 'function') {
+		return (str) => Buffer.from(str, 'utf-8').toString('base64');
+	} else {
+		return () => {
+			throw new Error('Unsupported environment: `window.btoa` or `Buffer` should be supported.');
+		};
+	}
+}
+
+const btoa = /*#__PURE__*/ getBtoa();
+
+class SourceMap {
+	constructor(properties) {
+		this.version = 3;
+		this.file = properties.file;
+		this.sources = properties.sources;
+		this.sourcesContent = properties.sourcesContent;
+		this.names = properties.names;
+		this.mappings = sourcemapCodec.encode(properties.mappings);
+		if (typeof properties.x_google_ignoreList !== 'undefined') {
+			this.x_google_ignoreList = properties.x_google_ignoreList;
+		}
+	}
+
+	toString() {
+		return JSON.stringify(this);
+	}
+
+	toUrl() {
+		return 'data:application/json;charset=utf-8;base64,' + btoa(this.toString());
+	}
+}
+
+function guessIndent(code) {
+	const lines = code.split('\n');
+
+	const tabbed = lines.filter((line) => /^\t+/.test(line));
+	const spaced = lines.filter((line) => /^ {2,}/.test(line));
+
+	if (tabbed.length === 0 && spaced.length === 0) {
+		return null;
+	}
+
+	// More lines tabbed than spaced? Assume tabs, and
+	// default to tabs in the case of a tie (or nothing
+	// to go on)
+	if (tabbed.length >= spaced.length) {
+		return '\t';
+	}
+
+	// Otherwise, we need to guess the multiple
+	const min = spaced.reduce((previous, current) => {
+		const numSpaces = /^ +/.exec(current)[0].length;
+		return Math.min(numSpaces, previous);
+	}, Infinity);
+
+	return new Array(min + 1).join(' ');
+}
+
+function getRelativePath(from, to) {
+	const fromParts = from.split(/[/\\]/);
+	const toParts = to.split(/[/\\]/);
+
+	fromParts.pop(); // get dirname
+
+	while (fromParts[0] === toParts[0]) {
+		fromParts.shift();
+		toParts.shift();
+	}
+
+	if (fromParts.length) {
+		let i = fromParts.length;
+		while (i--) fromParts[i] = '..';
+	}
+
+	return fromParts.concat(toParts).join('/');
+}
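+
+// For example (a sketch with hypothetical paths):
+//
+//   getRelativePath('src/app/main.js', 'src/lib/util.js');
+//   // => '../lib/util.js'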
+
+const toString = Object.prototype.toString;
+
+function isObject(thing) {
+	return toString.call(thing) === '[object Object]';
+}
+
+function getLocator(source) {
+	const originalLines = source.split('\n');
+	const lineOffsets = [];
+
+	for (let i = 0, pos = 0; i < originalLines.length; i++) {
+		lineOffsets.push(pos);
+		pos += originalLines[i].length + 1;
+	}
+
+	return function locate(index) {
+		let i = 0;
+		let j = lineOffsets.length;
+		while (i < j) {
+			const m = (i + j) >> 1;
+			if (index < lineOffsets[m]) {
+				j = m;
+			} else {
+				i = m + 1;
+			}
+		}
+		const line = i - 1;
+		const column = index - lineOffsets[line];
+		return { line, column };
+	};
+}
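+
+// A sketch of locating a character index inside a two-line string:
+//
+//   const locate = getLocator('ab\ncd');
+//   locate(4);
+//   // => { line: 1, column: 1 } (the 'd' on the second line)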
+
+const wordRegex = /\w/;
+
+class Mappings {
+	constructor(hires) {
+		this.hires = hires;
+		this.generatedCodeLine = 0;
+		this.generatedCodeColumn = 0;
+		this.raw = [];
+		this.rawSegments = this.raw[this.generatedCodeLine] = [];
+		this.pending = null;
+	}
+
+	addEdit(sourceIndex, content, loc, nameIndex) {
+		if (content.length) {
+			const contentLengthMinusOne = content.length - 1;
+			let contentLineEnd = content.indexOf('\n', 0);
+			let previousContentLineEnd = -1;
+			// Loop through each line in the content and add a segment, but stop if the last line is empty,
+			// else code afterwards would fill one line too many
+			while (contentLineEnd >= 0 && contentLengthMinusOne > contentLineEnd) {
+				const segment = [this.generatedCodeColumn, sourceIndex, loc.line, loc.column];
+				if (nameIndex >= 0) {
+					segment.push(nameIndex);
+				}
+				this.rawSegments.push(segment);
+
+				this.generatedCodeLine += 1;
+				this.raw[this.generatedCodeLine] = this.rawSegments = [];
+				this.generatedCodeColumn = 0;
+
+				previousContentLineEnd = contentLineEnd;
+				contentLineEnd = content.indexOf('\n', contentLineEnd + 1);
+			}
+
+			const segment = [this.generatedCodeColumn, sourceIndex, loc.line, loc.column];
+			if (nameIndex >= 0) {
+				segment.push(nameIndex);
+			}
+			this.rawSegments.push(segment);
+
+			this.advance(content.slice(previousContentLineEnd + 1));
+		} else if (this.pending) {
+			this.rawSegments.push(this.pending);
+			this.advance(content);
+		}
+
+		this.pending = null;
+	}
+
+	addUneditedChunk(sourceIndex, chunk, original, loc, sourcemapLocations) {
+		let originalCharIndex = chunk.start;
+		let first = true;
+		// when iterating each char, check if it's in a word boundary
+		let charInHiresBoundary = false;
+
+		while (originalCharIndex < chunk.end) {
+			if (this.hires || first || sourcemapLocations.has(originalCharIndex)) {
+				const segment = [this.generatedCodeColumn, sourceIndex, loc.line, loc.column];
+
+				if (this.hires === 'boundary') {
+					// in hires "boundary", group segments per word boundary than per char
+					if (wordRegex.test(original[originalCharIndex])) {
+						// for first char in the boundary found, start the boundary by pushing a segment
+						if (!charInHiresBoundary) {
+							this.rawSegments.push(segment);
+							charInHiresBoundary = true;
+						}
+					} else {
+						// for non-word char, end the boundary by pushing a segment
+						this.rawSegments.push(segment);
+						charInHiresBoundary = false;
+					}
+				} else {
+					this.rawSegments.push(segment);
+				}
+			}
+
+			if (original[originalCharIndex] === '\n') {
+				loc.line += 1;
+				loc.column = 0;
+				this.generatedCodeLine += 1;
+				this.raw[this.generatedCodeLine] = this.rawSegments = [];
+				this.generatedCodeColumn = 0;
+				first = true;
+			} else {
+				loc.column += 1;
+				this.generatedCodeColumn += 1;
+				first = false;
+			}
+
+			originalCharIndex += 1;
+		}
+
+		this.pending = null;
+	}
+
+	advance(str) {
+		if (!str) return;
+
+		const lines = str.split('\n');
+
+		if (lines.length > 1) {
+			for (let i = 0; i < lines.length - 1; i++) {
+				this.generatedCodeLine++;
+				this.raw[this.generatedCodeLine] = this.rawSegments = [];
+			}
+			this.generatedCodeColumn = 0;
+		}
+
+		this.generatedCodeColumn += lines[lines.length - 1].length;
+	}
+}
+
+const n = '\n';
+
+const warned = {
+	insertLeft: false,
+	insertRight: false,
+	storeName: false,
+};
+
+class MagicString {
+	constructor(string, options = {}) {
+		const chunk = new Chunk(0, string.length, string);
+
+		Object.defineProperties(this, {
+			original: { writable: true, value: string },
+			outro: { writable: true, value: '' },
+			intro: { writable: true, value: '' },
+			firstChunk: { writable: true, value: chunk },
+			lastChunk: { writable: true, value: chunk },
+			lastSearchedChunk: { writable: true, value: chunk },
+			byStart: { writable: true, value: {} },
+			byEnd: { writable: true, value: {} },
+			filename: { writable: true, value: options.filename },
+			indentExclusionRanges: { writable: true, value: options.indentExclusionRanges },
+			sourcemapLocations: { writable: true, value: new BitSet() },
+			storedNames: { writable: true, value: {} },
+			indentStr: { writable: true, value: undefined },
+			ignoreList: { writable: true, value: options.ignoreList },
+		});
+
+		this.byStart[0] = chunk;
+		this.byEnd[string.length] = chunk;
+	}
+
+	addSourcemapLocation(char) {
+		this.sourcemapLocations.add(char);
+	}
+
+	append(content) {
+		if (typeof content !== 'string') throw new TypeError('outro content must be a string');
+
+		this.outro += content;
+		return this;
+	}
+
+	appendLeft(index, content) {
+		if (typeof content !== 'string') throw new TypeError('inserted content must be a string');
+
+		this._split(index);
+
+		const chunk = this.byEnd[index];
+
+		if (chunk) {
+			chunk.appendLeft(content);
+		} else {
+			this.intro += content;
+		}
+		return this;
+	}
+
+	appendRight(index, content) {
+		if (typeof content !== 'string') throw new TypeError('inserted content must be a string');
+
+		this._split(index);
+
+		const chunk = this.byStart[index];
+
+		if (chunk) {
+			chunk.appendRight(content);
+		} else {
+			this.outro += content;
+		}
+		return this;
+	}
+
+	clone() {
+		const cloned = new MagicString(this.original, { filename: this.filename });
+
+		let originalChunk = this.firstChunk;
+		let clonedChunk = (cloned.firstChunk = cloned.lastSearchedChunk = originalChunk.clone());
+
+		while (originalChunk) {
+			cloned.byStart[clonedChunk.start] = clonedChunk;
+			cloned.byEnd[clonedChunk.end] = clonedChunk;
+
+			const nextOriginalChunk = originalChunk.next;
+			const nextClonedChunk = nextOriginalChunk && nextOriginalChunk.clone();
+
+			if (nextClonedChunk) {
+				clonedChunk.next = nextClonedChunk;
+				nextClonedChunk.previous = clonedChunk;
+
+				clonedChunk = nextClonedChunk;
+			}
+
+			originalChunk = nextOriginalChunk;
+		}
+
+		cloned.lastChunk = clonedChunk;
+
+		if (this.indentExclusionRanges) {
+			cloned.indentExclusionRanges = this.indentExclusionRanges.slice();
+		}
+
+		cloned.sourcemapLocations = new BitSet(this.sourcemapLocations);
+
+		cloned.intro = this.intro;
+		cloned.outro = this.outro;
+
+		return cloned;
+	}
+
+	generateDecodedMap(options) {
+		options = options || {};
+
+		const sourceIndex = 0;
+		const names = Object.keys(this.storedNames);
+		const mappings = new Mappings(options.hires);
+
+		const locate = getLocator(this.original);
+
+		if (this.intro) {
+			mappings.advance(this.intro);
+		}
+
+		this.firstChunk.eachNext((chunk) => {
+			const loc = locate(chunk.start);
+
+			if (chunk.intro.length) mappings.advance(chunk.intro);
+
+			if (chunk.edited) {
+				mappings.addEdit(
+					sourceIndex,
+					chunk.content,
+					loc,
+					chunk.storeName ? names.indexOf(chunk.original) : -1,
+				);
+			} else {
+				mappings.addUneditedChunk(sourceIndex, chunk, this.original, loc, this.sourcemapLocations);
+			}
+
+			if (chunk.outro.length) mappings.advance(chunk.outro);
+		});
+
+		return {
+			file: options.file ? options.file.split(/[/\\]/).pop() : undefined,
+			sources: [
+				options.source ? getRelativePath(options.file || '', options.source) : options.file || '',
+			],
+			sourcesContent: options.includeContent ? [this.original] : undefined,
+			names,
+			mappings: mappings.raw,
+			x_google_ignoreList: this.ignoreList ? [sourceIndex] : undefined,
+		};
+	}
+
+	generateMap(options) {
+		return new SourceMap(this.generateDecodedMap(options));
+	}
+
+	_ensureindentStr() {
+		if (this.indentStr === undefined) {
+			this.indentStr = guessIndent(this.original);
+		}
+	}
+
+	_getRawIndentString() {
+		this._ensureindentStr();
+		return this.indentStr;
+	}
+
+	getIndentString() {
+		this._ensureindentStr();
+		return this.indentStr === null ? '\t' : this.indentStr;
+	}
+
+	indent(indentStr, options) {
+		const pattern = /^[^\r\n]/gm;
+
+		if (isObject(indentStr)) {
+			options = indentStr;
+			indentStr = undefined;
+		}
+
+		if (indentStr === undefined) {
+			this._ensureindentStr();
+			indentStr = this.indentStr || '\t';
+		}
+
+		if (indentStr === '') return this; // noop
+
+		options = options || {};
+
+		// Process exclusion ranges
+		const isExcluded = {};
+
+		if (options.exclude) {
+			const exclusions =
+				typeof options.exclude[0] === 'number' ? [options.exclude] : options.exclude;
+			exclusions.forEach((exclusion) => {
+				for (let i = exclusion[0]; i < exclusion[1]; i += 1) {
+					isExcluded[i] = true;
+				}
+			});
+		}
+
+		let shouldIndentNextCharacter = options.indentStart !== false;
+		const replacer = (match) => {
+			if (shouldIndentNextCharacter) return `${indentStr}${match}`;
+			shouldIndentNextCharacter = true;
+			return match;
+		};
+
+		this.intro = this.intro.replace(pattern, replacer);
+
+		let charIndex = 0;
+		let chunk = this.firstChunk;
+
+		while (chunk) {
+			const end = chunk.end;
+
+			if (chunk.edited) {
+				if (!isExcluded[charIndex]) {
+					chunk.content = chunk.content.replace(pattern, replacer);
+
+					if (chunk.content.length) {
+						shouldIndentNextCharacter = chunk.content[chunk.content.length - 1] === '\n';
+					}
+				}
+			} else {
+				charIndex = chunk.start;
+
+				while (charIndex < end) {
+					if (!isExcluded[charIndex]) {
+						const char = this.original[charIndex];
+
+						if (char === '\n') {
+							shouldIndentNextCharacter = true;
+						} else if (char !== '\r' && shouldIndentNextCharacter) {
+							shouldIndentNextCharacter = false;
+
+							if (charIndex === chunk.start) {
+								chunk.prependRight(indentStr);
+							} else {
+								this._splitChunk(chunk, charIndex);
+								chunk = chunk.next;
+								chunk.prependRight(indentStr);
+							}
+						}
+					}
+
+					charIndex += 1;
+				}
+			}
+
+			charIndex = chunk.end;
+			chunk = chunk.next;
+		}
+
+		this.outro = this.outro.replace(pattern, replacer);
+
+		return this;
+	}
+
+	insert() {
+		throw new Error(
+			'magicString.insert(...) is deprecated. Use prependRight(...) or appendLeft(...)',
+		);
+	}
+
+	insertLeft(index, content) {
+		if (!warned.insertLeft) {
+			console.warn(
+				'magicString.insertLeft(...) is deprecated. Use magicString.appendLeft(...) instead',
+			); // eslint-disable-line no-console
+			warned.insertLeft = true;
+		}
+
+		return this.appendLeft(index, content);
+	}
+
+	insertRight(index, content) {
+		if (!warned.insertRight) {
+			console.warn(
+				'magicString.insertRight(...) is deprecated. Use magicString.prependRight(...) instead',
+			); // eslint-disable-line no-console
+			warned.insertRight = true;
+		}
+
+		return this.prependRight(index, content);
+	}
+
+	move(start, end, index) {
+		if (index >= start && index <= end) throw new Error('Cannot move a selection inside itself');
+
+		this._split(start);
+		this._split(end);
+		this._split(index);
+
+		const first = this.byStart[start];
+		const last = this.byEnd[end];
+
+		const oldLeft = first.previous;
+		const oldRight = last.next;
+
+		const newRight = this.byStart[index];
+		if (!newRight && last === this.lastChunk) return this;
+		const newLeft = newRight ? newRight.previous : this.lastChunk;
+
+		if (oldLeft) oldLeft.next = oldRight;
+		if (oldRight) oldRight.previous = oldLeft;
+
+		if (newLeft) newLeft.next = first;
+		if (newRight) newRight.previous = last;
+
+		if (!first.previous) this.firstChunk = last.next;
+		if (!last.next) {
+			this.lastChunk = first.previous;
+			this.lastChunk.next = null;
+		}
+
+		first.previous = newLeft;
+		last.next = newRight || null;
+
+		if (!newLeft) this.firstChunk = first;
+		if (!newRight) this.lastChunk = last;
+		return this;
+	}
+
+	overwrite(start, end, content, options) {
+		options = options || {};
+		return this.update(start, end, content, { ...options, overwrite: !options.contentOnly });
+	}
+
+	update(start, end, content, options) {
+		if (typeof content !== 'string') throw new TypeError('replacement content must be a string');
+
+		if (this.original.length !== 0) {
+			while (start < 0) start += this.original.length;
+			while (end < 0) end += this.original.length;
+		}
+
+		if (end > this.original.length) throw new Error('end is out of bounds');
+		if (start === end)
+			throw new Error(
+				'Cannot overwrite a zero-length range – use appendLeft or prependRight instead',
+			);
+
+		this._split(start);
+		this._split(end);
+
+		if (options === true) {
+			if (!warned.storeName) {
+				console.warn(
+					'The final argument to magicString.overwrite(...) should be an options object. See https://github.com/rich-harris/magic-string',
+				); // eslint-disable-line no-console
+				warned.storeName = true;
+			}
+
+			options = { storeName: true };
+		}
+		const storeName = options !== undefined ? options.storeName : false;
+		const overwrite = options !== undefined ? options.overwrite : false;
+
+		if (storeName) {
+			const original = this.original.slice(start, end);
+			Object.defineProperty(this.storedNames, original, {
+				writable: true,
+				value: true,
+				enumerable: true,
+			});
+		}
+
+		const first = this.byStart[start];
+		const last = this.byEnd[end];
+
+		if (first) {
+			let chunk = first;
+			while (chunk !== last) {
+				if (chunk.next !== this.byStart[chunk.end]) {
+					throw new Error('Cannot overwrite across a split point');
+				}
+				chunk = chunk.next;
+				chunk.edit('', false);
+			}
+
+			first.edit(content, storeName, !overwrite);
+		} else {
+			// must be inserting at the end
+			const newChunk = new Chunk(start, end, '').edit(content, storeName);
+
+			// TODO last chunk in the array may not be the last chunk, if it's moved...
+			last.next = newChunk;
+			newChunk.previous = last;
+		}
+		return this;
+	}
+
+	prepend(content) {
+		if (typeof content !== 'string') throw new TypeError('outro content must be a string');
+
+		this.intro = content + this.intro;
+		return this;
+	}
+
+	prependLeft(index, content) {
+		if (typeof content !== 'string') throw new TypeError('inserted content must be a string');
+
+		this._split(index);
+
+		const chunk = this.byEnd[index];
+
+		if (chunk) {
+			chunk.prependLeft(content);
+		} else {
+			this.intro = content + this.intro;
+		}
+		return this;
+	}
+
+	prependRight(index, content) {
+		if (typeof content !== 'string') throw new TypeError('inserted content must be a string');
+
+		this._split(index);
+
+		const chunk = this.byStart[index];
+
+		if (chunk) {
+			chunk.prependRight(content);
+		} else {
+			this.outro = content + this.outro;
+		}
+		return this;
+	}
+
+	remove(start, end) {
+		if (this.original.length !== 0) {
+			while (start < 0) start += this.original.length;
+			while (end < 0) end += this.original.length;
+		}
+
+		if (start === end) return this;
+
+		if (start < 0 || end > this.original.length) throw new Error('Character is out of bounds');
+		if (start > end) throw new Error('end must be greater than start');
+
+		this._split(start);
+		this._split(end);
+
+		let chunk = this.byStart[start];
+
+		while (chunk) {
+			chunk.intro = '';
+			chunk.outro = '';
+			chunk.edit('');
+
+			chunk = end > chunk.end ? this.byStart[chunk.end] : null;
+		}
+		return this;
+	}
+
+	reset(start, end) {
+		if (this.original.length !== 0) {
+			while (start < 0) start += this.original.length;
+			while (end < 0) end += this.original.length;
+		}
+
+		if (start === end) return this;
+
+		if (start < 0 || end > this.original.length) throw new Error('Character is out of bounds');
+		if (start > end) throw new Error('end must be greater than start');
+
+		this._split(start);
+		this._split(end);
+
+		let chunk = this.byStart[start];
+
+		while (chunk) {
+			chunk.reset();
+
+			chunk = end > chunk.end ? this.byStart[chunk.end] : null;
+		}
+		return this;
+	}
+
+	lastChar() {
+		if (this.outro.length) return this.outro[this.outro.length - 1];
+		let chunk = this.lastChunk;
+		do {
+			if (chunk.outro.length) return chunk.outro[chunk.outro.length - 1];
+			if (chunk.content.length) return chunk.content[chunk.content.length - 1];
+			if (chunk.intro.length) return chunk.intro[chunk.intro.length - 1];
+		} while ((chunk = chunk.previous));
+		if (this.intro.length) return this.intro[this.intro.length - 1];
+		return '';
+	}
+
+	lastLine() {
+		let lineIndex = this.outro.lastIndexOf(n);
+		if (lineIndex !== -1) return this.outro.substr(lineIndex + 1);
+		let lineStr = this.outro;
+		let chunk = this.lastChunk;
+		do {
+			if (chunk.outro.length > 0) {
+				lineIndex = chunk.outro.lastIndexOf(n);
+				if (lineIndex !== -1) return chunk.outro.substr(lineIndex + 1) + lineStr;
+				lineStr = chunk.outro + lineStr;
+			}
+
+			if (chunk.content.length > 0) {
+				lineIndex = chunk.content.lastIndexOf(n);
+				if (lineIndex !== -1) return chunk.content.substr(lineIndex + 1) + lineStr;
+				lineStr = chunk.content + lineStr;
+			}
+
+			if (chunk.intro.length > 0) {
+				lineIndex = chunk.intro.lastIndexOf(n);
+				if (lineIndex !== -1) return chunk.intro.substr(lineIndex + 1) + lineStr;
+				lineStr = chunk.intro + lineStr;
+			}
+		} while ((chunk = chunk.previous));
+		lineIndex = this.intro.lastIndexOf(n);
+		if (lineIndex !== -1) return this.intro.substr(lineIndex + 1) + lineStr;
+		return this.intro + lineStr;
+	}
+
+	slice(start = 0, end = this.original.length) {
+		if (this.original.length !== 0) {
+			while (start < 0) start += this.original.length;
+			while (end < 0) end += this.original.length;
+		}
+
+		let result = '';
+
+		// find start chunk
+		let chunk = this.firstChunk;
+		while (chunk && (chunk.start > start || chunk.end <= start)) {
+			// found end chunk before start
+			if (chunk.start < end && chunk.end >= end) {
+				return result;
+			}
+
+			chunk = chunk.next;
+		}
+
+		if (chunk && chunk.edited && chunk.start !== start)
+			throw new Error(`Cannot use replaced character ${start} as slice start anchor.`);
+
+		const startChunk = chunk;
+		while (chunk) {
+			if (chunk.intro && (startChunk !== chunk || chunk.start === start)) {
+				result += chunk.intro;
+			}
+
+			const containsEnd = chunk.start < end && chunk.end >= end;
+			if (containsEnd && chunk.edited && chunk.end !== end)
+				throw new Error(`Cannot use replaced character ${end} as slice end anchor.`);
+
+			const sliceStart = startChunk === chunk ? start - chunk.start : 0;
+			const sliceEnd = containsEnd ? chunk.content.length + end - chunk.end : chunk.content.length;
+
+			result += chunk.content.slice(sliceStart, sliceEnd);
+
+			if (chunk.outro && (!containsEnd || chunk.end === end)) {
+				result += chunk.outro;
+			}
+
+			if (containsEnd) {
+				break;
+			}
+
+			chunk = chunk.next;
+		}
+
+		return result;
+	}
+
+	// TODO deprecate this? not really very useful
+	snip(start, end) {
+		const clone = this.clone();
+		clone.remove(0, start);
+		clone.remove(end, clone.original.length);
+
+		return clone;
+	}
+
+	_split(index) {
+		if (this.byStart[index] || this.byEnd[index]) return;
+
+		let chunk = this.lastSearchedChunk;
+		const searchForward = index > chunk.end;
+
+		while (chunk) {
+			if (chunk.contains(index)) return this._splitChunk(chunk, index);
+
+			chunk = searchForward ? this.byStart[chunk.end] : this.byEnd[chunk.start];
+		}
+	}
+
+	_splitChunk(chunk, index) {
+		if (chunk.edited && chunk.content.length) {
+			// zero-length edited chunks are a special case (overlapping replacements)
+			const loc = getLocator(this.original)(index);
+			throw new Error(
+				`Cannot split a chunk that has already been edited (${loc.line}:${loc.column} – "${chunk.original}")`,
+			);
+		}
+
+		const newChunk = chunk.split(index);
+
+		this.byEnd[index] = chunk;
+		this.byStart[index] = newChunk;
+		this.byEnd[newChunk.end] = newChunk;
+
+		if (chunk === this.lastChunk) this.lastChunk = newChunk;
+
+		this.lastSearchedChunk = chunk;
+		return true;
+	}
+
+	toString() {
+		let str = this.intro;
+
+		let chunk = this.firstChunk;
+		while (chunk) {
+			str += chunk.toString();
+			chunk = chunk.next;
+		}
+
+		return str + this.outro;
+	}
+
+	isEmpty() {
+		let chunk = this.firstChunk;
+		do {
+			if (
+				(chunk.intro.length && chunk.intro.trim()) ||
+				(chunk.content.length && chunk.content.trim()) ||
+				(chunk.outro.length && chunk.outro.trim())
+			)
+				return false;
+		} while ((chunk = chunk.next));
+		return true;
+	}
+
+	length() {
+		let chunk = this.firstChunk;
+		let length = 0;
+		do {
+			length += chunk.intro.length + chunk.content.length + chunk.outro.length;
+		} while ((chunk = chunk.next));
+		return length;
+	}
+
+	trimLines() {
+		return this.trim('[\\r\\n]');
+	}
+
+	trim(charType) {
+		return this.trimStart(charType).trimEnd(charType);
+	}
+
+	trimEndAborted(charType) {
+		const rx = new RegExp((charType || '\\s') + '+$');
+
+		this.outro = this.outro.replace(rx, '');
+		if (this.outro.length) return true;
+
+		let chunk = this.lastChunk;
+
+		do {
+			const end = chunk.end;
+			const aborted = chunk.trimEnd(rx);
+
+			// if chunk was trimmed, we have a new lastChunk
+			if (chunk.end !== end) {
+				if (this.lastChunk === chunk) {
+					this.lastChunk = chunk.next;
+				}
+
+				this.byEnd[chunk.end] = chunk;
+				this.byStart[chunk.next.start] = chunk.next;
+				this.byEnd[chunk.next.end] = chunk.next;
+			}
+
+			if (aborted) return true;
+			chunk = chunk.previous;
+		} while (chunk);
+
+		return false;
+	}
+
+	trimEnd(charType) {
+		this.trimEndAborted(charType);
+		return this;
+	}
+	trimStartAborted(charType) {
+		const rx = new RegExp('^' + (charType || '\\s') + '+');
+
+		this.intro = this.intro.replace(rx, '');
+		if (this.intro.length) return true;
+
+		let chunk = this.firstChunk;
+
+		do {
+			const end = chunk.end;
+			const aborted = chunk.trimStart(rx);
+
+			if (chunk.end !== end) {
+				// special case...
+				if (chunk === this.lastChunk) this.lastChunk = chunk.next;
+
+				this.byEnd[chunk.end] = chunk;
+				this.byStart[chunk.next.start] = chunk.next;
+				this.byEnd[chunk.next.end] = chunk.next;
+			}
+
+			if (aborted) return true;
+			chunk = chunk.next;
+		} while (chunk);
+
+		return false;
+	}
+
+	trimStart(charType) {
+		this.trimStartAborted(charType);
+		return this;
+	}
+
+	hasChanged() {
+		return this.original !== this.toString();
+	}
+
+	_replaceRegexp(searchValue, replacement) {
+		function getReplacement(match, str) {
+			if (typeof replacement === 'string') {
+				return replacement.replace(/\$(\$|&|\d+)/g, (_, i) => {
+					// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace#specifying_a_string_as_a_parameter
+					if (i === '$') return '$';
+					if (i === '&') return match[0];
+					const num = +i;
+					if (num < match.length) return match[+i];
+					return `$${i}`;
+				});
+			} else {
+				return replacement(...match, match.index, str, match.groups);
+			}
+		}
+		function matchAll(re, str) {
+			let match;
+			const matches = [];
+			while ((match = re.exec(str))) {
+				matches.push(match);
+			}
+			return matches;
+		}
+		if (searchValue.global) {
+			const matches = matchAll(searchValue, this.original);
+			matches.forEach((match) => {
+				if (match.index != null) {
+					const replacement = getReplacement(match, this.original);
+					if (replacement !== match[0]) {
+						this.overwrite(
+							match.index,
+							match.index + match[0].length,
+							replacement
+						);
+					}
+				}
+			});
+		} else {
+			const match = this.original.match(searchValue);
+			if (match && match.index != null) {
+				const replacement = getReplacement(match, this.original);
+				if (replacement !== match[0]) {
+					this.overwrite(
+						match.index,
+						match.index + match[0].length,
+						replacement
+					);
+				}
+			}
+		}
+		return this;
+	}
+
+	_replaceString(string, replacement) {
+		const { original } = this;
+		const index = original.indexOf(string);
+
+		if (index !== -1) {
+			this.overwrite(index, index + string.length, replacement);
+		}
+
+		return this;
+	}
+
+	replace(searchValue, replacement) {
+		if (typeof searchValue === 'string') {
+			return this._replaceString(searchValue, replacement);
+		}
+
+		return this._replaceRegexp(searchValue, replacement);
+	}
+
+	_replaceAllString(string, replacement) {
+		const { original } = this;
+		const stringLength = string.length;
+		for (
+			let index = original.indexOf(string);
+			index !== -1;
+			index = original.indexOf(string, index + stringLength)
+		) {
+			const previous = original.slice(index, index + stringLength);
+			if (previous !== replacement)
+				this.overwrite(index, index + stringLength, replacement);
+		}
+
+		return this;
+	}
+
+	replaceAll(searchValue, replacement) {
+		if (typeof searchValue === 'string') {
+			return this._replaceAllString(searchValue, replacement);
+		}
+
+		if (!searchValue.global) {
+			throw new TypeError(
+				'MagicString.prototype.replaceAll called with a non-global RegExp argument',
+			);
+		}
+
+		return this._replaceRegexp(searchValue, replacement);
+	}
+}
+
+const hasOwnProp = Object.prototype.hasOwnProperty;
+
+class Bundle {
+	constructor(options = {}) {
+		this.intro = options.intro || '';
+		this.separator = options.separator !== undefined ? options.separator : '\n';
+		this.sources = [];
+		this.uniqueSources = [];
+		this.uniqueSourceIndexByFilename = {};
+	}
+
+	addSource(source) {
+		if (source instanceof MagicString) {
+			return this.addSource({
+				content: source,
+				filename: source.filename,
+				separator: this.separator,
+			});
+		}
+
+		if (!isObject(source) || !source.content) {
+			throw new Error(
+				'bundle.addSource() takes an object with a `content` property, which should be an instance of MagicString, and an optional `filename`',
+			);
+		}
+
+		['filename', 'ignoreList', 'indentExclusionRanges', 'separator'].forEach((option) => {
+			if (!hasOwnProp.call(source, option)) source[option] = source.content[option];
+		});
+
+		if (source.separator === undefined) {
+			// TODO there's a bunch of this sort of thing, needs cleaning up
+			source.separator = this.separator;
+		}
+
+		if (source.filename) {
+			if (!hasOwnProp.call(this.uniqueSourceIndexByFilename, source.filename)) {
+				this.uniqueSourceIndexByFilename[source.filename] = this.uniqueSources.length;
+				this.uniqueSources.push({ filename: source.filename, content: source.content.original });
+			} else {
+				const uniqueSource = this.uniqueSources[this.uniqueSourceIndexByFilename[source.filename]];
+				if (source.content.original !== uniqueSource.content) {
+					throw new Error(`Illegal source: same filename (${source.filename}), different contents`);
+				}
+			}
+		}
+
+		this.sources.push(source);
+		return this;
+	}
+
+	append(str, options) {
+		this.addSource({
+			content: new MagicString(str),
+			separator: (options && options.separator) || '',
+		});
+
+		return this;
+	}
+
+	clone() {
+		const bundle = new Bundle({
+			intro: this.intro,
+			separator: this.separator,
+		});
+
+		this.sources.forEach((source) => {
+			bundle.addSource({
+				filename: source.filename,
+				content: source.content.clone(),
+				separator: source.separator,
+			});
+		});
+
+		return bundle;
+	}
+
+	generateDecodedMap(options = {}) {
+		const names = [];
+		let x_google_ignoreList = undefined;
+		this.sources.forEach((source) => {
+			Object.keys(source.content.storedNames).forEach((name) => {
+				if (!~names.indexOf(name)) names.push(name);
+			});
+		});
+
+		const mappings = new Mappings(options.hires);
+
+		if (this.intro) {
+			mappings.advance(this.intro);
+		}
+
+		this.sources.forEach((source, i) => {
+			if (i > 0) {
+				mappings.advance(this.separator);
+			}
+
+			const sourceIndex = source.filename ? this.uniqueSourceIndexByFilename[source.filename] : -1;
+			const magicString = source.content;
+			const locate = getLocator(magicString.original);
+
+			if (magicString.intro) {
+				mappings.advance(magicString.intro);
+			}
+
+			magicString.firstChunk.eachNext((chunk) => {
+				const loc = locate(chunk.start);
+
+				if (chunk.intro.length) mappings.advance(chunk.intro);
+
+				if (source.filename) {
+					if (chunk.edited) {
+						mappings.addEdit(
+							sourceIndex,
+							chunk.content,
+							loc,
+							chunk.storeName ? names.indexOf(chunk.original) : -1,
+						);
+					} else {
+						mappings.addUneditedChunk(
+							sourceIndex,
+							chunk,
+							magicString.original,
+							loc,
+							magicString.sourcemapLocations,
+						);
+					}
+				} else {
+					mappings.advance(chunk.content);
+				}
+
+				if (chunk.outro.length) mappings.advance(chunk.outro);
+			});
+
+			if (magicString.outro) {
+				mappings.advance(magicString.outro);
+			}
+
+			if (source.ignoreList && sourceIndex !== -1) {
+				if (x_google_ignoreList === undefined) {
+					x_google_ignoreList = [];
+				}
+				x_google_ignoreList.push(sourceIndex);
+			}
+		});
+
+		return {
+			file: options.file ? options.file.split(/[/\\]/).pop() : undefined,
+			sources: this.uniqueSources.map((source) => {
+				return options.file ? getRelativePath(options.file, source.filename) : source.filename;
+			}),
+			sourcesContent: this.uniqueSources.map((source) => {
+				return options.includeContent ? source.content : null;
+			}),
+			names,
+			mappings: mappings.raw,
+			x_google_ignoreList,
+		};
+	}
+
+	generateMap(options) {
+		return new SourceMap(this.generateDecodedMap(options));
+	}
+
+	getIndentString() {
+		const indentStringCounts = {};
+
+		this.sources.forEach((source) => {
+			const indentStr = source.content._getRawIndentString();
+
+			if (indentStr === null) return;
+
+			if (!indentStringCounts[indentStr]) indentStringCounts[indentStr] = 0;
+			indentStringCounts[indentStr] += 1;
+		});
+
+		return (
+			Object.keys(indentStringCounts).sort((a, b) => {
+				return indentStringCounts[a] - indentStringCounts[b];
+			})[0] || '\t'
+		);
+	}
+
+	indent(indentStr) {
+		if (!arguments.length) {
+			indentStr = this.getIndentString();
+		}
+
+		if (indentStr === '') return this; // noop
+
+		let trailingNewline = !this.intro || this.intro.slice(-1) === '\n';
+
+		this.sources.forEach((source, i) => {
+			const separator = source.separator !== undefined ? source.separator : this.separator;
+			const indentStart = trailingNewline || (i > 0 && /\r?\n$/.test(separator));
+
+			source.content.indent(indentStr, {
+				exclude: source.indentExclusionRanges,
+				indentStart, //: trailingNewline || /\r?\n$/.test( separator )  //true///\r?\n/.test( separator )
+			});
+
+			trailingNewline = source.content.lastChar() === '\n';
+		});
+
+		if (this.intro) {
+			this.intro =
+				indentStr +
+				this.intro.replace(/^[^\n]/gm, (match, index) => {
+					return index > 0 ? indentStr + match : match;
+				});
+		}
+
+		return this;
+	}
+
+	prepend(str) {
+		this.intro = str + this.intro;
+		return this;
+	}
+
+	toString() {
+		const body = this.sources
+			.map((source, i) => {
+				const separator = source.separator !== undefined ? source.separator : this.separator;
+				const str = (i > 0 ? separator : '') + source.content.toString();
+
+				return str;
+			})
+			.join('');
+
+		return this.intro + body;
+	}
+
+	isEmpty() {
+		if (this.intro.length && this.intro.trim()) return false;
+		if (this.sources.some((source) => !source.content.isEmpty())) return false;
+		return true;
+	}
+
+	length() {
+		return this.sources.reduce(
+			(length, source) => length + source.content.length(),
+			this.intro.length,
+		);
+	}
+
+	trimLines() {
+		return this.trim('[\\r\\n]');
+	}
+
+	trim(charType) {
+		return this.trimStart(charType).trimEnd(charType);
+	}
+
+	trimStart(charType) {
+		const rx = new RegExp('^' + (charType || '\\s') + '+');
+		this.intro = this.intro.replace(rx, '');
+
+		if (!this.intro) {
+			let source;
+			let i = 0;
+
+			do {
+				source = this.sources[i++];
+				if (!source) {
+					break;
+				}
+			} while (!source.content.trimStartAborted(charType));
+		}
+
+		return this;
+	}
+
+	trimEnd(charType) {
+		const rx = new RegExp((charType || '\\s') + '+$');
+
+		let source;
+		let i = this.sources.length - 1;
+
+		do {
+			source = this.sources[i--];
+			if (!source) {
+				this.intro = this.intro.replace(rx, '');
+				break;
+			}
+		} while (!source.content.trimEndAborted(charType));
+
+		return this;
+	}
+}
+
+MagicString.Bundle = Bundle;
+MagicString.SourceMap = SourceMap;
+MagicString.default = MagicString; // work around TypeScript bug https://github.com/Rich-Harris/magic-string/pull/121
+
+module.exports = MagicString;
+//# sourceMappingURL=magic-string.cjs.js.map
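
For reference, a minimal usage sketch of the MagicString CommonJS API defined in the file above; the sample string, offsets, and file names below are illustrative assumptions, not part of the package:

	// load the CJS build exported via module.exports above
	const MagicString = require('magic-string');

	const s = new MagicString('const answer = 40;');
	s.overwrite(15, 17, '42');      // replace the original range [15, 17), i.e. '40'
	s.prepend('/* generated */\n'); // added to the intro, before the first chunk
	s.append('\n');                 // added to the outro, after the last chunk

	console.log(s.toString());      // '/* generated */\nconst answer = 42;\n'
	console.log(s.hasChanged());    // true

	// generateMap wraps generateDecodedMap and encodes the mappings
	const map = s.generateMap({ file: 'out.js', source: 'in.js', includeContent: true, hires: true });
	console.log(map.toUrl());       // data:application/json;charset=utf-8;base64,...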

The diff is not shown because the file is too large.
+ 0 - 0
node_modules/magic-string/dist/magic-string.cjs.js.map


The diff is not shown because the file is too large.
+ 0 - 0
node_modules/magic-string/dist/magic-string.es.mjs.map


+ 1652 - 0
node_modules/magic-string/dist/magic-string.umd.js

@@ -0,0 +1,1652 @@
+(function (global, factory) {
+	typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
+	typeof define === 'function' && define.amd ? define(factory) :
+	(global = typeof globalThis !== 'undefined' ? globalThis : global || self, global.MagicString = factory());
+})(this, (function () { 'use strict';
+
+	class BitSet {
+		constructor(arg) {
+			this.bits = arg instanceof BitSet ? arg.bits.slice() : [];
+		}
+
+		add(n) {
+			this.bits[n >> 5] |= 1 << (n & 31);
+		}
+
+		has(n) {
+			return !!(this.bits[n >> 5] & (1 << (n & 31)));
+		}
+	}
+
+	class Chunk {
+		constructor(start, end, content) {
+			this.start = start;
+			this.end = end;
+			this.original = content;
+
+			this.intro = '';
+			this.outro = '';
+
+			this.content = content;
+			this.storeName = false;
+			this.edited = false;
+
+			{
+				this.previous = null;
+				this.next = null;
+			}
+		}
+
+		appendLeft(content) {
+			this.outro += content;
+		}
+
+		appendRight(content) {
+			this.intro = this.intro + content;
+		}
+
+		clone() {
+			const chunk = new Chunk(this.start, this.end, this.original);
+
+			chunk.intro = this.intro;
+			chunk.outro = this.outro;
+			chunk.content = this.content;
+			chunk.storeName = this.storeName;
+			chunk.edited = this.edited;
+
+			return chunk;
+		}
+
+		contains(index) {
+			return this.start < index && index < this.end;
+		}
+
+		eachNext(fn) {
+			let chunk = this;
+			while (chunk) {
+				fn(chunk);
+				chunk = chunk.next;
+			}
+		}
+
+		eachPrevious(fn) {
+			let chunk = this;
+			while (chunk) {
+				fn(chunk);
+				chunk = chunk.previous;
+			}
+		}
+
+		edit(content, storeName, contentOnly) {
+			this.content = content;
+			if (!contentOnly) {
+				this.intro = '';
+				this.outro = '';
+			}
+			this.storeName = storeName;
+
+			this.edited = true;
+
+			return this;
+		}
+
+		prependLeft(content) {
+			this.outro = content + this.outro;
+		}
+
+		prependRight(content) {
+			this.intro = content + this.intro;
+		}
+
+		reset() {
+			this.intro = '';
+			this.outro = '';
+			if (this.edited) {
+				this.content = this.original;
+				this.storeName = false;
+				this.edited = false;
+			}
+		}
+
+		split(index) {
+			const sliceIndex = index - this.start;
+
+			const originalBefore = this.original.slice(0, sliceIndex);
+			const originalAfter = this.original.slice(sliceIndex);
+
+			this.original = originalBefore;
+
+			const newChunk = new Chunk(index, this.end, originalAfter);
+			newChunk.outro = this.outro;
+			this.outro = '';
+
+			this.end = index;
+
+			if (this.edited) {
+				// after the split, keep the edited content in the correct chunk
+				// so that the sourcemap stays correct
+				// For example:
+				// '  test'.trim()
+				//     split   -> '  ' + 'test'
+				//   ✔️ edit    -> '' + 'test'
+				//   ✖️ edit    -> 'test' + '' 
+				// TODO is this block necessary?...
+				newChunk.edit('', false);
+				this.content = '';
+			} else {
+				this.content = originalBefore;
+			}
+
+			newChunk.next = this.next;
+			if (newChunk.next) newChunk.next.previous = newChunk;
+			newChunk.previous = this;
+			this.next = newChunk;
+
+			return newChunk;
+		}
+
+		toString() {
+			return this.intro + this.content + this.outro;
+		}
+
+		trimEnd(rx) {
+			this.outro = this.outro.replace(rx, '');
+			if (this.outro.length) return true;
+
+			const trimmed = this.content.replace(rx, '');
+
+			if (trimmed.length) {
+				if (trimmed !== this.content) {
+					this.split(this.start + trimmed.length).edit('', undefined, true);
+					if (this.edited) {
+						// save the change, if it has been edited
+						this.edit(trimmed, this.storeName, true);
+					}
+				}
+				return true;
+			} else {
+				this.edit('', undefined, true);
+
+				this.intro = this.intro.replace(rx, '');
+				if (this.intro.length) return true;
+			}
+		}
+
+		trimStart(rx) {
+			this.intro = this.intro.replace(rx, '');
+			if (this.intro.length) return true;
+
+			const trimmed = this.content.replace(rx, '');
+
+			if (trimmed.length) {
+				if (trimmed !== this.content) {
+					const newChunk = this.split(this.end - trimmed.length);
+					if (this.edited) {
+						// save the change, if it has been edited
+						newChunk.edit(trimmed, this.storeName, true);
+					}
+					this.edit('', undefined, true);
+				}
+				return true;
+			} else {
+				this.edit('', undefined, true);
+
+				this.outro = this.outro.replace(rx, '');
+				if (this.outro.length) return true;
+			}
+		}
+	}
+
+	const comma = ','.charCodeAt(0);
+	const semicolon = ';'.charCodeAt(0);
+	const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/';
+	const intToChar = new Uint8Array(64); // 64 possible chars.
+	const charToInt = new Uint8Array(128); // z is 122 in ASCII
+	for (let i = 0; i < chars.length; i++) {
+	    const c = chars.charCodeAt(i);
+	    intToChar[i] = c;
+	    charToInt[c] = i;
+	}
+	function encodeInteger(builder, num, relative) {
+	    let delta = num - relative;
+	    delta = delta < 0 ? (-delta << 1) | 1 : delta << 1;
+	    do {
+	        let clamped = delta & 0b011111;
+	        delta >>>= 5;
+	        if (delta > 0)
+	            clamped |= 0b100000;
+	        builder.write(intToChar[clamped]);
+	    } while (delta > 0);
+	    return num;
+	}
+
+	const bufLength = 1024 * 16;
+	// Provide a fallback for older environments.
+	const td = typeof TextDecoder !== 'undefined'
+	    ? /* #__PURE__ */ new TextDecoder()
+	    : typeof Buffer !== 'undefined'
+	        ? {
+	            decode(buf) {
+	                const out = Buffer.from(buf.buffer, buf.byteOffset, buf.byteLength);
+	                return out.toString();
+	            },
+	        }
+	        : {
+	            decode(buf) {
+	                let out = '';
+	                for (let i = 0; i < buf.length; i++) {
+	                    out += String.fromCharCode(buf[i]);
+	                }
+	                return out;
+	            },
+	        };
+	class StringWriter {
+	    constructor() {
+	        this.pos = 0;
+	        this.out = '';
+	        this.buffer = new Uint8Array(bufLength);
+	    }
+	    write(v) {
+	        const { buffer } = this;
+	        buffer[this.pos++] = v;
+	        if (this.pos === bufLength) {
+	            this.out += td.decode(buffer);
+	            this.pos = 0;
+	        }
+	    }
+	    flush() {
+	        const { buffer, out, pos } = this;
+	        return pos > 0 ? out + td.decode(buffer.subarray(0, pos)) : out;
+	    }
+	}
+	function encode(decoded) {
+	    const writer = new StringWriter();
+	    let sourcesIndex = 0;
+	    let sourceLine = 0;
+	    let sourceColumn = 0;
+	    let namesIndex = 0;
+	    for (let i = 0; i < decoded.length; i++) {
+	        const line = decoded[i];
+	        if (i > 0)
+	            writer.write(semicolon);
+	        if (line.length === 0)
+	            continue;
+	        let genColumn = 0;
+	        for (let j = 0; j < line.length; j++) {
+	            const segment = line[j];
+	            if (j > 0)
+	                writer.write(comma);
+	            genColumn = encodeInteger(writer, segment[0], genColumn);
+	            if (segment.length === 1)
+	                continue;
+	            sourcesIndex = encodeInteger(writer, segment[1], sourcesIndex);
+	            sourceLine = encodeInteger(writer, segment[2], sourceLine);
+	            sourceColumn = encodeInteger(writer, segment[3], sourceColumn);
+	            if (segment.length === 4)
+	                continue;
+	            namesIndex = encodeInteger(writer, segment[4], namesIndex);
+	        }
+	    }
+	    return writer.flush();
+	}
+
+	function getBtoa() {
+		if (typeof globalThis !== 'undefined' && typeof globalThis.btoa === 'function') {
+			return (str) => globalThis.btoa(unescape(encodeURIComponent(str)));
+		} else if (typeof Buffer === 'function') {
+			return (str) => Buffer.from(str, 'utf-8').toString('base64');
+		} else {
+			return () => {
+				throw new Error('Unsupported environment: `window.btoa` or `Buffer` should be supported.');
+			};
+		}
+	}
+
+	const btoa = /*#__PURE__*/ getBtoa();
+
+	class SourceMap {
+		constructor(properties) {
+			this.version = 3;
+			this.file = properties.file;
+			this.sources = properties.sources;
+			this.sourcesContent = properties.sourcesContent;
+			this.names = properties.names;
+			this.mappings = encode(properties.mappings);
+			if (typeof properties.x_google_ignoreList !== 'undefined') {
+				this.x_google_ignoreList = properties.x_google_ignoreList;
+			}
+		}
+
+		toString() {
+			return JSON.stringify(this);
+		}
+
+		toUrl() {
+			return 'data:application/json;charset=utf-8;base64,' + btoa(this.toString());
+		}
+	}
+
+	function guessIndent(code) {
+		const lines = code.split('\n');
+
+		const tabbed = lines.filter((line) => /^\t+/.test(line));
+		const spaced = lines.filter((line) => /^ {2,}/.test(line));
+
+		if (tabbed.length === 0 && spaced.length === 0) {
+			return null;
+		}
+
+		// More lines tabbed than spaced? Assume tabs, and
+		// default to tabs in the case of a tie (or nothing
+		// to go on)
+		if (tabbed.length >= spaced.length) {
+			return '\t';
+		}
+
+		// Otherwise, we need to guess the multiple
+		const min = spaced.reduce((previous, current) => {
+			const numSpaces = /^ +/.exec(current)[0].length;
+			return Math.min(numSpaces, previous);
+		}, Infinity);
+
+		return new Array(min + 1).join(' ');
+	}
+
+	function getRelativePath(from, to) {
+		const fromParts = from.split(/[/\\]/);
+		const toParts = to.split(/[/\\]/);
+
+		fromParts.pop(); // get dirname
+
+		while (fromParts[0] === toParts[0]) {
+			fromParts.shift();
+			toParts.shift();
+		}
+
+		if (fromParts.length) {
+			let i = fromParts.length;
+			while (i--) fromParts[i] = '..';
+		}
+
+		return fromParts.concat(toParts).join('/');
+	}
+
+	const toString = Object.prototype.toString;
+
+	function isObject(thing) {
+		return toString.call(thing) === '[object Object]';
+	}
+
+	function getLocator(source) {
+		const originalLines = source.split('\n');
+		const lineOffsets = [];
+
+		for (let i = 0, pos = 0; i < originalLines.length; i++) {
+			lineOffsets.push(pos);
+			pos += originalLines[i].length + 1;
+		}
+
+		return function locate(index) {
+			let i = 0;
+			let j = lineOffsets.length;
+			while (i < j) {
+				const m = (i + j) >> 1;
+				if (index < lineOffsets[m]) {
+					j = m;
+				} else {
+					i = m + 1;
+				}
+			}
+			const line = i - 1;
+			const column = index - lineOffsets[line];
+			return { line, column };
+		};
+	}
+
+	const wordRegex = /\w/;
+
+	class Mappings {
+		constructor(hires) {
+			this.hires = hires;
+			this.generatedCodeLine = 0;
+			this.generatedCodeColumn = 0;
+			this.raw = [];
+			this.rawSegments = this.raw[this.generatedCodeLine] = [];
+			this.pending = null;
+		}
+
+		addEdit(sourceIndex, content, loc, nameIndex) {
+			if (content.length) {
+				const contentLengthMinusOne = content.length - 1;
+				let contentLineEnd = content.indexOf('\n', 0);
+				let previousContentLineEnd = -1;
+				// Loop through each line in the content and add a segment, but stop if the last line is empty,
+				// else code afterwards would fill one line too many
+				while (contentLineEnd >= 0 && contentLengthMinusOne > contentLineEnd) {
+					const segment = [this.generatedCodeColumn, sourceIndex, loc.line, loc.column];
+					if (nameIndex >= 0) {
+						segment.push(nameIndex);
+					}
+					this.rawSegments.push(segment);
+
+					this.generatedCodeLine += 1;
+					this.raw[this.generatedCodeLine] = this.rawSegments = [];
+					this.generatedCodeColumn = 0;
+
+					previousContentLineEnd = contentLineEnd;
+					contentLineEnd = content.indexOf('\n', contentLineEnd + 1);
+				}
+
+				const segment = [this.generatedCodeColumn, sourceIndex, loc.line, loc.column];
+				if (nameIndex >= 0) {
+					segment.push(nameIndex);
+				}
+				this.rawSegments.push(segment);
+
+				this.advance(content.slice(previousContentLineEnd + 1));
+			} else if (this.pending) {
+				this.rawSegments.push(this.pending);
+				this.advance(content);
+			}
+
+			this.pending = null;
+		}
+
+		addUneditedChunk(sourceIndex, chunk, original, loc, sourcemapLocations) {
+			let originalCharIndex = chunk.start;
+			let first = true;
+			// when iterating each char, check if it's in a word boundary
+			let charInHiresBoundary = false;
+
+			while (originalCharIndex < chunk.end) {
+				if (this.hires || first || sourcemapLocations.has(originalCharIndex)) {
+					const segment = [this.generatedCodeColumn, sourceIndex, loc.line, loc.column];
+
+					if (this.hires === 'boundary') {
+						// in hires "boundary" mode, group segments per word boundary rather than per char
+						if (wordRegex.test(original[originalCharIndex])) {
+							// for first char in the boundary found, start the boundary by pushing a segment
+							if (!charInHiresBoundary) {
+								this.rawSegments.push(segment);
+								charInHiresBoundary = true;
+							}
+						} else {
+							// for non-word char, end the boundary by pushing a segment
+							this.rawSegments.push(segment);
+							charInHiresBoundary = false;
+						}
+					} else {
+						this.rawSegments.push(segment);
+					}
+				}
+
+				if (original[originalCharIndex] === '\n') {
+					loc.line += 1;
+					loc.column = 0;
+					this.generatedCodeLine += 1;
+					this.raw[this.generatedCodeLine] = this.rawSegments = [];
+					this.generatedCodeColumn = 0;
+					first = true;
+				} else {
+					loc.column += 1;
+					this.generatedCodeColumn += 1;
+					first = false;
+				}
+
+				originalCharIndex += 1;
+			}
+
+			this.pending = null;
+		}
+
+		advance(str) {
+			if (!str) return;
+
+			const lines = str.split('\n');
+
+			if (lines.length > 1) {
+				for (let i = 0; i < lines.length - 1; i++) {
+					this.generatedCodeLine++;
+					this.raw[this.generatedCodeLine] = this.rawSegments = [];
+				}
+				this.generatedCodeColumn = 0;
+			}
+
+			this.generatedCodeColumn += lines[lines.length - 1].length;
+		}
+	}
+
+	const n = '\n';
+
+	const warned = {
+		insertLeft: false,
+		insertRight: false,
+		storeName: false,
+	};
+
+	class MagicString {
+		constructor(string, options = {}) {
+			const chunk = new Chunk(0, string.length, string);
+
+			Object.defineProperties(this, {
+				original: { writable: true, value: string },
+				outro: { writable: true, value: '' },
+				intro: { writable: true, value: '' },
+				firstChunk: { writable: true, value: chunk },
+				lastChunk: { writable: true, value: chunk },
+				lastSearchedChunk: { writable: true, value: chunk },
+				byStart: { writable: true, value: {} },
+				byEnd: { writable: true, value: {} },
+				filename: { writable: true, value: options.filename },
+				indentExclusionRanges: { writable: true, value: options.indentExclusionRanges },
+				sourcemapLocations: { writable: true, value: new BitSet() },
+				storedNames: { writable: true, value: {} },
+				indentStr: { writable: true, value: undefined },
+				ignoreList: { writable: true, value: options.ignoreList },
+			});
+
+			this.byStart[0] = chunk;
+			this.byEnd[string.length] = chunk;
+		}
+
+		addSourcemapLocation(char) {
+			this.sourcemapLocations.add(char);
+		}
+
+		append(content) {
+			if (typeof content !== 'string') throw new TypeError('outro content must be a string');
+
+			this.outro += content;
+			return this;
+		}
+
+		appendLeft(index, content) {
+			if (typeof content !== 'string') throw new TypeError('inserted content must be a string');
+
+			this._split(index);
+
+			const chunk = this.byEnd[index];
+
+			if (chunk) {
+				chunk.appendLeft(content);
+			} else {
+				this.intro += content;
+			}
+			return this;
+		}
+
+		appendRight(index, content) {
+			if (typeof content !== 'string') throw new TypeError('inserted content must be a string');
+
+			this._split(index);
+
+			const chunk = this.byStart[index];
+
+			if (chunk) {
+				chunk.appendRight(content);
+			} else {
+				this.outro += content;
+			}
+			return this;
+		}
+
+		clone() {
+			const cloned = new MagicString(this.original, { filename: this.filename });
+
+			let originalChunk = this.firstChunk;
+			let clonedChunk = (cloned.firstChunk = cloned.lastSearchedChunk = originalChunk.clone());
+
+			while (originalChunk) {
+				cloned.byStart[clonedChunk.start] = clonedChunk;
+				cloned.byEnd[clonedChunk.end] = clonedChunk;
+
+				const nextOriginalChunk = originalChunk.next;
+				const nextClonedChunk = nextOriginalChunk && nextOriginalChunk.clone();
+
+				if (nextClonedChunk) {
+					clonedChunk.next = nextClonedChunk;
+					nextClonedChunk.previous = clonedChunk;
+
+					clonedChunk = nextClonedChunk;
+				}
+
+				originalChunk = nextOriginalChunk;
+			}
+
+			cloned.lastChunk = clonedChunk;
+
+			if (this.indentExclusionRanges) {
+				cloned.indentExclusionRanges = this.indentExclusionRanges.slice();
+			}
+
+			cloned.sourcemapLocations = new BitSet(this.sourcemapLocations);
+
+			cloned.intro = this.intro;
+			cloned.outro = this.outro;
+
+			return cloned;
+		}
+
+		generateDecodedMap(options) {
+			options = options || {};
+
+			const sourceIndex = 0;
+			const names = Object.keys(this.storedNames);
+			const mappings = new Mappings(options.hires);
+
+			const locate = getLocator(this.original);
+
+			if (this.intro) {
+				mappings.advance(this.intro);
+			}
+
+			this.firstChunk.eachNext((chunk) => {
+				const loc = locate(chunk.start);
+
+				if (chunk.intro.length) mappings.advance(chunk.intro);
+
+				if (chunk.edited) {
+					mappings.addEdit(
+						sourceIndex,
+						chunk.content,
+						loc,
+						chunk.storeName ? names.indexOf(chunk.original) : -1,
+					);
+				} else {
+					mappings.addUneditedChunk(sourceIndex, chunk, this.original, loc, this.sourcemapLocations);
+				}
+
+				if (chunk.outro.length) mappings.advance(chunk.outro);
+			});
+
+			return {
+				file: options.file ? options.file.split(/[/\\]/).pop() : undefined,
+				sources: [
+					options.source ? getRelativePath(options.file || '', options.source) : options.file || '',
+				],
+				sourcesContent: options.includeContent ? [this.original] : undefined,
+				names,
+				mappings: mappings.raw,
+				x_google_ignoreList: this.ignoreList ? [sourceIndex] : undefined,
+			};
+		}
+
+		generateMap(options) {
+			return new SourceMap(this.generateDecodedMap(options));
+		}
+
+		_ensureindentStr() {
+			if (this.indentStr === undefined) {
+				this.indentStr = guessIndent(this.original);
+			}
+		}
+
+		_getRawIndentString() {
+			this._ensureindentStr();
+			return this.indentStr;
+		}
+
+		getIndentString() {
+			this._ensureindentStr();
+			return this.indentStr === null ? '\t' : this.indentStr;
+		}
+
+		indent(indentStr, options) {
+			const pattern = /^[^\r\n]/gm;
+
+			if (isObject(indentStr)) {
+				options = indentStr;
+				indentStr = undefined;
+			}
+
+			if (indentStr === undefined) {
+				this._ensureindentStr();
+				indentStr = this.indentStr || '\t';
+			}
+
+			if (indentStr === '') return this; // noop
+
+			options = options || {};
+
+			// Process exclusion ranges
+			const isExcluded = {};
+
+			if (options.exclude) {
+				const exclusions =
+					typeof options.exclude[0] === 'number' ? [options.exclude] : options.exclude;
+				exclusions.forEach((exclusion) => {
+					for (let i = exclusion[0]; i < exclusion[1]; i += 1) {
+						isExcluded[i] = true;
+					}
+				});
+			}
+
+			let shouldIndentNextCharacter = options.indentStart !== false;
+			const replacer = (match) => {
+				if (shouldIndentNextCharacter) return `${indentStr}${match}`;
+				shouldIndentNextCharacter = true;
+				return match;
+			};
+
+			this.intro = this.intro.replace(pattern, replacer);
+
+			let charIndex = 0;
+			let chunk = this.firstChunk;
+
+			while (chunk) {
+				const end = chunk.end;
+
+				if (chunk.edited) {
+					if (!isExcluded[charIndex]) {
+						chunk.content = chunk.content.replace(pattern, replacer);
+
+						if (chunk.content.length) {
+							shouldIndentNextCharacter = chunk.content[chunk.content.length - 1] === '\n';
+						}
+					}
+				} else {
+					charIndex = chunk.start;
+
+					while (charIndex < end) {
+						if (!isExcluded[charIndex]) {
+							const char = this.original[charIndex];
+
+							if (char === '\n') {
+								shouldIndentNextCharacter = true;
+							} else if (char !== '\r' && shouldIndentNextCharacter) {
+								shouldIndentNextCharacter = false;
+
+								if (charIndex === chunk.start) {
+									chunk.prependRight(indentStr);
+								} else {
+									this._splitChunk(chunk, charIndex);
+									chunk = chunk.next;
+									chunk.prependRight(indentStr);
+								}
+							}
+						}
+
+						charIndex += 1;
+					}
+				}
+
+				charIndex = chunk.end;
+				chunk = chunk.next;
+			}
+
+			this.outro = this.outro.replace(pattern, replacer);
+
+			return this;
+		}
+
+		insert() {
+			throw new Error(
+				'magicString.insert(...) is deprecated. Use prependRight(...) or appendLeft(...)',
+			);
+		}
+
+		insertLeft(index, content) {
+			if (!warned.insertLeft) {
+				console.warn(
+					'magicString.insertLeft(...) is deprecated. Use magicString.appendLeft(...) instead',
+				); // eslint-disable-line no-console
+				warned.insertLeft = true;
+			}
+
+			return this.appendLeft(index, content);
+		}
+
+		insertRight(index, content) {
+			if (!warned.insertRight) {
+				console.warn(
+					'magicString.insertRight(...) is deprecated. Use magicString.prependRight(...) instead',
+				); // eslint-disable-line no-console
+				warned.insertRight = true;
+			}
+
+			return this.prependRight(index, content);
+		}
+
+		move(start, end, index) {
+			if (index >= start && index <= end) throw new Error('Cannot move a selection inside itself');
+
+			this._split(start);
+			this._split(end);
+			this._split(index);
+
+			const first = this.byStart[start];
+			const last = this.byEnd[end];
+
+			const oldLeft = first.previous;
+			const oldRight = last.next;
+
+			const newRight = this.byStart[index];
+			if (!newRight && last === this.lastChunk) return this;
+			const newLeft = newRight ? newRight.previous : this.lastChunk;
+
+			if (oldLeft) oldLeft.next = oldRight;
+			if (oldRight) oldRight.previous = oldLeft;
+
+			if (newLeft) newLeft.next = first;
+			if (newRight) newRight.previous = last;
+
+			if (!first.previous) this.firstChunk = last.next;
+			if (!last.next) {
+				this.lastChunk = first.previous;
+				this.lastChunk.next = null;
+			}
+
+			first.previous = newLeft;
+			last.next = newRight || null;
+
+			if (!newLeft) this.firstChunk = first;
+			if (!newRight) this.lastChunk = last;
+			return this;
+		}
+
+		overwrite(start, end, content, options) {
+			options = options || {};
+			return this.update(start, end, content, { ...options, overwrite: !options.contentOnly });
+		}
+
+		update(start, end, content, options) {
+			if (typeof content !== 'string') throw new TypeError('replacement content must be a string');
+
+			if (this.original.length !== 0) {
+				while (start < 0) start += this.original.length;
+				while (end < 0) end += this.original.length;
+			}
+
+			if (end > this.original.length) throw new Error('end is out of bounds');
+			if (start === end)
+				throw new Error(
+					'Cannot overwrite a zero-length range – use appendLeft or prependRight instead',
+				);
+
+			this._split(start);
+			this._split(end);
+
+			if (options === true) {
+				if (!warned.storeName) {
+					console.warn(
+						'The final argument to magicString.overwrite(...) should be an options object. See https://github.com/rich-harris/magic-string',
+					); // eslint-disable-line no-console
+					warned.storeName = true;
+				}
+
+				options = { storeName: true };
+			}
+			const storeName = options !== undefined ? options.storeName : false;
+			const overwrite = options !== undefined ? options.overwrite : false;
+
+			if (storeName) {
+				const original = this.original.slice(start, end);
+				Object.defineProperty(this.storedNames, original, {
+					writable: true,
+					value: true,
+					enumerable: true,
+				});
+			}
+
+			const first = this.byStart[start];
+			const last = this.byEnd[end];
+
+			if (first) {
+				let chunk = first;
+				while (chunk !== last) {
+					if (chunk.next !== this.byStart[chunk.end]) {
+						throw new Error('Cannot overwrite across a split point');
+					}
+					chunk = chunk.next;
+					chunk.edit('', false);
+				}
+
+				first.edit(content, storeName, !overwrite);
+			} else {
+				// must be inserting at the end
+				const newChunk = new Chunk(start, end, '').edit(content, storeName);
+
+				// TODO last chunk in the array may not be the last chunk, if it's moved...
+				last.next = newChunk;
+				newChunk.previous = last;
+			}
+			return this;
+		}
+
+		prepend(content) {
+			if (typeof content !== 'string') throw new TypeError('outro content must be a string');
+
+			this.intro = content + this.intro;
+			return this;
+		}
+
+		prependLeft(index, content) {
+			if (typeof content !== 'string') throw new TypeError('inserted content must be a string');
+
+			this._split(index);
+
+			const chunk = this.byEnd[index];
+
+			if (chunk) {
+				chunk.prependLeft(content);
+			} else {
+				this.intro = content + this.intro;
+			}
+			return this;
+		}
+
+		prependRight(index, content) {
+			if (typeof content !== 'string') throw new TypeError('inserted content must be a string');
+
+			this._split(index);
+
+			const chunk = this.byStart[index];
+
+			if (chunk) {
+				chunk.prependRight(content);
+			} else {
+				this.outro = content + this.outro;
+			}
+			return this;
+		}
+
+		remove(start, end) {
+			if (this.original.length !== 0) {
+				while (start < 0) start += this.original.length;
+				while (end < 0) end += this.original.length;
+			}
+
+			if (start === end) return this;
+
+			if (start < 0 || end > this.original.length) throw new Error('Character is out of bounds');
+			if (start > end) throw new Error('end must be greater than start');
+
+			this._split(start);
+			this._split(end);
+
+			let chunk = this.byStart[start];
+
+			while (chunk) {
+				chunk.intro = '';
+				chunk.outro = '';
+				chunk.edit('');
+
+				chunk = end > chunk.end ? this.byStart[chunk.end] : null;
+			}
+			return this;
+		}
+
+		reset(start, end) {
+			if (this.original.length !== 0) {
+				while (start < 0) start += this.original.length;
+				while (end < 0) end += this.original.length;
+			}
+
+			if (start === end) return this;
+
+			if (start < 0 || end > this.original.length) throw new Error('Character is out of bounds');
+			if (start > end) throw new Error('end must be greater than start');
+
+			this._split(start);
+			this._split(end);
+
+			let chunk = this.byStart[start];
+
+			while (chunk) {
+				chunk.reset();
+
+				chunk = end > chunk.end ? this.byStart[chunk.end] : null;
+			}
+			return this;
+		}
+
+		lastChar() {
+			if (this.outro.length) return this.outro[this.outro.length - 1];
+			let chunk = this.lastChunk;
+			do {
+				if (chunk.outro.length) return chunk.outro[chunk.outro.length - 1];
+				if (chunk.content.length) return chunk.content[chunk.content.length - 1];
+				if (chunk.intro.length) return chunk.intro[chunk.intro.length - 1];
+			} while ((chunk = chunk.previous));
+			if (this.intro.length) return this.intro[this.intro.length - 1];
+			return '';
+		}
+
+		lastLine() {
+			let lineIndex = this.outro.lastIndexOf(n);
+			if (lineIndex !== -1) return this.outro.substr(lineIndex + 1);
+			let lineStr = this.outro;
+			let chunk = this.lastChunk;
+			do {
+				if (chunk.outro.length > 0) {
+					lineIndex = chunk.outro.lastIndexOf(n);
+					if (lineIndex !== -1) return chunk.outro.substr(lineIndex + 1) + lineStr;
+					lineStr = chunk.outro + lineStr;
+				}
+
+				if (chunk.content.length > 0) {
+					lineIndex = chunk.content.lastIndexOf(n);
+					if (lineIndex !== -1) return chunk.content.substr(lineIndex + 1) + lineStr;
+					lineStr = chunk.content + lineStr;
+				}
+
+				if (chunk.intro.length > 0) {
+					lineIndex = chunk.intro.lastIndexOf(n);
+					if (lineIndex !== -1) return chunk.intro.substr(lineIndex + 1) + lineStr;
+					lineStr = chunk.intro + lineStr;
+				}
+			} while ((chunk = chunk.previous));
+			lineIndex = this.intro.lastIndexOf(n);
+			if (lineIndex !== -1) return this.intro.substr(lineIndex + 1) + lineStr;
+			return this.intro + lineStr;
+		}
+
+		slice(start = 0, end = this.original.length) {
+			if (this.original.length !== 0) {
+				while (start < 0) start += this.original.length;
+				while (end < 0) end += this.original.length;
+			}
+
+			let result = '';
+
+			// find start chunk
+			let chunk = this.firstChunk;
+			while (chunk && (chunk.start > start || chunk.end <= start)) {
+				// found end chunk before start
+				if (chunk.start < end && chunk.end >= end) {
+					return result;
+				}
+
+				chunk = chunk.next;
+			}
+
+			if (chunk && chunk.edited && chunk.start !== start)
+				throw new Error(`Cannot use replaced character ${start} as slice start anchor.`);
+
+			const startChunk = chunk;
+			while (chunk) {
+				if (chunk.intro && (startChunk !== chunk || chunk.start === start)) {
+					result += chunk.intro;
+				}
+
+				const containsEnd = chunk.start < end && chunk.end >= end;
+				if (containsEnd && chunk.edited && chunk.end !== end)
+					throw new Error(`Cannot use replaced character ${end} as slice end anchor.`);
+
+				const sliceStart = startChunk === chunk ? start - chunk.start : 0;
+				const sliceEnd = containsEnd ? chunk.content.length + end - chunk.end : chunk.content.length;
+
+				result += chunk.content.slice(sliceStart, sliceEnd);
+
+				if (chunk.outro && (!containsEnd || chunk.end === end)) {
+					result += chunk.outro;
+				}
+
+				if (containsEnd) {
+					break;
+				}
+
+				chunk = chunk.next;
+			}
+
+			return result;
+		}
+
+		// TODO deprecate this? not really very useful
+		snip(start, end) {
+			const clone = this.clone();
+			clone.remove(0, start);
+			clone.remove(end, clone.original.length);
+
+			return clone;
+		}
+
+		_split(index) {
+			if (this.byStart[index] || this.byEnd[index]) return;
+
+			let chunk = this.lastSearchedChunk;
+			const searchForward = index > chunk.end;
+
+			while (chunk) {
+				if (chunk.contains(index)) return this._splitChunk(chunk, index);
+
+				chunk = searchForward ? this.byStart[chunk.end] : this.byEnd[chunk.start];
+			}
+		}
+
+		_splitChunk(chunk, index) {
+			if (chunk.edited && chunk.content.length) {
+				// zero-length edited chunks are a special case (overlapping replacements)
+				const loc = getLocator(this.original)(index);
+				throw new Error(
+					`Cannot split a chunk that has already been edited (${loc.line}:${loc.column} – "${chunk.original}")`,
+				);
+			}
+
+			const newChunk = chunk.split(index);
+
+			this.byEnd[index] = chunk;
+			this.byStart[index] = newChunk;
+			this.byEnd[newChunk.end] = newChunk;
+
+			if (chunk === this.lastChunk) this.lastChunk = newChunk;
+
+			this.lastSearchedChunk = chunk;
+			return true;
+		}
+
+		toString() {
+			let str = this.intro;
+
+			let chunk = this.firstChunk;
+			while (chunk) {
+				str += chunk.toString();
+				chunk = chunk.next;
+			}
+
+			return str + this.outro;
+		}
+
+		isEmpty() {
+			let chunk = this.firstChunk;
+			do {
+				if (
+					(chunk.intro.length && chunk.intro.trim()) ||
+					(chunk.content.length && chunk.content.trim()) ||
+					(chunk.outro.length && chunk.outro.trim())
+				)
+					return false;
+			} while ((chunk = chunk.next));
+			return true;
+		}
+
+		length() {
+			let chunk = this.firstChunk;
+			let length = 0;
+			do {
+				length += chunk.intro.length + chunk.content.length + chunk.outro.length;
+			} while ((chunk = chunk.next));
+			return length;
+		}
+
+		trimLines() {
+			return this.trim('[\\r\\n]');
+		}
+
+		trim(charType) {
+			return this.trimStart(charType).trimEnd(charType);
+		}
+
+		trimEndAborted(charType) {
+			const rx = new RegExp((charType || '\\s') + '+$');
+
+			this.outro = this.outro.replace(rx, '');
+			if (this.outro.length) return true;
+
+			let chunk = this.lastChunk;
+
+			do {
+				const end = chunk.end;
+				const aborted = chunk.trimEnd(rx);
+
+				// if chunk was trimmed, we have a new lastChunk
+				if (chunk.end !== end) {
+					if (this.lastChunk === chunk) {
+						this.lastChunk = chunk.next;
+					}
+
+					this.byEnd[chunk.end] = chunk;
+					this.byStart[chunk.next.start] = chunk.next;
+					this.byEnd[chunk.next.end] = chunk.next;
+				}
+
+				if (aborted) return true;
+				chunk = chunk.previous;
+			} while (chunk);
+
+			return false;
+		}
+
+		trimEnd(charType) {
+			this.trimEndAborted(charType);
+			return this;
+		}
+		trimStartAborted(charType) {
+			const rx = new RegExp('^' + (charType || '\\s') + '+');
+
+			this.intro = this.intro.replace(rx, '');
+			if (this.intro.length) return true;
+
+			let chunk = this.firstChunk;
+
+			do {
+				const end = chunk.end;
+				const aborted = chunk.trimStart(rx);
+
+				if (chunk.end !== end) {
+					// special case...
+					if (chunk === this.lastChunk) this.lastChunk = chunk.next;
+
+					this.byEnd[chunk.end] = chunk;
+					this.byStart[chunk.next.start] = chunk.next;
+					this.byEnd[chunk.next.end] = chunk.next;
+				}
+
+				if (aborted) return true;
+				chunk = chunk.next;
+			} while (chunk);
+
+			return false;
+		}
+
+		trimStart(charType) {
+			this.trimStartAborted(charType);
+			return this;
+		}
+
+		hasChanged() {
+			return this.original !== this.toString();
+		}
+
+		_replaceRegexp(searchValue, replacement) {
+			function getReplacement(match, str) {
+				if (typeof replacement === 'string') {
+					return replacement.replace(/\$(\$|&|\d+)/g, (_, i) => {
+						// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace#specifying_a_string_as_a_parameter
+						if (i === '$') return '$';
+						if (i === '&') return match[0];
+						const num = +i;
+						if (num < match.length) return match[+i];
+						return `$${i}`;
+					});
+				} else {
+					return replacement(...match, match.index, str, match.groups);
+				}
+			}
+			function matchAll(re, str) {
+				let match;
+				const matches = [];
+				while ((match = re.exec(str))) {
+					matches.push(match);
+				}
+				return matches;
+			}
+			if (searchValue.global) {
+				const matches = matchAll(searchValue, this.original);
+				matches.forEach((match) => {
+					if (match.index != null) {
+						const replacement = getReplacement(match, this.original);
+						if (replacement !== match[0]) {
+							this.overwrite(
+								match.index,
+								match.index + match[0].length,
+								replacement
+							);
+						}
+					}
+				});
+			} else {
+				const match = this.original.match(searchValue);
+				if (match && match.index != null) {
+					const replacement = getReplacement(match, this.original);
+					if (replacement !== match[0]) {
+						this.overwrite(
+							match.index,
+							match.index + match[0].length,
+							replacement
+						);
+					}
+				}
+			}
+			return this;
+		}
+
+		_replaceString(string, replacement) {
+			const { original } = this;
+			const index = original.indexOf(string);
+
+			if (index !== -1) {
+				this.overwrite(index, index + string.length, replacement);
+			}
+
+			return this;
+		}
+
+		replace(searchValue, replacement) {
+			if (typeof searchValue === 'string') {
+				return this._replaceString(searchValue, replacement);
+			}
+
+			return this._replaceRegexp(searchValue, replacement);
+		}
+
+		_replaceAllString(string, replacement) {
+			const { original } = this;
+			const stringLength = string.length;
+			for (
+				let index = original.indexOf(string);
+				index !== -1;
+				index = original.indexOf(string, index + stringLength)
+			) {
+				const previous = original.slice(index, index + stringLength);
+				if (previous !== replacement)
+					this.overwrite(index, index + stringLength, replacement);
+			}
+
+			return this;
+		}
+
+		replaceAll(searchValue, replacement) {
+			if (typeof searchValue === 'string') {
+				return this._replaceAllString(searchValue, replacement);
+			}
+
+			if (!searchValue.global) {
+				throw new TypeError(
+					'MagicString.prototype.replaceAll called with a non-global RegExp argument',
+				);
+			}
+
+			return this._replaceRegexp(searchValue, replacement);
+		}
+	}
+
+	const hasOwnProp = Object.prototype.hasOwnProperty;
+
+	class Bundle {
+		constructor(options = {}) {
+			this.intro = options.intro || '';
+			this.separator = options.separator !== undefined ? options.separator : '\n';
+			this.sources = [];
+			this.uniqueSources = [];
+			this.uniqueSourceIndexByFilename = {};
+		}
+
+		addSource(source) {
+			if (source instanceof MagicString) {
+				return this.addSource({
+					content: source,
+					filename: source.filename,
+					separator: this.separator,
+				});
+			}
+
+			if (!isObject(source) || !source.content) {
+				throw new Error(
+					'bundle.addSource() takes an object with a `content` property, which should be an instance of MagicString, and an optional `filename`',
+				);
+			}
+
+			['filename', 'ignoreList', 'indentExclusionRanges', 'separator'].forEach((option) => {
+				if (!hasOwnProp.call(source, option)) source[option] = source.content[option];
+			});
+
+			if (source.separator === undefined) {
+				// TODO there's a bunch of this sort of thing, needs cleaning up
+				source.separator = this.separator;
+			}
+
+			if (source.filename) {
+				if (!hasOwnProp.call(this.uniqueSourceIndexByFilename, source.filename)) {
+					this.uniqueSourceIndexByFilename[source.filename] = this.uniqueSources.length;
+					this.uniqueSources.push({ filename: source.filename, content: source.content.original });
+				} else {
+					const uniqueSource = this.uniqueSources[this.uniqueSourceIndexByFilename[source.filename]];
+					if (source.content.original !== uniqueSource.content) {
+						throw new Error(`Illegal source: same filename (${source.filename}), different contents`);
+					}
+				}
+			}
+
+			this.sources.push(source);
+			return this;
+		}
+
+		append(str, options) {
+			this.addSource({
+				content: new MagicString(str),
+				separator: (options && options.separator) || '',
+			});
+
+			return this;
+		}
+
+		clone() {
+			const bundle = new Bundle({
+				intro: this.intro,
+				separator: this.separator,
+			});
+
+			this.sources.forEach((source) => {
+				bundle.addSource({
+					filename: source.filename,
+					content: source.content.clone(),
+					separator: source.separator,
+				});
+			});
+
+			return bundle;
+		}
+
+		generateDecodedMap(options = {}) {
+			const names = [];
+			let x_google_ignoreList = undefined;
+			this.sources.forEach((source) => {
+				Object.keys(source.content.storedNames).forEach((name) => {
+					if (!~names.indexOf(name)) names.push(name);
+				});
+			});
+
+			const mappings = new Mappings(options.hires);
+
+			if (this.intro) {
+				mappings.advance(this.intro);
+			}
+
+			this.sources.forEach((source, i) => {
+				if (i > 0) {
+					mappings.advance(this.separator);
+				}
+
+				const sourceIndex = source.filename ? this.uniqueSourceIndexByFilename[source.filename] : -1;
+				const magicString = source.content;
+				const locate = getLocator(magicString.original);
+
+				if (magicString.intro) {
+					mappings.advance(magicString.intro);
+				}
+
+				magicString.firstChunk.eachNext((chunk) => {
+					const loc = locate(chunk.start);
+
+					if (chunk.intro.length) mappings.advance(chunk.intro);
+
+					if (source.filename) {
+						if (chunk.edited) {
+							mappings.addEdit(
+								sourceIndex,
+								chunk.content,
+								loc,
+								chunk.storeName ? names.indexOf(chunk.original) : -1,
+							);
+						} else {
+							mappings.addUneditedChunk(
+								sourceIndex,
+								chunk,
+								magicString.original,
+								loc,
+								magicString.sourcemapLocations,
+							);
+						}
+					} else {
+						mappings.advance(chunk.content);
+					}
+
+					if (chunk.outro.length) mappings.advance(chunk.outro);
+				});
+
+				if (magicString.outro) {
+					mappings.advance(magicString.outro);
+				}
+
+				if (source.ignoreList && sourceIndex !== -1) {
+					if (x_google_ignoreList === undefined) {
+						x_google_ignoreList = [];
+					}
+					x_google_ignoreList.push(sourceIndex);
+				}
+			});
+
+			return {
+				file: options.file ? options.file.split(/[/\\]/).pop() : undefined,
+				sources: this.uniqueSources.map((source) => {
+					return options.file ? getRelativePath(options.file, source.filename) : source.filename;
+				}),
+				sourcesContent: this.uniqueSources.map((source) => {
+					return options.includeContent ? source.content : null;
+				}),
+				names,
+				mappings: mappings.raw,
+				x_google_ignoreList,
+			};
+		}
+
+		generateMap(options) {
+			return new SourceMap(this.generateDecodedMap(options));
+		}
+
+		getIndentString() {
+			const indentStringCounts = {};
+
+			this.sources.forEach((source) => {
+				const indentStr = source.content._getRawIndentString();
+
+				if (indentStr === null) return;
+
+				if (!indentStringCounts[indentStr]) indentStringCounts[indentStr] = 0;
+				indentStringCounts[indentStr] += 1;
+			});
+
+			return (
+				Object.keys(indentStringCounts).sort((a, b) => {
+					return indentStringCounts[a] - indentStringCounts[b];
+				})[0] || '\t'
+			);
+		}
+
+		indent(indentStr) {
+			if (!arguments.length) {
+				indentStr = this.getIndentString();
+			}
+
+			if (indentStr === '') return this; // noop
+
+			let trailingNewline = !this.intro || this.intro.slice(-1) === '\n';
+
+			this.sources.forEach((source, i) => {
+				const separator = source.separator !== undefined ? source.separator : this.separator;
+				const indentStart = trailingNewline || (i > 0 && /\r?\n$/.test(separator));
+
+				source.content.indent(indentStr, {
+					exclude: source.indentExclusionRanges,
+					indentStart, //: trailingNewline || /\r?\n$/.test( separator )  //true///\r?\n/.test( separator )
+				});
+
+				trailingNewline = source.content.lastChar() === '\n';
+			});
+
+			if (this.intro) {
+				this.intro =
+					indentStr +
+					this.intro.replace(/^[^\n]/gm, (match, index) => {
+						return index > 0 ? indentStr + match : match;
+					});
+			}
+
+			return this;
+		}
+
+		prepend(str) {
+			this.intro = str + this.intro;
+			return this;
+		}
+
+		toString() {
+			const body = this.sources
+				.map((source, i) => {
+					const separator = source.separator !== undefined ? source.separator : this.separator;
+					const str = (i > 0 ? separator : '') + source.content.toString();
+
+					return str;
+				})
+				.join('');
+
+			return this.intro + body;
+		}
+
+		isEmpty() {
+			if (this.intro.length && this.intro.trim()) return false;
+			if (this.sources.some((source) => !source.content.isEmpty())) return false;
+			return true;
+		}
+
+		length() {
+			return this.sources.reduce(
+				(length, source) => length + source.content.length(),
+				this.intro.length,
+			);
+		}
+
+		trimLines() {
+			return this.trim('[\\r\\n]');
+		}
+
+		trim(charType) {
+			return this.trimStart(charType).trimEnd(charType);
+		}
+
+		trimStart(charType) {
+			const rx = new RegExp('^' + (charType || '\\s') + '+');
+			this.intro = this.intro.replace(rx, '');
+
+			if (!this.intro) {
+				let source;
+				let i = 0;
+
+				do {
+					source = this.sources[i++];
+					if (!source) {
+						break;
+					}
+				} while (!source.content.trimStartAborted(charType));
+			}
+
+			return this;
+		}
+
+		trimEnd(charType) {
+			const rx = new RegExp((charType || '\\s') + '+$');
+
+			let source;
+			let i = this.sources.length - 1;
+
+			do {
+				source = this.sources[i--];
+				if (!source) {
+					this.intro = this.intro.replace(rx, '');
+					break;
+				}
+			} while (!source.content.trimEndAborted(charType));
+
+			return this;
+		}
+	}
+
+	MagicString.Bundle = Bundle;
+	MagicString.SourceMap = SourceMap;
+	MagicString.default = MagicString; // work around TypeScript bug https://github.com/Rich-Harris/magic-string/pull/121
+
+	return MagicString;
+
+}));
+//# sourceMappingURL=magic-string.umd.js.map
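
The UMD build above exposes `replace`, `replaceAll`, and the `Bundle` class on the exported `MagicString` constructor. A minimal usage sketch of that API (illustrative only, not part of the vendored file; the input strings and filenames are invented):

    const MagicString = require('magic-string');

    // A string search replaces only the first occurrence; a non-global RegExp behaves the same way.
    const s = new MagicString('const answer = 40 + 2;');
    s.replace('40 + 2', '42');
    // replaceAll with a RegExp requires the global flag, otherwise a TypeError is thrown (see above).
    s.replaceAll(/answer/g, 'result');

    // Bundle joins several MagicString sources with the configured separator (default '\n')
    // and can emit a combined sourcemap over the unique source files.
    const bundle = new MagicString.Bundle();
    bundle.addSource({ filename: 'a.js', content: new MagicString('console.log(1);') });
    bundle.addSource({ filename: 'b.js', content: new MagicString('console.log(2);') });
    console.log(bundle.toString());
    const map = bundle.generateMap({ file: 'out.js', includeContent: true, hires: true });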

+ 63 - 0
node_modules/magic-string/package.json

@@ -0,0 +1,63 @@
+{
+  "name": "magic-string",
+  "version": "0.30.11",
+  "packageManager": "pnpm@9.6.0",
+  "description": "Modify strings, generate sourcemaps",
+  "keywords": [
+    "string",
+    "string manipulation",
+    "sourcemap",
+    "templating",
+    "transpilation"
+  ],
+  "repository": "https://github.com/rich-harris/magic-string",
+  "license": "MIT",
+  "author": "Rich Harris",
+  "main": "./dist/magic-string.cjs.js",
+  "module": "./dist/magic-string.es.mjs",
+  "jsnext:main": "./dist/magic-string.es.mjs",
+  "types": "./dist/magic-string.cjs.d.ts",
+  "exports": {
+    "./package.json": "./package.json",
+    ".": {
+      "import": "./dist/magic-string.es.mjs",
+      "require": "./dist/magic-string.cjs.js"
+    }
+  },
+  "files": [
+    "dist/*",
+    "index.d.ts",
+    "README.md"
+  ],
+  "scripts": {
+    "build": "rollup -c",
+    "changelog": "conventional-changelog -p angular -i CHANGELOG.md -s",
+    "format": "prettier --single-quote --print-width 100 --use-tabs --write src/*.js src/**/*.js",
+    "lint": "eslint src test && publint",
+    "lint:fix": "eslint src test --fix",
+    "prepare": "npm run build",
+    "prepublishOnly": "npm run lint && rm -rf dist && npm test",
+    "release": "bumpp -x \"npm run changelog\" --all --commit --tag --push && npm publish",
+    "pretest": "npm run build",
+    "test": "mocha",
+    "bench": "npm run build && node benchmark/index.mjs",
+    "watch": "rollup -cw"
+  },
+  "devDependencies": {
+    "@rollup/plugin-node-resolve": "^15.2.3",
+    "@rollup/plugin-replace": "^5.0.7",
+    "benchmark": "^2.1.4",
+    "bumpp": "^9.4.1",
+    "conventional-changelog-cli": "^3.0.0",
+    "eslint": "^8.57.0",
+    "mocha": "^10.7.0",
+    "prettier": "^3.3.3",
+    "publint": "^0.2.9",
+    "rollup": "^3.29.4",
+    "source-map-js": "^1.2.0",
+    "source-map-support": "^0.5.21"
+  },
+  "dependencies": {
+    "@jridgewell/sourcemap-codec": "^1.5.0"
+  }
+}
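
Given the `exports` map above, `import` resolves to `./dist/magic-string.es.mjs` and `require` resolves to `./dist/magic-string.cjs.js`; the UMD build shown earlier is shipped under `dist/*` but is not one of the declared entry points. A short consumer sketch (illustrative, not part of the package):

    // ESM consumer -> dist/magic-string.es.mjs
    import MagicString from 'magic-string';

    // CommonJS consumer -> dist/magic-string.cjs.js
    // const MagicString = require('magic-string');

    const s = new MagicString('export var x = 1;');
    s.overwrite(11, 12, 'answer'); // replaces the identifier `x`
    console.log(s.toString()); // export var answer = 1;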

Too many files were changed in this changeset, so some files are not shown.