Fix bin/publish: copy docs.dist from project root

Fix bin/publish: use correct .env path for rspade_system
Fix bin/publish script: prevent grep exit code 1 from terminating script

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
root
2025-10-21 02:08:33 +00:00
commit f6fac6c4bc
79758 changed files with 10547827 additions and 0 deletions

vendor/spatie/ignition/node_modules/micromark/dev/index.d.ts generated vendored Executable file

@@ -0,0 +1,14 @@
/**
* @param value Markdown to parse (`string` or `Buffer`).
* @param [encoding] Character encoding to understand `value` as when it's a `Buffer` (`string`, default: `'utf8'`).
* @param [options] Configuration
*/
export const micromark: ((
value: Value,
encoding: Encoding,
options?: Options
) => string) &
((value: Value, options?: Options) => string)
export type Options = import('micromark-util-types').Options
export type Value = import('micromark-util-types').Value
export type Encoding = import('micromark-util-types').Encoding
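
A minimal usage sketch of the two overloads declared above (assumes Node, where `Buffer` is a global):

```js
import {micromark} from 'micromark'

// Second overload: value plus options.
console.log(micromark('## Hi, *world*!'))
// -> '<h2>Hi, <em>world</em>!</h2>'

// First overload: a `Buffer` value with an explicit encoding, then options.
console.log(micromark(Buffer.from('*a*'), 'utf8', {}))
// -> '<p><em>a</em></p>'
```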

vendor/spatie/ignition/node_modules/micromark/dev/index.js generated vendored Executable file

@@ -0,0 +1,42 @@
/**
* @typedef {import('micromark-util-types').Options} Options
* @typedef {import('micromark-util-types').Value} Value
* @typedef {import('micromark-util-types').Encoding} Encoding
*/
import {compile} from './lib/compile.js'
import {parse} from './lib/parse.js'
import {postprocess} from './lib/postprocess.js'
import {preprocess} from './lib/preprocess.js'
/**
* @param value Markdown to parse (`string` or `Buffer`).
* @param [encoding] Character encoding to understand `value` as when it's a `Buffer` (`string`, default: `'utf8'`).
* @param [options] Configuration
*/
export const micromark =
/**
* @type {(
* ((value: Value, encoding: Encoding, options?: Options) => string) &
* ((value: Value, options?: Options) => string)
* )}
*/
(
/**
* @param {Value} value
* @param {Encoding} [encoding]
* @param {Options} [options]
*/
function (value, encoding, options) {
if (typeof encoding !== 'string') {
options = encoding
encoding = undefined
}
return compile(options)(
postprocess(
parse(options).document().write(preprocess()(value, encoding, true))
)
)
}
)
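
Because the doc comment in `lib/compile.js` below notes that the parts can be used separately (to build ASTs, CSTs, and so on), here is the same pipeline unrolled. A sketch only; the relative import paths assume a module sitting next to this `index.js`:

```js
import {compile} from './lib/compile.js'
import {parse} from './lib/parse.js'
import {postprocess} from './lib/postprocess.js'
import {preprocess} from './lib/preprocess.js'

// Tokenize to events first; these could feed an AST builder instead of the
// built-in HTML compiler.
const events = postprocess(
  parse({}).document().write(preprocess()('*hi*', undefined, true))
)
console.log(events.map(([kind, token]) => kind + ':' + token.type))
console.log(compile({})(events)) // -> '<p><em>hi</em></p>'
```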


@@ -0,0 +1,25 @@
/**
* @param {CompileOptions} [options]
* @returns {Compile}
*/
export function compile(
options?: import('micromark-util-types').CompileOptions | undefined
): Compile
export type Event = import('micromark-util-types').Event
export type CompileOptions = import('micromark-util-types').CompileOptions
export type CompileData = import('micromark-util-types').CompileData
export type CompileContext = import('micromark-util-types').CompileContext
export type Definition = import('micromark-util-types').Definition
export type Compile = import('micromark-util-types').Compile
export type Handle = import('micromark-util-types').Handle
export type HtmlExtension = import('micromark-util-types').HtmlExtension
export type NormalizedHtmlExtension =
import('micromark-util-types').NormalizedHtmlExtension
export type Media = {
image?: boolean | undefined
labelId?: string | undefined
label?: string | undefined
referenceId?: string | undefined
destination?: string | undefined
title?: string | undefined
}


@@ -0,0 +1,990 @@
/**
* While micromark is a lexer/tokenizer, the common case of going from markdown
* to html is currently built in as this module, even though the parts can be
* used separately to build ASTs, CSTs, or many other output formats.
*
* Having an HTML compiler built in is useful because it allows us to check for
* compliancy to CommonMark, the de facto norm of markdown, specified in roughly
* 600 input/output cases.
*
* This module has an interface that accepts lists of events instead of the
* whole at once, however, because markdown can't be truly streaming, we buffer
* events before processing and outputting the final result.
*/
/**
* @typedef {import('micromark-util-types').Event} Event
* @typedef {import('micromark-util-types').CompileOptions} CompileOptions
* @typedef {import('micromark-util-types').CompileData} CompileData
* @typedef {import('micromark-util-types').CompileContext} CompileContext
* @typedef {import('micromark-util-types').Definition} Definition
* @typedef {import('micromark-util-types').Compile} Compile
* @typedef {import('micromark-util-types').Handle} Handle
* @typedef {import('micromark-util-types').HtmlExtension} HtmlExtension
* @typedef {import('micromark-util-types').NormalizedHtmlExtension} NormalizedHtmlExtension
*/
/**
* @typedef Media
* @property {boolean} [image]
* @property {string} [labelId]
* @property {string} [label]
* @property {string} [referenceId]
* @property {string} [destination]
* @property {string} [title]
*/
import {ok as assert} from 'uvu/assert'
import {decodeNamedCharacterReference} from 'decode-named-character-reference'
import {combineHtmlExtensions} from 'micromark-util-combine-extensions'
import {push} from 'micromark-util-chunked'
import {decodeNumericCharacterReference} from 'micromark-util-decode-numeric-character-reference'
import {encode as _encode} from 'micromark-util-encode'
import {normalizeIdentifier} from 'micromark-util-normalize-identifier'
import {sanitizeUri} from 'micromark-util-sanitize-uri'
import {codes} from 'micromark-util-symbol/codes.js'
import {constants} from 'micromark-util-symbol/constants.js'
import {types} from 'micromark-util-symbol/types.js'
const hasOwnProperty = {}.hasOwnProperty
/**
* These two are allowlists of safe protocols for full URLs in respectively the
* `href` (on `<a>`) and `src` (on `<img>`) attributes.
* They are based on what is allowed on GitHub,
* <https://github.com/syntax-tree/hast-util-sanitize/blob/9275b21/lib/github.json#L31>
*/
const protocolHref = /^(https?|ircs?|mailto|xmpp)$/i
const protocolSrc = /^https?$/i
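// For example (not executed here): with these defaults,
// `micromark('[x](javascript:alert(1))')` yields `<p><a href="">x</a></p>`,
// because `sanitizeUri` drops destinations whose protocol is not on the
// allowlist; `options.allowDangerousProtocol` disables the check.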
/**
* @param {CompileOptions} [options]
* @returns {Compile}
*/
export function compile(options = {}) {
/**
* Tags is needed because according to markdown, links and emphasis and
whatnot can exist in images, however, as HTML doesn't allow content in
* images, the tags are ignored in the `alt` attribute, but the content
* remains.
*
* @type {boolean|undefined}
*/
let tags = true
/**
* An object to track identifiers to media (URLs and titles) defined with
* definitions.
*
* @type {Record<string, Definition>}
*/
const definitions = {}
/**
* A lot of the handlers need to capture some of the output data, modify it
* somehow, and then deal with it.
* We do that by tracking a stack of buffers, that can be opened (with
* `buffer`) and closed (with `resume`) to access them.
*
* @type {Array<Array<string>>}
*/
const buffers = [[]]
/**
* As we can have links in images and the other way around, where the deepest
* ones are closed first, we need to track which one we're in.
*
* @type {Array<Media>}
*/
const mediaStack = []
/**
* Same as `mediaStack` for tightness, which is specific to lists.
* We need to track if we're currently in a tight or loose container.
*
* @type {Array<boolean>}
*/
const tightStack = []
/** @type {HtmlExtension} */
const defaultHandlers = {
enter: {
blockQuote: onenterblockquote,
codeFenced: onentercodefenced,
codeFencedFenceInfo: buffer,
codeFencedFenceMeta: buffer,
codeIndented: onentercodeindented,
codeText: onentercodetext,
content: onentercontent,
definition: onenterdefinition,
definitionDestinationString: onenterdefinitiondestinationstring,
definitionLabelString: buffer,
definitionTitleString: buffer,
emphasis: onenteremphasis,
htmlFlow: onenterhtmlflow,
htmlText: onenterhtml,
image: onenterimage,
label: buffer,
link: onenterlink,
listItemMarker: onenterlistitemmarker,
listItemValue: onenterlistitemvalue,
listOrdered: onenterlistordered,
listUnordered: onenterlistunordered,
paragraph: onenterparagraph,
reference: buffer,
resource: onenterresource,
resourceDestinationString: onenterresourcedestinationstring,
resourceTitleString: buffer,
setextHeading: onentersetextheading,
strong: onenterstrong
},
exit: {
atxHeading: onexitatxheading,
atxHeadingSequence: onexitatxheadingsequence,
autolinkEmail: onexitautolinkemail,
autolinkProtocol: onexitautolinkprotocol,
blockQuote: onexitblockquote,
characterEscapeValue: onexitdata,
characterReferenceMarkerHexadecimal: onexitcharacterreferencemarker,
characterReferenceMarkerNumeric: onexitcharacterreferencemarker,
characterReferenceValue: onexitcharacterreferencevalue,
codeFenced: onexitflowcode,
codeFencedFence: onexitcodefencedfence,
codeFencedFenceInfo: onexitcodefencedfenceinfo,
codeFencedFenceMeta: resume,
codeFlowValue: onexitcodeflowvalue,
codeIndented: onexitflowcode,
codeText: onexitcodetext,
codeTextData: onexitdata,
data: onexitdata,
definition: onexitdefinition,
definitionDestinationString: onexitdefinitiondestinationstring,
definitionLabelString: onexitdefinitionlabelstring,
definitionTitleString: onexitdefinitiontitlestring,
emphasis: onexitemphasis,
hardBreakEscape: onexithardbreak,
hardBreakTrailing: onexithardbreak,
htmlFlow: onexithtml,
htmlFlowData: onexitdata,
htmlText: onexithtml,
htmlTextData: onexitdata,
image: onexitmedia,
label: onexitlabel,
labelText: onexitlabeltext,
lineEnding: onexitlineending,
link: onexitmedia,
listOrdered: onexitlistordered,
listUnordered: onexitlistunordered,
paragraph: onexitparagraph,
reference: resume,
referenceString: onexitreferencestring,
resource: resume,
resourceDestinationString: onexitresourcedestinationstring,
resourceTitleString: onexitresourcetitlestring,
setextHeading: onexitsetextheading,
setextHeadingLineSequence: onexitsetextheadinglinesequence,
setextHeadingText: onexitsetextheadingtext,
strong: onexitstrong,
thematicBreak: onexitthematicbreak
}
}
/**
* Combine the HTML extensions with the default handlers.
* An HTML extension is an object whose fields are either `enter` or `exit`
* (reflecting whether a token is entered or exited).
* The values at such objects are names of tokens mapping to handlers.
* Handlers are called, respectively when a token is opened or closed, with
* that token, and a context as `this`.
*
* @type {NormalizedHtmlExtension}
*/
// @ts-expect-error `defaultHandlers` is full, so the result will be too.
const handlers = combineHtmlExtensions(
[defaultHandlers].concat(options.htmlExtensions || [])
)
/**
* Handlers do often need to keep track of some state.
* That state is provided here as a key-value store (an object).
*
* @type {CompileData}
*/
const data = {
tightStack,
definitions
}
/**
* The context for handlers references a couple of useful functions.
* In handlers from extensions, those can be accessed at `this`.
* For the handlers here, they can be accessed directly.
*
* @type {Omit<CompileContext, 'sliceSerialize'>}
*/
const context = {
lineEndingIfNeeded,
options,
encode,
raw,
tag,
buffer,
resume,
setData,
getData
}
/**
* Generally, micromark copies line endings (`'\r'`, `'\n'`, `'\r\n'`) in the
* markdown document over to the compiled HTML.
* In some cases, such as `> a`, CommonMark requires that extra line endings
* are added: `<blockquote>\n<p>a</p>\n</blockquote>`.
* This variable holds the default line ending when given (or `undefined`),
* and in the latter case will be updated to the first found line ending if
* there is one.
*/
let lineEndingStyle = options.defaultLineEnding
// Return the function that handles a slice of events.
return compile
/**
* Deal w/ a slice of events.
* Return either the empty string if there's nothing of note to return, or the
* result when done.
*
* @param {Array<Event>} events
* @returns {string}
*/
function compile(events) {
let index = -1
let start = 0
/** @type {Array<number>} */
const listStack = []
// As definitions can come after references, we need to figure out the media
// (urls and titles) defined by them before handling the references.
// So, we do sort of what HTML does: put metadata at the start (in head), and
// then put content after (`body`).
/** @type {Array<Event>} */
let head = []
/** @type {Array<Event>} */
let body = []
while (++index < events.length) {
// Figure out the line ending style used in the document.
if (
!lineEndingStyle &&
(events[index][1].type === types.lineEnding ||
events[index][1].type === types.lineEndingBlank)
) {
// @ts-expect-error Hush, it's a line ending.
lineEndingStyle = events[index][2].sliceSerialize(events[index][1])
}
// Preprocess lists to infer whether the list is loose or not.
if (
events[index][1].type === types.listOrdered ||
events[index][1].type === types.listUnordered
) {
if (events[index][0] === 'enter') {
listStack.push(index)
} else {
prepareList(events.slice(listStack.pop(), index))
}
}
// Move definitions to the front.
if (events[index][1].type === types.definition) {
if (events[index][0] === 'enter') {
body = push(body, events.slice(start, index))
start = index
} else {
head = push(head, events.slice(start, index + 1))
start = index + 1
}
}
}
head = push(head, body)
head = push(head, events.slice(start))
index = -1
const result = head
// Handle the start of the document, if defined.
if (handlers.enter.null) {
handlers.enter.null.call(context)
}
// Handle all events.
while (++index < events.length) {
const handler = handlers[result[index][0]]
if (hasOwnProperty.call(handler, result[index][1].type)) {
handler[result[index][1].type].call(
Object.assign(
{sliceSerialize: result[index][2].sliceSerialize},
context
),
result[index][1]
)
}
}
// Handle the end of the document, if defined.
if (handlers.exit.null) {
handlers.exit.null.call(context)
}
return buffers[0].join('')
}
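// For example (not executed): in `[x]\n\n[x]: /url` the reference precedes
// its definition in source order; hoisting the definition's events into
// `head` above lets the reference resolve, so the input compiles to
// `<p><a href="/url">x</a></p>`.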
/**
* Figure out whether lists are loose or not.
*
* @param {Array<Event>} slice
* @returns {void}
*/
function prepareList(slice) {
const length = slice.length
let index = 0 // Skip open.
let containerBalance = 0
let loose = false
/** @type {boolean|undefined} */
let atMarker
while (++index < length) {
const event = slice[index]
if (event[1]._container) {
atMarker = undefined
if (event[0] === 'enter') {
containerBalance++
} else {
containerBalance--
}
} else
switch (event[1].type) {
case types.listItemPrefix: {
if (event[0] === 'exit') {
atMarker = true
}
break
}
case types.linePrefix: {
// Ignore
break
}
case types.lineEndingBlank: {
if (event[0] === 'enter' && !containerBalance) {
if (atMarker) {
atMarker = undefined
} else {
loose = true
}
}
break
}
default: {
atMarker = undefined
}
}
}
slice[0][1]._loose = loose
}
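// For example (not executed): `* a\n* b` has no blank line, so `_loose`
// stays false and the items render tight (`<li>a</li>`); in `* a\n\n* b`
// the blank line marks the list loose and each item wraps its content in
// `<p>`.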
/**
* @type {CompileContext['setData']}
* @param [value]
*/
function setData(key, value) {
data[key] = value
}
/**
* @type {CompileContext['getData']}
* @template {string} K
* @param {K} key
* @returns {CompileData[K]}
*/
function getData(key) {
return data[key]
}
/** @type {CompileContext['buffer']} */
function buffer() {
buffers.push([])
}
/** @type {CompileContext['resume']} */
function resume() {
const buf = buffers.pop()
assert(buf !== undefined, 'Cannot resume w/o buffer')
return buf.join('')
}
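// For illustration (not executed): entering a label calls `buffer()`, the
// nested handlers write into that new deepest buffer through `raw`/`tag`,
// and `onexitlabel` calls `resume()` to pop the captured string, while the
// outer buffer (the final output) stays untouched until the media is
// serialized.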
/** @type {CompileContext['tag']} */
function tag(value) {
if (!tags) return
setData('lastWasTag', true)
buffers[buffers.length - 1].push(value)
}
/** @type {CompileContext['raw']} */
function raw(value) {
setData('lastWasTag')
buffers[buffers.length - 1].push(value)
}
/**
* Output an extra line ending.
*
* @returns {void}
*/
function lineEnding() {
raw(lineEndingStyle || '\n')
}
/** @type {CompileContext['lineEndingIfNeeded']} */
function lineEndingIfNeeded() {
const buffer = buffers[buffers.length - 1]
const slice = buffer[buffer.length - 1]
const previous = slice ? slice.charCodeAt(slice.length - 1) : codes.eof
if (
previous === codes.lf ||
previous === codes.cr ||
previous === codes.eof
) {
return
}
lineEnding()
}
/** @type {CompileContext['encode']} */
function encode(value) {
return getData('ignoreEncode') ? value : _encode(value)
}
//
// Handlers.
//
/** @type {Handle} */
function onenterlistordered(token) {
tightStack.push(!token._loose)
lineEndingIfNeeded()
tag('<ol')
setData('expectFirstItem', true)
}
/** @type {Handle} */
function onenterlistunordered(token) {
tightStack.push(!token._loose)
lineEndingIfNeeded()
tag('<ul')
setData('expectFirstItem', true)
}
/** @type {Handle} */
function onenterlistitemvalue(token) {
if (getData('expectFirstItem')) {
const value = Number.parseInt(
this.sliceSerialize(token),
constants.numericBaseDecimal
)
if (value !== 1) {
tag(' start="' + encode(String(value)) + '"')
}
}
}
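// For example (not executed): `3. a` renders `<ol start="3">`, while `1. a`
// omits the `start` attribute.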
/** @type {Handle} */
function onenterlistitemmarker() {
if (getData('expectFirstItem')) {
tag('>')
} else {
onexitlistitem()
}
lineEndingIfNeeded()
tag('<li>')
setData('expectFirstItem')
// “Hack” to prevent a line ending from showing up if the item is empty.
setData('lastWasTag')
}
/** @type {Handle} */
function onexitlistordered() {
onexitlistitem()
tightStack.pop()
lineEnding()
tag('</ol>')
}
/** @type {Handle} */
function onexitlistunordered() {
onexitlistitem()
tightStack.pop()
lineEnding()
tag('</ul>')
}
/** @type {Handle} */
function onexitlistitem() {
if (getData('lastWasTag') && !getData('slurpAllLineEndings')) {
lineEndingIfNeeded()
}
tag('</li>')
setData('slurpAllLineEndings')
}
/** @type {Handle} */
function onenterblockquote() {
tightStack.push(false)
lineEndingIfNeeded()
tag('<blockquote>')
}
/** @type {Handle} */
function onexitblockquote() {
tightStack.pop()
lineEndingIfNeeded()
tag('</blockquote>')
setData('slurpAllLineEndings')
}
/** @type {Handle} */
function onenterparagraph() {
if (!tightStack[tightStack.length - 1]) {
lineEndingIfNeeded()
tag('<p>')
}
setData('slurpAllLineEndings')
}
/** @type {Handle} */
function onexitparagraph() {
if (tightStack[tightStack.length - 1]) {
setData('slurpAllLineEndings', true)
} else {
tag('</p>')
}
}
/** @type {Handle} */
function onentercodefenced() {
lineEndingIfNeeded()
tag('<pre><code')
setData('fencesCount', 0)
}
/** @type {Handle} */
function onexitcodefencedfenceinfo() {
const value = resume()
tag(' class="language-' + value + '"')
}
/** @type {Handle} */
function onexitcodefencedfence() {
const count = getData('fencesCount') || 0
if (!count) {
tag('>')
setData('slurpOneLineEnding', true)
}
setData('fencesCount', count + 1)
}
/** @type {Handle} */
function onentercodeindented() {
lineEndingIfNeeded()
tag('<pre><code>')
}
/** @type {Handle} */
function onexitflowcode() {
const count = getData('fencesCount')
// One special case is if we are inside a container, and the fenced code was
// not closed (meaning it runs to the end).
// In that case, the following line ending, is considered *outside* the
// fenced code and block quote by micromark, but CM wants to treat that
// ending as part of the code.
if (
count !== undefined &&
count < 2 &&
// @ts-expect-error `tightStack` is always set.
data.tightStack.length > 0 &&
!getData('lastWasTag')
) {
lineEnding()
}
// But in most cases, it's simpler: when we've seen some data, emit an extra
// line ending when needed.
if (getData('flowCodeSeenData')) {
lineEndingIfNeeded()
}
tag('</code></pre>')
if (count !== undefined && count < 2) lineEndingIfNeeded()
setData('flowCodeSeenData')
setData('fencesCount')
setData('slurpOneLineEnding')
}
/** @type {Handle} */
function onenterimage() {
mediaStack.push({image: true})
tags = undefined // Disallow tags.
}
/** @type {Handle} */
function onenterlink() {
mediaStack.push({})
}
/** @type {Handle} */
function onexitlabeltext(token) {
mediaStack[mediaStack.length - 1].labelId = this.sliceSerialize(token)
}
/** @type {Handle} */
function onexitlabel() {
mediaStack[mediaStack.length - 1].label = resume()
}
/** @type {Handle} */
function onexitreferencestring(token) {
mediaStack[mediaStack.length - 1].referenceId = this.sliceSerialize(token)
}
/** @type {Handle} */
function onenterresource() {
buffer() // We can have line endings in the resource, ignore them.
mediaStack[mediaStack.length - 1].destination = ''
}
/** @type {Handle} */
function onenterresourcedestinationstring() {
buffer()
// Ignore encoding the result, as we'll first percent encode the url and
// encode manually after.
setData('ignoreEncode', true)
}
/** @type {Handle} */
function onexitresourcedestinationstring() {
mediaStack[mediaStack.length - 1].destination = resume()
setData('ignoreEncode')
}
/** @type {Handle} */
function onexitresourcetitlestring() {
mediaStack[mediaStack.length - 1].title = resume()
}
/** @type {Handle} */
function onexitmedia() {
let index = mediaStack.length - 1 // Skip current.
const media = mediaStack[index]
const id = media.referenceId || media.labelId
assert(id !== undefined, 'media should have `referenceId` or `labelId`')
assert(media.label !== undefined, 'media should have `label`')
const context =
media.destination === undefined
? definitions[normalizeIdentifier(id)]
: media
tags = true
while (index--) {
if (mediaStack[index].image) {
tags = undefined
break
}
}
if (media.image) {
tag(
'<img src="' +
sanitizeUri(
context.destination,
options.allowDangerousProtocol ? undefined : protocolSrc
) +
'" alt="'
)
raw(media.label)
tag('"')
} else {
tag(
'<a href="' +
sanitizeUri(
context.destination,
options.allowDangerousProtocol ? undefined : protocolHref
) +
'"'
)
}
tag(context.title ? ' title="' + context.title + '"' : '')
if (media.image) {
tag(' />')
} else {
tag('>')
raw(media.label)
tag('</a>')
}
mediaStack.pop()
}
/** @type {Handle} */
function onenterdefinition() {
buffer()
mediaStack.push({})
}
/** @type {Handle} */
function onexitdefinitionlabelstring(token) {
// Discard label, use the source content instead.
resume()
mediaStack[mediaStack.length - 1].labelId = this.sliceSerialize(token)
}
/** @type {Handle} */
function onenterdefinitiondestinationstring() {
buffer()
setData('ignoreEncode', true)
}
/** @type {Handle} */
function onexitdefinitiondestinationstring() {
mediaStack[mediaStack.length - 1].destination = resume()
setData('ignoreEncode')
}
/** @type {Handle} */
function onexitdefinitiontitlestring() {
mediaStack[mediaStack.length - 1].title = resume()
}
/** @type {Handle} */
function onexitdefinition() {
const media = mediaStack[mediaStack.length - 1]
assert(media.labelId !== undefined, 'media should have `labelId`')
const id = normalizeIdentifier(media.labelId)
resume()
if (!hasOwnProperty.call(definitions, id)) {
definitions[id] = mediaStack[mediaStack.length - 1]
}
mediaStack.pop()
}
/** @type {Handle} */
function onentercontent() {
setData('slurpAllLineEndings', true)
}
/** @type {Handle} */
function onexitatxheadingsequence(token) {
// Exit for further sequences.
if (getData('headingRank')) return
setData('headingRank', this.sliceSerialize(token).length)
lineEndingIfNeeded()
tag('<h' + getData('headingRank') + '>')
}
/** @type {Handle} */
function onentersetextheading() {
buffer()
setData('slurpAllLineEndings')
}
/** @type {Handle} */
function onexitsetextheadingtext() {
setData('slurpAllLineEndings', true)
}
/** @type {Handle} */
function onexitatxheading() {
tag('</h' + getData('headingRank') + '>')
setData('headingRank')
}
/** @type {Handle} */
function onexitsetextheadinglinesequence(token) {
setData(
'headingRank',
this.sliceSerialize(token).charCodeAt(0) === codes.equalsTo ? 1 : 2
)
}
/** @type {Handle} */
function onexitsetextheading() {
const value = resume()
lineEndingIfNeeded()
tag('<h' + getData('headingRank') + '>')
raw(value)
tag('</h' + getData('headingRank') + '>')
setData('slurpAllLineEndings')
setData('headingRank')
}
/** @type {Handle} */
function onexitdata(token) {
raw(encode(this.sliceSerialize(token)))
}
/** @type {Handle} */
function onexitlineending(token) {
if (getData('slurpAllLineEndings')) {
return
}
if (getData('slurpOneLineEnding')) {
setData('slurpOneLineEnding')
return
}
if (getData('inCodeText')) {
raw(' ')
return
}
raw(encode(this.sliceSerialize(token)))
}
/** @type {Handle} */
function onexitcodeflowvalue(token) {
raw(encode(this.sliceSerialize(token)))
setData('flowCodeSeenData', true)
}
/** @type {Handle} */
function onexithardbreak() {
tag('<br />')
}
/** @type {Handle} */
function onenterhtmlflow() {
lineEndingIfNeeded()
onenterhtml()
}
/** @type {Handle} */
function onexithtml() {
setData('ignoreEncode')
}
/** @type {Handle} */
function onenterhtml() {
if (options.allowDangerousHtml) {
setData('ignoreEncode', true)
}
}
/** @type {Handle} */
function onenteremphasis() {
tag('<em>')
}
/** @type {Handle} */
function onenterstrong() {
tag('<strong>')
}
/** @type {Handle} */
function onentercodetext() {
setData('inCodeText', true)
tag('<code>')
}
/** @type {Handle} */
function onexitcodetext() {
setData('inCodeText')
tag('</code>')
}
/** @type {Handle} */
function onexitemphasis() {
tag('</em>')
}
/** @type {Handle} */
function onexitstrong() {
tag('</strong>')
}
/** @type {Handle} */
function onexitthematicbreak() {
lineEndingIfNeeded()
tag('<hr />')
}
/** @type {Handle} */
function onexitcharacterreferencemarker(token) {
setData('characterReferenceType', token.type)
}
/** @type {Handle} */
function onexitcharacterreferencevalue(token) {
let value = this.sliceSerialize(token)
// @ts-expect-error `decodeNamedCharacterReference` can return false for
// invalid named character references, but everything we've tokenized is
// valid.
value = getData('characterReferenceType')
? decodeNumericCharacterReference(
value,
getData('characterReferenceType') ===
types.characterReferenceMarkerNumeric
? constants.numericBaseDecimal
: constants.numericBaseHexadecimal
)
: decodeNamedCharacterReference(value)
raw(encode(value))
setData('characterReferenceType')
}
/** @type {Handle} */
function onexitautolinkprotocol(token) {
const uri = this.sliceSerialize(token)
tag(
'<a href="' +
sanitizeUri(
uri,
options.allowDangerousProtocol ? undefined : protocolHref
) +
'">'
)
raw(encode(uri))
tag('</a>')
}
/** @type {Handle} */
function onexitautolinkemail(token) {
const uri = this.sliceSerialize(token)
tag('<a href="' + sanitizeUri('mailto:' + uri) + '">')
raw(encode(uri))
tag('</a>')
}
}


@@ -0,0 +1,19 @@
/** @type {Extension['document']} */
export const document: Extension['document']
/** @type {Extension['contentInitial']} */
export const contentInitial: Extension['contentInitial']
/** @type {Extension['flowInitial']} */
export const flowInitial: Extension['flowInitial']
/** @type {Extension['flow']} */
export const flow: Extension['flow']
/** @type {Extension['string']} */
export const string: Extension['string']
/** @type {Extension['text']} */
export const text: Extension['text']
/** @type {Extension['insideSpan']} */
export const insideSpan: Extension['insideSpan']
/** @type {Extension['attentionMarkers']} */
export const attentionMarkers: Extension['attentionMarkers']
/** @type {Extension['disable']} */
export const disable: Extension['disable']
export type Extension = import('micromark-util-types').Extension


@@ -0,0 +1,101 @@
/**
* @typedef {import('micromark-util-types').Extension} Extension
*/
import {
attention,
autolink,
blockQuote,
characterEscape,
characterReference,
codeFenced,
codeIndented,
codeText,
definition,
hardBreakEscape,
headingAtx,
htmlFlow,
htmlText,
labelEnd,
labelStartImage,
labelStartLink,
lineEnding,
list,
setextUnderline,
thematicBreak
} from 'micromark-core-commonmark'
import {codes} from 'micromark-util-symbol/codes.js'
import {resolver as resolveText} from './initialize/text.js'
/** @type {Extension['document']} */
export const document = {
[codes.asterisk]: list,
[codes.plusSign]: list,
[codes.dash]: list,
[codes.digit0]: list,
[codes.digit1]: list,
[codes.digit2]: list,
[codes.digit3]: list,
[codes.digit4]: list,
[codes.digit5]: list,
[codes.digit6]: list,
[codes.digit7]: list,
[codes.digit8]: list,
[codes.digit9]: list,
[codes.greaterThan]: blockQuote
}
/** @type {Extension['contentInitial']} */
export const contentInitial = {
[codes.leftSquareBracket]: definition
}
/** @type {Extension['flowInitial']} */
export const flowInitial = {
[codes.horizontalTab]: codeIndented,
[codes.virtualSpace]: codeIndented,
[codes.space]: codeIndented
}
/** @type {Extension['flow']} */
export const flow = {
[codes.numberSign]: headingAtx,
[codes.asterisk]: thematicBreak,
[codes.dash]: [setextUnderline, thematicBreak],
[codes.lessThan]: htmlFlow,
[codes.equalsTo]: setextUnderline,
[codes.underscore]: thematicBreak,
[codes.graveAccent]: codeFenced,
[codes.tilde]: codeFenced
}
/** @type {Extension['string']} */
export const string = {
[codes.ampersand]: characterReference,
[codes.backslash]: characterEscape
}
/** @type {Extension['text']} */
export const text = {
[codes.carriageReturn]: lineEnding,
[codes.lineFeed]: lineEnding,
[codes.carriageReturnLineFeed]: lineEnding,
[codes.exclamationMark]: labelStartImage,
[codes.ampersand]: characterReference,
[codes.asterisk]: attention,
[codes.lessThan]: [autolink, htmlText],
[codes.leftSquareBracket]: labelStartLink,
[codes.backslash]: [hardBreakEscape, characterEscape],
[codes.rightSquareBracket]: labelEnd,
[codes.underscore]: attention,
[codes.graveAccent]: codeText
}
/** @type {Extension['insideSpan']} */
export const insideSpan = {null: [attention, resolveText]}
/** @type {Extension['attentionMarkers']} */
export const attentionMarkers = {null: [codes.asterisk, codes.underscore]}
/** @type {Extension['disable']} */
export const disable = {null: []}
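
How these records are consulted is easiest to see in miniature. The sketch below uses hypothetical names and mirrors `handleMapOfConstructs` in the tokenizer further down: an entry is one construct or a list, normalized to candidates tried in order, and `null` keys apply to every code:

```js
const dash = 45 // character code for `-`
const flowLike = {
  [dash]: [{name: 'setextUnderline'}, {name: 'thematicBreak'}]
}

function candidates(map, code) {
  const def = code !== null && map[code]
  const all = code !== null && map.null
  return [
    ...(Array.isArray(def) ? def : def ? [def] : []),
    ...(Array.isArray(all) ? all : all ? [all] : [])
  ]
}

console.log(candidates(flowLike, dash).map((d) => d.name))
// -> ['setextUnderline', 'thematicBreak']
```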


@@ -0,0 +1,40 @@
/**
* Create a tokenizer.
* Tokenizers deal with one type of data (e.g., containers, flow, text).
* The parser is the object dealing with it all.
* `initialize` works like other constructs, except that only its `tokenize`
* function is used, in which case it doesn't receive an `ok` or `nok`.
* `from` can be given to set the point before the first character, although
* when further lines are indented, they must be set with `defineSkip`.
*
* @param {ParseContext} parser
* @param {InitialConstruct} initialize
* @param {Omit<Point, '_index'|'_bufferIndex'>} [from]
* @returns {TokenizeContext}
*/
export function createTokenizer(
parser: ParseContext,
initialize: InitialConstruct,
from?:
| Omit<import('micromark-util-types').Point, '_index' | '_bufferIndex'>
| undefined
): TokenizeContext
export type Code = import('micromark-util-types').Code
export type Chunk = import('micromark-util-types').Chunk
export type Point = import('micromark-util-types').Point
export type Token = import('micromark-util-types').Token
export type Effects = import('micromark-util-types').Effects
export type State = import('micromark-util-types').State
export type Construct = import('micromark-util-types').Construct
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type ConstructRecord = import('micromark-util-types').ConstructRecord
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type ParseContext = import('micromark-util-types').ParseContext
export type Info = {
restore: () => void
from: number
}
/**
* Handle a successful run.
*/
export type ReturnHandle = (construct: Construct, info: Info) => void


@@ -0,0 +1,654 @@
/**
* @typedef {import('micromark-util-types').Code} Code
* @typedef {import('micromark-util-types').Chunk} Chunk
* @typedef {import('micromark-util-types').Point} Point
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').Effects} Effects
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').ConstructRecord} ConstructRecord
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').ParseContext} ParseContext
*/
/**
* @typedef Info
* @property {() => void} restore
* @property {number} from
*
* @callback ReturnHandle
* Handle a successful run.
* @param {Construct} construct
* @param {Info} info
* @returns {void}
*/
import {ok as assert} from 'uvu/assert'
import createDebug from 'debug'
import {markdownLineEnding} from 'micromark-util-character'
import {push, splice} from 'micromark-util-chunked'
import {resolveAll} from 'micromark-util-resolve-all'
import {codes} from 'micromark-util-symbol/codes.js'
import {values} from 'micromark-util-symbol/values.js'
const debug = createDebug('micromark')
/**
* Create a tokenizer.
* Tokenizers deal with one type of data (e.g., containers, flow, text).
* The parser is the object dealing with it all.
* `initialize` works like other constructs, except that only its `tokenize`
* function is used, in which case it doesn't receive an `ok` or `nok`.
* `from` can be given to set the point before the first character, although
* when further lines are indented, they must be set with `defineSkip`.
*
* @param {ParseContext} parser
* @param {InitialConstruct} initialize
* @param {Omit<Point, '_index'|'_bufferIndex'>} [from]
* @returns {TokenizeContext}
*/
export function createTokenizer(parser, initialize, from) {
/** @type {Point} */
let point = Object.assign(
from ? Object.assign({}, from) : {line: 1, column: 1, offset: 0},
{_index: 0, _bufferIndex: -1}
)
/** @type {Record<string, number>} */
const columnStart = {}
/** @type {Array<Construct>} */
const resolveAllConstructs = []
/** @type {Array<Chunk>} */
let chunks = []
/** @type {Array<Token>} */
let stack = []
/** @type {boolean|undefined} */
let consumed = true
/**
* Tools used for tokenizing.
*
* @type {Effects}
*/
const effects = {
consume,
enter,
exit,
attempt: constructFactory(onsuccessfulconstruct),
check: constructFactory(onsuccessfulcheck),
interrupt: constructFactory(onsuccessfulcheck, {interrupt: true})
}
/**
* State and tools for resolving and serializing.
*
* @type {TokenizeContext}
*/
const context = {
previous: codes.eof,
code: codes.eof,
containerState: {},
events: [],
parser,
sliceStream,
sliceSerialize,
now,
defineSkip,
write
}
/**
* The state function.
*
* @type {State|void}
*/
let state = initialize.tokenize.call(context, effects)
/**
* Track which character we expect to be consumed, to catch bugs.
*
* @type {Code}
*/
let expectedCode
if (initialize.resolveAll) {
resolveAllConstructs.push(initialize)
}
return context
/** @type {TokenizeContext['write']} */
function write(slice) {
chunks = push(chunks, slice)
main()
// Exit if we're not done, resolve might change stuff.
if (chunks[chunks.length - 1] !== codes.eof) {
return []
}
addResult(initialize, 0)
// Otherwise, resolve, and exit.
context.events = resolveAll(resolveAllConstructs, context.events, context)
return context.events
}
//
// Tools.
//
/** @type {TokenizeContext['sliceSerialize']} */
function sliceSerialize(token, expandTabs) {
return serializeChunks(sliceStream(token), expandTabs)
}
/** @type {TokenizeContext['sliceStream']} */
function sliceStream(token) {
return sliceChunks(chunks, token)
}
/** @type {TokenizeContext['now']} */
function now() {
return Object.assign({}, point)
}
/** @type {TokenizeContext['defineSkip']} */
function defineSkip(value) {
columnStart[value.line] = value.column
accountForPotentialSkip()
debug('position: define skip: `%j`', point)
}
//
// State management.
//
/**
* Main loop (note that `_index` and `_bufferIndex` in `point` are modified by
* `consume`).
* Here is where we walk through the chunks, which either include strings of
* several characters, or numerical character codes.
* The reason to do this in a loop instead of a call is so the stack can
* drain.
*
* @returns {void}
*/
function main() {
/** @type {number} */
let chunkIndex
while (point._index < chunks.length) {
const chunk = chunks[point._index]
// If we're in a buffer chunk, loop through it.
if (typeof chunk === 'string') {
chunkIndex = point._index
if (point._bufferIndex < 0) {
point._bufferIndex = 0
}
while (
point._index === chunkIndex &&
point._bufferIndex < chunk.length
) {
go(chunk.charCodeAt(point._bufferIndex))
}
} else {
go(chunk)
}
}
}
/**
* Deal with one code.
*
* @param {Code} code
* @returns {void}
*/
function go(code) {
assert(consumed === true, 'expected character to be consumed')
consumed = undefined
debug('main: passing `%s` to %s', code, state && state.name)
expectedCode = code
assert(typeof state === 'function', 'expected state')
state = state(code)
}
/** @type {Effects['consume']} */
function consume(code) {
assert(code === expectedCode, 'expected given code to equal expected code')
debug('consume: `%s`', code)
assert(
consumed === undefined,
'expected code to not have been consumed: this might be because `return x(code)` instead of `return x` was used'
)
assert(
code === null
? context.events.length === 0 ||
context.events[context.events.length - 1][0] === 'exit'
: context.events[context.events.length - 1][0] === 'enter',
'expected last token to be open'
)
if (markdownLineEnding(code)) {
point.line++
point.column = 1
point.offset += code === codes.carriageReturnLineFeed ? 2 : 1
accountForPotentialSkip()
debug('position: after eol: `%j`', point)
} else if (code !== codes.virtualSpace) {
point.column++
point.offset++
}
// Not in a string chunk.
if (point._bufferIndex < 0) {
point._index++
} else {
point._bufferIndex++
// At end of string chunk.
// @ts-expect-error Points w/ non-negative `_bufferIndex` reference
// strings.
if (point._bufferIndex === chunks[point._index].length) {
point._bufferIndex = -1
point._index++
}
}
// Expose the previous character.
context.previous = code
// Mark as consumed.
consumed = true
}
/** @type {Effects['enter']} */
function enter(type, fields) {
/** @type {Token} */
// @ts-expect-error Patch instead of assign required fields to help GC.
const token = fields || {}
token.type = type
token.start = now()
assert(typeof type === 'string', 'expected string type')
assert(type.length > 0, 'expected non-empty string')
debug('enter: `%s`', type)
context.events.push(['enter', token, context])
stack.push(token)
return token
}
/** @type {Effects['exit']} */
function exit(type) {
assert(typeof type === 'string', 'expected string type')
assert(type.length > 0, 'expected non-empty string')
const token = stack.pop()
assert(token, 'cannot close w/o open tokens')
token.end = now()
assert(type === token.type, 'expected exit token to match current token')
assert(
!(
token.start._index === token.end._index &&
token.start._bufferIndex === token.end._bufferIndex
),
'expected non-empty token (`' + type + '`)'
)
debug('exit: `%s`', token.type)
context.events.push(['exit', token, context])
return token
}
/**
* Use results.
*
* @type {ReturnHandle}
*/
function onsuccessfulconstruct(construct, info) {
addResult(construct, info.from)
}
/**
* Discard results.
*
* @type {ReturnHandle}
*/
function onsuccessfulcheck(_, info) {
info.restore()
}
/**
* Factory to attempt/check/interrupt.
*
* @param {ReturnHandle} onreturn
* @param {Record<string, unknown>} [fields]
*/
function constructFactory(onreturn, fields) {
return hook
/**
* Handle either an object mapping codes to constructs, a list of
* constructs, or a single construct.
*
* @param {Construct|Array<Construct>|ConstructRecord} constructs
* @param {State} returnState
* @param {State} [bogusState]
* @returns {State}
*/
function hook(constructs, returnState, bogusState) {
/** @type {Array<Construct>} */
let listOfConstructs
/** @type {number} */
let constructIndex
/** @type {Construct} */
let currentConstruct
/** @type {Info} */
let info
return Array.isArray(constructs)
? /* c8 ignore next 1 */
handleListOfConstructs(constructs)
: 'tokenize' in constructs
? // @ts-expect-error Looks like a construct.
handleListOfConstructs([constructs])
: handleMapOfConstructs(constructs)
/**
* Handle a list of constructs.
*
* @param {ConstructRecord} map
* @returns {State}
*/
function handleMapOfConstructs(map) {
return start
/** @type {State} */
function start(code) {
const def = code !== null && map[code]
const all = code !== null && map.null
const list = [
// To do: add more extension tests.
/* c8 ignore next 2 */
...(Array.isArray(def) ? def : def ? [def] : []),
...(Array.isArray(all) ? all : all ? [all] : [])
]
return handleListOfConstructs(list)(code)
}
}
/**
* Handle a list of constructs.
*
* @param {Array<Construct>} list
* @returns {State}
*/
function handleListOfConstructs(list) {
listOfConstructs = list
constructIndex = 0
if (list.length === 0) {
assert(bogusState, 'expected `bogusState` to be given')
return bogusState
}
return handleConstruct(list[constructIndex])
}
/**
* Handle a single construct.
*
* @param {Construct} construct
* @returns {State}
*/
function handleConstruct(construct) {
return start
/** @type {State} */
function start(code) {
// To do: not needed to store if there is no bogus state, probably?
// Currently doesn't work because `inspect` in document does a check
// w/o a bogus, which doesn't make sense. But it does seem to help perf
// by not storing.
info = store()
currentConstruct = construct
if (!construct.partial) {
context.currentConstruct = construct
}
if (
construct.name &&
context.parser.constructs.disable.null.includes(construct.name)
) {
return nok(code)
}
return construct.tokenize.call(
// If we do have fields, create an object w/ `context` as its
// prototype.
// This allows a “live binding”, which is needed for `interrupt`.
fields ? Object.assign(Object.create(context), fields) : context,
effects,
ok,
nok
)(code)
}
}
/** @type {State} */
function ok(code) {
assert(code === expectedCode, 'expected code')
consumed = true
onreturn(currentConstruct, info)
return returnState
}
/** @type {State} */
function nok(code) {
assert(code === expectedCode, 'expected code')
consumed = true
info.restore()
if (++constructIndex < listOfConstructs.length) {
return handleConstruct(listOfConstructs[constructIndex])
}
return bogusState
}
}
}
/**
* @param {Construct} construct
* @param {number} from
* @returns {void}
*/
function addResult(construct, from) {
if (construct.resolveAll && !resolveAllConstructs.includes(construct)) {
resolveAllConstructs.push(construct)
}
if (construct.resolve) {
splice(
context.events,
from,
context.events.length - from,
construct.resolve(context.events.slice(from), context)
)
}
if (construct.resolveTo) {
context.events = construct.resolveTo(context.events, context)
}
assert(
construct.partial ||
context.events.length === 0 ||
context.events[context.events.length - 1][0] === 'exit',
'expected last token to end'
)
}
/**
* Store state.
*
* @returns {Info}
*/
function store() {
const startPoint = now()
const startPrevious = context.previous
const startCurrentConstruct = context.currentConstruct
const startEventsIndex = context.events.length
const startStack = Array.from(stack)
return {restore, from: startEventsIndex}
/**
* Restore state.
*
* @returns {void}
*/
function restore() {
point = startPoint
context.previous = startPrevious
context.currentConstruct = startCurrentConstruct
context.events.length = startEventsIndex
stack = startStack
accountForPotentialSkip()
debug('position: restore: `%j`', point)
}
}
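// For illustration (not executed): `attempt` snapshots state with `store`
// before running a construct; when the construct fails, `nok` calls
// `info.restore()`, rewinding `point`, `previous`, `currentConstruct`,
// `events`, and the token stack before the next candidate is tried.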
/**
* Move the current point a bit forward in the line when it's on a column
* skip.
*
* @returns {void}
*/
function accountForPotentialSkip() {
if (point.line in columnStart && point.column < 2) {
point.column = columnStart[point.line]
point.offset += columnStart[point.line] - 1
}
}
}
/**
* Get the chunks from a slice of chunks in the range of a token.
*
* @param {Array<Chunk>} chunks
* @param {Pick<Token, 'start'|'end'>} token
* @returns {Array<Chunk>}
*/
function sliceChunks(chunks, token) {
const startIndex = token.start._index
const startBufferIndex = token.start._bufferIndex
const endIndex = token.end._index
const endBufferIndex = token.end._bufferIndex
/** @type {Array<Chunk>} */
let view
if (startIndex === endIndex) {
assert(endBufferIndex > -1, 'expected non-negative end buffer index')
assert(startBufferIndex > -1, 'expected non-negative start buffer index')
// @ts-expect-error `_bufferIndex` is used on string chunks.
view = [chunks[startIndex].slice(startBufferIndex, endBufferIndex)]
} else {
view = chunks.slice(startIndex, endIndex)
if (startBufferIndex > -1) {
// @ts-expect-error `_bufferIndex` is used on string chunks.
view[0] = view[0].slice(startBufferIndex)
}
if (endBufferIndex > 0) {
// @ts-expect-error `_bufferIndex` is used on string chunks.
view.push(chunks[endIndex].slice(0, endBufferIndex))
}
}
return view
}
/**
* Get the string value of a slice of chunks.
*
* @param {Array<Chunk>} chunks
* @param {boolean} [expandTabs=false]
* @returns {string}
*/
function serializeChunks(chunks, expandTabs) {
let index = -1
/** @type {Array<string>} */
const result = []
/** @type {boolean|undefined} */
let atTab
while (++index < chunks.length) {
const chunk = chunks[index]
/** @type {string} */
let value
if (typeof chunk === 'string') {
value = chunk
} else
switch (chunk) {
case codes.carriageReturn: {
value = values.cr
break
}
case codes.lineFeed: {
value = values.lf
break
}
case codes.carriageReturnLineFeed: {
value = values.cr + values.lf
break
}
case codes.horizontalTab: {
value = expandTabs ? values.space : values.ht
break
}
case codes.virtualSpace: {
if (!expandTabs && atTab) continue
value = values.space
break
}
default: {
assert(typeof chunk === 'number', 'expected number')
// Currently only replacement character.
value = String.fromCharCode(chunk)
}
}
atTab = chunk === codes.horizontalTab
result.push(value)
}
return result.join('')
}
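
A standalone sketch of the chunk model that `sliceChunks` works over: chunks are either strings (slices of the input) or numeric character codes (here `-4` is assumed to be the line-feed code from `micromark-util-symbol`), and a point addresses them with `_index` (which chunk) plus `_bufferIndex` (offset inside a string chunk):

```js
const chunks = ['alpha', -4, 'bravo']
const token = {
  start: {_index: 0, _bufferIndex: 2},
  end: {_index: 2, _bufferIndex: 3}
}

// The same three cases `sliceChunks` handles: trim the first string chunk,
// keep whole middle chunks, trim the final string chunk.
const view = chunks.slice(token.start._index, token.end._index)
view[0] = view[0].slice(token.start._bufferIndex)
view.push(chunks[token.end._index].slice(0, token.end._bufferIndex))

console.log(view) // -> ['pha', -4, 'bra']
```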


@@ -0,0 +1,6 @@
/** @type {InitialConstruct} */
export const content: InitialConstruct
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type Initializer = import('micromark-util-types').Initializer
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State


@@ -0,0 +1,93 @@
/**
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'
import {constants} from 'micromark-util-symbol/constants.js'
import {types} from 'micromark-util-symbol/types.js'
/** @type {InitialConstruct} */
export const content = {tokenize: initializeContent}
/** @type {Initializer} */
function initializeContent(effects) {
const contentStart = effects.attempt(
this.parser.constructs.contentInitial,
afterContentStartConstruct,
paragraphInitial
)
/** @type {Token} */
let previous
return contentStart
/** @type {State} */
function afterContentStartConstruct(code) {
assert(
code === codes.eof || markdownLineEnding(code),
'expected eol or eof'
)
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
return factorySpace(effects, contentStart, types.linePrefix)
}
/** @type {State} */
function paragraphInitial(code) {
assert(
code !== codes.eof && !markdownLineEnding(code),
'expected anything other than a line ending or EOF'
)
effects.enter(types.paragraph)
return lineStart(code)
}
/** @type {State} */
function lineStart(code) {
const token = effects.enter(types.chunkText, {
contentType: constants.contentTypeText,
previous
})
if (previous) {
previous.next = token
}
previous = token
return data(code)
}
/** @type {State} */
function data(code) {
if (code === codes.eof) {
effects.exit(types.chunkText)
effects.exit(types.paragraph)
effects.consume(code)
return
}
if (markdownLineEnding(code)) {
effects.consume(code)
effects.exit(types.chunkText)
return lineStart
}
// Data.
effects.consume(code)
return data
}
}
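
From the outside the chunk linking above is invisible; the lines of a paragraph are retokenized as one text stream and the line ending is kept (a quick check):

```js
import {micromark} from 'micromark'

// 'a' and 'b' become two linked `chunkText` tokens internally, but compile
// to a single paragraph:
console.log(micromark('a\nb')) // -> '<p>a\nb</p>'
```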


@@ -0,0 +1,12 @@
/** @type {InitialConstruct} */
export const document: InitialConstruct
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type Initializer = import('micromark-util-types').Initializer
export type Construct = import('micromark-util-types').Construct
export type TokenizeContext = import('micromark-util-types').TokenizeContext
export type Tokenizer = import('micromark-util-types').Tokenizer
export type Token = import('micromark-util-types').Token
export type State = import('micromark-util-types').State
export type Point = import('micromark-util-types').Point
export type StackState = Record<string, unknown>
export type StackItem = [Construct, StackState]


@@ -0,0 +1,422 @@
/**
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').TokenizeContext} TokenizeContext
* @typedef {import('micromark-util-types').Tokenizer} Tokenizer
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Point} Point
*/
/**
* @typedef {Record<string, unknown>} StackState
* @typedef {[Construct, StackState]} StackItem
*/
import {ok as assert} from 'uvu/assert'
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'
import {constants} from 'micromark-util-symbol/constants.js'
import {types} from 'micromark-util-symbol/types.js'
import {splice} from 'micromark-util-chunked'
/** @type {InitialConstruct} */
export const document = {tokenize: initializeDocument}
/** @type {Construct} */
const containerConstruct = {tokenize: tokenizeContainer}
/** @type {Initializer} */
function initializeDocument(effects) {
const self = this
/** @type {Array<StackItem>} */
const stack = []
let continued = 0
/** @type {TokenizeContext|undefined} */
let childFlow
/** @type {Token|undefined} */
let childToken
/** @type {number} */
let lineStartOffset
return start
/** @type {State} */
function start(code) {
// First we iterate through the open blocks, starting with the root
// document, and descending through last children down to the last open
// block.
// Each block imposes a condition that the line must satisfy if the block is
// to remain open.
// For example, a block quote requires a `>` character.
// A paragraph requires a non-blank line.
// In this phase we may match all or just some of the open blocks.
// But we cannot close unmatched blocks yet, because we may have a lazy
// continuation line.
if (continued < stack.length) {
const item = stack[continued]
self.containerState = item[1]
assert(
item[0].continuation,
'expected `continuation` to be defined on container construct'
)
return effects.attempt(
item[0].continuation,
documentContinue,
checkNewContainers
)(code)
}
// Done.
return checkNewContainers(code)
}
/** @type {State} */
function documentContinue(code) {
assert(
self.containerState,
'expected `containerState` to be defined after continuation'
)
continued++
// Note: this field is called `_closeFlow` but it also closes containers.
// Perhaps a good idea to rename it but it's already used in the wild by
// extensions.
if (self.containerState._closeFlow) {
self.containerState._closeFlow = undefined
if (childFlow) {
closeFlow()
}
// Note: this algorithm for moving events around is similar to the
// algorithm when dealing with lazy lines in `writeToChild`.
const indexBeforeExits = self.events.length
let indexBeforeFlow = indexBeforeExits
/** @type {Point|undefined} */
let point
// Find the flow chunk.
while (indexBeforeFlow--) {
if (
self.events[indexBeforeFlow][0] === 'exit' &&
self.events[indexBeforeFlow][1].type === types.chunkFlow
) {
point = self.events[indexBeforeFlow][1].end
break
}
}
assert(point, 'could not find previous flow chunk')
exitContainers(continued)
// Fix positions.
let index = indexBeforeExits
while (index < self.events.length) {
self.events[index][1].end = Object.assign({}, point)
index++
}
// Inject the exits earlier (they're still also at the end).
splice(
self.events,
indexBeforeFlow + 1,
0,
self.events.slice(indexBeforeExits)
)
// Discard the duplicate exits.
self.events.length = index
return checkNewContainers(code)
}
return start(code)
}
/** @type {State} */
function checkNewContainers(code) {
// Next, after consuming the continuation markers for existing blocks, we
// look for new block starts (e.g. `>` for a block quote).
// If we encounter a new block start, we close any blocks unmatched in
// step 1 before creating the new block as a child of the last matched
// block.
if (continued === stack.length) {
// No need to `check` whether there's a container, or `exitContainers`
// would be moot.
// We can instead immediately `attempt` to parse one.
if (!childFlow) {
return documentContinued(code)
}
// If we have concrete content, such as block HTML or fenced code,
// we can't have containers “pierce” into them, so we can immediately
// start.
if (childFlow.currentConstruct && childFlow.currentConstruct.concrete) {
return flowStart(code)
}
// If we do have flow, it could still be a blank line,
// but we'd be interrupting it w/ a new container if there's a current
// construct.
self.interrupt = Boolean(
childFlow.currentConstruct && !childFlow._gfmTableDynamicInterruptHack
)
}
// Check if there is a new container.
self.containerState = {}
return effects.check(
containerConstruct,
thereIsANewContainer,
thereIsNoNewContainer
)(code)
}
/** @type {State} */
function thereIsANewContainer(code) {
if (childFlow) closeFlow()
exitContainers(continued)
return documentContinued(code)
}
/** @type {State} */
function thereIsNoNewContainer(code) {
self.parser.lazy[self.now().line] = continued !== stack.length
lineStartOffset = self.now().offset
return flowStart(code)
}
/** @type {State} */
function documentContinued(code) {
// Try new containers.
self.containerState = {}
return effects.attempt(
containerConstruct,
containerContinue,
flowStart
)(code)
}
/** @type {State} */
function containerContinue(code) {
assert(
self.currentConstruct,
'expected `currentConstruct` to be defined on tokenizer'
)
assert(
self.containerState,
'expected `containerState` to be defined on tokenizer'
)
continued++
stack.push([self.currentConstruct, self.containerState])
// Try another.
return documentContinued(code)
}
/** @type {State} */
function flowStart(code) {
if (code === codes.eof) {
if (childFlow) closeFlow()
exitContainers(0)
effects.consume(code)
return
}
childFlow = childFlow || self.parser.flow(self.now())
effects.enter(types.chunkFlow, {
contentType: constants.contentTypeFlow,
previous: childToken,
_tokenizer: childFlow
})
return flowContinue(code)
}
/** @type {State} */
function flowContinue(code) {
if (code === codes.eof) {
writeToChild(effects.exit(types.chunkFlow), true)
exitContainers(0)
effects.consume(code)
return
}
if (markdownLineEnding(code)) {
effects.consume(code)
writeToChild(effects.exit(types.chunkFlow))
// Get ready for the next line.
continued = 0
self.interrupt = undefined
return start
}
effects.consume(code)
return flowContinue
}
/**
* @param {Token} token
* @param {boolean} [eof]
* @returns {void}
*/
function writeToChild(token, eof) {
assert(childFlow, 'expected `childFlow` to be defined when continuing')
const stream = self.sliceStream(token)
if (eof) stream.push(null)
token.previous = childToken
if (childToken) childToken.next = token
childToken = token
childFlow.defineSkip(token.start)
childFlow.write(stream)
// Alright, so we just added a lazy line:
//
// ```markdown
// > a
// b.
//
// Or:
//
// > ~~~c
// d
//
// Or:
//
// > | e |
// f
// ```
//
// The construct in the second example (fenced code) does not accept lazy
// lines, so it marked itself as done at the end of its first line, and
// then the content construct parses `d`.
// Most constructs in markdown match on the first line: if the first line
// forms a construct, a non-lazy line can't “unmake” it.
//
// The construct in the third example is potentially a GFM table, and
// those are *weird*.
// It *could* be a table, from the first line, if the following line
// matches a condition.
// In this case, that second line is lazy, which “unmakes” the first line
// and turns the whole into one content block.
//
// We've now parsed the non-lazy and the lazy line, and can figure out
// whether the lazy line started a new flow block.
// If it did, we exit the current containers between the two flow blocks.
if (self.parser.lazy[token.start.line]) {
let index = childFlow.events.length
while (index--) {
if (
// The token starts before the line ending…
childFlow.events[index][1].start.offset < lineStartOffset &&
// …and either is not ended yet…
(!childFlow.events[index][1].end ||
// …or ends after it.
childFlow.events[index][1].end.offset > lineStartOffset)
) {
// Exit: there's still something open, which means it's a lazy line
// part of something.
return
}
}
// Note: this algorithm for moving events around is similar to the
// algorithm when closing flow in `documentContinue`.
const indexBeforeExits = self.events.length
let indexBeforeFlow = indexBeforeExits
/** @type {boolean|undefined} */
let seen
/** @type {Point|undefined} */
let point
// Find the previous chunk (the one before the lazy line).
while (indexBeforeFlow--) {
if (
self.events[indexBeforeFlow][0] === 'exit' &&
self.events[indexBeforeFlow][1].type === types.chunkFlow
) {
if (seen) {
point = self.events[indexBeforeFlow][1].end
break
}
seen = true
}
}
assert(point, 'could not find previous flow chunk')
exitContainers(continued)
// Fix positions.
index = indexBeforeExits
while (index < self.events.length) {
self.events[index][1].end = Object.assign({}, point)
index++
}
// Inject the exits earlier (theyre still also at the end).
splice(
self.events,
indexBeforeFlow + 1,
0,
self.events.slice(indexBeforeExits)
)
// Discard the duplicate exits.
self.events.length = index
}
}
/**
* @param {number} size
* @returns {void}
*/
function exitContainers(size) {
let index = stack.length
// Exit open containers.
while (index-- > size) {
const entry = stack[index]
self.containerState = entry[1]
assert(
entry[0].exit,
'expected `exit` to be defined on container construct'
)
entry[0].exit.call(self, effects)
}
stack.length = size
}
function closeFlow() {
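// Finalize the child flow tokenizer with an EOF and drop it; a new one is
// created for the next flow chunk (see `flowStart`).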
assert(
self.containerState,
'expected `containerState` to be defined when closing flow'
)
assert(childFlow, 'expected `childFlow` to be defined when closing it')
childFlow.write([codes.eof])
childToken = undefined
childFlow = undefined
self.containerState._closeFlow = undefined
}
}
/** @type {Tokenizer} */
function tokenizeContainer(effects, ok, nok) {
return factorySpace(
effects,
effects.attempt(this.parser.constructs.document, ok, nok),
types.linePrefix,
this.parser.constructs.disable.null.includes('codeIndented')
? undefined
: constants.tabSize
)
}
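
A quick way to see the lazy-line logic above in action is through the top-level API; a sketch (outputs are the CommonMark-mandated ones, quoted from the spec rather than run against this exact vendored copy):

```js
import {micromark} from 'micromark'

// A lazy paragraph line: `b` continues the paragraph inside the block quote.
micromark('> a\nb')
// → '<blockquote>\n<p>a\nb</p>\n</blockquote>'

// Fenced code does not accept lazy lines, so `d` starts new flow outside.
micromark('> ~~~c\nd')
// → '<blockquote>\n<pre><code class="language-c"></code></pre>\n</blockquote>\n<p>d</p>'
```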

View File

@@ -0,0 +1,5 @@
/** @type {InitialConstruct} */
export const flow: InitialConstruct
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type Initializer = import('micromark-util-types').Initializer
export type State = import('micromark-util-types').State

View File

@@ -0,0 +1,79 @@
/**
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').State} State
*/
import {ok as assert} from 'uvu/assert'
import {blankLine, content} from 'micromark-core-commonmark'
import {factorySpace} from 'micromark-factory-space'
import {markdownLineEnding} from 'micromark-util-character'
import {codes} from 'micromark-util-symbol/codes.js'
import {types} from 'micromark-util-symbol/types.js'
/** @type {InitialConstruct} */
export const flow = {tokenize: initializeFlow}
/** @type {Initializer} */
function initializeFlow(effects) {
const self = this
const initial = effects.attempt(
// Try to parse a blank line.
blankLine,
atBlankEnding,
// Try to parse initial flow (essentially, only code).
effects.attempt(
this.parser.constructs.flowInitial,
afterConstruct,
factorySpace(
effects,
effects.attempt(
this.parser.constructs.flow,
afterConstruct,
effects.attempt(content, afterConstruct)
),
types.linePrefix
)
)
)
return initial
/** @type {State} */
function atBlankEnding(code) {
assert(
code === codes.eof || markdownLineEnding(code),
'expected eol or eof'
)
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.lineEndingBlank)
effects.consume(code)
effects.exit(types.lineEndingBlank)
self.currentConstruct = undefined
return initial
}
/** @type {State} */
function afterConstruct(code) {
assert(
code === codes.eof || markdownLineEnding(code),
'expected eol or eof'
)
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.lineEnding)
effects.consume(code)
effects.exit(types.lineEnding)
self.currentConstruct = undefined
return initial
}
}
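
The nesting of attempts above encodes flow precedence: blank lines win, then constructs that must see the leading whitespace themselves (`flowInitial`, essentially indented code), then, once up to a line prefix is stripped, the regular flow constructs, with `content` as the fallback. A small sketch of the observable effect:

```js
import {micromark} from 'micromark'

micromark('    a') // → '<pre><code>a\n</code></pre>' (4 spaces: indented code)
micromark('   a')  // → '<p>a</p>' (3 spaces: prefix stripped, content fallback)
```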

View File

@@ -0,0 +1,11 @@
export namespace resolver {
const resolveAll: import('micromark-util-types').Resolver
}
export const string: import('micromark-util-types').InitialConstruct
export const text: import('micromark-util-types').InitialConstruct
export type Resolver = import('micromark-util-types').Resolver
export type Initializer = import('micromark-util-types').Initializer
export type Construct = import('micromark-util-types').Construct
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type State = import('micromark-util-types').State
export type Code = import('micromark-util-types').Code

View File

@@ -0,0 +1,225 @@
/**
* @typedef {import('micromark-util-types').Resolver} Resolver
* @typedef {import('micromark-util-types').Initializer} Initializer
* @typedef {import('micromark-util-types').Construct} Construct
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').State} State
* @typedef {import('micromark-util-types').Code} Code
*/
import {codes} from 'micromark-util-symbol/codes.js'
import {constants} from 'micromark-util-symbol/constants.js'
import {types} from 'micromark-util-symbol/types.js'
export const resolver = {resolveAll: createResolver()}
export const string = initializeFactory('string')
export const text = initializeFactory('text')
/**
* @param {'string'|'text'} field
* @returns {InitialConstruct}
*/
function initializeFactory(field) {
return {
tokenize: initializeText,
resolveAll: createResolver(
field === 'text' ? resolveAllLineSuffixes : undefined
)
}
/** @type {Initializer} */
function initializeText(effects) {
const self = this
const constructs = this.parser.constructs[field]
const text = effects.attempt(constructs, start, notText)
return start
/** @type {State} */
function start(code) {
return atBreak(code) ? text(code) : notText(code)
}
/** @type {State} */
function notText(code) {
if (code === codes.eof) {
effects.consume(code)
return
}
effects.enter(types.data)
effects.consume(code)
return data
}
/** @type {State} */
function data(code) {
if (atBreak(code)) {
effects.exit(types.data)
return text(code)
}
// Data.
effects.consume(code)
return data
}
/**
* @param {Code} code
* @returns {boolean}
*/
function atBreak(code) {
if (code === codes.eof) {
return true
}
const list = constructs[code]
let index = -1
if (list) {
while (++index < list.length) {
const item = list[index]
if (!item.previous || item.previous.call(self, self.previous)) {
return true
}
}
}
return false
}
}
}
/**
* @param {Resolver} [extraResolver]
* @returns {Resolver}
*/
function createResolver(extraResolver) {
return resolveAllText
/** @type {Resolver} */
function resolveAllText(events, context) {
let index = -1
/** @type {number|undefined} */
let enter
// A rather boring computation (to merge adjacent `data` events) which
// improves micromark performance by 29%.
while (++index <= events.length) {
if (enter === undefined) {
if (events[index] && events[index][1].type === types.data) {
enter = index
index++
}
} else if (!events[index] || events[index][1].type !== types.data) {
// Dont do anything if there is only one data token.
if (index !== enter + 2) {
events[enter][1].end = events[index - 1][1].end
events.splice(enter + 2, index - enter - 2)
index = enter + 2
}
enter = undefined
}
}
return extraResolver ? extraResolver(events, context) : events
}
}
/**
 * A rather ugly set of instructions which again looks at chunks in the input
 * stream.
 * The reason to do this here is that it is *much* faster to parse in
 * reverse, and that we cant hook into `null` to split the line suffix
 * before an EOF.
 * To do: figure out if we can make this into a clean utility, or even in
 * core, as it will be useful for GFMs literal autolink extension (and maybe
 * even tables?).
 *
 * @type {Resolver}
 */
function resolveAllLineSuffixes(events, context) {
let eventIndex = 0 // Skip first.
while (++eventIndex <= events.length) {
if (
(eventIndex === events.length ||
events[eventIndex][1].type === types.lineEnding) &&
events[eventIndex - 1][1].type === types.data
) {
const data = events[eventIndex - 1][1]
const chunks = context.sliceStream(data)
let index = chunks.length
let bufferIndex = -1
let size = 0
/** @type {boolean|undefined} */
let tabs
while (index--) {
const chunk = chunks[index]
if (typeof chunk === 'string') {
bufferIndex = chunk.length
while (chunk.charCodeAt(bufferIndex - 1) === codes.space) {
size++
bufferIndex--
}
if (bufferIndex) break
bufferIndex = -1
}
// The chunk is a character code (a number).
else if (chunk === codes.horizontalTab) {
tabs = true
size++
} else if (chunk === codes.virtualSpace) {
// Empty
} else {
// Replacement character, exit.
index++
break
}
}
if (size) {
const token = {
type:
eventIndex === events.length ||
tabs ||
size < constants.hardBreakPrefixSizeMin
? types.lineSuffix
: types.hardBreakTrailing,
start: {
line: data.end.line,
column: data.end.column - size,
offset: data.end.offset - size,
_index: data.start._index + index,
_bufferIndex: index
? bufferIndex
: data.start._bufferIndex + bufferIndex
},
end: Object.assign({}, data.end)
}
data.end = Object.assign({}, token.start)
if (data.start.offset === data.end.offset) {
Object.assign(data, token)
} else {
events.splice(
eventIndex,
0,
['enter', token, context],
['exit', token, context]
)
eventIndex += 2
}
}
eventIndex++
}
}
return events
}
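
`resolveAllLineSuffixes` is what classifies trailing whitespace: two or more trailing spaces (`constants.hardBreakPrefixSizeMin`) become a trailing hard break, while anything shorter (or involving tabs) stays a plain line suffix. A sketch:

```js
import {micromark} from 'micromark'

micromark('a  \nb') // → '<p>a<br />\nb</p>' (two trailing spaces: hard break)
micromark('a \nb')  // → '<p>a\nb</p>' (one trailing space: plain suffix)
```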

View File

@@ -0,0 +1,13 @@
/**
* @param {ParseOptions} [options]
* @returns {ParseContext}
*/
export function parse(
options?: import('micromark-util-types').ParseOptions | undefined
): ParseContext
export type InitialConstruct = import('micromark-util-types').InitialConstruct
export type FullNormalizedExtension =
import('micromark-util-types').FullNormalizedExtension
export type ParseOptions = import('micromark-util-types').ParseOptions
export type ParseContext = import('micromark-util-types').ParseContext
export type Create = import('micromark-util-types').Create

View File

@@ -0,0 +1,52 @@
/**
* @typedef {import('micromark-util-types').InitialConstruct} InitialConstruct
* @typedef {import('micromark-util-types').FullNormalizedExtension} FullNormalizedExtension
* @typedef {import('micromark-util-types').ParseOptions} ParseOptions
* @typedef {import('micromark-util-types').ParseContext} ParseContext
* @typedef {import('micromark-util-types').Create} Create
*/
import {combineExtensions} from 'micromark-util-combine-extensions'
import {content} from './initialize/content.js'
import {document} from './initialize/document.js'
import {flow} from './initialize/flow.js'
import {text, string} from './initialize/text.js'
import {createTokenizer} from './create-tokenizer.js'
import * as defaultConstructs from './constructs.js'
/**
* @param {ParseOptions} [options]
* @returns {ParseContext}
*/
export function parse(options = {}) {
/** @type {FullNormalizedExtension} */
// @ts-expect-error `defaultConstructs` is full, so the result will be too.
const constructs = combineExtensions(
// @ts-expect-error Same as above.
[defaultConstructs].concat(options.extensions || [])
)
/** @type {ParseContext} */
const parser = {
defined: [],
lazy: {},
constructs,
content: create(content),
document: create(document),
flow: create(flow),
string: create(string),
text: create(text)
}
return parser
/**
* @param {InitialConstruct} initial
*/
function create(initial) {
return creator
/** @type {Create} */
function creator(from) {
return createTokenizer(parser, initial, from)
}
}
}
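
To illustrate how `combineExtensions` folds user constructs into the defaults, here is a hedged sketch of a made-up `mention` text construct (`@` plus lowercase letters); the construct and its name are hypothetical, but the extension shape (hook → character code → construct) is the one `parse` expects:

```js
import {codes} from 'micromark-util-symbol/codes.js'
import {parse} from './parse.js'
import {preprocess} from './preprocess.js'
import {postprocess} from './postprocess.js'

/** Tokenize `@` followed by one or more lowercase letters (hypothetical). */
function tokenizeMention(effects, ok, nok) {
  let size = 0
  return start

  function start(code) {
    effects.enter('mention')
    effects.consume(code) // The `@` marker.
    return inside
  }

  function inside(code) {
    if (code !== null && code >= codes.lowercaseA && code <= codes.lowercaseZ) {
      effects.consume(code)
      size++
      return inside
    }

    if (!size) return nok(code) // Bare `@`: roll the attempt back.
    effects.exit('mention')
    return ok(code)
  }
}

const parser = parse({
  extensions: [
    {text: {[codes.atSign]: {name: 'mention', tokenize: tokenizeMention}}}
  ]
})
const events = postprocess(
  parser.document().write(preprocess()('hi @alice', undefined, true))
)
console.log(events.some(([, token]) => token.type === 'mention')) // → true
```

(Rendering such a token would additionally need an `htmlExtension` for `compile`; the sketch stops at the event level.)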

View File

@@ -0,0 +1,8 @@
/**
* @param {Array<Event>} events
* @returns {Array<Event>}
*/
export function postprocess(
events: Array<import('micromark-util-types').Event>
): Array<import('micromark-util-types').Event>
export type Event = import('micromark-util-types').Event

View File

@@ -0,0 +1,17 @@
/**
* @typedef {import('micromark-util-types').Event} Event
*/
import {subtokenize} from 'micromark-util-subtokenize'
/**
* @param {Array<Event>} events
* @returns {Array<Event>}
*/
export function postprocess(events) {
while (!subtokenize(events)) {
// Empty
}
return events
}
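
What the loop resolves: after the document tokenizer runs, the event stream still contains `chunkFlow` tokens (see `initialize/document.js` above) whose nested events live on child tokenizers; each `subtokenize` pass splices one level in and returns `true` once nothing is left to expand. A sketch, assuming the relative dev paths of this package:

```js
import {parse} from './parse.js'
import {preprocess} from './preprocess.js'
import {postprocess} from './postprocess.js'

const events = parse().document().write(preprocess()('*hi*', undefined, true))
console.log(events.some(([, token]) => token.type === 'chunkFlow')) // → true

postprocess(events) // Mutates and returns the same array.
console.log(events.some(([, token]) => token.type === 'chunkFlow')) // → false
```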

View File

@@ -0,0 +1,13 @@
/**
* @returns {Preprocessor}
*/
export function preprocess(): Preprocessor
export type Encoding = import('micromark-util-types').Encoding
export type Value = import('micromark-util-types').Value
export type Chunk = import('micromark-util-types').Chunk
export type Code = import('micromark-util-types').Code
export type Preprocessor = (
value: Value,
encoding?: import('micromark-util-types').Encoding | undefined,
end?: boolean | undefined
) => Array<Chunk>

View File

@@ -0,0 +1,133 @@
/**
* @typedef {import('micromark-util-types').Encoding} Encoding
* @typedef {import('micromark-util-types').Value} Value
* @typedef {import('micromark-util-types').Chunk} Chunk
* @typedef {import('micromark-util-types').Code} Code
*/
/**
* @callback Preprocessor
* @param {Value} value
* @param {Encoding} [encoding]
* @param {boolean} [end=false]
* @returns {Array<Chunk>}
*/
import {codes} from 'micromark-util-symbol/codes.js'
import {constants} from 'micromark-util-symbol/constants.js'
const search = /[\0\t\n\r]/g
/**
* @returns {Preprocessor}
*/
export function preprocess() {
let column = 1
let buffer = ''
/** @type {boolean|undefined} */
let start = true
/** @type {boolean|undefined} */
let atCarriageReturn
return preprocessor
/** @type {Preprocessor} */
function preprocessor(value, encoding, end) {
/** @type {Array<Chunk>} */
const chunks = []
/** @type {RegExpMatchArray|null} */
let match
/** @type {number} */
let next
/** @type {number} */
let startPosition
/** @type {number} */
let endPosition
/** @type {Code} */
let code
// @ts-expect-error `Buffer` does allow an encoding.
value = buffer + value.toString(encoding)
startPosition = 0
buffer = ''
if (start) {
if (value.charCodeAt(0) === codes.byteOrderMarker) {
startPosition++
}
start = undefined
}
while (startPosition < value.length) {
search.lastIndex = startPosition
match = search.exec(value)
endPosition =
match && match.index !== undefined ? match.index : value.length
code = value.charCodeAt(endPosition)
if (!match) {
buffer = value.slice(startPosition)
break
}
if (
code === codes.lf &&
startPosition === endPosition &&
atCarriageReturn
) {
chunks.push(codes.carriageReturnLineFeed)
atCarriageReturn = undefined
} else {
if (atCarriageReturn) {
chunks.push(codes.carriageReturn)
atCarriageReturn = undefined
}
if (startPosition < endPosition) {
chunks.push(value.slice(startPosition, endPosition))
column += endPosition - startPosition
}
switch (code) {
case codes.nul: {
chunks.push(codes.replacementCharacter)
column++
break
}
case codes.ht: {
next = Math.ceil(column / constants.tabSize) * constants.tabSize
chunks.push(codes.horizontalTab)
while (column++ < next) chunks.push(codes.virtualSpace)
break
}
case codes.lf: {
chunks.push(codes.lineFeed)
column = 1
break
}
default: {
atCarriageReturn = true
column = 1
}
}
}
startPosition = endPosition + 1
}
if (end) {
if (atCarriageReturn) chunks.push(codes.carriageReturn)
if (buffer) chunks.push(buffer)
chunks.push(codes.eof)
}
return chunks
}
}
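
A sketch of what the preprocessor emits, assuming the numeric values from `micromark-util-symbol` (`horizontalTab` -2, `virtualSpace` -1, `carriageReturnLineFeed` -3, `eof` null):

```js
import {preprocess} from './preprocess.js'

// A tab at column 2 is padded with virtual spaces up to the next tab stop (4).
preprocess()('a\tb', undefined, true)
// → ['a', -2, -1, -1, 'b', null]

// A CRLF split across writes is buffered via `atCarriageReturn` and joined.
const p = preprocess()
p('a\r') // → ['a']
p('\nb', undefined, true) // → [-3, 'b', null]
```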

View File

@@ -0,0 +1,22 @@
/**
* @param {Options} [options]
* @returns {MinimalDuplex}
*/
export function stream(
options?: import('micromark-util-types').Options | undefined
): MinimalDuplex
export type Options = import('micromark-util-types').Options
export type Value = import('micromark-util-types').Value
export type Encoding = import('micromark-util-types').Encoding
export type Callback = (error?: Error) => void
export type MinimalDuplex = Omit<
NodeJS.ReadableStream & NodeJS.WritableStream,
| 'read'
| 'setEncoding'
| 'pause'
| 'resume'
| 'isPaused'
| 'unpipe'
| 'unshift'
| 'wrap'
>
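
The pared-down duplex surface declared above is enough for the classic write/end flow; a sketch:

```js
import {stream} from 'micromark/stream'

const s = stream()
s.pipe(process.stdout)
s.write('# hi\n')
s.end('*done*') // Compiles and emits all HTML as one `data` event.
// stdout: <h1>hi</h1>\n<p><em>done</em></p>
```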

206
vendor/spatie/ignition/node_modules/micromark/dev/stream.js generated vendored Executable file
View File

@@ -0,0 +1,206 @@
/**
* @typedef {import('micromark-util-types').Options} Options
* @typedef {import('micromark-util-types').Value} Value
* @typedef {import('micromark-util-types').Encoding} Encoding
*/
/**
* @typedef {((error?: Error) => void)} Callback
* @typedef {Omit<NodeJS.ReadableStream & NodeJS.WritableStream, 'read'|'setEncoding'|'pause'|'resume'|'isPaused'|'unpipe'|'unshift'|'wrap'>} MinimalDuplex
*/
import {EventEmitter} from 'events'
import {compile} from './lib/compile.js'
import {parse} from './lib/parse.js'
import {postprocess} from './lib/postprocess.js'
import {preprocess} from './lib/preprocess.js'
/**
* @param {Options} [options]
* @returns {MinimalDuplex}
*/
export function stream(options) {
const prep = preprocess()
const tokenize = parse(options).document().write
const comp = compile(options)
/** @type {boolean} */
let ended
/**
* Write a chunk into memory.
*
* @param {Value} chunk
* @param {Encoding} encoding
* @param {Callback} callback
*/
const write =
/**
* @type {(
* ((value?: Value, encoding?: Encoding, callback?: Callback) => boolean) &
* ((value: Value, callback?: Callback) => boolean)
* )}
*/
(
/**
* @param {Value} [chunk]
* @param {Encoding} [encoding]
* @param {Callback} [callback]
*/
function (chunk, encoding, callback) {
if (typeof encoding === 'function') {
callback = encoding
encoding = undefined
}
if (ended) {
throw new Error('Did not expect `write` after `end`')
}
tokenize(prep(chunk || '', encoding))
if (callback) {
callback()
}
// Signal successful write.
return true
}
)
/**
* End the writing.
* Passes all arguments to a final `write`.
*
* @param {Value} chunk
* @param {Encoding} encoding
* @param {Callback} callback
*/
const end =
/**
* @type {(
* ((value?: Value, encoding?: Encoding, callback?: Callback) => boolean) &
* ((value: Value, callback?: Callback) => boolean)
* )}
*/
(
/**
* @param {Value} [chunk]
* @param {Encoding} [encoding]
* @param {Callback} [callback]
*/
function (chunk, encoding, callback) {
if (typeof chunk === 'function') {
encoding = chunk
chunk = undefined
}
write(chunk, encoding, callback)
emitter.emit(
'data',
comp(postprocess(tokenize(prep('', encoding, true))))
)
emitter.emit('end')
ended = true
return true
}
)
/** @type {MinimalDuplex} */
// @ts-expect-error `addListener` is fine.
const emitter = Object.assign(new EventEmitter(), {
writable: true,
readable: true,
write,
end,
pipe
})
return emitter
/**
* Pipe the processor into a writable stream.
* Basically `Stream#pipe`, but inlined and simplified to keep the bundled
* size down.
* See: <https://github.com/nodejs/node/blob/43a5170/lib/internal/streams/legacy.js#L13>.
*
* @template {NodeJS.WritableStream} T
* @param {T} dest
* @param {{end?: boolean}} [options]
* @returns {T}
*/
function pipe(dest, options) {
emitter.on('data', ondata)
emitter.on('error', onerror)
emitter.on('end', cleanup)
emitter.on('close', cleanup)
// If the `end` option is not supplied, `dest.end()` will be
// called when the `end` or `close` events are received.
// @ts-expect-error `_isStdio` is available on `std{err,out}`
if (!dest._isStdio && (!options || options.end !== false)) {
emitter.on('end', onend)
}
dest.on('error', onerror)
dest.on('close', cleanup)
dest.emit('pipe', emitter)
return dest
/**
* End destination.
*
* @returns {void}
*/
function onend() {
if (dest.end) {
dest.end()
}
}
/**
* Handle data.
*
* @param {string} chunk
* @returns {void}
*/
function ondata(chunk) {
if (dest.writable) {
dest.write(chunk)
}
}
/**
* Clean listeners.
*
* @returns {void}
*/
function cleanup() {
emitter.removeListener('data', ondata)
emitter.removeListener('end', onend)
emitter.removeListener('error', onerror)
emitter.removeListener('end', cleanup)
emitter.removeListener('close', cleanup)
dest.removeListener('error', onerror)
dest.removeListener('close', cleanup)
}
/**
* Close dangling pipes and handle unheard errors.
*
* @param {Error?} [error]
* @returns {void}
*/
function onerror(error) {
cleanup()
if (!emitter.listenerCount('error')) {
throw error // Unhandled stream error in pipe.
}
}
}
}
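
And the pipe-based flow the inlined `pipe` above exists for (this mirrors the usage in micromarks own documentation):

```js
import {createReadStream} from 'fs'
import {stream} from 'micromark/stream'

createReadStream('example.md')
  .on('error', (error) => console.error(String(error)))
  .pipe(stream())
  .pipe(process.stdout)
```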