Fix bin/publish: copy docs.dist from project root

Fix bin/publish: use correct .env path for rspade_system
Fix bin/publish script: prevent grep exit code 1 from terminating script

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
root
2025-10-21 02:08:33 +00:00
commit f6fac6c4bc
79758 changed files with 10547827 additions and 0 deletions

View File

@@ -0,0 +1,12 @@
/**
* Tokenize subcontent.
*
* @param {Event[]} events
* @returns {boolean}
*/
export function subtokenize(
events: import('micromark-util-types').Event[]
): boolean
export type Token = import('micromark-util-types').Token
export type Chunk = import('micromark-util-types').Chunk
export type Event = import('micromark-util-types').Event

View File

@@ -0,0 +1,259 @@
/**
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').Chunk} Chunk
* @typedef {import('micromark-util-types').Event} Event
*/
import {ok as assert} from 'uvu/assert'
import {splice} from 'micromark-util-chunked'
import {codes} from 'micromark-util-symbol/codes.js'
import {types} from 'micromark-util-symbol/types.js'
/**
* Tokenize subcontent.
*
* @param {Event[]} events
* @returns {boolean}
*/
export function subtokenize(events) {
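// Map of event indices to skip: `subcontent` fills this in when a token's
// subcontent has been expanded in place, so the outer loop can jump past
// the already-handled events.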
/** @type {Record<string, number>} */
const jumps = {}
let index = -1
/** @type {Event} */
let event
/** @type {number|undefined} */
let lineIndex
/** @type {number} */
let otherIndex
/** @type {Event} */
let otherEvent
/** @type {Event[]} */
let parameters
/** @type {Event[]} */
let subevents
/** @type {boolean|undefined} */
let more
while (++index < events.length) {
while (index in jumps) {
index = jumps[index]
}
event = events[index]
// Add a hook for the GFM tasklist extension, which needs to know if text
// is in the first content of a list item.
if (
index &&
event[1].type === types.chunkFlow &&
events[index - 1][1].type === types.listItemPrefix
) {
assert(event[1]._tokenizer, 'expected `_tokenizer` on subtokens')
subevents = event[1]._tokenizer.events
otherIndex = 0
if (
otherIndex < subevents.length &&
subevents[otherIndex][1].type === types.lineEndingBlank
) {
otherIndex += 2
}
if (
otherIndex < subevents.length &&
subevents[otherIndex][1].type === types.content
) {
while (++otherIndex < subevents.length) {
if (subevents[otherIndex][1].type === types.content) {
break
}
if (subevents[otherIndex][1].type === types.chunkText) {
subevents[otherIndex][1]._isInFirstContentOfListItem = true
otherIndex++
}
}
}
}
// Enter.
if (event[0] === 'enter') {
if (event[1].contentType) {
Object.assign(jumps, subcontent(events, index))
index = jumps[index]
more = true
}
}
// Exit.
else if (event[1]._container) {
otherIndex = index
lineIndex = undefined
while (otherIndex--) {
otherEvent = events[otherIndex]
if (
otherEvent[1].type === types.lineEnding ||
otherEvent[1].type === types.lineEndingBlank
) {
if (otherEvent[0] === 'enter') {
if (lineIndex) {
events[lineIndex][1].type = types.lineEndingBlank
}
otherEvent[1].type = types.lineEnding
lineIndex = otherIndex
}
} else {
break
}
}
if (lineIndex) {
// Fix position.
event[1].end = Object.assign({}, events[lineIndex][1].start)
// Switch container exit w/ line endings.
parameters = events.slice(lineIndex, index)
parameters.unshift(event)
splice(events, lineIndex, index - lineIndex + 1, parameters)
}
}
}
return !more
}
/**
* Tokenize embedded tokens.
*
* @param {Event[]} events
* @param {number} eventIndex
* @returns {Record<string, number>}
*/
function subcontent(events, eventIndex) {
const token = events[eventIndex][1]
const context = events[eventIndex][2]
let startPosition = eventIndex - 1
/** @type {number[]} */
const startPositions = []
assert(token.contentType, 'expected `contentType` on subtokens')
const tokenizer =
token._tokenizer || context.parser[token.contentType](token.start)
const childEvents = tokenizer.events
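// Pairs of `[start, end]` event indices for each spliced-in slice; turned
// into the `gaps` jump table (adjusted for array growth) before returning.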
/** @type {[number, number][]} */
const jumps = []
/** @type {Record<string, number>} */
const gaps = {}
/** @type {Chunk[]} */
let stream
/** @type {Token|undefined} */
let previous
let index = -1
/** @type {Token|undefined} */
let current = token
let adjust = 0
let start = 0
const breaks = [start]
// Loop forward through the linked tokens to pass them in order to the
// subtokenizer.
while (current) {
// Find the position of the event for this token.
while (events[++startPosition][1] !== current) {
// Empty.
}
assert(
!previous || current.previous === previous,
'expected previous to match'
)
assert(!previous || previous.next === current, 'expected next to match')
startPositions.push(startPosition)
if (!current._tokenizer) {
stream = context.sliceStream(current)
if (!current.next) {
stream.push(codes.eof)
}
if (previous) {
tokenizer.defineSkip(current.start)
}
if (current._isInFirstContentOfListItem) {
tokenizer._gfmTasklistFirstContentOfListItem = true
}
tokenizer.write(stream)
if (current._isInFirstContentOfListItem) {
tokenizer._gfmTasklistFirstContentOfListItem = undefined
}
}
// Unravel the next token.
previous = current
current = current.next
}
// Now, loop back through all events (and linked tokens), to figure out which
// parts belong where.
current = token
while (++index < childEvents.length) {
if (
// Find a void token that includes a break.
childEvents[index][0] === 'exit' &&
childEvents[index - 1][0] === 'enter' &&
childEvents[index][1].type === childEvents[index - 1][1].type &&
childEvents[index][1].start.line !== childEvents[index][1].end.line
) {
assert(current, 'expected a current token')
start = index + 1
breaks.push(start)
// Help GC.
current._tokenizer = undefined
current.previous = undefined
current = current.next
}
}
// Help GC.
tokenizer.events = []
// If there's one more token (which is the case for lines that end in an
// EOF), that's perfect: the last point we found starts it.
// If there isn't, then make sure any remaining content is added to it.
if (current) {
// Help GC.
current._tokenizer = undefined
current.previous = undefined
assert(!current.next, 'expected no next token')
} else {
breaks.pop()
}
// Now splice the events from the subtokenizer into the current events,
// moving back to front so that splice indices aren't affected.
index = breaks.length
while (index--) {
const slice = childEvents.slice(breaks[index], breaks[index + 1])
const start = startPositions.pop()
assert(start !== undefined, 'expected a start position when splicing')
jumps.unshift([start, start + slice.length - 1])
splice(events, start, 2, slice)
}
index = -1
while (++index < jumps.length) {
gaps[adjust + jumps[index][0]] = adjust + jumps[index][1]
adjust += jumps[index][1] - jumps[index][0] - 1
}
return gaps
}

View File

@@ -0,0 +1,12 @@
/**
* Tokenize subcontent.
*
* @param {Event[]} events
* @returns {boolean}
*/
export function subtokenize(
events: import('micromark-util-types').Event[]
): boolean
export type Token = import('micromark-util-types').Token
export type Chunk = import('micromark-util-types').Chunk
export type Event = import('micromark-util-types').Event

View File

@@ -0,0 +1,247 @@
/**
* @typedef {import('micromark-util-types').Token} Token
* @typedef {import('micromark-util-types').Chunk} Chunk
* @typedef {import('micromark-util-types').Event} Event
*/
import {splice} from 'micromark-util-chunked'
/**
* Tokenize subcontent.
*
* @param {Event[]} events
* @returns {boolean}
*/
export function subtokenize(events) {
/** @type {Record<string, number>} */
const jumps = {}
let index = -1
/** @type {Event} */
let event
/** @type {number|undefined} */
let lineIndex
/** @type {number} */
let otherIndex
/** @type {Event} */
let otherEvent
/** @type {Event[]} */
let parameters
/** @type {Event[]} */
let subevents
/** @type {boolean|undefined} */
let more
while (++index < events.length) {
while (index in jumps) {
index = jumps[index]
}
event = events[index]
// Add a hook for the GFM tasklist extension, which needs to know if text
// is in the first content of a list item.
if (
index &&
event[1].type === 'chunkFlow' &&
events[index - 1][1].type === 'listItemPrefix'
) {
subevents = event[1]._tokenizer.events
otherIndex = 0
if (
otherIndex < subevents.length &&
subevents[otherIndex][1].type === 'lineEndingBlank'
) {
otherIndex += 2
}
if (
otherIndex < subevents.length &&
subevents[otherIndex][1].type === 'content'
) {
while (++otherIndex < subevents.length) {
if (subevents[otherIndex][1].type === 'content') {
break
}
if (subevents[otherIndex][1].type === 'chunkText') {
subevents[otherIndex][1]._isInFirstContentOfListItem = true
otherIndex++
}
}
}
}
// Enter.
if (event[0] === 'enter') {
if (event[1].contentType) {
Object.assign(jumps, subcontent(events, index))
index = jumps[index]
more = true
}
}
// Exit.
else if (event[1]._container) {
otherIndex = index
lineIndex = undefined
while (otherIndex--) {
otherEvent = events[otherIndex]
if (
otherEvent[1].type === 'lineEnding' ||
otherEvent[1].type === 'lineEndingBlank'
) {
if (otherEvent[0] === 'enter') {
if (lineIndex) {
events[lineIndex][1].type = 'lineEndingBlank'
}
otherEvent[1].type = 'lineEnding'
lineIndex = otherIndex
}
} else {
break
}
}
if (lineIndex) {
// Fix position.
event[1].end = Object.assign({}, events[lineIndex][1].start)
// Switch container exit w/ line endings.
parameters = events.slice(lineIndex, index)
parameters.unshift(event)
splice(events, lineIndex, index - lineIndex + 1, parameters)
}
}
}
return !more
}
/**
* Tokenize embedded tokens.
*
* @param {Event[]} events
* @param {number} eventIndex
* @returns {Record<string, number>}
*/
function subcontent(events, eventIndex) {
const token = events[eventIndex][1]
const context = events[eventIndex][2]
let startPosition = eventIndex - 1
/** @type {number[]} */
const startPositions = []
const tokenizer =
token._tokenizer || context.parser[token.contentType](token.start)
const childEvents = tokenizer.events
/** @type {[number, number][]} */
const jumps = []
/** @type {Record<string, number>} */
const gaps = {}
/** @type {Chunk[]} */
let stream
/** @type {Token|undefined} */
let previous
let index = -1
/** @type {Token|undefined} */
let current = token
let adjust = 0
let start = 0
const breaks = [start]
// Loop forward through the linked tokens to pass them in order to the
// subtokenizer.
while (current) {
// Find the position of the event for this token.
while (events[++startPosition][1] !== current) {
// Empty.
}
startPositions.push(startPosition)
if (!current._tokenizer) {
stream = context.sliceStream(current)
if (!current.next) {
stream.push(null)
}
if (previous) {
tokenizer.defineSkip(current.start)
}
if (current._isInFirstContentOfListItem) {
tokenizer._gfmTasklistFirstContentOfListItem = true
}
tokenizer.write(stream)
if (current._isInFirstContentOfListItem) {
tokenizer._gfmTasklistFirstContentOfListItem = undefined
}
}
// Unravel the next token.
previous = current
current = current.next
}
// Now, loop back through all events (and linked tokens), to figure out which
// parts belong where.
current = token
while (++index < childEvents.length) {
if (
// Find a void token that includes a break.
childEvents[index][0] === 'exit' &&
childEvents[index - 1][0] === 'enter' &&
childEvents[index][1].type === childEvents[index - 1][1].type &&
childEvents[index][1].start.line !== childEvents[index][1].end.line
) {
start = index + 1
breaks.push(start)
// Help GC.
current._tokenizer = undefined
current.previous = undefined
current = current.next
}
}
// Help GC.
tokenizer.events = []
// If there's one more token (which is the case for lines that end in an
// EOF), that's perfect: the last point we found starts it.
// If there isn't, then make sure any remaining content is added to it.
if (current) {
// Help GC.
current._tokenizer = undefined
current.previous = undefined
} else {
breaks.pop()
}
// Now splice the events from the subtokenizer into the current events,
// moving back to front so that splice indices aren't affected.
index = breaks.length
while (index--) {
const slice = childEvents.slice(breaks[index], breaks[index + 1])
const start = startPositions.pop()
jumps.unshift([start, start + slice.length - 1])
splice(events, start, 2, slice)
}
index = -1
while (++index < jumps.length) {
gaps[adjust + jumps[index][0]] = adjust + jumps[index][1]
adjust += jumps[index][1] - jumps[index][0] - 1
}
return gaps
}

View File

@@ -0,0 +1 @@
../../../uvu/bin.js

View File

@@ -0,0 +1,126 @@
# micromark-util-subtokenize
[![Build][build-badge]][build]
[![Coverage][coverage-badge]][coverage]
[![Downloads][downloads-badge]][downloads]
[![Size][bundle-size-badge]][bundle-size]
[![Sponsors][sponsors-badge]][opencollective]
[![Backers][backers-badge]][opencollective]
[![Chat][chat-badge]][chat]
micromark utility to tokenize subtokens.
## Contents
* [Install](#install)
* [Use](#use)
* [API](#api)
* [`subtokenize(events)`](#subtokenizeevents)
* [Security](#security)
* [Contribute](#contribute)
* [License](#license)
## Install
[npm][]:
```sh
npm install micromark-util-subtokenize
```
## Use
```js
import {subtokenize} from 'micromark-util-subtokenize'
/**
* Content is transparent: it's parsed right now. That way, definitions are also
* parsed right now: before text in paragraphs (specifically, media) is parsed.
*
* @type {Resolver}
*/
function resolveContent(events) {
subtokenize(events)
return events
}
```
## API
This module exports the following identifiers: `subtokenize`.
There is no default export.
### `subtokenize(events)`
Tokenize subcontent.
###### Parameters
* `events` (`Event[]`) — List of events
###### Returns
`boolean` — whether it's done: `false` means subcontent was found and expanded, so the events should be passed through again (see the sketch below).
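Because expanding subcontent can itself reveal more subcontent, callers typically run `subtokenize` to a fixed point. A minimal sketch of that loop (the `expandAll` wrapper is illustrative, not part of this package; micromark does the equivalent in its postprocessing step):

```js
import {subtokenize} from 'micromark-util-subtokenize'

/**
 * Keep expanding subcontent until `subtokenize` reports it's done.
 *
 * @param {import('micromark-util-types').Event[]} events
 * @returns {import('micromark-util-types').Event[]}
 */
function expandAll(events) {
  // `subtokenize` returns `false` while a pass found (and expanded)
  // subcontent, so loop until a pass finds nothing more to do.
  while (!subtokenize(events)) {
    // Empty.
  }

  return events
}
```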
## Security
See [`security.md`][securitymd] in [`micromark/.github`][health] for how to
submit a security report.
## Contribute
See [`contributing.md`][contributing] in [`micromark/.github`][health] for ways
to get started.
See [`support.md`][support] for ways to get help.
This project has a [code of conduct][coc].
By interacting with this repository, organisation, or community you agree to
abide by its terms.
## License
[MIT][license] © [Titus Wormer][author]
<!-- Definitions -->
[build-badge]: https://github.com/micromark/micromark/workflows/main/badge.svg
[build]: https://github.com/micromark/micromark/actions
[coverage-badge]: https://img.shields.io/codecov/c/github/micromark/micromark.svg
[coverage]: https://codecov.io/github/micromark/micromark
[downloads-badge]: https://img.shields.io/npm/dm/micromark-util-subtokenize.svg
[downloads]: https://www.npmjs.com/package/micromark-util-subtokenize
[bundle-size-badge]: https://img.shields.io/bundlephobia/minzip/micromark-util-subtokenize.svg
[bundle-size]: https://bundlephobia.com/result?p=micromark-util-subtokenize
[sponsors-badge]: https://opencollective.com/unified/sponsors/badge.svg
[backers-badge]: https://opencollective.com/unified/backers/badge.svg
[opencollective]: https://opencollective.com/unified
[npm]: https://docs.npmjs.com/cli/install
[chat-badge]: https://img.shields.io/badge/chat-discussions-success.svg
[chat]: https://github.com/micromark/micromark/discussions
[license]: https://github.com/micromark/micromark/blob/main/license
[author]: https://wooorm.com
[health]: https://github.com/micromark/.github
[securitymd]: https://github.com/micromark/.github/blob/HEAD/security.md
[contributing]: https://github.com/micromark/.github/blob/HEAD/contributing.md
[support]: https://github.com/micromark/.github/blob/HEAD/support.md
[coc]: https://github.com/micromark/.github/blob/HEAD/code-of-conduct.md