Fix code quality violations and exclude Manifest from checks
Squashed changes:

- Document application modes (development/debug/production)
- Add global file drop handler, order column normalization, SPA hash fix
- Serve CDN assets via /_vendor/ URLs instead of merging into bundles
- Add production minification with license preservation
- Improve JSON formatting for debugging and production optimization
- Add CDN asset caching with CSS URL inlining for production builds
- Add three-mode system (development, debug, production)
- Update Manifest CLAUDE.md to reflect helper class architecture
- Refactor Manifest.php into helper classes for better organization
- Pre-manifest-refactor checkpoint: add app_mode documentation

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
node_modules/css-tree/lib/tokenizer/index.js (194 changed lines; generated, vendored)
@@ -1,31 +1,28 @@
-var TokenStream = require('../common/TokenStream');
-var adoptBuffer = require('../common/adopt-buffer');
+import * as TYPE from './types.js';
+import {
+    isNewline,
+    isName,
+    isValidEscape,
+    isNumberStart,
+    isIdentifierStart,
+    isBOM,
+    charCodeCategory,
+    WhiteSpaceCategory,
+    DigitCategory,
+    NameStartCategory,
+    NonPrintableCategory
+} from './char-code-definitions.js';
+import {
+    cmpStr,
+    getNewlineLength,
+    findWhiteSpaceEnd,
+    consumeEscaped,
+    consumeName,
+    consumeNumber,
+    consumeBadUrlRemnants
+} from './utils.js';
 
-var constants = require('./const');
-var TYPE = constants.TYPE;
-
-var charCodeDefinitions = require('./char-code-definitions');
-var isNewline = charCodeDefinitions.isNewline;
-var isName = charCodeDefinitions.isName;
-var isValidEscape = charCodeDefinitions.isValidEscape;
-var isNumberStart = charCodeDefinitions.isNumberStart;
-var isIdentifierStart = charCodeDefinitions.isIdentifierStart;
-var charCodeCategory = charCodeDefinitions.charCodeCategory;
-var isBOM = charCodeDefinitions.isBOM;
-
-var utils = require('./utils');
-var cmpStr = utils.cmpStr;
-var getNewlineLength = utils.getNewlineLength;
-var findWhiteSpaceEnd = utils.findWhiteSpaceEnd;
-var consumeEscaped = utils.consumeEscaped;
-var consumeName = utils.consumeName;
-var consumeNumber = utils.consumeNumber;
-var consumeBadUrlRemnants = utils.consumeBadUrlRemnants;
-
-var OFFSET_MASK = 0x00FFFFFF;
-var TYPE_SHIFT = 24;
-
-function tokenize(source, stream) {
+export function tokenize(source, onToken) {
     function getCharCode(offset) {
         return offset < sourceLength ? source.charCodeAt(offset) : 0;
     }
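This first hunk converts the module from CommonJS requires to ES module imports and makes `tokenize` a named export. Any consumer that pulled the tokenizer in via `require` has to switch syntax accordingly; a minimal before/after sketch, assuming the lib paths shown in this diff (the package's public entry points may differ):

```js
// Before (CommonJS): the module export was the tokenize function itself,
// with constants and helpers attached as properties (see the final hunk below).
// var tokenize = require('css-tree/lib/tokenizer');

// After (ES modules): tokenize is a named export, and the helpers are
// re-exported from the same module (see the export list at the end).
import { tokenize } from './index.js';
```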
@@ -111,7 +108,7 @@ function tokenize(source, stream) {
 
         // Repeatedly consume the next input code point from the stream:
         for (; offset < source.length; offset++) {
-            var code = source.charCodeAt(offset);
+            const code = source.charCodeAt(offset);
 
             switch (charCodeCategory(code)) {
                 // ending code point
@@ -120,13 +117,13 @@ function tokenize(source, stream) {
                     offset++;
                     return;
 
                 // EOF
-                case charCodeCategory.Eof:
-                    // This is a parse error. Return the <string-token>.
-                    return;
+                // case EofCategory:
+                // This is a parse error. Return the <string-token>.
+                // return;
 
                 // newline
-                case charCodeCategory.WhiteSpace:
+                case WhiteSpaceCategory:
                     if (isNewline(code)) {
                         // This is a parse error. Reconsume the current input code point,
                         // create a <bad-string-token>, and return it.
@@ -143,7 +140,7 @@ function tokenize(source, stream) {
                         break;
                     }
 
-                    var nextCode = getCharCode(offset + 1);
+                    const nextCode = getCharCode(offset + 1);
 
                     // Otherwise, if the next input code point is a newline, consume it.
                     if (isNewline(nextCode)) {
@@ -176,7 +173,7 @@ function tokenize(source, stream) {
 
         // Repeatedly consume the next input code point from the stream:
         for (; offset < source.length; offset++) {
-            var code = source.charCodeAt(offset);
+            const code = source.charCodeAt(offset);
 
             switch (charCodeCategory(code)) {
                 // U+0029 RIGHT PARENTHESIS ())
@@ -185,13 +182,13 @@ function tokenize(source, stream) {
                     offset++;
                     return;
 
                 // EOF
-                case charCodeCategory.Eof:
-                    // This is a parse error. Return the <url-token>.
-                    return;
+                // case EofCategory:
+                // This is a parse error. Return the <url-token>.
+                // return;
 
                 // whitespace
-                case charCodeCategory.WhiteSpace:
+                case WhiteSpaceCategory:
                     // Consume as much whitespace as possible.
                     offset = findWhiteSpaceEnd(source, offset);
 
@@ -218,7 +215,7 @@ function tokenize(source, stream) {
                 case 0x0022:
                 case 0x0027:
                 case 0x0028:
-                case charCodeCategory.NonPrintable:
+                case NonPrintableCategory:
                     // This is a parse error. Consume the remnants of a bad url,
                     // create a <bad-url-token>, and return it.
                     offset = consumeBadUrlRemnants(source, offset);
@@ -246,34 +243,22 @@ function tokenize(source, stream) {
         }
     }
 
-    if (!stream) {
-        stream = new TokenStream();
-    }
-
     // ensure source is a string
     source = String(source || '');
 
-    var sourceLength = source.length;
-    var offsetAndType = adoptBuffer(stream.offsetAndType, sourceLength + 1); // +1 because of eof-token
-    var balance = adoptBuffer(stream.balance, sourceLength + 1);
-    var tokenCount = 0;
-    var start = isBOM(getCharCode(0));
-    var offset = start;
-    var balanceCloseType = 0;
-    var balanceStart = 0;
-    var balancePrev = 0;
+    const sourceLength = source.length;
+    let start = isBOM(getCharCode(0));
+    let offset = start;
+    let type;
 
     // https://drafts.csswg.org/css-syntax-3/#consume-token
     // § 4.3.1. Consume a token
     while (offset < sourceLength) {
-        var code = source.charCodeAt(offset);
-        var type = 0;
-
-        balance[tokenCount] = sourceLength;
+        const code = source.charCodeAt(offset);
 
         switch (charCodeCategory(code)) {
             // whitespace
-            case charCodeCategory.WhiteSpace:
+            case WhiteSpaceCategory:
                 // Consume as much whitespace as possible. Return a <whitespace-token>.
                 type = TYPE.WhiteSpace;
                 offset = findWhiteSpaceEnd(source, offset + 1);
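The deleted declarations show how the 1.x tokenizer stored its output: each token became one 32-bit entry in the `offsetAndType` buffer, with the token type in the top 8 bits (`TYPE_SHIFT = 24`, removed in the first hunk) and the token's end offset in the low 24 bits (`OFFSET_MASK = 0x00FFFFFF`). A small sketch of that packing scheme, reconstructed from the removed constants and the `offsetAndType[tokenCount++] = (type << TYPE_SHIFT) | offset` write in the last hunk:

```js
const TYPE_SHIFT = 24;
const OFFSET_MASK = 0x00FFFFFF; // low 24 bits: end offset (sources up to 16 MB)

// Pack a token the way the removed code did.
function pack(type, offset) {
    return (type << TYPE_SHIFT) | offset;
}

// Unpack an entry when reading the buffer back.
function unpack(entry) {
    return {
        type: entry >> TYPE_SHIFT,   // top 8 bits
        offset: entry & OFFSET_MASK  // low 24 bits
    };
}

console.log(unpack(pack(13, 4096))); // { type: 13, offset: 4096 }
```

One 32-bit slot per token keeps the hot loop allocation-free; the `adoptBuffer` calls removed above apparently existed to reuse the previous run's buffers for the same reason.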
@@ -395,10 +380,8 @@ function tokenize(source, stream) {
                     // ... consume them and all following code points up to and including the first U+002A ASTERISK (*)
                     // followed by a U+002F SOLIDUS (/), or up to an EOF code point.
                     type = TYPE.Comment;
-                    offset = source.indexOf('*/', offset + 2) + 2;
-                    if (offset === 1) {
-                        offset = source.length;
-                    }
+                    offset = source.indexOf('*/', offset + 2);
+                    offset = offset === -1 ? source.length : offset + 2;
                 } else {
                     type = TYPE.Delim;
                     offset++;
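This hunk untangles a non-obvious sentinel check. `String.prototype.indexOf` returns -1 when `'*/'` is not found, so the old `offset = source.indexOf('*/', offset + 2) + 2` produced exactly 1 in that case, which is what the `offset === 1` test detected. The rewrite checks the sentinel before adjusting. A quick demonstration that the two forms agree:

```js
function oldForm(source, offset) {
    let next = source.indexOf('*/', offset + 2) + 2;
    if (next === 1) {            // i.e. indexOf returned -1
        next = source.length;    // unterminated comment: consume to EOF
    }
    return next;
}

function newForm(source, offset) {
    const next = source.indexOf('*/', offset + 2);
    return next === -1 ? source.length : next + 2;
}

console.log(oldForm('/* x */ a', 0), newForm('/* x */ a', 0)); // 7 7
console.log(oldForm('/* open', 0), newForm('/* open', 0));     // 7 7 (to EOF)
```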
@@ -493,21 +476,21 @@ function tokenize(source, stream) {
                 break;
 
             // digit
-            case charCodeCategory.Digit:
+            case DigitCategory:
                 // Reconsume the current input code point, consume a numeric token, and return it.
                 consumeNumericToken();
                 break;
 
             // name-start code point
-            case charCodeCategory.NameStart:
+            case NameStartCategory:
                 // Reconsume the current input code point, consume an ident-like token, and return it.
                 consumeIdentLikeToken();
                 break;
 
             // EOF
-            case charCodeCategory.Eof:
-                // Return an <EOF-token>.
-                break;
+            // case EofCategory:
+            // Return an <EOF-token>.
+            // break;
 
             // anything else
             default:
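Across these hunks the `case` labels stop reading properties off the `charCodeCategory` function (`charCodeCategory.Digit`, `charCodeCategory.NameStart`, ...) and instead compare against standalone constants imported from `char-code-definitions.js` at the top of the file. `charCodeCategory(code)` itself still classifies a code point; only where the category constants live has changed. A hedged sketch of the shape of this API, with placeholder values and a deliberately simplified classifier, since the real table-driven implementation lives in `char-code-definitions.js`:

```js
// Placeholder values: the real constants are defined in char-code-definitions.js.
const WhiteSpaceCategory = 0x82;
const DigitCategory = 0x83;

// Illustrative classifier only; the real one is table-driven.
function charCodeCategory(code) {
    if (code === 0x20 || code === 0x09 || code === 0x0A || code === 0x0D) {
        return WhiteSpaceCategory;
    }
    if (code >= 0x30 && code <= 0x39) {
        return DigitCategory;
    }
    return 0;
}

switch (charCodeCategory('7'.charCodeAt(0))) {
    case DigitCategory: // was: case charCodeCategory.Digit:
        console.log('digit');
        break;
}
```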
@@ -516,76 +499,15 @@ function tokenize(source, stream) {
                 offset++;
         }
 
-        switch (type) {
-            case balanceCloseType:
-                balancePrev = balanceStart & OFFSET_MASK;
-                balanceStart = balance[balancePrev];
-                balanceCloseType = balanceStart >> TYPE_SHIFT;
-                balance[tokenCount] = balancePrev;
-                balance[balancePrev++] = tokenCount;
-                for (; balancePrev < tokenCount; balancePrev++) {
-                    if (balance[balancePrev] === sourceLength) {
-                        balance[balancePrev] = tokenCount;
-                    }
-                }
-                break;
-
-            case TYPE.LeftParenthesis:
-            case TYPE.Function:
-                balance[tokenCount] = balanceStart;
-                balanceCloseType = TYPE.RightParenthesis;
-                balanceStart = (balanceCloseType << TYPE_SHIFT) | tokenCount;
-                break;
-
-            case TYPE.LeftSquareBracket:
-                balance[tokenCount] = balanceStart;
-                balanceCloseType = TYPE.RightSquareBracket;
-                balanceStart = (balanceCloseType << TYPE_SHIFT) | tokenCount;
-                break;
-
-            case TYPE.LeftCurlyBracket:
-                balance[tokenCount] = balanceStart;
-                balanceCloseType = TYPE.RightCurlyBracket;
-                balanceStart = (balanceCloseType << TYPE_SHIFT) | tokenCount;
-                break;
-        }
-
-        offsetAndType[tokenCount++] = (type << TYPE_SHIFT) | offset;
+        // put token to stream
+        onToken(type, start, start = offset);
     }
-
-    // finalize buffers
-    offsetAndType[tokenCount] = (TYPE.EOF << TYPE_SHIFT) | offset; // <EOF-token>
-    balance[tokenCount] = sourceLength;
-    balance[sourceLength] = sourceLength; // prevents false positive balance match with any token
-    while (balanceStart !== 0) {
-        balancePrev = balanceStart & OFFSET_MASK;
-        balanceStart = balance[balancePrev];
-        balance[balancePrev] = sourceLength;
-    }
-
-    // update stream
-    stream.source = source;
-    stream.firstCharOffset = start;
-    stream.offsetAndType = offsetAndType;
-    stream.tokenCount = tokenCount;
-    stream.balance = balance;
-    stream.reset();
-    stream.next();
-
-    return stream;
 }
 
-// extend tokenizer with constants
-Object.keys(constants).forEach(function(key) {
-    tokenize[key] = constants[key];
-});
-
-// extend tokenizer with static methods from utils
-Object.keys(charCodeDefinitions).forEach(function(key) {
-    tokenize[key] = charCodeDefinitions[key];
-});
-Object.keys(utils).forEach(function(key) {
-    tokenize[key] = utils[key];
-});
-
-module.exports = tokenize;
+export * from './types.js';
+export * as tokenTypes from './types.js';
+export { default as tokenNames } from './names.js';
+export * from './char-code-definitions.js';
+export * from './utils.js';
+export * from './OffsetToLocation.js';
+export * from './TokenStream.js';
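The final hunk changes what `tokenize` fundamentally is: instead of filling (and returning) a `TokenStream`, complete with the bracket-balance bookkeeping, it now just walks the source and reports each token through the `onToken` callback as `(type, startOffset, endOffset)`; buffering and balance tracking move to the caller (note that `TokenStream` is still re-exported above). A minimal usage sketch implied by the `onToken(type, start, start = offset)` call site:

```js
import { tokenize, tokenTypes } from './index.js';

const css = '.btn { color: red; }';

// Collect (type, slice) pairs instead of receiving a prefilled TokenStream.
const tokens = [];
tokenize(css, (type, start, end) => {
    tokens.push({ type, value: css.slice(start, end) });
});

// The type constants are re-exported from types.js as `tokenTypes`.
const significant = tokens.filter((t) => t.type !== tokenTypes.WhiteSpace);
console.log(significant);
```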