2024-02-07 01:33:07 -05:00
commit c1af19d441
4088 changed files with 1260170 additions and 0 deletions

View File

@ -0,0 +1,157 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.LineExtractor = void 0;
var TSDocMessageId_1 = require("./TSDocMessageId");
// Internal parser state
var State;
(function (State) {
// Initial state, looking for "/*"
State[State["BeginComment1"] = 0] = "BeginComment1";
// Looking for "*" or "* " after "/*"
State[State["BeginComment2"] = 1] = "BeginComment2";
// Like State.CollectingLine except immediately after the "/**"
State[State["CollectingFirstLine"] = 2] = "CollectingFirstLine";
// Collecting characters until we reach a newline
State[State["CollectingLine"] = 3] = "CollectingLine";
// After a newline, looking for the "*" that begins a new line, or the "*/" to end the comment
State[State["AdvancingLine"] = 4] = "AdvancingLine";
// Exiting the parser loop
State[State["Done"] = 5] = "Done";
})(State || (State = {}));
/**
 * Internal parser stage that locates the doc comment delimiters and extracts the content lines.
 */
var LineExtractor = /** @class */ (function () {
function LineExtractor() {
}
/**
* This step parses an entire code comment from slash-star-star until star-slash,
* and extracts the content lines. The lines are stored in IDocCommentParameters.lines
* and the overall text range is assigned to IDocCommentParameters.range.
*/
LineExtractor.extract = function (parserContext) {
var range = parserContext.sourceRange;
var buffer = range.buffer;
var commentRangeStart = 0;
var commentRangeEnd = 0;
// These must be set before entering CollectingFirstLine, CollectingLine, or AdvancingLine
var collectingLineStart = 0;
var collectingLineEnd = 0;
var nextIndex = range.pos;
var state = State.BeginComment1;
var lines = [];
while (state !== State.Done) {
if (nextIndex >= range.end) {
// reached the end of the input
switch (state) {
case State.BeginComment1:
case State.BeginComment2:
parserContext.log.addMessageForTextRange(TSDocMessageId_1.TSDocMessageId.CommentNotFound, 'Expecting a "/**" comment', range);
return false;
default:
parserContext.log.addMessageForTextRange(TSDocMessageId_1.TSDocMessageId.CommentMissingClosingDelimiter, 'Unexpected end of input', range);
return false;
}
}
var current = buffer[nextIndex];
var currentIndex = nextIndex;
++nextIndex;
var next = nextIndex < range.end ? buffer[nextIndex] : '';
switch (state) {
case State.BeginComment1:
if (current === '/' && next === '*') {
commentRangeStart = currentIndex;
++nextIndex; // skip the star
state = State.BeginComment2;
}
else if (!LineExtractor._whitespaceCharacterRegExp.test(current)) {
parserContext.log.addMessageForTextRange(TSDocMessageId_1.TSDocMessageId.CommentOpeningDelimiterSyntax, 'Expecting a leading "/**"', range.getNewRange(currentIndex, currentIndex + 1));
return false;
}
break;
case State.BeginComment2:
if (current === '*') {
if (next === ' ') {
++nextIndex; // Discard the space after the star
}
collectingLineStart = nextIndex;
collectingLineEnd = nextIndex;
state = State.CollectingFirstLine;
}
else {
parserContext.log.addMessageForTextRange(TSDocMessageId_1.TSDocMessageId.CommentOpeningDelimiterSyntax, 'Expecting a leading "/**"', range.getNewRange(currentIndex, currentIndex + 1));
return false;
}
break;
case State.CollectingFirstLine:
case State.CollectingLine:
if (current === '\n') {
// Ignore an empty line if it is immediately after the "/**"
if (state !== State.CollectingFirstLine || collectingLineEnd > collectingLineStart) {
// Record the line that we collected
lines.push(range.getNewRange(collectingLineStart, collectingLineEnd));
}
collectingLineStart = nextIndex;
collectingLineEnd = nextIndex;
state = State.AdvancingLine;
}
else if (current === '*' && next === '/') {
if (collectingLineEnd > collectingLineStart) {
lines.push(range.getNewRange(collectingLineStart, collectingLineEnd));
}
collectingLineStart = 0;
collectingLineEnd = 0;
++nextIndex; // skip the slash
commentRangeEnd = nextIndex;
state = State.Done;
}
else if (!LineExtractor._whitespaceCharacterRegExp.test(current)) {
collectingLineEnd = nextIndex;
}
break;
case State.AdvancingLine:
if (current === '*') {
if (next === '/') {
collectingLineStart = 0;
collectingLineEnd = 0;
++nextIndex; // skip the slash
commentRangeEnd = nextIndex;
state = State.Done;
}
else {
// Discard the "*" at the start of a line
if (next === ' ') {
++nextIndex; // Discard the space after the star
}
collectingLineStart = nextIndex;
collectingLineEnd = nextIndex;
state = State.CollectingLine;
}
}
else if (current === '\n') {
// Blank line
lines.push(range.getNewRange(currentIndex, currentIndex));
collectingLineStart = nextIndex;
}
else if (!LineExtractor._whitespaceCharacterRegExp.test(current)) {
// If the star is missing, then start the line here
// Example: "/**\nL1*/"
// (collectingLineStart was the start of this line)
collectingLineEnd = nextIndex;
state = State.CollectingLine;
}
break;
}
}
/**
* Only fill in these if we successfully scanned a comment
*/
parserContext.commentRange = range.getNewRange(commentRangeStart, commentRangeEnd);
parserContext.lines = lines;
return true;
};
LineExtractor._whitespaceCharacterRegExp = /^\s$/;
return LineExtractor;
}());
exports.LineExtractor = LineExtractor;
//# sourceMappingURL=LineExtractor.js.map
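
A quick way to see what this extraction stage produces, without calling the internal LineExtractor directly, is to run the public TSDocParser and inspect the `lines` ranges on the returned ParserContext. A minimal sketch; the sample comment text and the expected output shown in the trailing comment are illustrative assumptions:

```ts
import { TSDocParser, ParserContext } from '@microsoft/tsdoc';

const parser: TSDocParser = new TSDocParser();
const context: ParserContext = parser.parseString(
  '/**\n' +
  ' * Adds two numbers.\n' +
  ' *\n' +
  ' * @param a - the first operand\n' +
  ' */\n'
);

// Each TextRange in context.lines is one content line, with the "* " prefix trimmed away
for (const line of context.lines) {
  console.log(JSON.stringify(line.toString()));
}
// Expected (roughly): "Adds two numbers.", "", "@param a - the first operand"
```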

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,118 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ParagraphSplitter = void 0;
var nodes_1 = require("../nodes");
/**
* The ParagraphSplitter is a secondary stage that runs after the NodeParser has constructed
* the DocComment. It splits DocParagraph nodes into multiple paragraphs by looking for
* paragraph delimiters. Following CommonMark conventions, paragraphs are delimited by
* one or more blank lines. (These lines end with SoftBreak nodes.) The blank lines are
* not discarded. Instead, they are attached to the preceding paragraph. If the DocParagraph
* starts with blank lines, they are preserved to avoid creating a paragraph containing only
* whitespace.
*/
var ParagraphSplitter = /** @class */ (function () {
function ParagraphSplitter() {
}
/**
* Split all paragraphs belonging to the provided subtree.
*/
ParagraphSplitter.splitParagraphs = function (node) {
if (node instanceof nodes_1.DocSection) {
ParagraphSplitter.splitParagraphsForSection(node);
// (We don't recurse here, since sections cannot contain subsections)
}
else {
for (var _i = 0, _a = node.getChildNodes(); _i < _a.length; _i++) {
var childNode = _a[_i];
ParagraphSplitter.splitParagraphs(childNode);
}
}
};
/**
* Split all paragraphs belonging to the provided DocSection.
*/
ParagraphSplitter.splitParagraphsForSection = function (docSection) {
var inputNodes = docSection.nodes;
var outputNodes = [];
for (var _i = 0, inputNodes_1 = inputNodes; _i < inputNodes_1.length; _i++) {
var oldNode = inputNodes_1[_i];
if (oldNode.kind === nodes_1.DocNodeKind.Paragraph) {
ParagraphSplitter._splitParagraph(oldNode, outputNodes);
}
else {
outputNodes.push(oldNode);
}
}
// Replace the inputNodes with the outputNodes
docSection.clearNodes();
docSection.appendNodes(outputNodes);
};
ParagraphSplitter._splitParagraph = function (oldParagraph, outputNodes) {
var inputParagraphNodes = oldParagraph.nodes;
var currentParagraph = new nodes_1.DocParagraph({ configuration: oldParagraph.configuration });
outputNodes.push(currentParagraph);
var state = 0 /* Start */;
var currentIndex = 0;
while (currentIndex < inputParagraphNodes.length) {
// Scan forwards to the end of the line
var isBlankLine = true;
var lineEndIndex = currentIndex; // non-inclusive
do {
var node = inputParagraphNodes[lineEndIndex++];
if (node.kind === nodes_1.DocNodeKind.SoftBreak) {
break;
}
if (isBlankLine) {
if (!this._isWhitespace(node)) {
isBlankLine = false;
}
}
} while (lineEndIndex < inputParagraphNodes.length);
// At this point, the line and SoftBreak will be in inputParagraphNodes.slice(currentIndex, lineEndIndex)
switch (state) {
case 0 /* Start */:
// We're skipping any blank lines that start the first paragraph
if (!isBlankLine) {
state = 1 /* AwaitingTrailer */;
}
break;
case 1 /* AwaitingTrailer */:
// We already saw some content, so now we're looking for a blank line that starts the trailer
// at the end of this paragraph
if (isBlankLine) {
state = 2 /* ReadingTrailer */;
}
break;
case 2 /* ReadingTrailer */:
// We already found the trailer, so now we're looking for a non-blank line that will
// begin a new paragraph
if (!isBlankLine) {
// Start a new paragraph
currentParagraph = new nodes_1.DocParagraph({ configuration: oldParagraph.configuration });
outputNodes.push(currentParagraph);
state = 1 /* AwaitingTrailer */;
}
break;
}
// Append the line onto the current paragraph
for (var i = currentIndex; i < lineEndIndex; ++i) {
currentParagraph.appendNode(inputParagraphNodes[i]);
}
currentIndex = lineEndIndex;
}
};
ParagraphSplitter._isWhitespace = function (node) {
switch (node.kind) {
case nodes_1.DocNodeKind.PlainText:
var docPlainText = node;
return ParagraphSplitter._whitespaceRegExp.test(docPlainText.text);
default:
return false;
}
};
ParagraphSplitter._whitespaceRegExp = /^\s*$/;
return ParagraphSplitter;
}());
exports.ParagraphSplitter = ParagraphSplitter;
//# sourceMappingURL=ParagraphSplitter.js.map
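
The effect of this splitting stage is visible through the public API: a summary containing a blank line should end up as two DocParagraph nodes. A minimal sketch; the sample comment and the expected count are assumptions for illustration:

```ts
import { TSDocParser, DocNodeKind } from '@microsoft/tsdoc';

const context = new TSDocParser().parseString(
  '/**\n * First paragraph.\n *\n * Second paragraph.\n */\n'
);

// After ParagraphSplitter has run, the summary section should hold two DocParagraph children
const paragraphs = context.docComment.summarySection.nodes.filter(
  (node) => node.kind === DocNodeKind.Paragraph
);
console.log(paragraphs.length); // expected: 2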

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,34 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ParserContext = void 0;
var TextRange_1 = require("./TextRange");
var nodes_1 = require("../nodes");
var ParserMessageLog_1 = require("./ParserMessageLog");
/**
* An internal data structure that tracks all the state being built up by the various
* parser stages.
*/
var ParserContext = /** @class */ (function () {
function ParserContext(configuration, sourceRange) {
/**
* The text range starting from the opening `/**` and ending with
* the closing `*\/` delimiter.
*/
this.commentRange = TextRange_1.TextRange.empty;
/**
* The text ranges corresponding to the lines of content inside the comment.
*/
this.lines = [];
/**
* A complete list of all tokens that were extracted from the input lines.
*/
this.tokens = [];
this.configuration = configuration;
this.sourceRange = sourceRange;
this.docComment = new nodes_1.DocComment({ configuration: this.configuration });
this.log = new ParserMessageLog_1.ParserMessageLog();
}
return ParserContext;
}());
exports.ParserContext = ParserContext;
//# sourceMappingURL=ParserContext.js.map
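
A short sketch of how these fields look after a successful parse; the values in the comments are what the code above suggests, not guaranteed output:

```ts
import { TSDocParser, ParserContext } from '@microsoft/tsdoc';

const context: ParserContext = new TSDocParser().parseString('/** Hello `world` */');

console.log(context.commentRange.toString());        // the full "/** ... */" text
console.log(context.lines.map((l) => l.toString())); // [ 'Hello `world`' ]
console.log(context.tokens.length > 0);              // true - tokens extracted from the lines
console.log(context.docComment.kind);                // 'Comment' - the root node of the parsed output
console.log(context.log.messages.length);            // 0 for a well-formed comment
```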

View File

@ -0,0 +1 @@
{"version":3,"file":"ParserContext.js","sourceRoot":"","sources":["../../src/parser/ParserContext.ts"],"names":[],"mappings":";;;AAAA,yCAAwC;AAExC,kCAAsC;AAEtC,uDAAsD;AAEtD;;;GAGG;AACH;IAqCE,uBAAmB,aAAiC,EAAE,WAAsB;QA1B5E;;;WAGG;QACI,iBAAY,GAAc,qBAAS,CAAC,KAAK,CAAC;QAEjD;;WAEG;QACI,UAAK,GAAgB,EAAE,CAAC;QAE/B;;WAEG;QACI,WAAM,GAAY,EAAE,CAAC;QAa1B,IAAI,CAAC,aAAa,GAAG,aAAa,CAAC;QACnC,IAAI,CAAC,WAAW,GAAG,WAAW,CAAC;QAE/B,IAAI,CAAC,UAAU,GAAG,IAAI,kBAAU,CAAC,EAAE,aAAa,EAAE,IAAI,CAAC,aAAa,EAAE,CAAC,CAAC;QAExE,IAAI,CAAC,GAAG,GAAG,IAAI,mCAAgB,EAAE,CAAC;IACpC,CAAC;IACH,oBAAC;AAAD,CAAC,AA7CD,IA6CC;AA7CY,sCAAa","sourcesContent":["import { TextRange } from './TextRange';\r\nimport { Token } from './Token';\r\nimport { DocComment } from '../nodes';\r\nimport { TSDocConfiguration } from '../configuration/TSDocConfiguration';\r\nimport { ParserMessageLog } from './ParserMessageLog';\r\n\r\n/**\r\n * An internal data structure that tracks all the state being built up by the various\r\n * parser stages.\r\n */\r\nexport class ParserContext {\r\n /**\r\n * The configuration that was provided for the TSDocParser.\r\n */\r\n public readonly configuration: TSDocConfiguration;\r\n\r\n /**\r\n * The `sourceRange` indicates the start and end of the original input that was parsed.\r\n */\r\n public readonly sourceRange: TextRange;\r\n\r\n /**\r\n * The text range starting from the opening `/**` and ending with\r\n * the closing `*\\/` delimiter.\r\n */\r\n public commentRange: TextRange = TextRange.empty;\r\n\r\n /**\r\n * The text ranges corresponding to the lines of content inside the comment.\r\n */\r\n public lines: TextRange[] = [];\r\n\r\n /**\r\n * A complete list of all tokens that were extracted from the input lines.\r\n */\r\n public tokens: Token[] = [];\r\n\r\n /**\r\n * The parsed doc comment object. This is the primary output of the parser.\r\n */\r\n public readonly docComment: DocComment;\r\n\r\n /**\r\n * A queryable log that reports warnings and error messages that occurred during parsing.\r\n */\r\n public readonly log: ParserMessageLog;\r\n\r\n public constructor(configuration: TSDocConfiguration, sourceRange: TextRange) {\r\n this.configuration = configuration;\r\n this.sourceRange = sourceRange;\r\n\r\n this.docComment = new DocComment({ configuration: this.configuration });\r\n\r\n this.log = new ParserMessageLog();\r\n }\r\n}\r\n"]}

View File

@ -0,0 +1,58 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ParserMessage = void 0;
/**
* Represents an error or warning that occurred during parsing.
*/
var ParserMessage = /** @class */ (function () {
function ParserMessage(parameters) {
this.messageId = parameters.messageId;
this.unformattedText = parameters.messageText;
this.textRange = parameters.textRange;
this.tokenSequence = parameters.tokenSequence;
this.docNode = parameters.docNode;
this._text = undefined;
}
/**
* Generates a line/column prefix. Example with line=2 and column=5
* and message="An error occurred":
* ```
* "(2,5): An error occurred"
* ```
*/
ParserMessage._formatMessageText = function (message, range) {
if (!message) {
message = 'An unknown error occurred';
}
if (range.pos !== 0 || range.end !== 0) {
// NOTE: This is currently a potentially expensive operation, since TSDoc doesn't yet
// have a full newline analysis for the input buffer.
var location_1 = range.getLocation(range.pos);
if (location_1.line) {
return "(" + location_1.line + "," + location_1.column + "): " + message;
}
}
return message;
};
Object.defineProperty(ParserMessage.prototype, "text", {
/**
* The message text.
*/
get: function () {
if (this._text === undefined) {
// NOTE: This is currently a potentially expensive operation, since TSDoc doesn't yet
// have a full newline analysis for the input buffer.
this._text = ParserMessage._formatMessageText(this.unformattedText, this.textRange);
}
return this._text;
},
enumerable: false,
configurable: true
});
ParserMessage.prototype.toString = function () {
return this.text;
};
return ParserMessage;
}());
exports.ParserMessage = ParserMessage;
//# sourceMappingURL=ParserMessage.js.map
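
For example, a comment containing a suspicious `@` in the middle of a word should produce a log message whose `text` carries the `(line,column):` prefix described above. A sketch; the triggering input and the exact column are assumptions:

```ts
import { TSDocParser } from '@microsoft/tsdoc';

const context = new TSDocParser().parseString(
  '/**\n * Contact us at support@example.com\n */\n'
);

for (const message of context.log.messages) {
  console.log(message.messageId);       // e.g. "tsdoc-at-sign-in-word"
  console.log(message.text);            // e.g. "(2,N): The `@` character looks like part of a TSDoc tag; ..."
  console.log(message.unformattedText); // the same text without the "(line,column): " prefix
}
```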

View File

@ -0,0 +1 @@
{"version":3,"file":"ParserMessage.js","sourceRoot":"","sources":["../../src/parser/ParserMessage.ts"],"names":[],"mappings":";;;AAgBA;;GAEG;AACH;IAmBE,uBAAmB,UAAoC;QACrD,IAAI,CAAC,SAAS,GAAG,UAAU,CAAC,SAAS,CAAC;QACtC,IAAI,CAAC,eAAe,GAAG,UAAU,CAAC,WAAW,CAAC;QAC9C,IAAI,CAAC,SAAS,GAAG,UAAU,CAAC,SAAS,CAAC;QACtC,IAAI,CAAC,aAAa,GAAG,UAAU,CAAC,aAAa,CAAC;QAC9C,IAAI,CAAC,OAAO,GAAG,UAAU,CAAC,OAAO,CAAC;QAClC,IAAI,CAAC,KAAK,GAAG,SAAS,CAAC;IACzB,CAAC;IAED;;;;;;OAMG;IACY,gCAAkB,GAAjC,UAAkC,OAAe,EAAE,KAAgB;QACjE,IAAI,CAAC,OAAO,EAAE;YACZ,OAAO,GAAG,2BAA2B,CAAC;SACvC;QAED,IAAI,KAAK,CAAC,GAAG,KAAK,CAAC,IAAI,KAAK,CAAC,GAAG,KAAK,CAAC,EAAE;YACtC,wFAAwF;YACxF,qDAAqD;YACrD,IAAM,UAAQ,GAAkB,KAAK,CAAC,WAAW,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;YAC7D,IAAI,UAAQ,CAAC,IAAI,EAAE;gBACjB,OAAO,MAAI,UAAQ,CAAC,IAAI,SAAI,UAAQ,CAAC,MAAM,QAAK,GAAG,OAAO,CAAC;aAC5D;SACF;QACD,OAAO,OAAO,CAAC;IACjB,CAAC;IAKD,sBAAW,+BAAI;QAHf;;WAEG;aACH;YACE,IAAI,IAAI,CAAC,KAAK,KAAK,SAAS,EAAE;gBAC5B,wFAAwF;gBACxF,qDAAqD;gBACrD,IAAI,CAAC,KAAK,GAAG,aAAa,CAAC,kBAAkB,CAAC,IAAI,CAAC,eAAe,EAAE,IAAI,CAAC,SAAS,CAAC,CAAC;aACrF;YACD,OAAO,IAAI,CAAC,KAAK,CAAC;QACpB,CAAC;;;OAAA;IAEM,gCAAQ,GAAf;QACE,OAAO,IAAI,CAAC,IAAI,CAAC;IACnB,CAAC;IACH,oBAAC;AAAD,CAAC,AAlED,IAkEC;AAlEY,sCAAa","sourcesContent":["import { TextRange, ITextLocation } from './TextRange';\r\nimport { TokenSequence } from './TokenSequence';\r\nimport { DocNode } from '../nodes/DocNode';\r\nimport { TSDocMessageId } from './TSDocMessageId';\r\n\r\n/**\r\n * Constructor parameters for {@link ParserMessage}.\r\n */\r\nexport interface IParserMessageParameters {\r\n messageId: TSDocMessageId;\r\n messageText: string;\r\n textRange: TextRange;\r\n tokenSequence?: TokenSequence;\r\n docNode?: DocNode;\r\n}\r\n\r\n/**\r\n * Represents an error or warning that occurred during parsing.\r\n */\r\nexport class ParserMessage {\r\n /**\r\n * A string that uniquely identifies the messages reported by the TSDoc parser.\r\n */\r\n public readonly messageId: TSDocMessageId;\r\n\r\n /**\r\n * The message text without the default prefix that shows line/column information.\r\n */\r\n public readonly unformattedText: string;\r\n\r\n public readonly textRange: TextRange;\r\n\r\n public readonly tokenSequence: TokenSequence | undefined;\r\n\r\n public readonly docNode: DocNode | undefined;\r\n\r\n private _text: string | undefined;\r\n\r\n public constructor(parameters: IParserMessageParameters) {\r\n this.messageId = parameters.messageId;\r\n this.unformattedText = parameters.messageText;\r\n this.textRange = parameters.textRange;\r\n this.tokenSequence = parameters.tokenSequence;\r\n this.docNode = parameters.docNode;\r\n this._text = undefined;\r\n }\r\n\r\n /**\r\n * Generates a line/column prefix. 
Example with line=2 and column=5\r\n * and message=\"An error occurred\":\r\n * ```\r\n * \"(2,5): An error occurred\"\r\n * ```\r\n */\r\n private static _formatMessageText(message: string, range: TextRange): string {\r\n if (!message) {\r\n message = 'An unknown error occurred';\r\n }\r\n\r\n if (range.pos !== 0 || range.end !== 0) {\r\n // NOTE: This currently a potentially expensive operation, since TSDoc currently doesn't\r\n // have a full newline analysis for the input buffer.\r\n const location: ITextLocation = range.getLocation(range.pos);\r\n if (location.line) {\r\n return `(${location.line},${location.column}): ` + message;\r\n }\r\n }\r\n return message;\r\n }\r\n\r\n /**\r\n * The message text.\r\n */\r\n public get text(): string {\r\n if (this._text === undefined) {\r\n // NOTE: This currently a potentially expensive operation, since TSDoc currently doesn't\r\n // have a full newline analysis for the input buffer.\r\n this._text = ParserMessage._formatMessageText(this.unformattedText, this.textRange);\r\n }\r\n return this._text;\r\n }\r\n\r\n public toString(): string {\r\n return this.text;\r\n }\r\n}\r\n"]}

View File

@ -0,0 +1,75 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ParserMessageLog = void 0;
var ParserMessage_1 = require("./ParserMessage");
/**
* Used to report errors and warnings that occurred during parsing.
*/
var ParserMessageLog = /** @class */ (function () {
function ParserMessageLog() {
this._messages = [];
}
Object.defineProperty(ParserMessageLog.prototype, "messages", {
/**
* The unfiltered list of all messages.
*/
get: function () {
return this._messages;
},
enumerable: false,
configurable: true
});
/**
* Append a message to the log.
*/
ParserMessageLog.prototype.addMessage = function (parserMessage) {
this._messages.push(parserMessage);
};
/**
* Append a message associated with a TextRange.
*/
ParserMessageLog.prototype.addMessageForTextRange = function (messageId, messageText, textRange) {
this.addMessage(new ParserMessage_1.ParserMessage({
messageId: messageId,
messageText: messageText,
textRange: textRange
}));
};
/**
* Append a message associated with a TokenSequence.
*/
ParserMessageLog.prototype.addMessageForTokenSequence = function (messageId, messageText, tokenSequence, docNode) {
this.addMessage(new ParserMessage_1.ParserMessage({
messageId: messageId,
messageText: messageText,
textRange: tokenSequence.getContainingTextRange(),
tokenSequence: tokenSequence,
docNode: docNode
}));
};
/**
* Append a message associated with a TokenSequence.
*/
ParserMessageLog.prototype.addMessageForDocErrorText = function (docErrorText) {
var tokenSequence;
if (docErrorText.textExcerpt) {
// If there is an excerpt directly associated with the DocErrorText, highlight that:
tokenSequence = docErrorText.textExcerpt;
}
else {
// Otherwise we can use the errorLocation, but typically that is meant to give additional
// details, not to indicate the primary location of the problem.
tokenSequence = docErrorText.errorLocation;
}
this.addMessage(new ParserMessage_1.ParserMessage({
messageId: docErrorText.messageId,
messageText: docErrorText.errorMessage,
textRange: tokenSequence.getContainingTextRange(),
tokenSequence: tokenSequence,
docNode: docErrorText
}));
};
return ParserMessageLog;
}());
exports.ParserMessageLog = ParserMessageLog;
//# sourceMappingURL=ParserMessageLog.js.map
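
Callers normally consume this log through `ParserContext.log`. A minimal error-reporting sketch; the malformed sample comment is invented, and which messages it triggers is left to the parser:

```ts
import { TSDocParser, ParserContext } from '@microsoft/tsdoc';

const context: ParserContext = new TSDocParser().parseString('/** An unterminated {@link */');

if (context.log.messages.length > 0) {
  for (const message of context.log.messages) {
    // messageId is a stable identifier; unformattedText omits the "(line,column): " prefix
    console.error(`${message.messageId}: ${message.unformattedText}`);
  }
} else {
  console.log('Parsed without warnings');
}
```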

View File

@ -0,0 +1 @@
{"version":3,"file":"ParserMessageLog.js","sourceRoot":"","sources":["../../src/parser/ParserMessageLog.ts"],"names":[],"mappings":";;;AAAA,iDAAgD;AAOhD;;GAEG;AACH;IAAA;QACU,cAAS,GAAoB,EAAE,CAAC;IA0E1C,CAAC;IArEC,sBAAW,sCAAQ;QAHnB;;WAEG;aACH;YACE,OAAO,IAAI,CAAC,SAAS,CAAC;QACxB,CAAC;;;OAAA;IAED;;OAEG;IACI,qCAAU,GAAjB,UAAkB,aAA4B;QAC5C,IAAI,CAAC,SAAS,CAAC,IAAI,CAAC,aAAa,CAAC,CAAC;IACrC,CAAC;IAED;;OAEG;IACI,iDAAsB,GAA7B,UAA8B,SAAyB,EAAE,WAAmB,EAAE,SAAoB;QAChG,IAAI,CAAC,UAAU,CACb,IAAI,6BAAa,CAAC;YAChB,SAAS,WAAA;YACT,WAAW,aAAA;YACX,SAAS,WAAA;SACV,CAAC,CACH,CAAC;IACJ,CAAC;IAED;;OAEG;IACI,qDAA0B,GAAjC,UACE,SAAyB,EACzB,WAAmB,EACnB,aAA4B,EAC5B,OAAiB;QAEjB,IAAI,CAAC,UAAU,CACb,IAAI,6BAAa,CAAC;YAChB,SAAS,WAAA;YACT,WAAW,aAAA;YACX,SAAS,EAAE,aAAa,CAAC,sBAAsB,EAAE;YACjD,aAAa,eAAA;YACb,OAAO,SAAA;SACR,CAAC,CACH,CAAC;IACJ,CAAC;IAED;;OAEG;IACI,oDAAyB,GAAhC,UAAiC,YAA0B;QACzD,IAAI,aAA4B,CAAC;QAEjC,IAAI,YAAY,CAAC,WAAW,EAAE;YAC5B,oFAAoF;YACpF,aAAa,GAAG,YAAY,CAAC,WAAW,CAAC;SAC1C;aAAM;YACL,yFAAyF;YACzF,gEAAgE;YAChE,aAAa,GAAG,YAAY,CAAC,aAAa,CAAC;SAC5C;QAED,IAAI,CAAC,UAAU,CACb,IAAI,6BAAa,CAAC;YAChB,SAAS,EAAE,YAAY,CAAC,SAAS;YACjC,WAAW,EAAE,YAAY,CAAC,YAAY;YACtC,SAAS,EAAE,aAAa,CAAC,sBAAsB,EAAE;YACjD,aAAa,EAAE,aAAa;YAC5B,OAAO,EAAE,YAAY;SACtB,CAAC,CACH,CAAC;IACJ,CAAC;IACH,uBAAC;AAAD,CAAC,AA3ED,IA2EC;AA3EY,4CAAgB","sourcesContent":["import { ParserMessage } from './ParserMessage';\r\nimport { TextRange } from './TextRange';\r\nimport { TokenSequence } from './TokenSequence';\r\nimport { DocNode } from '../nodes/DocNode';\r\nimport { DocErrorText } from '../nodes/DocErrorText';\r\nimport { TSDocMessageId } from './TSDocMessageId';\r\n\r\n/**\r\n * Used to report errors and warnings that occurred during parsing.\r\n */\r\nexport class ParserMessageLog {\r\n private _messages: ParserMessage[] = [];\r\n\r\n /**\r\n * The unfiltered list of all messages.\r\n */\r\n public get messages(): ReadonlyArray<ParserMessage> {\r\n return this._messages;\r\n }\r\n\r\n /**\r\n * Append a message to the log.\r\n */\r\n public addMessage(parserMessage: ParserMessage): void {\r\n this._messages.push(parserMessage);\r\n }\r\n\r\n /**\r\n * Append a message associated with a TextRange.\r\n */\r\n public addMessageForTextRange(messageId: TSDocMessageId, messageText: string, textRange: TextRange): void {\r\n this.addMessage(\r\n new ParserMessage({\r\n messageId,\r\n messageText,\r\n textRange\r\n })\r\n );\r\n }\r\n\r\n /**\r\n * Append a message associated with a TokenSequence.\r\n */\r\n public addMessageForTokenSequence(\r\n messageId: TSDocMessageId,\r\n messageText: string,\r\n tokenSequence: TokenSequence,\r\n docNode?: DocNode\r\n ): void {\r\n this.addMessage(\r\n new ParserMessage({\r\n messageId,\r\n messageText,\r\n textRange: tokenSequence.getContainingTextRange(),\r\n tokenSequence,\r\n docNode\r\n })\r\n );\r\n }\r\n\r\n /**\r\n * Append a message associated with a TokenSequence.\r\n */\r\n public addMessageForDocErrorText(docErrorText: DocErrorText): void {\r\n let tokenSequence: TokenSequence;\r\n\r\n if (docErrorText.textExcerpt) {\r\n // If there is an excerpt directly associated with the DocErrorText, highlight that:\r\n tokenSequence = docErrorText.textExcerpt;\r\n } else {\r\n // Otherwise we can use the errorLocation, but typically that is meant to give additional\r\n // details, not to indicate the primary location of the problem.\r\n tokenSequence = docErrorText.errorLocation;\r\n }\r\n\r\n this.addMessage(\r\n new ParserMessage({\r\n messageId: docErrorText.messageId,\r\n messageText: docErrorText.errorMessage,\r\n 
textRange: tokenSequence.getContainingTextRange(),\r\n tokenSequence: tokenSequence,\r\n docNode: docErrorText\r\n })\r\n );\r\n }\r\n}\r\n"]}

View File

@ -0,0 +1,180 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.StringChecks = void 0;
/**
* Helpers for validating various text string formats.
*/
var StringChecks = /** @class */ (function () {
function StringChecks() {
}
/**
* Tests whether the input string is a valid TSDoc tag name; if not, returns an error message.
* TSDoc tag names start with an at-sign ("@") followed by ASCII letters using
* "camelCase" capitalization.
*/
StringChecks.explainIfInvalidTSDocTagName = function (tagName) {
if (tagName[0] !== '@') {
return 'A TSDoc tag name must start with an "@" symbol';
}
if (!StringChecks._tsdocTagNameRegExp.test(tagName)) {
return 'A TSDoc tag name must start with a letter and contain only letters and numbers';
}
return undefined;
};
/**
* Throws an exception if the input string is not a valid TSDoc tag name.
* TSDoc tag names start with an at-sign ("@") followed by ASCII letters using
* "camelCase" capitalization.
*/
StringChecks.validateTSDocTagName = function (tagName) {
var explanation = StringChecks.explainIfInvalidTSDocTagName(tagName);
if (explanation) {
throw new Error(explanation);
}
};
/**
* Tests whether the input string is a URL form supported inside an "@link" tag; if not,
* returns an error message.
*/
StringChecks.explainIfInvalidLinkUrl = function (url) {
if (url.length === 0) {
return 'The URL cannot be empty';
}
if (!StringChecks._urlSchemeRegExp.test(url)) {
return ('An @link URL must begin with a scheme comprised only of letters and numbers followed by "://".' +
' (For general URLs, use an HTML "<a>" tag instead.)');
}
if (!StringChecks._urlSchemeAfterRegExp.test(url)) {
return 'An @link URL must have at least one character after "://"';
}
return undefined;
};
/**
* Tests whether the input string is a valid HTML element or attribute name.
*/
StringChecks.explainIfInvalidHtmlName = function (htmlName) {
if (!StringChecks._htmlNameRegExp.test(htmlName)) {
return 'An HTML name must be an ASCII letter followed by zero or more letters, digits, or hyphens';
}
return undefined;
};
/**
* Throws an exception if the input string is not a valid HTML element or attribute name.
*/
StringChecks.validateHtmlName = function (htmlName) {
var explanation = StringChecks.explainIfInvalidHtmlName(htmlName);
if (explanation) {
throw new Error(explanation);
}
};
/**
* Tests whether the input string is a valid NPM package name.
*/
StringChecks.explainIfInvalidPackageName = function (packageName) {
if (packageName.length === 0) {
return 'The package name cannot be an empty string';
}
if (!StringChecks._validPackageNameRegExp.test(packageName)) {
return "The package name " + JSON.stringify(packageName) + " is not a valid package name";
}
return undefined;
};
/**
* Tests whether the input string is a valid declaration reference import path.
*/
StringChecks.explainIfInvalidImportPath = function (importPath, prefixedByPackageName) {
if (importPath.length > 0) {
if (importPath.indexOf('//') >= 0) {
return 'An import path must not contain "//"';
}
if (importPath[importPath.length - 1] === '/') {
return 'An import path must not end with "/"';
}
if (!prefixedByPackageName) {
if (importPath[0] === '/') {
return 'An import path must not start with "/" unless prefixed by a package name';
}
}
}
return undefined;
};
/**
* Returns true if the input string is a TSDoc system selector.
*/
StringChecks.isSystemSelector = function (selector) {
return StringChecks._systemSelectors.has(selector);
};
/**
* Tests whether the input string is a valid ECMAScript identifier.
* A precise check is extremely complicated and highly dependent on the standard version
* and how faithfully the interpreter implements it, so here we use a conservative heuristic.
*/
StringChecks.explainIfInvalidUnquotedIdentifier = function (identifier) {
if (identifier.length === 0) {
return 'The identifier cannot be an empty string';
}
if (StringChecks._identifierBadCharRegExp.test(identifier)) {
return 'The identifier cannot contain non-word characters';
}
if (StringChecks._identifierNumberStartRegExp.test(identifier)) {
return 'The identifier must not start with a number';
}
return undefined;
};
/**
* Tests whether the input string can be used without quotes as a member identifier in a declaration reference.
* If not, it should be enclosed in quotes.
*/
StringChecks.explainIfInvalidUnquotedMemberIdentifier = function (identifier) {
var explanation = StringChecks.explainIfInvalidUnquotedIdentifier(identifier);
if (explanation !== undefined) {
return explanation;
}
if (StringChecks.isSystemSelector(identifier)) {
// We do this to avoid confusion about the declaration reference syntax rules.
// For example if someone were to see "MyClass.(static:instance)" it would be unclear which
// side of the colon holds the selector.
return "The identifier \"" + identifier + "\" must be quoted because it is a TSDoc system selector name";
}
return undefined;
};
StringChecks._tsdocTagNameRegExp = /^@[a-z][a-z0-9]*$/i;
StringChecks._urlSchemeRegExp = /^[a-z][a-z0-9]*\:\/\//i;
StringChecks._urlSchemeAfterRegExp = /^[a-z][a-z0-9]*\:\/\/./i;
// HTML element definitions:
// https://spec.commonmark.org/0.29/#tag-name
// https://www.w3.org/TR/html5/syntax.html#tag-name
// https://html.spec.whatwg.org/multipage/custom-elements.html#valid-custom-element-name
//
// We use the CommonMark spec:
// "A tag name consists of an ASCII letter followed by zero or more ASCII letters, digits, or hyphens (-)."
StringChecks._htmlNameRegExp = /^[a-z]+[a-z0-9\-]*$/i;
// Note: In addition to letters, numbers, underscores, and dollar signs, modern ECMAScript
// also allows Unicode categories such as letters, combining marks, digits, and connector punctuation.
// These are mostly supported in all environments except IE11, so if someone wants it, we would accept
// a PR to allow them (although the test surface might be somewhat large).
StringChecks._identifierBadCharRegExp = /[^a-z0-9_$]/i;
// Identifiers must not start with a number.
StringChecks._identifierNumberStartRegExp = /^[0-9]/;
// For detailed notes about NPM package name syntax, see:
// tslint:disable-next-line:max-line-length
// https://github.com/Microsoft/web-build-tools/blob/a417ca25c63aca31dba43a34d39cc9cd529b9c78/libraries/node-core-library/src/PackageName.ts
StringChecks._validPackageNameRegExp = /^(?:@[a-z0-9\-_\.]+\/)?[a-z0-9\-_\.]+$/i;
StringChecks._systemSelectors = new Set([
// For classes:
'instance',
'static',
'constructor',
// For merged declarations:
'class',
'enum',
'function',
'interface',
'namespace',
'type',
'variable'
]);
return StringChecks;
}());
exports.StringChecks = StringChecks;
//# sourceMappingURL=StringChecks.js.map
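
StringChecks is an internal helper rather than part of the package's public entry point, so the relative import below is an assumption that only holds for code living inside the library's own source tree. The expected results follow from the checks above:

```ts
// Assumption: this file sits next to StringChecks inside the library source tree.
import { StringChecks } from './StringChecks';

console.log(StringChecks.explainIfInvalidTSDocTagName('@myTag'));     // undefined (valid)
console.log(StringChecks.explainIfInvalidTSDocTagName('myTag'));      // explains the missing "@"
console.log(StringChecks.explainIfInvalidHtmlName('custom-element')); // undefined (valid)
console.log(StringChecks.explainIfInvalidPackageName('@scope/pkg'));  // undefined (valid)
console.log(StringChecks.isSystemSelector('static'));                 // true
```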

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,426 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.allTsdocMessageIdsSet = exports.allTsdocMessageIds = exports.TSDocMessageId = void 0;
/**
* Unique identifiers for messages reported by the TSDoc parser.
*
* @remarks
*
* These strings are possible values for the {@link ParserMessage.messageId} property.
* These identifiers can be used to suppress or configure the reporting of individual messages.
* They are also useful when searching for help about a particular error.
*
* @public
*/
var TSDocMessageId;
(function (TSDocMessageId) {
/**
* File not found
* @remarks
* Reported by the `@microsoft/tsdoc-config` package when it failed to find a `tsdoc.json` file.
*/
TSDocMessageId["ConfigFileNotFound"] = "tsdoc-config-file-not-found";
/**
* Error parsing JSON input: ___
* @remarks
* Reported by the `@microsoft/tsdoc-config` package when the `tsdoc.json` file has invalid JSON syntax.
*/
TSDocMessageId["ConfigInvalidJson"] = "tsdoc-config-invalid-json";
/**
* Unsupported JSON "$schema" value
* @remarks
* Reported by the `@microsoft/tsdoc-config` package when the file format is not supported.
*/
TSDocMessageId["ConfigFileUnsupportedSchema"] = "tsdoc-config-unsupported-schema";
/**
* Error loading config file: ___
* @remarks
* Reported by the `@microsoft/tsdoc-config` package when the config file doesn't conform to its schema.
*/
TSDocMessageId["ConfigFileSchemaError"] = "tsdoc-config-schema-error";
/**
* Circular reference encountered for "extends" field of "___"
* @remarks
* Reported by the `@microsoft/tsdoc-config` package when the "extends" field creates a chain of references
* that causes a file to indirectly extend itself.
*/
TSDocMessageId["ConfigFileCyclicExtends"] = "tsdoc-config-cyclic-extends";
/**
* Unable to resolve "extends" reference to "___"
* @remarks
* Reported by the `@microsoft/tsdoc-config` package when module resolution fails for the "extends" field.
*/
TSDocMessageId["ConfigFileUnresolvedExtends"] = "tsdoc-config-unresolved-extends";
/**
* The "supportForTags" field refers to an undefined tag "___".
* @remarks
* Reported by the `@microsoft/tsdoc-config` package when loading the tsdoc.json config file.
*/
TSDocMessageId["ConfigFileUndefinedTag"] = "tsdoc-config-undefined-tag";
/**
* The "tagDefinitions" field specifies more than one tag with the name "___".
* @remarks
* Reported by the `@microsoft/tsdoc-config` package when loading the tsdoc.json config file.
*/
TSDocMessageId["ConfigFileDuplicateTagName"] = "tsdoc-config-duplicate-tag-name";
/**
* A TSDoc tag name must start with a letter and contain only letters and numbers.
* @remarks
* Reported by the `@microsoft/tsdoc-config` package when loading the tsdoc.json config file.
*/
TSDocMessageId["ConfigFileInvalidTagName"] = "tsdoc-config-invalid-tag-name";
/**
* Expecting a `/**` comment.
* Unexpected end of input.
*/
TSDocMessageId["CommentNotFound"] = "tsdoc-comment-not-found";
/**
* Expecting a leading `/**`
*/
TSDocMessageId["CommentOpeningDelimiterSyntax"] = "tsdoc-comment-missing-opening-delimiter";
/**
* Unexpected end of input.
*/
TSDocMessageId["CommentMissingClosingDelimiter"] = "tsdoc-comment-missing-closing-delimiter";
/**
* A doc comment cannot have more than one `@inheritDoc` tag
*/
TSDocMessageId["ExtraInheritDocTag"] = "tsdoc-extra-inheritdoc-tag";
/**
* The `}` character should be escaped using a backslash to avoid confusion with a TSDoc inline tag.
*/
TSDocMessageId["EscapeRightBrace"] = "tsdoc-escape-right-brace";
/**
* The `>` character should be escaped using a backslash to avoid confusion with an HTML tag.
*/
TSDocMessageId["EscapeGreaterThan"] = "tsdoc-escape-greater-than";
/**
* The ___ block must include a deprecation message, e.g. describing the recommended alternative.
*/
TSDocMessageId["MissingDeprecationMessage"] = "tsdoc-missing-deprecation-message";
/**
* A ___ block must not be used, because that content is provided by the `@inheritDoc` tag.
*/
TSDocMessageId["InheritDocIncompatibleTag"] = "tsdoc-inheritdoc-incompatible-tag";
/**
* The summary section must not have any content, because that content is provided by the `@inheritDoc` tag.
*/
TSDocMessageId["InheritDocIncompatibleSummary"] = "tsdoc-inheritdoc-incompatible-summary";
/**
* The TSDoc tag ___ is an inline tag; it must be enclosed in `{ }` braces.
*/
TSDocMessageId["InlineTagMissingBraces"] = "tsdoc-inline-tag-missing-braces";
/**
* The TSDoc tag ___ is not an inline tag; it must not be enclosed in `{ }` braces.
*/
TSDocMessageId["TagShouldNotHaveBraces"] = "tsdoc-tag-should-not-have-braces";
/**
* The TSDoc tag ___ is not supported by this tool.
*/
TSDocMessageId["UnsupportedTag"] = "tsdoc-unsupported-tag";
/**
* The TSDoc tag ___ is not defined in this configuration.
*/
TSDocMessageId["UndefinedTag"] = "tsdoc-undefined-tag";
/**
* The `@param` block should not include a JSDoc-style `{type}`.
*/
TSDocMessageId["ParamTagWithInvalidType"] = "tsdoc-param-tag-with-invalid-type";
/**
* The `@param` block should not include a JSDoc-style optional name; it must not be enclosed in `[ ]` brackets.
*/
TSDocMessageId["ParamTagWithInvalidOptionalName"] = "tsdoc-param-tag-with-invalid-optional-name";
/**
* The `@param` block should be followed by a parameter name.
*/
TSDocMessageId["ParamTagWithInvalidName"] = "tsdoc-param-tag-with-invalid-name";
/**
* The `@param` block should be followed by a parameter name and then a hyphen.
*/
TSDocMessageId["ParamTagMissingHyphen"] = "tsdoc-param-tag-missing-hyphen";
/**
* A backslash must precede another character that is being escaped. OR
* A backslash can only be used to escape a punctuation character.
*/
TSDocMessageId["UnnecessaryBackslash"] = "tsdoc-unnecessary-backslash";
/**
* Expecting a TSDoc tag starting with `@`. OR
* Expecting a TSDoc tag starting with `{`.
*/
TSDocMessageId["MissingTag"] = "tsdoc-missing-tag";
/**
* The `@` character looks like part of a TSDoc tag; use a backslash to escape it.
*/
TSDocMessageId["AtSignInWord"] = "tsdoc-at-sign-in-word";
/**
* Expecting a TSDoc tag name after `@`; if it is not a tag, use a backslash to escape this character.
*/
TSDocMessageId["AtSignWithoutTagName"] = "tsdoc-at-sign-without-tag-name";
/**
* Expecting a TSDoc tag starting with `{@`. OR
* Expecting a TSDoc inline tag name after the `{@` characters.
*/
TSDocMessageId["MalformedInlineTag"] = "tsdoc-malformed-inline-tag";
/**
* The token ___ looks like a TSDoc tag but contains an invalid character ___; if it is not a tag,
* use a backslash to escape the `@`.
*/
TSDocMessageId["CharactersAfterBlockTag"] = "tsdoc-characters-after-block-tag";
/**
* A TSDoc tag name must start with a letter and contain only letters and numbers.
*/
TSDocMessageId["MalformedTagName"] = "tsdoc-malformed-tag-name";
/**
* The character ___ cannot appear after the TSDoc tag name; expecting a space.
*/
TSDocMessageId["CharactersAfterInlineTag"] = "tsdoc-characters-after-inline-tag";
/**
* The TSDoc inline tag name is missing its closing `}`.
*/
TSDocMessageId["InlineTagMissingRightBrace"] = "tsdoc-inline-tag-missing-right-brace";
/**
* The `{` character must be escaped with a backslash when used inside a TSDoc inline tag.
*/
TSDocMessageId["InlineTagUnescapedBrace"] = "tsdoc-inline-tag-unescaped-brace";
/**
* Unexpected character after declaration reference.
*/
TSDocMessageId["InheritDocTagSyntax"] = "tsdoc-inheritdoc-tag-syntax";
/**
* The `@link` tag content is missing.
*/
TSDocMessageId["LinkTagEmpty"] = "tsdoc-link-tag-empty";
/**
* The ___ character may not be used in the link text without escaping it.
*/
TSDocMessageId["LinkTagUnescapedText"] = "tsdoc-link-tag-unescaped-text";
/**
* Unexpected character after link destination.
*/
TSDocMessageId["LinkTagDestinationSyntax"] = "tsdoc-link-tag-destination-syntax";
/**
* The URL cannot be empty. OR
* An `@link` URL must begin with a scheme comprised only of letters and numbers followed by `://`. OR
* An `@link` URL must have at least one character after `://`.
*/
TSDocMessageId["LinkTagInvalidUrl"] = "tsdoc-link-tag-invalid-url";
/**
* The declaration reference appears to contain a package name or import path, but it is missing the `#` delimiter.
*/
TSDocMessageId["ReferenceMissingHash"] = "tsdoc-reference-missing-hash";
/**
* The hash character must be preceded by a package name or import path.
*/
TSDocMessageId["ReferenceHashSyntax"] = "tsdoc-reference-hash-syntax";
/**
* The package name cannot be an empty string. OR
* The package name ___ is not a valid package name.
*/
TSDocMessageId["ReferenceMalformedPackageName"] = "tsdoc-reference-malformed-package-name";
/**
* An import path must not contain `//`. OR
* An import path must not end with `/`. OR
* An import path must not start with `/` unless prefixed by a package name.
*/
TSDocMessageId["ReferenceMalformedImportPath"] = "tsdoc-reference-malformed-import-path";
/**
* Expecting a declaration reference.
*/
TSDocMessageId["MissingReference"] = "tsdoc-missing-reference";
/**
* Expecting a period before the next component of a declaration reference
*/
TSDocMessageId["ReferenceMissingDot"] = "tsdoc-reference-missing-dot";
/**
* Syntax error in declaration reference: the member selector must be enclosed in parentheses.
*/
TSDocMessageId["ReferenceSelectorMissingParens"] = "tsdoc-reference-selector-missing-parens";
/**
* Expecting a colon after the identifier because the expression is in parentheses.
*/
TSDocMessageId["ReferenceMissingColon"] = "tsdoc-reference-missing-colon";
/**
* Expecting a matching right parenthesis.
*/
TSDocMessageId["ReferenceMissingRightParen"] = "tsdoc-reference-missing-right-paren";
/**
* Missing declaration reference in symbol reference
*/
TSDocMessageId["ReferenceSymbolSyntax"] = "tsdoc-reference-symbol-syntax";
/**
* Missing closing square bracket for symbol reference
*/
TSDocMessageId["ReferenceMissingRightBracket"] = "tsdoc-reference-missing-right-bracket";
/**
* Unexpected end of input inside quoted member identifier.
*/
TSDocMessageId["ReferenceMissingQuote"] = "tsdoc-reference-missing-quote";
/**
* The quoted identifier cannot be empty.
*/
TSDocMessageId["ReferenceEmptyIdentifier"] = "tsdoc-reference-empty-identifier";
/**
* Syntax error in declaration reference: expecting a member identifier.
*/
TSDocMessageId["ReferenceMissingIdentifier"] = "tsdoc-reference-missing-identifier";
/**
* The identifier cannot be an empty string. OR
* The identifier cannot contain non-word characters. OR
* The identifier must not start with a number. OR
* The identifier ___ must be quoted because it is a TSDoc system selector name.
*/
TSDocMessageId["ReferenceUnquotedIdentifier"] = "tsdoc-reference-unquoted-identifier";
/**
* Expecting a selector label after the colon.
*/
TSDocMessageId["ReferenceMissingLabel"] = "tsdoc-reference-missing-label";
/**
* The selector cannot be an empty string. OR
* If the selector begins with a number, it must be a positive integer value. OR
* A label selector must be comprised of upper case letters, numbers, and underscores
* and must not start with a number. OR
* The selector ___ is not a recognized TSDoc system selector name.
*/
TSDocMessageId["ReferenceSelectorSyntax"] = "tsdoc-reference-selector-syntax";
/**
* Expecting an attribute or `>` or `/>`.
*/
TSDocMessageId["HtmlTagMissingGreaterThan"] = "tsdoc-html-tag-missing-greater-than";
/**
* Expecting `=` after HTML attribute name.
*/
TSDocMessageId["HtmlTagMissingEquals"] = "tsdoc-html-tag-missing-equals";
/**
* Expecting an HTML string starting with a single-quote or double-quote character.
*/
TSDocMessageId["HtmlTagMissingString"] = "tsdoc-html-tag-missing-string";
/**
* The HTML string is missing its closing quote.
*/
TSDocMessageId["HtmlStringMissingQuote"] = "tsdoc-html-string-missing-quote";
/**
* The next character after a closing quote must be spacing or punctuation.
*/
TSDocMessageId["TextAfterHtmlString"] = "tsdoc-text-after-html-string";
/**
* Expecting an HTML tag starting with `</`.
*/
TSDocMessageId["MissingHtmlEndTag"] = "tsdoc-missing-html-end-tag";
/**
* A space is not allowed here. OR
* Expecting an HTML name. OR
* An HTML name must be a sequence of letters separated by hyphens.
*/
TSDocMessageId["MalformedHtmlName"] = "tsdoc-malformed-html-name";
/**
* This HTML element name is not defined by your TSDoc configuration.
*/
TSDocMessageId["UnsupportedHtmlElementName"] = "tsdoc-unsupported-html-name";
/**
* The opening backtick for a code fence must appear at the start of the line.
*/
TSDocMessageId["CodeFenceOpeningIndent"] = "tsdoc-code-fence-opening-indent";
/**
* The language specifier cannot contain backtick characters.
*/
TSDocMessageId["CodeFenceSpecifierSyntax"] = "tsdoc-code-fence-specifier-syntax";
/**
* The closing delimiter for a code fence must not be indented.
*/
TSDocMessageId["CodeFenceClosingIndent"] = "tsdoc-code-fence-closing-indent";
/**
* Missing closing delimiter.
*/
TSDocMessageId["CodeFenceMissingDelimiter"] = "tsdoc-code-fence-missing-delimiter";
/**
* Unexpected characters after closing delimiter for code fence.
*/
TSDocMessageId["CodeFenceClosingSyntax"] = "tsdoc-code-fence-closing-syntax";
/**
* A code span must contain at least one character between the backticks.
*/
TSDocMessageId["CodeSpanEmpty"] = "tsdoc-code-span-empty";
/**
* The code span is missing its closing backtick.
*/
TSDocMessageId["CodeSpanMissingDelimiter"] = "tsdoc-code-span-missing-delimiter";
})(TSDocMessageId = exports.TSDocMessageId || (exports.TSDocMessageId = {}));
// Exposed via TSDocConfiguration.allTsdocMessageIds()
exports.allTsdocMessageIds = [
// To make comparisons easy, keep these in the same order as the enum above:
'tsdoc-config-file-not-found',
'tsdoc-config-invalid-json',
'tsdoc-config-unsupported-schema',
'tsdoc-config-schema-error',
'tsdoc-config-cyclic-extends',
'tsdoc-config-unresolved-extends',
'tsdoc-config-undefined-tag',
'tsdoc-config-duplicate-tag-name',
'tsdoc-config-invalid-tag-name',
'tsdoc-comment-not-found',
'tsdoc-comment-missing-opening-delimiter',
'tsdoc-comment-missing-closing-delimiter',
'tsdoc-extra-inheritdoc-tag',
'tsdoc-escape-right-brace',
'tsdoc-escape-greater-than',
'tsdoc-missing-deprecation-message',
'tsdoc-inheritdoc-incompatible-tag',
'tsdoc-inheritdoc-incompatible-summary',
'tsdoc-inline-tag-missing-braces',
'tsdoc-tag-should-not-have-braces',
'tsdoc-unsupported-tag',
'tsdoc-undefined-tag',
'tsdoc-param-tag-with-invalid-type',
'tsdoc-param-tag-with-invalid-optional-name',
'tsdoc-param-tag-with-invalid-name',
'tsdoc-param-tag-missing-hyphen',
'tsdoc-unnecessary-backslash',
'tsdoc-missing-tag',
'tsdoc-at-sign-in-word',
'tsdoc-at-sign-without-tag-name',
'tsdoc-malformed-inline-tag',
'tsdoc-characters-after-block-tag',
'tsdoc-malformed-tag-name',
'tsdoc-characters-after-inline-tag',
'tsdoc-inline-tag-missing-right-brace',
'tsdoc-inline-tag-unescaped-brace',
'tsdoc-inheritdoc-tag-syntax',
'tsdoc-link-tag-empty',
'tsdoc-link-tag-unescaped-text',
'tsdoc-link-tag-destination-syntax',
'tsdoc-link-tag-invalid-url',
'tsdoc-reference-missing-hash',
'tsdoc-reference-hash-syntax',
'tsdoc-reference-malformed-package-name',
'tsdoc-reference-malformed-import-path',
'tsdoc-missing-reference',
'tsdoc-reference-missing-dot',
'tsdoc-reference-selector-missing-parens',
'tsdoc-reference-missing-colon',
'tsdoc-reference-missing-right-paren',
'tsdoc-reference-symbol-syntax',
'tsdoc-reference-missing-right-bracket',
'tsdoc-reference-missing-quote',
'tsdoc-reference-empty-identifier',
'tsdoc-reference-missing-identifier',
'tsdoc-reference-unquoted-identifier',
'tsdoc-reference-missing-label',
'tsdoc-reference-selector-syntax',
'tsdoc-html-tag-missing-greater-than',
'tsdoc-html-tag-missing-equals',
'tsdoc-html-tag-missing-string',
'tsdoc-html-string-missing-quote',
'tsdoc-text-after-html-string',
'tsdoc-missing-html-end-tag',
'tsdoc-malformed-html-name',
'tsdoc-unsupported-html-name',
'tsdoc-code-fence-opening-indent',
'tsdoc-code-fence-specifier-syntax',
'tsdoc-code-fence-closing-indent',
'tsdoc-code-fence-missing-delimiter',
'tsdoc-code-fence-closing-syntax',
'tsdoc-code-span-empty',
'tsdoc-code-span-missing-delimiter'
];
exports.allTsdocMessageIdsSet = new Set(exports.allTsdocMessageIds);
//# sourceMappingURL=TSDocMessageId.js.map
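
Since every ParserMessage carries one of these identifiers, a caller can filter or suppress individual diagnostics by comparing against the enum. A sketch; whether this particular input triggers `EscapeRightBrace`, and the package-level export of `allTsdocMessageIds`, are assumptions:

```ts
import { TSDocParser, TSDocMessageId, allTsdocMessageIds } from '@microsoft/tsdoc';

const context = new TSDocParser().parseString('/** A stray } brace */');

// Surface everything except one diagnostic we have decided to tolerate
const interesting = context.log.messages.filter(
  (message) => message.messageId !== TSDocMessageId.EscapeRightBrace
);
console.log(interesting.map((message) => message.text));

// The flat string list mirrors the enum values
console.log(allTsdocMessageIds.includes('tsdoc-escape-right-brace')); // true
```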

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,39 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.TSDocParser = void 0;
var TextRange_1 = require("./TextRange");
var ParserContext_1 = require("./ParserContext");
var LineExtractor_1 = require("./LineExtractor");
var Tokenizer_1 = require("./Tokenizer");
var NodeParser_1 = require("./NodeParser");
var TSDocConfiguration_1 = require("../configuration/TSDocConfiguration");
var ParagraphSplitter_1 = require("./ParagraphSplitter");
/**
* The main API for parsing TSDoc comments.
*/
var TSDocParser = /** @class */ (function () {
function TSDocParser(configuration) {
if (configuration) {
this.configuration = configuration;
}
else {
this.configuration = new TSDocConfiguration_1.TSDocConfiguration();
}
}
TSDocParser.prototype.parseString = function (text) {
return this.parseRange(TextRange_1.TextRange.fromString(text));
};
TSDocParser.prototype.parseRange = function (range) {
var parserContext = new ParserContext_1.ParserContext(this.configuration, range);
if (LineExtractor_1.LineExtractor.extract(parserContext)) {
parserContext.tokens = Tokenizer_1.Tokenizer.readTokens(parserContext.lines);
var nodeParser = new NodeParser_1.NodeParser(parserContext);
nodeParser.parse();
ParagraphSplitter_1.ParagraphSplitter.splitParagraphs(parserContext.docComment);
}
return parserContext;
};
return TSDocParser;
}());
exports.TSDocParser = TSDocParser;
//# sourceMappingURL=TSDocParser.js.map
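
Typical end-to-end usage of the class above, as a sketch; the doc comment content and the expected values in the comments are illustrative assumptions:

```ts
import { TSDocParser, ParserContext, DocComment } from '@microsoft/tsdoc';

const parser: TSDocParser = new TSDocParser(); // falls back to a default TSDocConfiguration

const context: ParserContext = parser.parseString(
  '/**\n * Computes the average of two numbers.\n *\n * @remarks Not optimized.\n */\n'
);

const docComment: DocComment = context.docComment;
console.log(docComment.remarksBlock !== undefined); // true - the @remarks block was recognized
console.log(context.log.messages.length);           // 0 if the comment parsed cleanly
```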

View File

@ -0,0 +1 @@
{"version":3,"file":"TSDocParser.js","sourceRoot":"","sources":["../../src/parser/TSDocParser.ts"],"names":[],"mappings":";;;AAAA,yCAAwC;AACxC,iDAAgD;AAChD,iDAAgD;AAChD,yCAAwC;AACxC,2CAA0C;AAC1C,0EAAyE;AACzE,yDAAwD;AAExD;;GAEG;AACH;IAME,qBAAmB,aAAkC;QACnD,IAAI,aAAa,EAAE;YACjB,IAAI,CAAC,aAAa,GAAG,aAAa,CAAC;SACpC;aAAM;YACL,IAAI,CAAC,aAAa,GAAG,IAAI,uCAAkB,EAAE,CAAC;SAC/C;IACH,CAAC;IAEM,iCAAW,GAAlB,UAAmB,IAAY;QAC7B,OAAO,IAAI,CAAC,UAAU,CAAC,qBAAS,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC,CAAC;IACrD,CAAC;IAEM,gCAAU,GAAjB,UAAkB,KAAgB;QAChC,IAAM,aAAa,GAAkB,IAAI,6BAAa,CAAC,IAAI,CAAC,aAAa,EAAE,KAAK,CAAC,CAAC;QAElF,IAAI,6BAAa,CAAC,OAAO,CAAC,aAAa,CAAC,EAAE;YACxC,aAAa,CAAC,MAAM,GAAG,qBAAS,CAAC,UAAU,CAAC,aAAa,CAAC,KAAK,CAAC,CAAC;YAEjE,IAAM,UAAU,GAAe,IAAI,uBAAU,CAAC,aAAa,CAAC,CAAC;YAC7D,UAAU,CAAC,KAAK,EAAE,CAAC;YAEnB,qCAAiB,CAAC,eAAe,CAAC,aAAa,CAAC,UAAU,CAAC,CAAC;SAC7D;QAED,OAAO,aAAa,CAAC;IACvB,CAAC;IACH,kBAAC;AAAD,CAAC,AAhCD,IAgCC;AAhCY,kCAAW","sourcesContent":["import { TextRange } from './TextRange';\r\nimport { ParserContext } from './ParserContext';\r\nimport { LineExtractor } from './LineExtractor';\r\nimport { Tokenizer } from './Tokenizer';\r\nimport { NodeParser } from './NodeParser';\r\nimport { TSDocConfiguration } from '../configuration/TSDocConfiguration';\r\nimport { ParagraphSplitter } from './ParagraphSplitter';\r\n\r\n/**\r\n * The main API for parsing TSDoc comments.\r\n */\r\nexport class TSDocParser {\r\n /**\r\n * The configuration that was provided for the TSDocParser.\r\n */\r\n public readonly configuration: TSDocConfiguration;\r\n\r\n public constructor(configuration?: TSDocConfiguration) {\r\n if (configuration) {\r\n this.configuration = configuration;\r\n } else {\r\n this.configuration = new TSDocConfiguration();\r\n }\r\n }\r\n\r\n public parseString(text: string): ParserContext {\r\n return this.parseRange(TextRange.fromString(text));\r\n }\r\n\r\n public parseRange(range: TextRange): ParserContext {\r\n const parserContext: ParserContext = new ParserContext(this.configuration, range);\r\n\r\n if (LineExtractor.extract(parserContext)) {\r\n parserContext.tokens = Tokenizer.readTokens(parserContext.lines);\r\n\r\n const nodeParser: NodeParser = new NodeParser(parserContext);\r\n nodeParser.parse();\r\n\r\n ParagraphSplitter.splitParagraphs(parserContext.docComment);\r\n }\r\n\r\n return parserContext;\r\n }\r\n}\r\n"]}

View File

@ -0,0 +1,133 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.TextRange = void 0;
/**
* Efficiently references a range of text from a string buffer.
*/
var TextRange = /** @class */ (function () {
function TextRange(buffer, pos, end) {
this.buffer = buffer;
this.pos = pos;
this.end = end;
this._validateBounds();
}
/**
* Constructs a TextRange that corresponds to an entire string object.
*/
TextRange.fromString = function (buffer) {
return new TextRange(buffer, 0, buffer.length);
};
/**
* Constructs a TextRange that corresponds to a specified sub-range of a string object.
*/
TextRange.fromStringRange = function (buffer, pos, end) {
return new TextRange(buffer, pos, end);
};
Object.defineProperty(TextRange.prototype, "length", {
/**
* Returns the length of the text range.
* @remarks
* This value is calculated as the `end` property minus the `pos` property.
*/
get: function () {
return this.end - this.pos;
},
enumerable: false,
configurable: true
});
/**
* Constructs a TextRange that corresponds to a different range of an existing buffer.
*/
TextRange.prototype.getNewRange = function (pos, end) {
return new TextRange(this.buffer, pos, end);
};
/**
* Returns true if the length of the range is zero. Note that the object reference may not
* be equal to `TextRange.empty`, and the buffer may be different.
*/
TextRange.prototype.isEmpty = function () {
return this.pos === this.end;
};
/**
* Returns the range from the associated string buffer.
*/
TextRange.prototype.toString = function () {
return this.buffer.substring(this.pos, this.end);
};
/**
* Returns a debugging dump of the range, indicated via custom delimiters.
* @remarks
* For example if the delimiters are "[" and "]", and the range is 3..5 inside "1234567",
* then the output would be "12[345]67".
*/
TextRange.prototype.getDebugDump = function (posDelimiter, endDelimiter) {
return (this.buffer.substring(0, this.pos) +
posDelimiter +
this.buffer.substring(this.pos, this.end) +
endDelimiter +
this.buffer.substring(this.end));
};
/**
* Calculates the line and column number for the specified offset into the buffer.
*
* @remarks
* This is a potentially expensive operation.
*
* @param index - an integer offset
*/
TextRange.prototype.getLocation = function (index) {
if (index < 0 || index > this.buffer.length) {
// No match
return { line: 0, column: 0 };
}
// TODO: Consider caching or optimizing this somehow
var line = 1;
var column = 1;
var currentIndex = 0;
while (currentIndex < index) {
var current = this.buffer[currentIndex];
++currentIndex;
if (current === '\r') {
// CR
// Ignore '\r' and assume it will always have an accompanying '\n'
continue;
}
if (current === '\n') {
// LF
++line;
column = 1;
}
else {
// NOTE: For consistency with the TypeScript compiler, a tab character is assumed
// to advance by one column
++column;
}
}
return { line: line, column: column };
};
TextRange.prototype._validateBounds = function () {
if (this.pos < 0) {
throw new Error('TextRange.pos cannot be negative');
}
if (this.end < 0) {
throw new Error('TextRange.end cannot be negative');
}
if (this.end < this.pos) {
throw new Error('TextRange.end cannot be smaller than TextRange.pos');
}
if (this.pos > this.buffer.length) {
throw new Error('TextRange.pos cannot exceed the associated text buffer length');
}
if (this.end > this.buffer.length) {
throw new Error('TextRange.end cannot exceed the associated text buffer length');
}
};
/**
* Used to represent an empty or unknown range.
*/
TextRange.empty = new TextRange('', 0, 0);
return TextRange;
}());
exports.TextRange = TextRange;
//# sourceMappingURL=TextRange.js.map
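
A small sketch exercising the methods above; the expected values in the comments follow directly from the implementation shown here:

```ts
import { TextRange } from '@microsoft/tsdoc';

const range: TextRange = TextRange.fromStringRange('1234567', 2, 5);
console.log(range.toString());             // "345"
console.log(range.length);                 // 3
console.log(range.getDebugDump('[', ']')); // "12[345]67"

const multiLine: TextRange = TextRange.fromString('first\nsecond');
console.log(multiLine.getLocation(6));     // { line: 2, column: 1 }
```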

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,173 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Token = exports.TokenKind = void 0;
/**
* Distinguishes different types of Token objects.
*/
var TokenKind;
(function (TokenKind) {
/**
* A token representing the end of the input. The Token.range will be an empty range
* at the end of the provided input.
*/
TokenKind[TokenKind["EndOfInput"] = 2001] = "EndOfInput";
/**
* A token representing a virtual newline.
* The Token.range will be an empty range, because the actual newline character may
* be noncontiguous due to the doc comment delimiter trimming.
*/
TokenKind[TokenKind["Newline"] = 2002] = "Newline";
/**
* A token representing one or more spaces and tabs (but not newlines or end of input).
*/
TokenKind[TokenKind["Spacing"] = 2003] = "Spacing";
/**
* A token representing one or more ASCII letters, numbers, and underscores.
*/
TokenKind[TokenKind["AsciiWord"] = 2004] = "AsciiWord";
/**
* A single ASCII character that behaves like punctuation, e.g. doesn't need whitespace
* around it when adjacent to a letter. The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["OtherPunctuation"] = 2005] = "OtherPunctuation";
/**
* A token representing a sequence of non-ASCII printable characters that are not punctuation.
*/
TokenKind[TokenKind["Other"] = 2006] = "Other";
/**
* The backslash character `\`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["Backslash"] = 2007] = "Backslash";
/**
* The less-than character `<`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["LessThan"] = 2008] = "LessThan";
/**
* The greater-than character `>`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["GreaterThan"] = 2009] = "GreaterThan";
/**
* The equals character `=`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["Equals"] = 2010] = "Equals";
/**
* The single-quote character `'`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["SingleQuote"] = 2011] = "SingleQuote";
/**
* The double-quote character `"`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["DoubleQuote"] = 2012] = "DoubleQuote";
/**
* The slash character `/`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["Slash"] = 2013] = "Slash";
/**
* The hyphen character `-`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["Hyphen"] = 2014] = "Hyphen";
/**
* The at-sign character `@`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["AtSign"] = 2015] = "AtSign";
/**
* The left curly bracket character `{`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["LeftCurlyBracket"] = 2016] = "LeftCurlyBracket";
/**
* The right curly bracket character `}`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["RightCurlyBracket"] = 2017] = "RightCurlyBracket";
/**
* The backtick character.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["Backtick"] = 2018] = "Backtick";
/**
* The period character.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["Period"] = 2019] = "Period";
/**
* The colon character.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["Colon"] = 2020] = "Colon";
/**
* The comma character.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["Comma"] = 2021] = "Comma";
/**
* The left square bracket character.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["LeftSquareBracket"] = 2022] = "LeftSquareBracket";
/**
* The right square bracket character.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["RightSquareBracket"] = 2023] = "RightSquareBracket";
/**
* The pipe character `|`.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["Pipe"] = 2024] = "Pipe";
/**
* The left parenthesis character.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["LeftParenthesis"] = 2025] = "LeftParenthesis";
/**
* The right parenthesis character.
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["RightParenthesis"] = 2026] = "RightParenthesis";
/**
* The pound character ("#").
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["PoundSymbol"] = 2027] = "PoundSymbol";
/**
* The plus character ("+").
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["Plus"] = 2028] = "Plus";
/**
* The dollar sign character ("$").
* The Token.range will always be a string of length 1.
*/
TokenKind[TokenKind["DollarSign"] = 2029] = "DollarSign";
})(TokenKind = exports.TokenKind || (exports.TokenKind = {}));
/**
* Represents a contiguous range of characters extracted from one of the doc comment lines
* being processed by the Tokenizer. There is a token representing a newline, but otherwise
* a single token cannot span multiple lines.
*/
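/*
 * Illustrative sketch (not part of the build): a Token pairs a TokenKind with the TextRange
 * it was extracted from. Tokens are normally produced by Tokenizer.readTokens(), but one can
 * be constructed directly:
 *
 *   const { Token, TokenKind } = require('./Token');
 *   const { TextRange } = require('./TextRange');
 *   const line = TextRange.fromString(' * Hello');
 *   const token = new Token(TokenKind.AsciiWord, line.getNewRange(3, 8), line);
 *   console.log(token.toString());  // "Hello"
 */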
var Token = /** @class */ (function () {
function Token(kind, range, line) {
this.kind = kind;
this.range = range;
this.line = line;
}
Token.prototype.toString = function () {
if (this.kind === TokenKind.Newline) {
return '\n';
}
return this.range.toString();
};
return Token;
}());
exports.Token = Token;
//# sourceMappingURL=Token.js.map

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,174 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.TokenReader = void 0;
var Token_1 = require("./Token");
var TokenSequence_1 = require("./TokenSequence");
/**
* Manages a stream of tokens that are read by the parser.
*
* @remarks
* Use TokenReader.readToken() to read a token and advance the stream pointer.
* Use TokenReader.peekToken() to preview the next token.
* Use TokenReader.createMarker() and backtrackToMarker() to rewind to an earlier point.
* Whenever readToken() is called, the token is added to an accumulated TokenSequence
* that can be extracted by calling extractAccumulatedSequence().
*/
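/*
 * A minimal workflow sketch (illustration only, not part of the build). It assumes a
 * ParserContext obtained from TSDocParser.parseString(), referred to as `parserContext` below:
 *
 *   const { TokenReader } = require('./TokenReader');
 *   const { TokenKind } = require('./Token');
 *
 *   const tokenReader = new TokenReader(parserContext);
 *   const marker = tokenReader.createMarker();
 *   while (tokenReader.peekTokenKind() === TokenKind.Spacing) {
 *     tokenReader.readToken();                        // consume leading whitespace
 *   }
 *   if (tokenReader.peekTokenKind() !== TokenKind.AtSign) {
 *     tokenReader.backtrackToMarker(marker);          // rewind; this was not a block tag
 *   } else {
 *     const spacing = tokenReader.tryExtractAccumulatedSequence();  // whitespace read so far
 *   }
 */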
var TokenReader = /** @class */ (function () {
function TokenReader(parserContext, embeddedTokenSequence) {
this._parserContext = parserContext;
this.tokens = parserContext.tokens;
if (embeddedTokenSequence) {
if (embeddedTokenSequence.parserContext !== this._parserContext) {
throw new Error('The embeddedTokenSequence must use the same parser context');
}
this._readerStartIndex = embeddedTokenSequence.startIndex;
this._readerEndIndex = embeddedTokenSequence.endIndex;
}
else {
this._readerStartIndex = 0;
this._readerEndIndex = this.tokens.length;
}
this._currentIndex = this._readerStartIndex;
this._accumulatedStartIndex = this._readerStartIndex;
}
/**
* Extracts and returns the TokenSequence that was accumulated so far by calls to readToken().
* The next call to readToken() will start a new accumulated sequence.
*/
TokenReader.prototype.extractAccumulatedSequence = function () {
if (this._accumulatedStartIndex === this._currentIndex) {
// If this happens, it indicates a parser bug:
throw new Error('Parser assertion failed: The queue should not be empty when' +
' extractAccumulatedSequence() is called');
}
var sequence = new TokenSequence_1.TokenSequence({
parserContext: this._parserContext,
startIndex: this._accumulatedStartIndex,
endIndex: this._currentIndex
});
this._accumulatedStartIndex = this._currentIndex;
return sequence;
};
    /**
     * Returns true if the accumulated sequence is empty, i.e. has no tokens yet. This will
     * be true when the TokenReader starts, and also immediately after a call to
     * extractAccumulatedSequence(). It becomes false whenever readToken() is called.
     */
TokenReader.prototype.isAccumulatedSequenceEmpty = function () {
return this._accumulatedStartIndex === this._currentIndex;
};
/**
* Like extractAccumulatedSequence(), but returns undefined if nothing has been
* accumulated yet.
*/
TokenReader.prototype.tryExtractAccumulatedSequence = function () {
if (this.isAccumulatedSequenceEmpty()) {
return undefined;
}
return this.extractAccumulatedSequence();
};
    /**
     * Asserts that isAccumulatedSequenceEmpty() would return true. If not, an exception
     * is thrown indicating a parser bug.
     */
TokenReader.prototype.assertAccumulatedSequenceIsEmpty = function () {
if (!this.isAccumulatedSequenceEmpty()) {
// If this happens, it indicates a parser bug:
var sequence = new TokenSequence_1.TokenSequence({
parserContext: this._parserContext,
startIndex: this._accumulatedStartIndex,
endIndex: this._currentIndex
});
var tokenStrings = sequence.tokens.map(function (x) { return x.toString(); });
throw new Error('Parser assertion failed: The queue should be empty, but it contains:\n' +
JSON.stringify(tokenStrings));
}
};
    /**
     * Returns the next token that would be returned by readToken(), without
     * consuming it.
     */
TokenReader.prototype.peekToken = function () {
return this.tokens[this._currentIndex];
};
    /**
     * Returns the TokenKind for the next token that would be returned by readToken(), without
     * consuming it.
     */
TokenReader.prototype.peekTokenKind = function () {
if (this._currentIndex >= this._readerEndIndex) {
return Token_1.TokenKind.EndOfInput;
}
return this.tokens[this._currentIndex].kind;
};
/**
* Like peekTokenKind(), but looks ahead two tokens.
*/
TokenReader.prototype.peekTokenAfterKind = function () {
if (this._currentIndex + 1 >= this._readerEndIndex) {
return Token_1.TokenKind.EndOfInput;
}
return this.tokens[this._currentIndex + 1].kind;
};
/**
* Like peekTokenKind(), but looks ahead three tokens.
*/
TokenReader.prototype.peekTokenAfterAfterKind = function () {
if (this._currentIndex + 2 >= this._readerEndIndex) {
return Token_1.TokenKind.EndOfInput;
}
return this.tokens[this._currentIndex + 2].kind;
};
    /**
     * Extracts the next token from the input stream and returns it.
     * The token is also appended to the accumulated sequence, which can
     * later be accessed via extractAccumulatedSequence().
     */
TokenReader.prototype.readToken = function () {
if (this._currentIndex >= this._readerEndIndex) {
// If this happens, it's a parser bug
throw new Error('Cannot read past end of stream');
}
var token = this.tokens[this._currentIndex];
if (token.kind === Token_1.TokenKind.EndOfInput) {
            // We don't allow reading the EndOfInput token, because we want peekToken()
            // to always be guaranteed to return a valid result.
// If this happens, it's a parser bug
throw new Error('The EndOfInput token cannot be read');
}
this._currentIndex++;
return token;
};
/**
* Returns the kind of the token immediately before the current token.
*/
TokenReader.prototype.peekPreviousTokenKind = function () {
if (this._currentIndex === 0) {
return Token_1.TokenKind.EndOfInput;
}
return this.tokens[this._currentIndex - 1].kind;
};
/**
* Remembers the current position in the stream.
*/
TokenReader.prototype.createMarker = function () {
return this._currentIndex;
};
/**
* Rewinds the stream pointer to a previous position in the stream.
*/
TokenReader.prototype.backtrackToMarker = function (marker) {
if (marker > this._currentIndex) {
// If this happens, it's a parser bug
throw new Error('The marker has expired');
}
this._currentIndex = marker;
if (marker < this._accumulatedStartIndex) {
this._accumulatedStartIndex = marker;
}
};
return TokenReader;
}());
exports.TokenReader = TokenReader;
//# sourceMappingURL=TokenReader.js.map

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,99 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.TokenSequence = void 0;
var TextRange_1 = require("./TextRange");
/**
* Represents a sequence of tokens extracted from `ParserContext.tokens`.
* This sequence is defined by a starting index and ending index into that array.
*/
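/*
 * A minimal usage sketch (illustration only, not part of the build), assuming a
 * ParserContext whose tokens array has already been populated by the Tokenizer:
 *
 *   const { TokenSequence } = require('./TokenSequence');
 *
 *   // Wrap the first three tokens of the comment:
 *   const sequence = new TokenSequence({ parserContext: parserContext, startIndex: 0, endIndex: 3 });
 *   console.log(sequence.toString());                   // concatenated token text
 *   console.log(sequence.isEmpty());                    // false
 *   const subSequence = sequence.getNewSequence(1, 2);  // a subrange over the same array
 */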
var TokenSequence = /** @class */ (function () {
function TokenSequence(parameters) {
this.parserContext = parameters.parserContext;
this._startIndex = parameters.startIndex;
this._endIndex = parameters.endIndex;
this._validateBounds();
}
/**
* Constructs a TokenSequence object with no tokens.
*/
TokenSequence.createEmpty = function (parserContext) {
return new TokenSequence({ parserContext: parserContext, startIndex: 0, endIndex: 0 });
};
Object.defineProperty(TokenSequence.prototype, "startIndex", {
/**
* The starting index into the associated `ParserContext.tokens` list.
*/
get: function () {
return this._startIndex;
},
enumerable: false,
configurable: true
});
Object.defineProperty(TokenSequence.prototype, "endIndex", {
/**
* The (non-inclusive) ending index into the associated `ParserContext.tokens` list.
*/
get: function () {
return this._endIndex;
},
enumerable: false,
configurable: true
});
Object.defineProperty(TokenSequence.prototype, "tokens", {
get: function () {
return this.parserContext.tokens.slice(this._startIndex, this._endIndex);
},
enumerable: false,
configurable: true
});
/**
* Constructs a TokenSequence that corresponds to a different range of tokens,
* e.g. a subrange.
*/
TokenSequence.prototype.getNewSequence = function (startIndex, endIndex) {
return new TokenSequence({
parserContext: this.parserContext,
startIndex: startIndex,
endIndex: endIndex
});
};
/**
* Returns a TextRange that includes all tokens in the sequence (including any additional
* characters between doc comment lines).
*/
TokenSequence.prototype.getContainingTextRange = function () {
if (this.isEmpty()) {
return TextRange_1.TextRange.empty;
}
return this.parserContext.sourceRange.getNewRange(this.parserContext.tokens[this._startIndex].range.pos, this.parserContext.tokens[this._endIndex - 1].range.end);
};
TokenSequence.prototype.isEmpty = function () {
return this._startIndex === this._endIndex;
};
/**
* Returns the concatenated text of all the tokens.
*/
TokenSequence.prototype.toString = function () {
return this.tokens.map(function (x) { return x.toString(); }).join('');
};
TokenSequence.prototype._validateBounds = function () {
if (this.startIndex < 0) {
throw new Error('TokenSequence.startIndex cannot be negative');
}
if (this.endIndex < 0) {
throw new Error('TokenSequence.endIndex cannot be negative');
}
if (this.endIndex < this.startIndex) {
throw new Error('TokenSequence.endIndex cannot be smaller than TokenSequence.startIndex');
}
if (this.startIndex > this.parserContext.tokens.length) {
throw new Error('TokenSequence.startIndex cannot exceed the associated token array');
}
if (this.endIndex > this.parserContext.tokens.length) {
throw new Error('TokenSequence.endIndex cannot exceed the associated token array');
}
};
return TokenSequence;
}());
exports.TokenSequence = TokenSequence;
//# sourceMappingURL=TokenSequence.js.map

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,146 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.Tokenizer = void 0;
var TextRange_1 = require("./TextRange");
var Token_1 = require("./Token");
var Tokenizer = /** @class */ (function () {
function Tokenizer() {
}
/**
* Given a list of input lines, this returns an array of extracted tokens.
* The last token will always be TokenKind.EndOfInput.
*/
Tokenizer.readTokens = function (lines) {
Tokenizer._ensureInitialized();
var tokens = [];
var lastLine = undefined;
for (var _i = 0, lines_1 = lines; _i < lines_1.length; _i++) {
var line = lines_1[_i];
Tokenizer._pushTokensForLine(tokens, line);
lastLine = line;
}
if (lastLine) {
tokens.push(new Token_1.Token(Token_1.TokenKind.EndOfInput, lastLine.getNewRange(lastLine.end, lastLine.end), lastLine));
}
else {
tokens.push(new Token_1.Token(Token_1.TokenKind.EndOfInput, TextRange_1.TextRange.empty, TextRange_1.TextRange.empty));
}
return tokens;
};
/**
* Returns true if the token is a CommonMark punctuation character.
* These are basically all the ASCII punctuation characters.
*/
Tokenizer.isPunctuation = function (tokenKind) {
Tokenizer._ensureInitialized();
return Tokenizer._punctuationTokens[tokenKind] || false;
};
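    /*
     * A minimal sketch of tokenizing (illustration only, not part of the build). The `lines`
     * argument is an array of TextRange objects, one per doc comment line; here a single line
     * is built directly with TextRange.fromString():
     *
     *   const { Tokenizer } = require('./Tokenizer');
     *   const { TextRange } = require('./TextRange');
     *   const { TokenKind } = require('./Token');
     *
     *   const tokens = Tokenizer.readTokens([TextRange.fromString('Hello, world')]);
     *   // -> AsciiWord "Hello", Comma ",", Spacing " ", AsciiWord "world", Newline, EndOfInput
     *   console.log(Tokenizer.isPunctuation(TokenKind.Comma));  // true
     */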
Tokenizer._pushTokensForLine = function (tokens, line) {
var buffer = line.buffer;
var end = line.end;
var bufferIndex = line.pos;
var tokenKind = undefined;
var tokenPos = bufferIndex;
while (bufferIndex < end) {
// Read a character and determine its kind
var charCode = buffer.charCodeAt(bufferIndex);
var characterKind = Tokenizer._charCodeMap[charCode];
if (characterKind === undefined) {
characterKind = Token_1.TokenKind.Other;
}
// Can we append to an existing token? Yes if:
// 1. There is an existing token, AND
// 2. It is the same kind of token, AND
// 3. It's not punctuation (which is always one character)
if (tokenKind !== undefined &&
characterKind === tokenKind &&
Tokenizer._isMultiCharacterToken(tokenKind)) {
// yes, append
}
else {
// Is there a previous completed token to push?
if (tokenKind !== undefined) {
tokens.push(new Token_1.Token(tokenKind, line.getNewRange(tokenPos, bufferIndex), line));
}
tokenPos = bufferIndex;
tokenKind = characterKind;
}
++bufferIndex;
}
// Is there a previous completed token to push?
if (tokenKind !== undefined) {
tokens.push(new Token_1.Token(tokenKind, line.getNewRange(tokenPos, bufferIndex), line));
}
tokens.push(new Token_1.Token(Token_1.TokenKind.Newline, line.getNewRange(line.end, line.end), line));
};
    /**
     * Returns true if the specified token kind can span multiple characters.
     */
Tokenizer._isMultiCharacterToken = function (kind) {
switch (kind) {
case Token_1.TokenKind.Spacing:
case Token_1.TokenKind.AsciiWord:
case Token_1.TokenKind.Other:
return true;
}
return false;
};
Tokenizer._ensureInitialized = function () {
if (Tokenizer._charCodeMap) {
return;
}
Tokenizer._charCodeMap = {};
Tokenizer._punctuationTokens = {};
// All Markdown punctuation characters
var punctuation = Tokenizer._commonMarkPunctuationCharacters;
for (var i = 0; i < punctuation.length; ++i) {
var charCode = punctuation.charCodeAt(i);
Tokenizer._charCodeMap[charCode] = Token_1.TokenKind.OtherPunctuation;
}
// Special symbols
// !"#$%&\'()*+,\-.\/:;<=>?@[\\]^_`{|}~
var specialMap = {
'\\': Token_1.TokenKind.Backslash,
'<': Token_1.TokenKind.LessThan,
'>': Token_1.TokenKind.GreaterThan,
'=': Token_1.TokenKind.Equals,
"'": Token_1.TokenKind.SingleQuote,
'"': Token_1.TokenKind.DoubleQuote,
'/': Token_1.TokenKind.Slash,
'-': Token_1.TokenKind.Hyphen,
'@': Token_1.TokenKind.AtSign,
'{': Token_1.TokenKind.LeftCurlyBracket,
'}': Token_1.TokenKind.RightCurlyBracket,
'`': Token_1.TokenKind.Backtick,
'.': Token_1.TokenKind.Period,
':': Token_1.TokenKind.Colon,
',': Token_1.TokenKind.Comma,
'[': Token_1.TokenKind.LeftSquareBracket,
']': Token_1.TokenKind.RightSquareBracket,
'|': Token_1.TokenKind.Pipe,
'(': Token_1.TokenKind.LeftParenthesis,
')': Token_1.TokenKind.RightParenthesis,
'#': Token_1.TokenKind.PoundSymbol,
'+': Token_1.TokenKind.Plus,
$: Token_1.TokenKind.DollarSign
};
for (var _i = 0, _a = Object.getOwnPropertyNames(specialMap); _i < _a.length; _i++) {
var key = _a[_i];
Tokenizer._charCodeMap[key.charCodeAt(0)] = specialMap[key];
Tokenizer._punctuationTokens[specialMap[key]] = true;
}
Tokenizer._punctuationTokens[Token_1.TokenKind.OtherPunctuation] = true;
var word = Tokenizer._wordCharacters;
for (var i = 0; i < word.length; ++i) {
var charCode = word.charCodeAt(i);
Tokenizer._charCodeMap[charCode] = Token_1.TokenKind.AsciiWord;
}
Tokenizer._charCodeMap[' '.charCodeAt(0)] = Token_1.TokenKind.Spacing;
Tokenizer._charCodeMap['\t'.charCodeAt(0)] = Token_1.TokenKind.Spacing;
};
Tokenizer._commonMarkPunctuationCharacters = '!"#$%&\'()*+,-./:;<=>?@[\\]^`{|}~';
Tokenizer._wordCharacters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_';
return Tokenizer;
}());
exports.Tokenizer = Tokenizer;
//# sourceMappingURL=Tokenizer.js.map

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
{"version":3,"file":"NodeParserBasics.test.js","sourceRoot":"","sources":["../../../src/parser/__tests__/NodeParserBasics.test.ts"],"names":[],"mappings":";;AAAA,6CAA4C;AAE5C,IAAI,CAAC,0BAA0B,EAAE;IAC/B,yBAAW,CAAC,+BAA+B,CACzC;QACE,KAAK;QACL,YAAY;QACZ,WAAW;QACX,KAAK;KACN,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,+BAA+B,EAAE;IACpC,yBAAW,CAAC,+BAA+B,CAAC,OAAO,CAAC,CAAC;IAErD,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,IAAI,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;IAE7E,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,GAAG,EAAE,GAAG,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;AACnF,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,yCAAyC,EAAE;IAC9C,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,gBAAgB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;AAC3F,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,yCAAyC,EAAE;IAC9C,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,2CAA2C,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACvE,CAAC;AACJ,CAAC,CAAC,CAAC","sourcesContent":["import { TestHelpers } from './TestHelpers';\r\n\r\ntest('00 Tokenizer simple case', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n [\r\n '/**',\r\n ' * line 1 ', // extra space at end of line\r\n ' * line 2',\r\n ' */'\r\n ].join('\\n')\r\n );\r\n});\r\n\r\ntest('01 Tokenizer degenerate cases', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot('/***/');\r\n\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' *', ' */'].join('\\n'));\r\n\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' ', ' ', ' */'].join('\\n'));\r\n});\r\n\r\ntest('02 Backslash escapes: positive examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * \\\\$\\\\@param', ' */'].join('\\n'));\r\n});\r\n\r\ntest('03 Backslash escapes: negative examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * letter: \\\\A space: \\\\ end of line: \\\\', ' */'].join('\\n')\r\n );\r\n});\r\n"]}

View File

@ -0,0 +1 @@
{"version":3,"file":"NodeParserCode.test.js","sourceRoot":"","sources":["../../../src/parser/__tests__/NodeParserCode.test.ts"],"names":[],"mappings":";;AAAA,6CAA4C;AAE5C,IAAI,CAAC,8BAA8B,EAAE;IACnC,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,aAAa,EAAE,kBAAkB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;IAC1G,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,UAAU,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;AACrF,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,8BAA8B,EAAE;IACnC,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,WAAW,EAAE,UAAU,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;IAChG,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,OAAO,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;AAClF,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,yBAAyB,EAAE;IAC9B,yBAAW,CAAC,+BAA+B,CACzC;QACE,KAAK;QACL,yCAAyC;QACzC,sBAAsB;QACtB,uBAAuB;QACvB,WAAW;QACX,KAAK;KACN,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;IACF,yBAAW,CAAC,+BAA+B,CACzC;QACE,KAAK;QACL,kEAAkE;QAClE,QAAQ;QACR,uBAAuB;QACvB,UAAU;KACX,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,yBAAyB,EAAE;IAC9B,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,qCAAqC,EAAE,WAAW,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAC9E,CAAC;IACF,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,sCAAsC,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAC5E,CAAC;IACF,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,uCAAuC,EAAE,UAAU,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACxE,CAAC;IACF,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,uCAAuC,EAAE,mBAAmB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACxF,CAAC;IACF,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,+BAA+B,EAAE,uBAAuB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACpF,CAAC;IACF,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,sCAAsC,EAAE,QAAQ,EAAE,SAAS,EAAE,aAAa,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACtG,CAAC;IACF,yBAAW,CAAC,+BAA+B,CACzC;QACE,KAAK;QACL,qDAAqD;QACrD,QAAQ;QACR,SAAS;QACT,WAAW;QACX,KAAK;KACN,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;AACJ,CAAC,CAAC,CAAC","sourcesContent":["import { TestHelpers } from './TestHelpers';\r\n\r\ntest('00 Code span basic, positive', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * line `1`', ' * line ` 2` sdf', ' */'].join('\\n'));\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * M`&`M', ' */'].join('\\n'));\r\n});\r\n\r\ntest('01 Code span basic, negative', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * `multi', ' * line`', ' */'].join('\\n'));\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * ``', ' */'].join('\\n'));\r\n});\r\n\r\ntest('03 Code fence, positive', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n [\r\n '/**',\r\n ' * This is a code fence with all parts:',\r\n ' * ```a language! 
',\r\n ' * some `code` here',\r\n ' * ``` ',\r\n ' */'\r\n ].join('\\n')\r\n );\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n [\r\n '/**',\r\n ' * This is a code fence with no language or trailing whitespace:',\r\n ' * ```',\r\n ' * some `code` here',\r\n ' * ```*/'\r\n ].join('\\n')\r\n );\r\n});\r\n\r\ntest('04 Code fence, negative', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * Code fence incorrectly indented:', ' * ```', ' */'].join('\\n')\r\n );\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * Code fence not starting the line:', ' *a```', ' */'].join('\\n')\r\n );\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * Code fence not being terminated 1:', ' * ```*/'].join('\\n')\r\n );\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * Code fence not being terminated 2:', ' * ``` some stuff', ' */'].join('\\n')\r\n );\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * Language having backticks:', ' * ``` some stuff ```', ' */'].join('\\n')\r\n );\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * Closing delimiter being indented:', ' * ```', ' * code', ' * ```', ' */'].join('\\n')\r\n );\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n [\r\n '/**',\r\n ' * Closing delimiter not being on a line by itself:',\r\n ' * ```',\r\n ' * code',\r\n ' * ``` a',\r\n ' */'\r\n ].join('\\n')\r\n );\r\n});\r\n"]}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
{"version":3,"file":"NodeParserInheritDocTag.test.js","sourceRoot":"","sources":["../../../src/parser/__tests__/NodeParserInheritDocTag.test.ts"],"names":[],"mappings":";;AAAA,6CAA4C;AAE5C,IAAI,CAAC,sCAAsC,EAAE;IAC3C,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,kBAAkB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;IAC3F,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,+BAA+B,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;IACxG,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,0CAA0C,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACtE,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,sCAAsC,EAAE;IAC3C,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,8BAA8B,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;IACvG,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,+BAA+B,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;IACxG,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,kBAAkB,EAAE,kBAAkB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAClE,CAAC;IACF,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,iBAAiB,EAAE,aAAa,EAAE,kBAAkB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAChF,CAAC;IAEF,2BAA2B;IAC3B,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,wDAAwD,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACpF,CAAC;AACJ,CAAC,CAAC,CAAC","sourcesContent":["import { TestHelpers } from './TestHelpers';\r\n\r\ntest('00 InheritDoc tag: positive examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * {@inheritDoc}', ' */'].join('\\n'));\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * {@inheritDoc Class.member}', ' */'].join('\\n'));\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@inheritDoc package# Class . member}', ' */'].join('\\n')\r\n );\r\n});\r\n\r\ntest('01 InheritDoc tag: negative examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * {@inheritDoc | link text}', ' */'].join('\\n'));\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * {@inheritDoc Class % junk}', ' */'].join('\\n'));\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@inheritDoc}', ' * {@inheritDoc}', ' */'].join('\\n')\r\n );\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * summary text', ' * @remarks', ' * {@inheritDoc}', ' */'].join('\\n')\r\n );\r\n\r\n // Old API Extractor syntax\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@inheritdoc @scope/library:IDisposable.isDisposed}', ' */'].join('\\n')\r\n );\r\n});\r\n"]}

View File

@ -0,0 +1 @@
{"version":3,"file":"NodeParserLinkTag.test.js","sourceRoot":"","sources":["../../../src/parser/__tests__/NodeParserLinkTag.test.ts"],"names":[],"mappings":";;AAAA,6CAA4C;AAE5C,IAAI,CAAC,iCAAiC,EAAE;IACtC,yBAAW,CAAC,+BAA+B,CACzC;QACE,KAAK;QACL,gCAAgC;QAChC,iCAAiC;QACjC,kCAAkC;QAClC,0CAA0C;QAC1C,sCAAsC;QACtC,YAAY;QACZ,kCAAkC;QAClC,eAAe;QACf,QAAQ;QACR,KAAK;KACN,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,iCAAiC,EAAE;IACtC,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,YAAY,EAAE,6CAA6C,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACvF,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,uCAAuC,EAAE;IAC5C,yBAAW,CAAC,+BAA+B,CACzC;QACE,KAAK;QACL,gCAAgC;QAChC,gDAAgD;QAChD,gCAAgC;QAChC,KAAK;KACN,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,uCAAuC,EAAE;IAC5C,yBAAW,CAAC,+BAA+B,CACzC;QACE,KAAK;QACL,uCAAuC;QACvC,iDAAiD;QACjD,mCAAmC;QACnC,oCAAoC;QACpC,2BAA2B;QAC3B,oBAAoB;QACpB,KAAK;KACN,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,+DAA+D,EAAE;IACpE,yBAAW,CAAC,+BAA+B,CACzC;QACE,KAAK;QACL,yBAAyB;QACzB,+BAA+B;QAC/B,qCAAqC;QACrC,4CAA4C;QAC5C,gCAAgC;QAChC,KAAK;KACN,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,+DAA+D,EAAE;IACpE,yBAAW,CAAC,+BAA+B,CACzC;QACE,KAAK;QACL,uBAAuB;QACvB,2BAA2B;QAC3B,6BAA6B;QAC7B,wBAAwB;QACxB,iCAAiC;QACjC,oBAAoB;QACpB,KAAK;KACN,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;IACF,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,eAAe,EAAE,cAAc,EAAE,oBAAoB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACjF,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,mEAAmE,EAAE;IACxE,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,sBAAsB,EAAE,qBAAqB,EAAE,8BAA8B,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACzG,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,mEAAmE,EAAE;IACxE,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,oBAAoB,EAAE,0BAA0B,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAC5E,CAAC;AACJ,CAAC,CAAC,CAAC","sourcesContent":["import { TestHelpers } from './TestHelpers';\r\n\r\ntest('00 Link text: positive examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n [\r\n '/**',\r\n ' * {@link http://example1.com}',\r\n ' * {@link http://example2.com|}',\r\n ' * {@link http://example3.com| }',\r\n ' * {@link http://example4.com|link text}',\r\n ' * 1{@link http://example5.com| link',\r\n ' * text }2',\r\n ' * 3{@link http://example5.com| ',\r\n ' * link text ',\r\n ' * }4',\r\n ' */'\r\n ].join('\\n')\r\n );\r\n});\r\n\r\ntest('01 Link text: negative examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@link}', ' * {@link http://example1.com| link | text}', ' */'].join('\\n')\r\n );\r\n});\r\n\r\ntest('02 URL destination: positive examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n [\r\n '/**',\r\n ' * {@link http://example1.com}',\r\n ' * {@link https://example2.com#hash|link text}',\r\n ' * {@link customscheme://data}',\r\n ' */'\r\n ].join('\\n')\r\n );\r\n});\r\n\r\ntest('03 URL destination: negative examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n [\r\n '/**',\r\n ' * {@link http://example1.com spaces}',\r\n ' * {@link http://example2.com spaces|link text}',\r\n ' * {@link ftp+ssh://example3.com}',\r\n ' * {@link mailto:bob@example4.com}',\r\n ' * {@link //example5.com}',\r\n ' * {@link http://}',\r\n ' */'\r\n ].join('\\n')\r\n );\r\n});\r\n\r\ntest('04 Declaration reference with package name: positive examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n [\r\n '/**',\r\n ' * {@link my-example1#}',\r\n ' * {@link my-example2/path3#}',\r\n ' * {@link 
my-example4/path5/path6#}',\r\n ' * {@link @scope/my-example7/path8/path9#}',\r\n ' * {@link @scope/my-example7#}',\r\n ' */'\r\n ].join('\\n')\r\n );\r\n});\r\n\r\ntest('05 Declaration reference with package name: negative examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n [\r\n '/**',\r\n ' * {@link example1/#}',\r\n ' * {@link example2/a//b#}',\r\n ' * {@link @scope/ex@mple3#}',\r\n ' * {@link @/example4#}',\r\n ' * {@link @scope//my-example5#}',\r\n ' * {@link @scope#}',\r\n ' */'\r\n ].join('\\n')\r\n );\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@link @#}', ' * {@link #}', ' * {@link #Button}', ' */'].join('\\n')\r\n );\r\n});\r\n\r\ntest('06 Declaration reference with import path only: positive examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@link ../path1#}', ' * {@link ./path2#}', ' * {@link ./path3/../path4#}', ' */'].join('\\n')\r\n );\r\n});\r\n\r\ntest('07 Declaration reference with import path only: negative examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@link /path1#}', ' * {@link /path1 path2#}', ' */'].join('\\n')\r\n );\r\n});\r\n"]}

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
{"version":3,"file":"NodeParserLinkTag3.test.js","sourceRoot":"","sources":["../../../src/parser/__tests__/NodeParserLinkTag3.test.ts"],"names":[],"mappings":";;AAAA,6CAA4C;AAE5C,IAAI,CAAC,yCAAyC,EAAE;IAC9C,yBAAW,CAAC,+BAA+B,CACzC;QACE,KAAK;QACL,wDAAwD;QACxD,sFAAsF;QACtF,KAAK;KACN,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,yCAAyC,EAAE;IAC9C,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,uDAAuD,EAAE,sBAAsB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAC3G,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,yBAAyB,EAAE;IAC9B,yBAAW,CAAC,+BAA+B,CACzC;QACE,KAAK;QACL,qGAAqG;QACrG,KAAK;KACN,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;AACJ,CAAC,CAAC,CAAC","sourcesContent":["import { TestHelpers } from './TestHelpers';\r\n\r\ntest('00 Symbol references: positive examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n [\r\n '/**',\r\n ' * {@link Class1.[WellknownSymbols.toStringPrimitive]}',\r\n ' * {@link Class1 . ( [ WellknownSymbols . toStringPrimitive ] : static) | link text}',\r\n ' */'\r\n ].join('\\n')\r\n );\r\n});\r\n\r\ntest('01 Symbol references: negative examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@link Class1.[WellknownSymbols.toStringPrimitive}', ' * {@link Class1.[]}', ' */'].join('\\n')\r\n );\r\n});\r\n\r\ntest('02 Complicated examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n [\r\n '/**',\r\n ' * {@link ./lib/controls/Button#Button.([(WellknownSymbols:namespace).toStringPrimitive]:instance)}',\r\n ' */'\r\n ].join('\\n')\r\n );\r\n});\r\n"]}

View File

@ -0,0 +1 @@
{"version":3,"file":"NodeParserTags.test.js","sourceRoot":"","sources":["../../../src/parser/__tests__/NodeParserTags.test.ts"],"names":[],"mappings":";;AAAA,6CAA4C;AAE5C,IAAI,CAAC,kCAAkC,EAAE;IACvC,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,UAAU,EAAE,SAAS,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;AAChG,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,kCAAkC,EAAE;IACvC,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,WAAW,EAAE,WAAW,EAAE,WAAW,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACjE,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,kCAAkC,EAAE;IACvC,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,YAAY,EAAE,aAAa,EAAE,qBAAqB,EAAE,YAAY,EAAE,SAAS,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACvG,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,kCAAkC,EAAE;IACvC,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,aAAa,EAAE,aAAa,EAAE,eAAe,EAAE,WAAW,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACtF,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,mCAAmC,EAAE;IACxC,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,wBAAwB,EAAE,gBAAgB,EAAE,UAAU,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAClF,CAAC;IACF,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,sBAAsB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;AACjG,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,oCAAoC,EAAE;IACzC,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,8CAA8C,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAC1E,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,oCAAoC,EAAE;IACzC,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,mBAAmB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;IAC5F,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,mBAAmB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;IAC5F,yBAAW,CAAC,+BAA+B,CAAC,CAAC,KAAK,EAAE,aAAa,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC,CAAC;AACxF,CAAC,CAAC,CAAC","sourcesContent":["import { TestHelpers } from './TestHelpers';\r\n\r\ntest('00 Block tags: positive examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * @one ', ' * @two', ' */'].join('\\n'));\r\n});\r\n\r\ntest('01 Block tags: negative examples', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * @ one ', ' * +@two ', ' * @two+ ', ' */'].join('\\n')\r\n );\r\n});\r\n\r\ntest('02 Inline tags: simple, positive', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@one} ', ' * {@two } ', ' * {@three}{@four} ', ' * {@five ', ' * } ', ' */'].join('\\n')\r\n );\r\n});\r\n\r\ntest('03 Inline tags: simple, negative', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@ one} ', ' * {@two~} ', ' * { @three} ', ' * {@four', ' */'].join('\\n')\r\n );\r\n});\r\n\r\ntest('04 Inline tags: complex, positive', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@one some content}', ' * {@two multi', ' * line}', ' */'].join('\\n')\r\n );\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * {@three @taglike}', ' */'].join('\\n'));\r\n});\r\n\r\ntest('05 Inline tags: escaping, positive', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * {@one left \\\\{ right \\\\} backslash \\\\\\\\ }', ' */'].join('\\n')\r\n );\r\n});\r\n\r\ntest('06 Inline tags: escaping, negative', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * {@one curly\\\\}', ' */'].join('\\n'));\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * {@two curly{}}', ' */'].join('\\n'));\r\n TestHelpers.parseAndMatchNodeParserSnapshot(['/**', ' * three: }', ' */'].join('\\n'));\r\n});\r\n"]}

View File

@ -0,0 +1 @@
{"version":3,"file":"NodeParserValidationChecks.test.js","sourceRoot":"","sources":["../../../src/parser/__tests__/NodeParserValidationChecks.test.ts"],"names":[],"mappings":";;AAAA,6CAA4C;AAE5C,IAAI,CAAC,oCAAoC,EAAE;IACzC,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,gBAAgB,EAAE,wBAAwB,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACtE,CAAC;AACJ,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,oCAAoC,EAAE;IACzC,yBAAW,CAAC,+BAA+B,CACzC,CAAC,KAAK,EAAE,gBAAgB,EAAE,KAAK,EAAE,YAAY,EAAE,KAAK,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CACjE,CAAC;AACJ,CAAC,CAAC,CAAC","sourcesContent":["import { TestHelpers } from './TestHelpers';\r\n\r\ntest('00 Deprecated block: positive test', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * @deprecated', ' * Use the other thing', ' */'].join('\\n')\r\n );\r\n});\r\n\r\ntest('01 Deprecated block: negative test', () => {\r\n TestHelpers.parseAndMatchNodeParserSnapshot(\r\n ['/**', ' * @deprecated', ' * ', ' * @public', ' */'].join('\\n')\r\n );\r\n});\r\n"]}

View File

@ -0,0 +1,147 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.TestHelpers = void 0;
var TSDocParser_1 = require("../TSDocParser");
var nodes_1 = require("../../nodes");
var TSDocConfiguration_1 = require("../../configuration/TSDocConfiguration");
var TokenCoverageChecker_1 = require("./TokenCoverageChecker");
var TestHelpers = /** @class */ (function () {
function TestHelpers() {
}
/**
* Pretty print a line with "<" and ">" markers to indicate a text range.
*/
TestHelpers.formatLineSpan = function (line, range) {
if (range.pos < line.pos || range.end > line.end) {
throw new Error('Range must fall within the associated line');
}
        var paddedSpace = ['', ' ', '  ', '   ', '    '];
        var paddedLArrow = ['', '>', ' >', '  >', '   >'];
        var paddedRArrow = ['', '<', '< ', '<  ', '<   '];
var buffer = line.buffer;
var span = '';
if (line.end > 0) {
var i = line.pos - 1;
while (i < range.pos - 1) {
span += paddedSpace[TestHelpers.getEscaped(buffer[i]).length];
++i;
}
span += paddedLArrow[TestHelpers.getEscaped(buffer[i]).length];
++i;
while (i < range.end) {
span += paddedSpace[TestHelpers.getEscaped(buffer[i]).length];
++i;
}
if (i === line.end) {
span += '<';
}
else {
span += paddedRArrow[TestHelpers.getEscaped(buffer[i]).length];
++i;
while (i < line.end) {
span += paddedSpace[TestHelpers.getEscaped(buffer[i]).length];
++i;
}
}
}
return span;
};
    /**
     * Works around various characters that get ugly escapes in Jest snapshots.
     */
TestHelpers.getEscaped = function (s) {
return s
.replace(/\n/g, '[n]')
.replace(/\r/g, '[r]')
.replace(/\t/g, '[t]')
.replace(/\f/g, '[f]')
.replace(/\\/g, '[b]')
.replace(/\"/g, '[q]')
.replace(/`/g, '[c]')
.replace(/\</g, '[<]')
.replace(/\>/g, '[>]');
};
/**
* Main harness for tests under `./parser/*`.
*/
TestHelpers.parseAndMatchNodeParserSnapshot = function (buffer, config) {
var configuration = config !== null && config !== void 0 ? config : new TSDocConfiguration_1.TSDocConfiguration();
// For the parser tests, we use lots of custom tags without bothering to define them
configuration.validation.ignoreUndefinedTags = true;
var tsdocParser = new TSDocParser_1.TSDocParser(configuration);
var parserContext = tsdocParser.parseString(buffer);
expect({
buffer: TestHelpers.getEscaped(buffer),
lines: parserContext.lines.map(function (x) { return TestHelpers.getEscaped(x.toString()); }),
logMessages: parserContext.log.messages.map(function (message) { return message.text; }),
nodes: TestHelpers.getDocNodeSnapshot(parserContext.docComment),
gaps: this._getTokenCoverageGapsSnapshot(parserContext)
}).toMatchSnapshot();
TestHelpers._getTokenCoverageGapsSnapshot(parserContext);
};
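    // Typical usage from a parser test (illustration only; this mirrors the pattern used by
    // the src/parser/__tests__ suites included in this commit):
    //
    //   TestHelpers.parseAndMatchNodeParserSnapshot(
    //     ['/**', ' * summary text', ' */'].join('\n')
    //   );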
/**
* Main harness for tests under `./details/*`.
*/
TestHelpers.parseAndMatchDocCommentSnapshot = function (buffer, configuration) {
var tsdocParser = new TSDocParser_1.TSDocParser(configuration);
var parserContext = tsdocParser.parseString(buffer);
var docComment = parserContext.docComment;
expect({
s00_lines: parserContext.lines.map(function (x) { return TestHelpers.getEscaped(x.toString()); }),
s01_gaps: this._getTokenCoverageGapsSnapshot(parserContext),
s02_summarySection: TestHelpers.getDocNodeSnapshot(docComment.summarySection),
s03_remarksBlock: TestHelpers.getDocNodeSnapshot(docComment.remarksBlock),
s04_privateRemarksBlock: TestHelpers.getDocNodeSnapshot(docComment.privateRemarks),
s05_deprecatedBlock: TestHelpers.getDocNodeSnapshot(docComment.deprecatedBlock),
s06_paramBlocks: docComment.params.blocks.map(function (x) { return TestHelpers.getDocNodeSnapshot(x); }),
s07_typeParamBlocks: docComment.typeParams.blocks.map(function (x) { return TestHelpers.getDocNodeSnapshot(x); }),
s08_returnsBlock: TestHelpers.getDocNodeSnapshot(docComment.returnsBlock),
s09_customBlocks: docComment.customBlocks.map(function (x) { return TestHelpers.getDocNodeSnapshot(x); }),
s10_inheritDocTag: TestHelpers.getDocNodeSnapshot(docComment.inheritDocTag),
s11_modifierTags: docComment.modifierTagSet.nodes.map(function (x) { return TestHelpers.getDocNodeSnapshot(x); }),
s12_logMessages: parserContext.log.messages.map(function (message) { return message.text; })
}).toMatchSnapshot();
return parserContext;
};
/**
* Render a nice Jest snapshot object for a DocNode tree.
*/
TestHelpers.getDocNodeSnapshot = function (docNode) {
if (!docNode) {
return undefined;
}
var item = {
kind: docNode.kind
};
if (docNode instanceof nodes_1.DocExcerpt) {
item.kind += ': ' + docNode.excerptKind;
item.nodeExcerpt = TestHelpers.getEscaped(docNode.content.toString());
}
if (docNode instanceof nodes_1.DocPlainText) {
var docPlainText = docNode;
if (docPlainText.textExcerpt === undefined) {
item.nodePlainText = TestHelpers.getEscaped(docPlainText.text);
}
}
if (docNode instanceof nodes_1.DocErrorText) {
item.errorMessage = TestHelpers.getEscaped(docNode.errorMessage);
item.errorLocation = TestHelpers.getEscaped(docNode.errorLocation.toString());
if (docNode.errorLocation.startIndex > 0) {
// Show the preceding token to provide some context (e.g. is this the opening quote
// or closing quote?)
item.errorLocationPrecedingToken = docNode.errorLocation.parserContext.tokens[docNode.errorLocation.startIndex - 1].toString();
}
}
if (docNode.getChildNodes().length > 0) {
item.nodes = docNode.getChildNodes().map(function (x) { return TestHelpers.getDocNodeSnapshot(x); });
}
return item;
};
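    // The snapshot object mirrors the DocNode tree. A sketch of the shape (field names come
    // from the code above; the kind strings are illustrative values):
    //
    //   {
    //     kind: 'Paragraph',
    //     nodes: [
    //       { kind: 'Excerpt: PlainText', nodeExcerpt: 'summary text' }
    //     ]
    //   }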
TestHelpers._getTokenCoverageGapsSnapshot = function (parserContext) {
var tokenCoverageChecker = new TokenCoverageChecker_1.TokenCoverageChecker(parserContext);
return tokenCoverageChecker.getGaps(parserContext.docComment).map(function (x) { return x.toString(); });
};
return TestHelpers;
}());
exports.TestHelpers = TestHelpers;
//# sourceMappingURL=TestHelpers.js.map

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
{"version":3,"file":"TextRange.test.js","sourceRoot":"","sources":["../../../src/parser/__tests__/TextRange.test.ts"],"names":[],"mappings":";;AAAA,0CAAyC;AACzC,6CAA4C;AAE5C,SAAS,aAAa,CAAC,SAAoB;IACzC,KAAK,IAAI,CAAC,GAAW,CAAC,CAAC,EAAE,CAAC,IAAI,SAAS,CAAC,GAAG,GAAG,CAAC,EAAE,EAAE,CAAC,EAAE;QACpD,6BAA6B;QAC7B,IAAM,CAAC,GAAW,yBAAW,CAAC,UAAU,CAAC,SAAS,CAAC,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;QAErF,yCAAyC;QACzC,IAAM,OAAO,GAAW,yBAAW,CAAC,UAAU,CAAC,SAAS,CAAC,MAAM,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC;QAE5F,MAAM,CAAC;YACL,CAAC,EAAE,CAAC;YACJ,OAAO,EAAE,OAAO;YAChB,CAAC,EAAE,CAAC;YACJ,QAAQ,EAAE,SAAS,CAAC,WAAW,CAAC,CAAC,CAAC;SACnC,CAAC,CAAC,eAAe,EAAE,CAAC;KACtB;AACH,CAAC;AAED,IAAI,CAAC,wBAAwB,EAAE;IAC7B,IAAM,MAAM,GAAW,YAAY,CAAC;IACpC,IAAM,SAAS,GAAc,qBAAS,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;IAC1D,MAAM,CAAC,SAAS,CAAC,QAAQ,EAAE,CAAC,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;IAE7C,IAAM,QAAQ,GAAc,SAAS,CAAC,WAAW,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;IACxD,MAAM,CAAC,QAAQ,CAAC,CAAC,eAAe,CAAC,UAAU,CAAC,CAAC;AAC/C,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,qBAAqB,EAAE;IAC1B,IAAM,SAAS,GAAc,qBAAS,CAAC,UAAU,CAC/C;QACE,IAAI;QACJ,IAAI;QACJ,EAAE;QACF,IAAI;QACJ,iCAAiC;KAClC,CAAC,IAAI,CAAC,IAAI,CAAC,CACb,CAAC;IACF,aAAa,CAAC,SAAS,CAAC,CAAC;AAC3B,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,4BAA4B,EAAE;IACjC,IAAM,SAAS,GAAc,qBAAS,CAAC,UAAU,CAAC,EAAE,CAAC,CAAC;IACtD,aAAa,CAAC,SAAS,CAAC,CAAC;AAC3B,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,yBAAyB,EAAE;IAC9B,IAAM,SAAS,GAAc,qBAAS,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;IACxD,aAAa,CAAC,SAAS,CAAC,CAAC;AAC3B,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,yBAAyB,EAAE;IAC9B,IAAM,SAAS,GAAc,qBAAS,CAAC,UAAU,CAAC,IAAI,CAAC,CAAC;IACxD,aAAa,CAAC,SAAS,CAAC,CAAC;AAC3B,CAAC,CAAC,CAAC;AAEH,IAAI,CAAC,8BAA8B,EAAE;IACnC,4CAA4C;IAC5C,IAAM,SAAS,GAAc,qBAAS,CAAC,UAAU,CAAC,MAAM,CAAC,CAAC;IAC1D,aAAa,CAAC,SAAS,CAAC,CAAC;AAC3B,CAAC,CAAC,CAAC","sourcesContent":["import { TextRange } from '../TextRange';\r\nimport { TestHelpers } from './TestHelpers';\r\n\r\nfunction matchSnapshot(textRange: TextRange): void {\r\n for (let i: number = -1; i <= textRange.end + 1; ++i) {\r\n // Show the current character\r\n const c: string = TestHelpers.getEscaped(textRange.buffer.substr(Math.max(i, 0), 1));\r\n\r\n // Show the next 10 characters of context\r\n const context: string = TestHelpers.getEscaped(textRange.buffer.substr(Math.max(i, 0), 10));\r\n\r\n expect({\r\n c: c,\r\n context: context,\r\n i: i,\r\n location: textRange.getLocation(i)\r\n }).toMatchSnapshot();\r\n }\r\n}\r\n\r\ntest('construction scenarios', () => {\r\n const buffer: string = '0123456789';\r\n const textRange: TextRange = TextRange.fromString(buffer);\r\n expect(textRange.toString()).toEqual(buffer);\r\n\r\n const subRange: TextRange = textRange.getNewRange(3, 6);\r\n expect(subRange).toMatchSnapshot('subRange');\r\n});\r\n\r\ntest('getLocation() basic', () => {\r\n const textRange: TextRange = TextRange.fromString(\r\n [\r\n 'L1',\r\n 'L2',\r\n '', // (line 3 is blank)\r\n 'L4',\r\n 'L5+CR\\rL5+CRLF\\r\\nL6+LFCR\\n\\rL7'\r\n ].join('\\n')\r\n );\r\n matchSnapshot(textRange);\r\n});\r\n\r\ntest('getLocation() empty string', () => {\r\n const textRange: TextRange = TextRange.fromString('');\r\n matchSnapshot(textRange);\r\n});\r\n\r\ntest('getLocation() CR string', () => {\r\n const textRange: TextRange = TextRange.fromString('\\r');\r\n matchSnapshot(textRange);\r\n});\r\n\r\ntest('getLocation() LF string', () => {\r\n const textRange: TextRange = TextRange.fromString('\\n');\r\n matchSnapshot(textRange);\r\n});\r\n\r\ntest('getLocation() tab 
characters', () => {\r\n // Tab character advances by only one column\r\n const textRange: TextRange = TextRange.fromString('1\\t3');\r\n matchSnapshot(textRange);\r\n});\r\n"]}

View File

@ -0,0 +1,121 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.TokenCoverageChecker = void 0;
var nodes_1 = require("../../nodes");
var TokenSequence_1 = require("../TokenSequence");
var Token_1 = require("../Token");
/**
* The TokenCoverageChecker performs two diagnostics to detect parser bugs:
* 1. It checks for two DocNode objects whose excerpt contains overlapping tokens.
* By design, a single character from the input stream should be associated with
* at most one TokenSequence.
 * 2. It checks for gaps, i.e. input tokens that were not associated with any DocNode
 *    (that is reachable from the final DocComment node tree). In some cases this is
 *    okay. For example, if `@public` appears twice inside a comment, the second
 *    redundant instance is ignored. But we track the gaps in the unit test snapshots
 *    to ensure that, in general, every input character is associated with an excerpt
 *    for a DocNode.
*/
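/*
 * A minimal usage sketch (illustration only), matching how TestHelpers uses this class:
 *
 *   const { TokenCoverageChecker } = require('./TokenCoverageChecker');
 *
 *   const checker = new TokenCoverageChecker(parserContext);
 *   const gaps = checker.getGaps(parserContext.docComment);    // TokenSequence[]
 *   console.log(gaps.map(function (x) { return x.toString(); }));
 *   // or: checker.reportGaps(parserContext.docComment);       // throws if any gap is found
 */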
var TokenCoverageChecker = /** @class */ (function () {
function TokenCoverageChecker(parserContext) {
this._parserContext = parserContext;
this._tokenAssociations = [];
this._tokenAssociations.length = parserContext.tokens.length;
}
TokenCoverageChecker.prototype.getGaps = function (rootNode) {
this._addNodeTree(rootNode);
return this._checkForGaps(false);
};
TokenCoverageChecker.prototype.reportGaps = function (rootNode) {
this._addNodeTree(rootNode);
this._checkForGaps(true);
};
TokenCoverageChecker.prototype._addNodeTree = function (node) {
if (node instanceof nodes_1.DocExcerpt) {
this._addSequence(node.content, node);
}
for (var _i = 0, _a = node.getChildNodes(); _i < _a.length; _i++) {
var childNode = _a[_i];
this._addNodeTree(childNode);
}
};
TokenCoverageChecker.prototype._addSequence = function (tokenSequence, docNode) {
var newTokenAssociation = { docNode: docNode, tokenSequence: tokenSequence };
for (var i = tokenSequence.startIndex; i < tokenSequence.endIndex; ++i) {
var tokenAssociation = this._tokenAssociations[i];
if (tokenAssociation) {
throw new Error("Overlapping content encountered between" +
(" " + this._formatTokenAssociation(tokenAssociation) + " and") +
(" " + this._formatTokenAssociation(newTokenAssociation)));
}
this._tokenAssociations[i] = newTokenAssociation;
}
};
TokenCoverageChecker.prototype._checkForGaps = function (reportGaps) {
var gaps = [];
var gapStartIndex = undefined;
var tokenAssociationBeforeGap = undefined;
var tokens = this._parserContext.tokens;
if (tokens[tokens.length - 1].kind !== Token_1.TokenKind.EndOfInput) {
throw new Error('Missing EndOfInput token');
}
for (var i = 0; i < this._parserContext.tokens.length - 1; ++i) {
var tokenAssociation = this._tokenAssociations[i];
if (gapStartIndex === undefined) {
// No gap found yet
if (tokenAssociation) {
tokenAssociationBeforeGap = tokenAssociation;
}
else {
// We found the start of a gap
gapStartIndex = i;
}
}
else {
// Is this the end of the gap?
if (tokenAssociation) {
var gap = new TokenSequence_1.TokenSequence({
parserContext: this._parserContext,
startIndex: gapStartIndex,
endIndex: i
});
if (reportGaps) {
this._reportGap(gap, tokenAssociationBeforeGap, tokenAssociation);
}
gaps.push(gap);
gapStartIndex = undefined;
tokenAssociationBeforeGap = undefined;
}
}
}
        if (gapStartIndex !== undefined) {
var gap = new TokenSequence_1.TokenSequence({
parserContext: this._parserContext,
startIndex: gapStartIndex,
endIndex: this._parserContext.tokens.length
});
if (reportGaps) {
this._reportGap(gap, tokenAssociationBeforeGap, undefined);
}
gaps.push(gap);
}
return gaps;
};
TokenCoverageChecker.prototype._reportGap = function (gap, tokenAssociationBeforeGap, tokenAssociationAfterGap) {
var message = 'Gap encountered';
if (tokenAssociationBeforeGap) {
message += ' before ' + this._formatTokenAssociation(tokenAssociationBeforeGap);
}
if (tokenAssociationAfterGap) {
message += ' after ' + this._formatTokenAssociation(tokenAssociationAfterGap);
}
message += ': ' + JSON.stringify(gap.toString());
throw new Error(message);
};
TokenCoverageChecker.prototype._formatTokenAssociation = function (tokenAssociation) {
return tokenAssociation.docNode.kind + " (" + JSON.stringify(tokenAssociation.tokenSequence.toString()) + ")";
};
return TokenCoverageChecker;
}());
exports.TokenCoverageChecker = TokenCoverageChecker;
//# sourceMappingURL=TokenCoverageChecker.js.map

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long