mirror of
https://github.com/amehime/hexo-renderer-multi-markdown-it.git
synced 2026-04-05 04:59:04 +08:00
first
This commit is contained in:
7
LICENSE
Normal file
7
LICENSE
Normal file
@@ -0,0 +1,7 @@
|
||||
Copyright (c) 2015 Celso Miranda
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
57
README.md
Normal file
57
README.md
Normal file
@@ -0,0 +1,57 @@
|
||||
# hexo-renderer-markdown-it
|
||||
|
||||
[](https://travis-ci.org/hexojs/hexo-renderer-markdown-it)
|
||||
[](https://www.npmjs.com/package/hexo-renderer-markdown-it)
|
||||
[](https://david-dm.org/hexojs/hexo-renderer-markdown-it)
|
||||
[](https://coveralls.io/github/hexojs/hexo-renderer-markdown-it?branch=master)
|
||||
|
||||
This renderer plugin uses [Markdown-it] as a render engine on [Hexo]. Adds support for [Markdown] and [CommonMark].
|
||||
|
||||
## Main Features
|
||||
- Support for [Markdown], [GFM] and [CommonMark]
|
||||
- Extensive configuration
|
||||
- Faster than the default renderer | `hexo-renderer-marked`
|
||||
- Safe ID for headings
|
||||
- Anchors for headings with ID
|
||||
- Footnotes
|
||||
- `<sub>` H<sub>2</sub>O
|
||||
- `<sup>` x<sup>2</sup>
|
||||
- `<ins>` <ins>Inserted</ins>
|
||||
|
||||
## Installation
|
||||
Follow the [installation guide](https://github.com/hexojs/hexo-renderer-markdown-it/wiki/Getting-Started).
|
||||
|
||||
## Options
|
||||
|
||||
``` yml
|
||||
markdown:
|
||||
render:
|
||||
html: true
|
||||
xhtmlOut: false
|
||||
breaks: true
|
||||
linkify: true
|
||||
typographer: true
|
||||
quotes: '“”‘’'
|
||||
plugins:
|
||||
anchors:
|
||||
level: 2
|
||||
collisionSuffix: ''
|
||||
permalink: false
|
||||
permalinkClass: 'header-anchor'
|
||||
permalinkSide: 'left'
|
||||
permalinkSymbol: '¶'
|
||||
case: 0
|
||||
separator: ''
|
||||
```
|
||||
|
||||
Refer to [the wiki](https://github.com/hexojs/hexo-renderer-markdown-it/wiki) for more details.
|
||||
|
||||
## Requests and bug reports
|
||||
If you have any feature requests or bugs to report, you're welcome to [file an issue](https://github.com/hexojs/hexo-renderer-markdown-it/issues).
|
||||
|
||||
|
||||
[CommonMark]: http://commonmark.org/
|
||||
[Markdown]: http://daringfireball.net/projects/markdown/
|
||||
[GFM]: https://help.github.com/articles/github-flavored-markdown/
|
||||
[Markdown-it]: https://github.com/markdown-it/markdown-it
|
||||
[Hexo]: http://hexo.io/
|
||||
28
index.js
Normal file
28
index.js
Normal file
@@ -0,0 +1,28 @@
|
||||
/* global hexo */
|
||||
|
||||
'use strict';
|
||||
|
||||
hexo.config.markdown = Object.assign({
|
||||
render: {},
|
||||
plugins: {}
|
||||
}, hexo.config.markdown);
|
||||
|
||||
hexo.config.markdown = Object.assign({
|
||||
html: true,
|
||||
xhtmlOut: false,
|
||||
breaks: true,
|
||||
linkify: true,
|
||||
typographer: true,
|
||||
quotes: '“”‘’'
|
||||
}, hexo.config.markdown.render);
|
||||
|
||||
|
||||
const renderer = require('./lib/renderer');
|
||||
|
||||
hexo.extend.renderer.register('md', 'html', renderer, true);
|
||||
hexo.extend.renderer.register('markdown', 'html', renderer, true);
|
||||
hexo.extend.renderer.register('mkd', 'html', renderer, true);
|
||||
hexo.extend.renderer.register('mkdn', 'html', renderer, true);
|
||||
hexo.extend.renderer.register('mdwn', 'html', renderer, true);
|
||||
hexo.extend.renderer.register('mdtxt', 'html', renderer, true);
|
||||
hexo.extend.renderer.register('mdtext', 'html', renderer, true);
|
||||
53
lib/markdown-it-abbr/README.md
Normal file
53
lib/markdown-it-abbr/README.md
Normal file
@@ -0,0 +1,53 @@
|
||||
# markdown-it-abbr
|
||||
|
||||
[](https://travis-ci.org/markdown-it/markdown-it-abbr)
|
||||
[](https://www.npmjs.org/package/markdown-it-abbr)
|
||||
[](https://coveralls.io/r/markdown-it/markdown-it-abbr?branch=master)
|
||||
|
||||
> Abbreviation (`<abbr>`) tag plugin for [markdown-it](https://github.com/markdown-it/markdown-it) markdown parser.
|
||||
|
||||
__v1.+ requires `markdown-it` v4.+, see changelog.__
|
||||
|
||||
Markup is based on [php markdown extra](https://michelf.ca/projects/php-markdown/extra/#abbr) definition, but without multiline support.
|
||||
|
||||
Markdown:
|
||||
|
||||
```
|
||||
*[HTML]: Hyper Text Markup Language
|
||||
*[W3C]: World Wide Web Consortium
|
||||
The HTML specification
|
||||
is maintained by the W3C.
|
||||
```
|
||||
|
||||
HTML:
|
||||
|
||||
```html
|
||||
<p>The <abbr title="Hyper Text Markup Language">HTML</abbr> specification
|
||||
is maintained by the <abbr title="World Wide Web Consortium">W3C</abbr>.</p>
|
||||
```
|
||||
|
||||
## Install
|
||||
|
||||
node.js, browser:
|
||||
|
||||
```bash
|
||||
npm install markdown-it-abbr --save
|
||||
bower install markdown-it-abbr --save
|
||||
```
|
||||
|
||||
## Use
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')()
|
||||
.use(require('markdown-it-abbr'));
|
||||
|
||||
md.render(/*...*/) // see example above
|
||||
```
|
||||
|
||||
_Differences in browser._ If you load script directly into the page, without
|
||||
package system, module will add itself globally as `window.markdownitAbbr`.
|
||||
|
||||
|
||||
## License
|
||||
|
||||
[MIT](https://github.com/markdown-it/markdown-it-abbr/blob/master/LICENSE)
|
||||
148
lib/markdown-it-abbr/index.js
Normal file
148
lib/markdown-it-abbr/index.js
Normal file
@@ -0,0 +1,148 @@
|
||||
// Enclose abbreviations in <abbr> tags
|
||||
//
|
||||
'use strict';
|
||||
|
||||
|
||||
module.exports = function sub_plugin(md) {
|
||||
var escapeRE = md.utils.escapeRE,
|
||||
arrayReplaceAt = md.utils.arrayReplaceAt;
|
||||
|
||||
// ASCII characters in Cc, Sc, Sm, Sk categories we should terminate on;
|
||||
// you can check character classes here:
|
||||
// http://www.unicode.org/Public/UNIDATA/UnicodeData.txt
|
||||
var OTHER_CHARS = ' \r\n$+<=>^`|~';
|
||||
|
||||
var UNICODE_PUNCT_RE = md.utils.lib.ucmicro.P.source;
|
||||
var UNICODE_SPACE_RE = md.utils.lib.ucmicro.Z.source;
|
||||
|
||||
|
||||
function abbr_def(state, startLine, endLine, silent) {
|
||||
var label, title, ch, labelStart, labelEnd,
|
||||
pos = state.bMarks[startLine] + state.tShift[startLine],
|
||||
max = state.eMarks[startLine];
|
||||
|
||||
if (pos + 2 >= max) { return false; }
|
||||
|
||||
if (state.src.charCodeAt(pos++) !== 0x2A/* * */) { return false; }
|
||||
if (state.src.charCodeAt(pos++) !== 0x5B/* [ */) { return false; }
|
||||
|
||||
labelStart = pos;
|
||||
|
||||
for (; pos < max; pos++) {
|
||||
ch = state.src.charCodeAt(pos);
|
||||
if (ch === 0x5B /* [ */) {
|
||||
return false;
|
||||
} else if (ch === 0x5D /* ] */) {
|
||||
labelEnd = pos;
|
||||
break;
|
||||
} else if (ch === 0x5C /* \ */) {
|
||||
pos++;
|
||||
}
|
||||
}
|
||||
|
||||
if (labelEnd < 0 || state.src.charCodeAt(labelEnd + 1) !== 0x3A/* : */) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (silent) { return true; }
|
||||
|
||||
label = state.src.slice(labelStart, labelEnd).replace(/\\(.)/g, '$1');
|
||||
title = state.src.slice(labelEnd + 2, max).trim();
|
||||
if (label.length === 0) { return false; }
|
||||
if (title.length === 0) { return false; }
|
||||
if (!state.env.abbreviations) { state.env.abbreviations = {}; }
|
||||
// prepend ':' to avoid conflict with Object.prototype members
|
||||
if (typeof state.env.abbreviations[':' + label] === 'undefined') {
|
||||
state.env.abbreviations[':' + label] = title;
|
||||
}
|
||||
|
||||
state.line = startLine + 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
function abbr_replace(state) {
|
||||
var i, j, l, tokens, token, text, nodes, pos, reg, m, regText, regSimple,
|
||||
currentToken,
|
||||
blockTokens = state.tokens;
|
||||
|
||||
if (!state.env.abbreviations) { return; }
|
||||
|
||||
regSimple = new RegExp('(?:' +
|
||||
Object.keys(state.env.abbreviations).map(function (x) {
|
||||
return x.substr(1);
|
||||
}).sort(function (a, b) {
|
||||
return b.length - a.length;
|
||||
}).map(escapeRE).join('|') +
|
||||
')');
|
||||
|
||||
regText = '(^|' + UNICODE_PUNCT_RE + '|' + UNICODE_SPACE_RE +
|
||||
'|[' + OTHER_CHARS.split('').map(escapeRE).join('') + '])'
|
||||
+ '(' + Object.keys(state.env.abbreviations).map(function (x) {
|
||||
return x.substr(1);
|
||||
}).sort(function (a, b) {
|
||||
return b.length - a.length;
|
||||
}).map(escapeRE).join('|') + ')'
|
||||
+ '($|' + UNICODE_PUNCT_RE + '|' + UNICODE_SPACE_RE +
|
||||
'|[' + OTHER_CHARS.split('').map(escapeRE).join('') + '])';
|
||||
|
||||
reg = new RegExp(regText, 'g');
|
||||
|
||||
for (j = 0, l = blockTokens.length; j < l; j++) {
|
||||
if (blockTokens[j].type !== 'inline') { continue; }
|
||||
tokens = blockTokens[j].children;
|
||||
|
||||
// We scan from the end, to keep position when new tags added.
|
||||
for (i = tokens.length - 1; i >= 0; i--) {
|
||||
currentToken = tokens[i];
|
||||
if (currentToken.type !== 'text') { continue; }
|
||||
|
||||
pos = 0;
|
||||
text = currentToken.content;
|
||||
reg.lastIndex = 0;
|
||||
nodes = [];
|
||||
|
||||
// fast regexp run to determine whether there are any abbreviated words
|
||||
// in the current token
|
||||
if (!regSimple.test(text)) { continue; }
|
||||
|
||||
while ((m = reg.exec(text))) {
|
||||
if (m.index > 0 || m[1].length > 0) {
|
||||
token = new state.Token('text', '', 0);
|
||||
token.content = text.slice(pos, m.index + m[1].length);
|
||||
nodes.push(token);
|
||||
}
|
||||
|
||||
token = new state.Token('abbr_open', 'abbr', 1);
|
||||
token.attrs = [ [ 'title', state.env.abbreviations[':' + m[2]] ] ];
|
||||
nodes.push(token);
|
||||
|
||||
token = new state.Token('text', '', 0);
|
||||
token.content = m[2];
|
||||
nodes.push(token);
|
||||
|
||||
token = new state.Token('abbr_close', 'abbr', -1);
|
||||
nodes.push(token);
|
||||
|
||||
reg.lastIndex -= m[3].length;
|
||||
pos = reg.lastIndex;
|
||||
}
|
||||
|
||||
if (!nodes.length) { continue; }
|
||||
|
||||
if (pos < text.length) {
|
||||
token = new state.Token('text', '', 0);
|
||||
token.content = text.slice(pos);
|
||||
nodes.push(token);
|
||||
}
|
||||
|
||||
// replace current node
|
||||
blockTokens[j].children = tokens = arrayReplaceAt(tokens, i, nodes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
md.block.ruler.before('reference', 'abbr_def', abbr_def, { alt: [ 'paragraph', 'reference' ] });
|
||||
|
||||
md.core.ruler.after('linkify', 'abbr_replace', abbr_replace);
|
||||
};
|
||||
29
lib/markdown-it-cjk-breaks/CHANGELOG.md
Normal file
29
lib/markdown-it-cjk-breaks/CHANGELOG.md
Normal file
@@ -0,0 +1,29 @@
|
||||
1.1.3 / 2019-02-01
|
||||
------------------
|
||||
|
||||
- Fixed browser module name, #3.
|
||||
|
||||
|
||||
1.1.2 / 2018-11-27
|
||||
------------------
|
||||
|
||||
- Dev deps bump.
|
||||
- `dist/` files rebuild, missed in prev release.
|
||||
|
||||
|
||||
1.1.1 / 2018-11-27
|
||||
------------------
|
||||
|
||||
- Fix crash due to incorrect softbreak handling, #2.
|
||||
|
||||
|
||||
1.1.0 / 2018-02-26
|
||||
------------------
|
||||
|
||||
- Switch to algorithm recommended for browsers in [css-text-3](https://www.w3.org/TR/css-text-3/#line-break-transform), #1.
|
||||
|
||||
|
||||
1.0.0 / 2018-02-23
|
||||
------------------
|
||||
|
||||
- First release.
|
||||
49
lib/markdown-it-cjk-breaks/README.md
Normal file
49
lib/markdown-it-cjk-breaks/README.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# markdown-it-cjk-breaks
|
||||
|
||||
[](https://travis-ci.org/markdown-it/markdown-it-cjk-breaks)
|
||||
[](https://www.npmjs.org/package/markdown-it-cjk-breaks)
|
||||
[](https://coveralls.io/github/markdown-it/markdown-it-cjk-breaks?branch=master)
|
||||
|
||||
> Plugin for [markdown-it](https://github.com/markdown-it/markdown-it) that suppresses linebreaks between east asian characters.
|
||||
|
||||
Normally newlines in a markdown text get rendered as newlines in output html text. Then browsers will usually render those newlines as whitespace (more smart behavior is included in w3c drafts, but not actually implemented by vendors).
|
||||
|
||||
This plugin finds and removes newlines that cannot be converted to space, algorithm matches [CSS Text Module Level 3](https://www.w3.org/TR/css-text-3/#line-break-transform):
|
||||
|
||||
- If the character immediately before or immediately after the segment break is the zero-width space character (U+200B), then the break is removed, leaving behind the zero-width space.
|
||||
- Otherwise, if the East Asian Width property [UAX11] of both the character before and after the segment break is F, W, or H (not A), and neither side is Hangul, then the segment break is removed.
|
||||
- Otherwise, the segment break is converted to a space (U+0020).
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
yarn add markdown-it-cjk-breaks
|
||||
```
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')();
|
||||
var cjk_breaks = require('markdown-it-cjk-breaks');
|
||||
|
||||
md.use(cjk_breaks);
|
||||
|
||||
md.render(`
|
||||
あおえ
|
||||
うい
|
||||
aoe
|
||||
ui
|
||||
`);
|
||||
|
||||
// returns:
|
||||
//
|
||||
//<p>あおえうい
|
||||
//aoe
|
||||
//ui</p>
|
||||
```
|
||||
|
||||
|
||||
## License
|
||||
|
||||
[MIT](https://github.com/markdown-it/markdown-it-cjk-breaks/blob/master/LICENSE)
|
||||
78
lib/markdown-it-cjk-breaks/index.js
Normal file
78
lib/markdown-it-cjk-breaks/index.js
Normal file
@@ -0,0 +1,78 @@
|
||||
'use strict';
|
||||
|
||||
var eastAsianWidth = require('eastasianwidth').eastAsianWidth;
|
||||
|
||||
|
||||
function is_surrogate(c1, c2) {
|
||||
return c1 >= 0xD800 && c1 <= 0xDBFF && c2 >= 0xDC00 && c2 <= 0xDFFF;
|
||||
}
|
||||
|
||||
|
||||
function is_hangul(c) {
|
||||
// require('unicode-10.0.0/Script/Hangul/regex')
|
||||
/* eslint-disable max-len */
|
||||
return /[\u1100-\u11FF\u302E\u302F\u3131-\u318E\u3200-\u321E\u3260-\u327E\uA960-\uA97C\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uFFA0-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF\uFFD2-\uFFD7\uFFDA-\uFFDC]/.test(c);
|
||||
/* eslint-enable max-len */
|
||||
}
|
||||
|
||||
|
||||
function process_inlines(tokens) {
|
||||
var i, j, last, next, c1, c2, remove_break;
|
||||
|
||||
for (i = 0; i < tokens.length; i++) {
|
||||
if (tokens[i].type !== 'softbreak') continue;
|
||||
|
||||
// default last/next character to space
|
||||
last = next = ' ';
|
||||
|
||||
for (j = i - 1; j >= 0; j--) {
|
||||
if (tokens[j].type !== 'text') continue;
|
||||
|
||||
c1 = tokens[j].content.charCodeAt(tokens[j].content.length - 2);
|
||||
c2 = tokens[j].content.charCodeAt(tokens[j].content.length - 1);
|
||||
|
||||
last = tokens[j].content.slice(is_surrogate(c1, c2) ? -2 : -1);
|
||||
break;
|
||||
}
|
||||
|
||||
for (j = i + 1; j < tokens.length; j++) {
|
||||
if (tokens[j].type !== 'text') continue;
|
||||
|
||||
c1 = tokens[j].content.charCodeAt(0);
|
||||
c2 = tokens[j].content.charCodeAt(1);
|
||||
|
||||
next = tokens[j].content.slice(0, is_surrogate(c1, c2) ? 2 : 1);
|
||||
break;
|
||||
}
|
||||
|
||||
remove_break = false;
|
||||
|
||||
// remove newline if it's adjacent to ZWSP
|
||||
if (last === '\u200b' || next === '\u200b') remove_break = true;
|
||||
|
||||
// remove newline if both characters are fullwidth (F), wide (W) or
|
||||
// halfwidth (H), but not Hangul
|
||||
if (/^[FWH]$/.test(eastAsianWidth(last)) && /^[FWH]$/.test(eastAsianWidth(next))) {
|
||||
if (!is_hangul(last) && !is_hangul(next)) remove_break = true;
|
||||
}
|
||||
|
||||
if (remove_break) {
|
||||
tokens[i].type = 'text';
|
||||
tokens[i].content = '';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
function cjk_breaks(state) {
|
||||
for (var blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {
|
||||
if (state.tokens[blkIdx].type !== 'inline') continue;
|
||||
|
||||
process_inlines(state.tokens[blkIdx].children, state);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
module.exports = function cjk_breaks_plugin(md) {
|
||||
md.core.ruler.push('cjk_breaks', cjk_breaks);
|
||||
};
|
||||
95
lib/markdown-it-container/README.md
Normal file
95
lib/markdown-it-container/README.md
Normal file
@@ -0,0 +1,95 @@
|
||||
# markdown-it-container
|
||||
|
||||
[](https://travis-ci.org/markdown-it/markdown-it-container)
|
||||
[](https://www.npmjs.org/package/markdown-it-container)
|
||||
[](https://coveralls.io/r/markdown-it/markdown-it-container?branch=master)
|
||||
|
||||
> Plugin for creating block-level custom containers for [markdown-it](https://github.com/markdown-it/markdown-it) markdown parser.
|
||||
|
||||
__v2.+ requires `markdown-it` v5.+, see changelog.__
|
||||
|
||||
With this plugin you can create block containers like:
|
||||
|
||||
```
|
||||
::: warning
|
||||
*here be dragons*
|
||||
:::
|
||||
```
|
||||
|
||||
.... and specify how they should be rendered. If no renderer defined, `<div>` with
|
||||
container name class will be created:
|
||||
|
||||
```html
|
||||
<div class="warning">
|
||||
<em>here be dragons</em>
|
||||
</div>
|
||||
```
|
||||
|
||||
Markup is the same as for [fenced code blocks](http://spec.commonmark.org/0.18/#fenced-code-blocks).
|
||||
Difference is, that marker use another character and content is rendered as markdown markup.
|
||||
|
||||
|
||||
## Installation
|
||||
|
||||
node.js, browser:
|
||||
|
||||
```bash
|
||||
$ npm install markdown-it-container --save
|
||||
$ bower install markdown-it-container --save
|
||||
```
|
||||
|
||||
|
||||
## API
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')()
|
||||
.use(require('markdown-it-container'), name [, options]);
|
||||
```
|
||||
|
||||
Params:
|
||||
|
||||
- __name__ - container name (mandatory)
|
||||
- __options:__
|
||||
- __validate__ - optional, function to validate tail after opening marker, should
|
||||
return `true` on success.
|
||||
- __render__ - optional, renderer function for opening/closing tokens.
|
||||
- __marker__ - optional (`:`), character to use in delimiter.
|
||||
|
||||
|
||||
## Example
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')();
|
||||
|
||||
md.use(require('markdown-it-container'), 'spoiler', {
|
||||
|
||||
validate: function(params) {
|
||||
return params.trim().match(/^spoiler\s+(.*)$/);
|
||||
},
|
||||
|
||||
render: function (tokens, idx) {
|
||||
var m = tokens[idx].info.trim().match(/^spoiler\s+(.*)$/);
|
||||
|
||||
if (tokens[idx].nesting === 1) {
|
||||
// opening tag
|
||||
return '<details><summary>' + m[1] + '</summary>\n';
|
||||
|
||||
} else {
|
||||
// closing tag
|
||||
return '</details>\n';
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
console.log(md.render('::: spoiler click me\n*content*\n:::\n'));
|
||||
|
||||
// Output:
|
||||
//
|
||||
// <details><summary>click me</summary>
|
||||
// <p><em>content</em></p>
|
||||
// </details>
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
[MIT](https://github.com/markdown-it/markdown-it-container/blob/master/LICENSE)
|
||||
143
lib/markdown-it-container/index.js
Normal file
143
lib/markdown-it-container/index.js
Normal file
@@ -0,0 +1,143 @@
|
||||
// Process block-level custom containers
|
||||
//
|
||||
'use strict';
|
||||
|
||||
|
||||
module.exports = function container_plugin(md, name, options) {
|
||||
|
||||
function validateDefault(params) {
|
||||
return params.trim().split(' ', 2)[0] === name;
|
||||
}
|
||||
|
||||
function renderDefault(tokens, idx, _options, env, self) {
|
||||
|
||||
// add a class to the opening tag
|
||||
if (tokens[idx].nesting === 1) {
|
||||
tokens[idx].attrPush([ 'class', name ]);
|
||||
}
|
||||
|
||||
return self.renderToken(tokens, idx, _options, env, self);
|
||||
}
|
||||
|
||||
options = options || {};
|
||||
|
||||
var min_markers = 3,
|
||||
marker_str = options.marker || ':',
|
||||
marker_char = marker_str.charCodeAt(0),
|
||||
marker_len = marker_str.length,
|
||||
validate = options.validate || validateDefault,
|
||||
render = options.render || renderDefault;
|
||||
|
||||
function container(state, startLine, endLine, silent) {
|
||||
var pos, nextLine, marker_count, markup, params, token,
|
||||
old_parent, old_line_max,
|
||||
auto_closed = false,
|
||||
start = state.bMarks[startLine] + state.tShift[startLine],
|
||||
max = state.eMarks[startLine];
|
||||
|
||||
// Check out the first character quickly,
|
||||
// this should filter out most of non-containers
|
||||
//
|
||||
if (marker_char !== state.src.charCodeAt(start)) { return false; }
|
||||
|
||||
// Check out the rest of the marker string
|
||||
//
|
||||
for (pos = start + 1; pos <= max; pos++) {
|
||||
if (marker_str[(pos - start) % marker_len] !== state.src[pos]) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
marker_count = Math.floor((pos - start) / marker_len);
|
||||
if (marker_count < min_markers) { return false; }
|
||||
pos -= (pos - start) % marker_len;
|
||||
|
||||
markup = state.src.slice(start, pos);
|
||||
params = state.src.slice(pos, max);
|
||||
if (!validate(params)) { return false; }
|
||||
|
||||
// Since start is found, we can report success here in validation mode
|
||||
//
|
||||
if (silent) { return true; }
|
||||
|
||||
// Search for the end of the block
|
||||
//
|
||||
nextLine = startLine;
|
||||
|
||||
for (;;) {
|
||||
nextLine++;
|
||||
if (nextLine >= endLine) {
|
||||
// unclosed block should be autoclosed by end of document.
|
||||
// also block seems to be autoclosed by end of parent
|
||||
break;
|
||||
}
|
||||
|
||||
start = state.bMarks[nextLine] + state.tShift[nextLine];
|
||||
max = state.eMarks[nextLine];
|
||||
|
||||
if (start < max && state.sCount[nextLine] < state.blkIndent) {
|
||||
// non-empty line with negative indent should stop the list:
|
||||
// - ```
|
||||
// test
|
||||
break;
|
||||
}
|
||||
|
||||
if (marker_char !== state.src.charCodeAt(start)) { continue; }
|
||||
|
||||
if (state.sCount[nextLine] - state.blkIndent >= 4) {
|
||||
// closing fence should be indented less than 4 spaces
|
||||
continue;
|
||||
}
|
||||
|
||||
for (pos = start + 1; pos <= max; pos++) {
|
||||
if (marker_str[(pos - start) % marker_len] !== state.src[pos]) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// closing code fence must be at least as long as the opening one
|
||||
if (Math.floor((pos - start) / marker_len) < marker_count) { continue; }
|
||||
|
||||
// make sure tail has spaces only
|
||||
pos -= (pos - start) % marker_len;
|
||||
pos = state.skipSpaces(pos);
|
||||
|
||||
if (pos < max) { continue; }
|
||||
|
||||
// found!
|
||||
auto_closed = true;
|
||||
break;
|
||||
}
|
||||
|
||||
old_parent = state.parentType;
|
||||
old_line_max = state.lineMax;
|
||||
state.parentType = 'container';
|
||||
|
||||
// this will prevent lazy continuations from ever going past our end marker
|
||||
state.lineMax = nextLine;
|
||||
|
||||
token = state.push('container_' + name + '_open', 'div', 1);
|
||||
token.markup = markup;
|
||||
token.block = true;
|
||||
token.info = params;
|
||||
token.map = [ startLine, nextLine ];
|
||||
|
||||
state.md.block.tokenize(state, startLine + 1, nextLine);
|
||||
|
||||
token = state.push('container_' + name + '_close', 'div', -1);
|
||||
token.markup = state.src.slice(start, pos);
|
||||
token.block = true;
|
||||
|
||||
state.parentType = old_parent;
|
||||
state.lineMax = old_line_max;
|
||||
state.line = nextLine + (auto_closed ? 1 : 0);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
md.block.ruler.before('fence', 'container_' + name, container, {
|
||||
alt: [ 'paragraph', 'reference', 'blockquote', 'list' ]
|
||||
});
|
||||
md.renderer.rules['container_' + name + '_open'] = render;
|
||||
md.renderer.rules['container_' + name + '_close'] = render;
|
||||
};
|
||||
38
lib/markdown-it-deflist/README.md
Normal file
38
lib/markdown-it-deflist/README.md
Normal file
@@ -0,0 +1,38 @@
|
||||
# markdown-it-deflist
|
||||
|
||||
[](https://travis-ci.org/markdown-it/markdown-it-deflist)
|
||||
[](https://www.npmjs.org/package/markdown-it-deflist)
|
||||
[](https://coveralls.io/r/markdown-it/markdown-it-deflist?branch=master)
|
||||
|
||||
> Definition list (`<dl>`) tag plugin for [markdown-it](https://github.com/markdown-it/markdown-it) markdown parser.
|
||||
|
||||
__v2.+ requires `markdown-it` v5.+, see changelog.__
|
||||
|
||||
Syntax is based on [pandoc definition lists](http://johnmacfarlane.net/pandoc/README.html#definition-lists).
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
node.js, browser:
|
||||
|
||||
```bash
|
||||
npm install markdown-it-deflist --save
|
||||
bower install markdown-it-deflist --save
|
||||
```
|
||||
|
||||
## Use
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')()
|
||||
.use(require('markdown-it-deflist'));
|
||||
|
||||
md.render(/*...*/);
|
||||
```
|
||||
|
||||
_Differences in browser._ If you load script directly into the page, without
|
||||
package system, module will add itself globally as `window.markdownitDeflist`.
|
||||
|
||||
|
||||
## License
|
||||
|
||||
[MIT](https://github.com/markdown-it/markdown-it-deflist/blob/master/LICENSE)
|
||||
228
lib/markdown-it-deflist/index.js
Normal file
228
lib/markdown-it-deflist/index.js
Normal file
@@ -0,0 +1,228 @@
|
||||
// Process definition lists
|
||||
//
|
||||
'use strict';
|
||||
|
||||
|
||||
module.exports = function deflist_plugin(md) {
|
||||
var isSpace = md.utils.isSpace;
|
||||
|
||||
// Search `[:~][\n ]`, returns next pos after marker on success
|
||||
// or -1 on fail.
|
||||
function skipMarker(state, line) {
|
||||
var pos, marker,
|
||||
start = state.bMarks[line] + state.tShift[line],
|
||||
max = state.eMarks[line];
|
||||
|
||||
if (start >= max) { return -1; }
|
||||
|
||||
// Check bullet
|
||||
marker = state.src.charCodeAt(start++);
|
||||
if (marker !== 0x7E/* ~ */ && marker !== 0x3A/* : */) { return -1; }
|
||||
|
||||
pos = state.skipSpaces(start);
|
||||
|
||||
// require space after ":"
|
||||
if (start === pos) { return -1; }
|
||||
|
||||
// no empty definitions, e.g. " : "
|
||||
if (pos >= max) { return -1; }
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
function markTightParagraphs(state, idx) {
|
||||
var i, l,
|
||||
level = state.level + 2;
|
||||
|
||||
for (i = idx + 2, l = state.tokens.length - 2; i < l; i++) {
|
||||
if (state.tokens[i].level === level && state.tokens[i].type === 'paragraph_open') {
|
||||
state.tokens[i + 2].hidden = true;
|
||||
state.tokens[i].hidden = true;
|
||||
i += 2;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function deflist(state, startLine, endLine, silent) {
|
||||
var ch,
|
||||
contentStart,
|
||||
ddLine,
|
||||
dtLine,
|
||||
itemLines,
|
||||
listLines,
|
||||
listTokIdx,
|
||||
max,
|
||||
nextLine,
|
||||
offset,
|
||||
oldDDIndent,
|
||||
oldIndent,
|
||||
oldParentType,
|
||||
oldSCount,
|
||||
oldTShift,
|
||||
oldTight,
|
||||
pos,
|
||||
prevEmptyEnd,
|
||||
tight,
|
||||
token;
|
||||
|
||||
if (silent) {
|
||||
// quirk: validation mode validates a dd block only, not a whole deflist
|
||||
if (state.ddIndent < 0) { return false; }
|
||||
return skipMarker(state, startLine) >= 0;
|
||||
}
|
||||
|
||||
nextLine = startLine + 1;
|
||||
if (nextLine >= endLine) { return false; }
|
||||
|
||||
if (state.isEmpty(nextLine)) {
|
||||
nextLine++;
|
||||
if (nextLine >= endLine) { return false; }
|
||||
}
|
||||
|
||||
if (state.sCount[nextLine] < state.blkIndent) { return false; }
|
||||
contentStart = skipMarker(state, nextLine);
|
||||
if (contentStart < 0) { return false; }
|
||||
|
||||
// Start list
|
||||
listTokIdx = state.tokens.length;
|
||||
tight = true;
|
||||
|
||||
token = state.push('dl_open', 'dl', 1);
|
||||
token.map = listLines = [ startLine, 0 ];
|
||||
|
||||
//
|
||||
// Iterate list items
|
||||
//
|
||||
|
||||
dtLine = startLine;
|
||||
ddLine = nextLine;
|
||||
|
||||
// One definition list can contain multiple DTs,
|
||||
// and one DT can be followed by multiple DDs.
|
||||
//
|
||||
// Thus, there is two loops here, and label is
|
||||
// needed to break out of the second one
|
||||
//
|
||||
/*eslint no-labels:0,block-scoped-var:0*/
|
||||
OUTER:
|
||||
for (;;) {
|
||||
prevEmptyEnd = false;
|
||||
|
||||
token = state.push('dt_open', 'dt', 1);
|
||||
token.map = [ dtLine, dtLine ];
|
||||
|
||||
token = state.push('inline', '', 0);
|
||||
token.map = [ dtLine, dtLine ];
|
||||
token.content = state.getLines(dtLine, dtLine + 1, state.blkIndent, false).trim();
|
||||
token.children = [];
|
||||
|
||||
token = state.push('dt_close', 'dt', -1);
|
||||
|
||||
for (;;) {
|
||||
token = state.push('dd_open', 'dd', 1);
|
||||
token.map = itemLines = [ nextLine, 0 ];
|
||||
|
||||
pos = contentStart;
|
||||
max = state.eMarks[ddLine];
|
||||
offset = state.sCount[ddLine] + contentStart - (state.bMarks[ddLine] + state.tShift[ddLine]);
|
||||
|
||||
while (pos < max) {
|
||||
ch = state.src.charCodeAt(pos);
|
||||
|
||||
if (isSpace(ch)) {
|
||||
if (ch === 0x09) {
|
||||
offset += 4 - offset % 4;
|
||||
} else {
|
||||
offset++;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
|
||||
pos++;
|
||||
}
|
||||
|
||||
contentStart = pos;
|
||||
|
||||
oldTight = state.tight;
|
||||
oldDDIndent = state.ddIndent;
|
||||
oldIndent = state.blkIndent;
|
||||
oldTShift = state.tShift[ddLine];
|
||||
oldSCount = state.sCount[ddLine];
|
||||
oldParentType = state.parentType;
|
||||
state.blkIndent = state.ddIndent = state.sCount[ddLine] + 2;
|
||||
state.tShift[ddLine] = contentStart - state.bMarks[ddLine];
|
||||
state.sCount[ddLine] = offset;
|
||||
state.tight = true;
|
||||
state.parentType = 'deflist';
|
||||
|
||||
state.md.block.tokenize(state, ddLine, endLine, true);
|
||||
|
||||
// If any of list item is tight, mark list as tight
|
||||
if (!state.tight || prevEmptyEnd) {
|
||||
tight = false;
|
||||
}
|
||||
// Item become loose if finish with empty line,
|
||||
// but we should filter last element, because it means list finish
|
||||
prevEmptyEnd = (state.line - ddLine) > 1 && state.isEmpty(state.line - 1);
|
||||
|
||||
state.tShift[ddLine] = oldTShift;
|
||||
state.sCount[ddLine] = oldSCount;
|
||||
state.tight = oldTight;
|
||||
state.parentType = oldParentType;
|
||||
state.blkIndent = oldIndent;
|
||||
state.ddIndent = oldDDIndent;
|
||||
|
||||
token = state.push('dd_close', 'dd', -1);
|
||||
|
||||
itemLines[1] = nextLine = state.line;
|
||||
|
||||
if (nextLine >= endLine) { break OUTER; }
|
||||
|
||||
if (state.sCount[nextLine] < state.blkIndent) { break OUTER; }
|
||||
contentStart = skipMarker(state, nextLine);
|
||||
if (contentStart < 0) { break; }
|
||||
|
||||
ddLine = nextLine;
|
||||
|
||||
// go to the next loop iteration:
|
||||
// insert DD tag and repeat checking
|
||||
}
|
||||
|
||||
if (nextLine >= endLine) { break; }
|
||||
dtLine = nextLine;
|
||||
|
||||
if (state.isEmpty(dtLine)) { break; }
|
||||
if (state.sCount[dtLine] < state.blkIndent) { break; }
|
||||
|
||||
ddLine = dtLine + 1;
|
||||
if (ddLine >= endLine) { break; }
|
||||
if (state.isEmpty(ddLine)) { ddLine++; }
|
||||
if (ddLine >= endLine) { break; }
|
||||
|
||||
if (state.sCount[ddLine] < state.blkIndent) { break; }
|
||||
contentStart = skipMarker(state, ddLine);
|
||||
if (contentStart < 0) { break; }
|
||||
|
||||
// go to the next loop iteration:
|
||||
// insert DT and DD tags and repeat checking
|
||||
}
|
||||
|
||||
// Finalize list
|
||||
token = state.push('dl_close', 'dl', -1);
|
||||
|
||||
listLines[1] = nextLine;
|
||||
|
||||
state.line = nextLine;
|
||||
|
||||
// mark paragraphs tight if needed
|
||||
if (tight) {
|
||||
markTightParagraphs(state, listTokIdx);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
md.block.ruler.before('paragraph', 'deflist', deflist, { alt: [ 'paragraph', 'reference' ] });
|
||||
};
|
||||
101
lib/markdown-it-emoji/README.md
Normal file
101
lib/markdown-it-emoji/README.md
Normal file
@@ -0,0 +1,101 @@
|
||||
# markdown-it-emoji
|
||||
|
||||
[](https://travis-ci.org/markdown-it/markdown-it-emoji)
|
||||
[](https://www.npmjs.org/package/markdown-it-emoji)
|
||||
[](https://coveralls.io/github/markdown-it/markdown-it-emoji?branch=master)
|
||||
|
||||
> Plugin for [markdown-it](https://github.com/markdown-it/markdown-it) markdown parser, adding emoji & emoticon syntax support.
|
||||
|
||||
__v1.+ requires `markdown-it` v4.+, see changelog.__
|
||||
|
||||
Two versions:
|
||||
|
||||
- __Full__ (default), with all github supported emojis.
|
||||
- [Light](https://github.com/markdown-it/markdown-it-emoji/blob/master/lib/data/light.json), with only well-supported unicode emojis and reduced size.
|
||||
|
||||
Also supports emoticons [shortcuts](https://github.com/markdown-it/markdown-it-emoji/blob/master/lib/data/shortcuts.js) like `:)`, `:-(`, and others. See the full list in the link above.
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
node.js, browser:
|
||||
|
||||
```bash
|
||||
npm install markdown-it-emoji --save
|
||||
bower install markdown-it-emoji --save
|
||||
```
|
||||
|
||||
## Use
|
||||
|
||||
### init
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')();
|
||||
var emoji = require('markdown-it-emoji');
|
||||
// Or for light version
|
||||
// var emoji = require('markdown-it-emoji/light');
|
||||
|
||||
md.use(emoji [, options]);
|
||||
```
|
||||
|
||||
Options are not mandatory:
|
||||
|
||||
- __defs__ (Object) - rewrite available emoji definitions
|
||||
- example: `{ name1: char1, name2: char2, ... }`
|
||||
- __enabled__ (Array) - disable all emojis except whitelisted
|
||||
- __shortcuts__ (Object) - rewrite default shortcuts
|
||||
- example: `{ "smile": [ ":)", ":-)" ], "laughing": ":D" }`
|
||||
|
||||
_Differences in browser._ If you load the script directly into the page without
|
||||
using a package system, the module will add itself globally with the name `markdownitEmoji`.
|
||||
Init code will look a bit different in this case:
|
||||
|
||||
```js
|
||||
var md = window.markdownit().use(window.markdownitEmoji);
|
||||
```
|
||||
|
||||
|
||||
### change output
|
||||
|
||||
By default, emojis are rendered as appropriate unicode chars. But you can change
|
||||
the renderer function as you wish.
|
||||
|
||||
Render as span blocks (for example, to use a custom iconic font):
|
||||
|
||||
```js
|
||||
// ...
|
||||
// initialize
|
||||
|
||||
md.renderer.rules.emoji = function(token, idx) {
|
||||
return '<span class="emoji emoji_' + token[idx].markup + '"></span>';
|
||||
};
|
||||
```
|
||||
|
||||
Or use [twemoji](https://github.com/twitter/twemoji):
|
||||
|
||||
```js
|
||||
// ...
|
||||
// initialize
|
||||
|
||||
var twemoji = require('twemoji')
|
||||
|
||||
md.renderer.rules.emoji = function(token, idx) {
|
||||
return twemoji.parse(token[idx].content);
|
||||
};
|
||||
```
|
||||
|
||||
__NB 1__. Read [twemoji docs](https://github.com/twitter/twemoji#string-parsing)!
|
||||
In case you need more options to change image size & type.
|
||||
|
||||
__NB 2__. When using twemoji you can make image height match the line height with this
|
||||
style:
|
||||
|
||||
```css
|
||||
.emoji {
|
||||
height: 1.2em;
|
||||
}
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
[MIT](https://github.com/markdown-it/markdown-it-emoji/blob/master/LICENSE)
|
||||
23
lib/markdown-it-emoji/index.js
Normal file
23
lib/markdown-it-emoji/index.js
Normal file
@@ -0,0 +1,23 @@
|
||||
'use strict';
|
||||
|
||||
|
||||
var emojies_defs = require('./lib/data/full.json');
|
||||
var emojies_shortcuts = require('./lib/data/shortcuts');
|
||||
var emoji_html = require('./lib/render');
|
||||
var emoji_replace = require('./lib/replace');
|
||||
var normalize_opts = require('./lib/normalize_opts');
|
||||
|
||||
|
||||
module.exports = function emoji_plugin(md, options) {
|
||||
var defaults = {
|
||||
defs: emojies_defs,
|
||||
shortcuts: emojies_shortcuts,
|
||||
enabled: []
|
||||
};
|
||||
|
||||
var opts = normalize_opts(md.utils.assign({}, defaults, options || {}));
|
||||
|
||||
md.renderer.rules.emoji = emoji_html;
|
||||
|
||||
md.core.ruler.push('emoji', emoji_replace(md, opts.defs, opts.shortcuts, opts.scanRE, opts.replaceRE));
|
||||
};
|
||||
1482
lib/markdown-it-emoji/lib/data/full.json
Normal file
1482
lib/markdown-it-emoji/lib/data/full.json
Normal file
File diff suppressed because it is too large
Load Diff
153
lib/markdown-it-emoji/lib/data/light.json
Normal file
153
lib/markdown-it-emoji/lib/data/light.json
Normal file
@@ -0,0 +1,153 @@
|
||||
{
|
||||
"grinning": "😀",
|
||||
"smiley": "😃",
|
||||
"smile": "😄",
|
||||
"grin": "😁",
|
||||
"laughing": "😆",
|
||||
"satisfied": "😆",
|
||||
"sweat_smile": "😅",
|
||||
"joy": "😂",
|
||||
"blush": "😊",
|
||||
"innocent": "😇",
|
||||
"wink": "😉",
|
||||
"relieved": "😌",
|
||||
"heart_eyes": "😍",
|
||||
"kissing_heart": "😘",
|
||||
"kissing": "😗",
|
||||
"kissing_smiling_eyes": "😙",
|
||||
"kissing_closed_eyes": "😚",
|
||||
"yum": "😋",
|
||||
"stuck_out_tongue_winking_eye": "😜",
|
||||
"stuck_out_tongue_closed_eyes": "😝",
|
||||
"stuck_out_tongue": "😛",
|
||||
"sunglasses": "😎",
|
||||
"smirk": "😏",
|
||||
"unamused": "😒",
|
||||
"disappointed": "😞",
|
||||
"pensive": "😔",
|
||||
"worried": "😟",
|
||||
"confused": "😕",
|
||||
"persevere": "😣",
|
||||
"confounded": "😖",
|
||||
"tired_face": "😫",
|
||||
"weary": "😩",
|
||||
"angry": "😠",
|
||||
"rage": "😡",
|
||||
"pout": "😡",
|
||||
"no_mouth": "😶",
|
||||
"neutral_face": "😐",
|
||||
"expressionless": "😑",
|
||||
"hushed": "😯",
|
||||
"frowning": "😦",
|
||||
"anguished": "😧",
|
||||
"open_mouth": "😮",
|
||||
"astonished": "😲",
|
||||
"dizzy_face": "😵",
|
||||
"flushed": "😳",
|
||||
"scream": "😱",
|
||||
"fearful": "😨",
|
||||
"cold_sweat": "😰",
|
||||
"cry": "😢",
|
||||
"disappointed_relieved": "😥",
|
||||
"sob": "😭",
|
||||
"sweat": "😓",
|
||||
"sleepy": "😪",
|
||||
"sleeping": "😴",
|
||||
"mask": "😷",
|
||||
"smiling_imp": "😈",
|
||||
"smiley_cat": "😺",
|
||||
"smile_cat": "😸",
|
||||
"joy_cat": "😹",
|
||||
"heart_eyes_cat": "😻",
|
||||
"smirk_cat": "😼",
|
||||
"kissing_cat": "😽",
|
||||
"scream_cat": "🙀",
|
||||
"crying_cat_face": "😿",
|
||||
"pouting_cat": "😾",
|
||||
"fist_raised": "✊",
|
||||
"fist": "✊",
|
||||
"v": "✌️",
|
||||
"point_up": "☝️",
|
||||
"hand": "✋",
|
||||
"raised_hand": "✋",
|
||||
"cat": "🐱",
|
||||
"mouse": "🐭",
|
||||
"cow": "🐮",
|
||||
"monkey_face": "🐵",
|
||||
"star": "⭐️",
|
||||
"sparkles": "✨",
|
||||
"zap": "⚡️",
|
||||
"sunny": "☀️",
|
||||
"cloud": "☁️",
|
||||
"snowflake": "❄️",
|
||||
"umbrella": "☔️",
|
||||
"coffee": "☕️",
|
||||
"airplane": "✈️",
|
||||
"anchor": "⚓️",
|
||||
"watch": "⌚️",
|
||||
"phone": "☎️",
|
||||
"telephone": "☎️",
|
||||
"hourglass": "⌛️",
|
||||
"email": "✉️",
|
||||
"envelope": "✉️",
|
||||
"scissors": "✂️",
|
||||
"black_nib": "✒️",
|
||||
"pencil2": "✏️",
|
||||
"heart": "❤️",
|
||||
"aries": "♈️",
|
||||
"taurus": "♉️",
|
||||
"gemini": "♊️",
|
||||
"cancer": "♋️",
|
||||
"leo": "♌️",
|
||||
"virgo": "♍️",
|
||||
"libra": "♎️",
|
||||
"scorpius": "♏️",
|
||||
"sagittarius": "♐️",
|
||||
"capricorn": "♑️",
|
||||
"aquarius": "♒️",
|
||||
"pisces": "♓️",
|
||||
"eight_pointed_black_star": "✴️",
|
||||
"x": "❌",
|
||||
"hotsprings": "♨️",
|
||||
"exclamation": "❗️",
|
||||
"heavy_exclamation_mark": "❗️",
|
||||
"grey_exclamation": "❕",
|
||||
"question": "❓",
|
||||
"grey_question": "❔",
|
||||
"bangbang": "‼️",
|
||||
"interrobang": "⁉️",
|
||||
"part_alternation_mark": "〽️",
|
||||
"warning": "⚠️",
|
||||
"recycle": "♻️",
|
||||
"white_check_mark": "✅",
|
||||
"sparkle": "❇️",
|
||||
"eight_spoked_asterisk": "✳️",
|
||||
"negative_squared_cross_mark": "❎",
|
||||
"m": "Ⓜ️",
|
||||
"wheelchair": "♿️",
|
||||
"information_source": "ℹ️",
|
||||
"heavy_plus_sign": "➕",
|
||||
"heavy_minus_sign": "➖",
|
||||
"heavy_division_sign": "➗",
|
||||
"heavy_multiplication_x": "✖️",
|
||||
"tm": "™️",
|
||||
"copyright": "©️",
|
||||
"registered": "®️",
|
||||
"wavy_dash": "〰️",
|
||||
"curly_loop": "➰",
|
||||
"loop": "➿",
|
||||
"heavy_check_mark": "✔️",
|
||||
"ballot_box_with_check": "☑️",
|
||||
"white_circle": "⚪️",
|
||||
"black_circle": "⚫️",
|
||||
"black_small_square": "▪️",
|
||||
"white_small_square": "▫️",
|
||||
"black_medium_small_square": "◾️",
|
||||
"white_medium_small_square": "◽️",
|
||||
"black_medium_square": "◼️",
|
||||
"white_medium_square": "◻️",
|
||||
"black_large_square": "⬛️",
|
||||
"white_large_square": "⬜️",
|
||||
"black_joker": "🃏",
|
||||
"mahjong": "🀄️"
|
||||
}
|
||||
41
lib/markdown-it-emoji/lib/data/shortcuts.js
Normal file
41
lib/markdown-it-emoji/lib/data/shortcuts.js
Normal file
@@ -0,0 +1,41 @@
|
||||
// Emoticons -> Emoji mapping.
//
// (!) Some patterns are skipped, to avoid collisions
// without increasing matcher complexity. That can change in the future.
//
// Places to look for more emoticons info:
//
// - http://en.wikipedia.org/wiki/List_of_emoticons#Western
// - https://github.com/wooorm/emoticon/blob/master/Support.md
// - http://factoryjoe.com/projects/emoticons/
//
'use strict';

// Keys are emoji names (must exist in data/full.json or data/light.json);
// values are one emoticon alias or an array of aliases for that emoji.
module.exports = {
  angry: [ '>:(', '>:-(' ],
  blush: [ ':")', ':-")' ],
  broken_heart: [ '</3', '<\\3' ],
  // :\ and :-\ not used because of conflict with markdown escaping
  confused: [ ':/', ':-/' ], // twemoji shows question
  cry: [ ":'(", ":'-(", ':,(', ':,-(' ],
  frowning: [ ':(', ':-(' ],
  heart: [ '<3' ],
  imp: [ ']:(', ']:-(' ],
  innocent: [ 'o:)', 'O:)', 'o:-)', 'O:-)', '0:)', '0:-)' ],
  joy: [ ":')", ":'-)", ':,)', ':,-)', ":'D", ":'-D", ':,D', ':,-D' ],
  kissing: [ ':*', ':-*' ],
  laughing: [ 'x-)', 'X-)' ],
  neutral_face: [ ':|', ':-|' ],
  open_mouth: [ ':o', ':-o', ':O', ':-O' ],
  rage: [ ':@', ':-@' ],
  smile: [ ':D', ':-D' ],
  smiley: [ ':)', ':-)' ],
  smiling_imp: [ ']:)', ']:-)' ],
  sob: [ ":,'(", ":,'-(", ';(', ';-(' ],
  stuck_out_tongue: [ ':P', ':-P' ],
  sunglasses: [ '8-)', 'B-)' ],
  sweat: [ ',:(', ',:-(' ],
  sweat_smile: [ ',:)', ',:-)' ],
  unamused: [ ':s', ':-S', ':z', ':-Z', ':$', ':-$' ],
  wink: [ ';)', ';-)' ]
};
|
||||
59
lib/markdown-it-emoji/lib/normalize_opts.js
Normal file
59
lib/markdown-it-emoji/lib/normalize_opts.js
Normal file
@@ -0,0 +1,59 @@
|
||||
// Convert input options to more useable format
|
||||
// and compile search regexp
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
// Escape regexp metacharacters in `str` so the result can be embedded
// verbatim into a RegExp source string.
function quoteRE(str) {
  var escaped = str.replace(/[.?*+^$[\]\\(){}|-]/g, '\\$&');
  return escaped;
}
|
||||
|
||||
|
||||
module.exports = function normalize_opts(options) {
|
||||
var emojies = options.defs,
|
||||
shortcuts;
|
||||
|
||||
// Filter emojies by whitelist, if needed
|
||||
if (options.enabled.length) {
|
||||
emojies = Object.keys(emojies).reduce(function (acc, key) {
|
||||
if (options.enabled.indexOf(key) >= 0) {
|
||||
acc[key] = emojies[key];
|
||||
}
|
||||
return acc;
|
||||
}, {});
|
||||
}
|
||||
|
||||
// Flatten shortcuts to simple object: { alias: emoji_name }
|
||||
shortcuts = Object.keys(options.shortcuts).reduce(function (acc, key) {
|
||||
// Skip aliases for filtered emojies, to reduce regexp
|
||||
if (!emojies[key]) { return acc; }
|
||||
|
||||
if (Array.isArray(options.shortcuts[key])) {
|
||||
options.shortcuts[key].forEach(function (alias) {
|
||||
acc[alias] = key;
|
||||
});
|
||||
return acc;
|
||||
}
|
||||
|
||||
acc[options.shortcuts[key]] = key;
|
||||
return acc;
|
||||
}, {});
|
||||
|
||||
// Compile regexp
|
||||
var names = Object.keys(emojies)
|
||||
.map(function (name) { return ':' + name + ':'; })
|
||||
.concat(Object.keys(shortcuts))
|
||||
.sort()
|
||||
.reverse()
|
||||
.map(function (name) { return quoteRE(name); })
|
||||
.join('|');
|
||||
var scanRE = RegExp(names);
|
||||
var replaceRE = RegExp(names, 'g');
|
||||
|
||||
return {
|
||||
defs: emojies,
|
||||
shortcuts: shortcuts,
|
||||
scanRE: scanRE,
|
||||
replaceRE: replaceRE
|
||||
};
|
||||
};
|
||||
5
lib/markdown-it-emoji/lib/render.js
Normal file
5
lib/markdown-it-emoji/lib/render.js
Normal file
@@ -0,0 +1,5 @@
|
||||
'use strict';
|
||||
|
||||
// Default renderer for `emoji` tokens: emit the replacement text
// stored in `token.content` (the unicode emoji character) as-is.
module.exports = function emoji_html(tokens, idx /*, options, env */) {
  return tokens[idx].content;
};
|
||||
89
lib/markdown-it-emoji/lib/replace.js
Normal file
89
lib/markdown-it-emoji/lib/replace.js
Normal file
@@ -0,0 +1,89 @@
|
||||
// Emojies & shortcuts replacement logic.
|
||||
//
|
||||
// Note: in theory, it could be faster to parse :smile: in the inline chain and
// leave only shortcuts here. But who cares...
|
||||
//
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
// Factory: builds the core-rule function that replaces emoji names and
// emoticon shortcuts inside text tokens with `emoji` tokens.
//
// md        - markdown-it instance (source of utils and Token)
// emojies   - { name: replacement_char } definitions
// shortcuts - flat { alias: emoji_name } map
// scanRE    - quick-test regexp (no /g flag)
// replaceRE - same pattern with /g, used for the actual replacement
module.exports = function create_rule(md, emojies, shortcuts, scanRE, replaceRE) {
  var arrayReplaceAt = md.utils.arrayReplaceAt,
      ucm = md.utils.lib.ucmicro,
      // Matches separator-class characters (whitespace Z, punctuation P,
      // control Cc); used to ensure shortcuts are not glued to words.
      ZPCc = new RegExp([ ucm.Z.source, ucm.P.source, ucm.Cc.source ].join('|'));

  // Split one text token's content into an array of text/emoji tokens.
  // `replace` is used purely for iteration; its return value is discarded.
  function splitTextToken(text, level, Token) {
    var token, last_pos = 0, nodes = [];

    text.replace(replaceRE, function (match, offset, src) {
      var emoji_name;
      // Validate emoji name
      if (shortcuts.hasOwnProperty(match)) {
        // replace shortcut with full name
        emoji_name = shortcuts[match];

        // Don't allow letters before any shortcut (as in no ":/" in http://)
        if (offset > 0 && !ZPCc.test(src[offset - 1])) {
          return;
        }

        // Don't allow letters after any shortcut
        if (offset + match.length < src.length && !ZPCc.test(src[offset + match.length])) {
          return;
        }
      } else {
        // ':name:' form — strip the surrounding colons
        emoji_name = match.slice(1, -1);
      }

      // Add new tokens to pending list
      if (offset > last_pos) {
        token = new Token('text', '', 0);
        token.content = text.slice(last_pos, offset);
        nodes.push(token);
      }

      token = new Token('emoji', '', 0);
      token.markup = emoji_name;
      token.content = emojies[emoji_name];
      nodes.push(token);

      last_pos = offset + match.length;
    });

    // Trailing text after the last accepted match.
    if (last_pos < text.length) {
      token = new Token('text', '', 0);
      token.content = text.slice(last_pos);
      nodes.push(token);
    }

    return nodes;
  }

  // The core rule proper: walks every inline token's children and
  // replaces matching text tokens in place.
  return function emoji_replace(state) {
    var i, j, l, tokens, token,
        blockTokens = state.tokens,
        autolinkLevel = 0;

    for (j = 0, l = blockTokens.length; j < l; j++) {
      if (blockTokens[j].type !== 'inline') { continue; }
      tokens = blockTokens[j].children;

      // We scan from the end, to keep position when new tags added.
      // Use reversed logic in links start/end match
      for (i = tokens.length - 1; i >= 0; i--) {
        token = tokens[i];

        // Track autolink depth; text inside autolinks is left untouched
        // (replacing there would corrupt the displayed URL).
        if (token.type === 'link_open' || token.type === 'link_close') {
          if (token.info === 'auto') { autolinkLevel -= token.nesting; }
        }

        if (token.type === 'text' && autolinkLevel === 0 && scanRE.test(token.content)) {
          // replace current node
          blockTokens[j].children = tokens = arrayReplaceAt(
            tokens, i, splitTextToken(token.content, token.level, state.Token)
          );
        }
      }
    }
  };
};
|
||||
23
lib/markdown-it-emoji/light.js
Normal file
23
lib/markdown-it-emoji/light.js
Normal file
@@ -0,0 +1,23 @@
|
||||
'use strict';
|
||||
|
||||
|
||||
var emojies_defs = require('./lib/data/light.json');
|
||||
var emojies_shortcuts = require('./lib/data/shortcuts');
|
||||
var emoji_html = require('./lib/render');
|
||||
var emoji_replace = require('./lib/replace');
|
||||
var normalize_opts = require('./lib/normalize_opts');
|
||||
|
||||
|
||||
module.exports = function emoji_plugin(md, options) {
|
||||
var defaults = {
|
||||
defs: emojies_defs,
|
||||
shortcuts: emojies_shortcuts,
|
||||
enabled: []
|
||||
};
|
||||
|
||||
var opts = normalize_opts(md.utils.assign({}, defaults, options || {}));
|
||||
|
||||
md.renderer.rules.emoji = emoji_html;
|
||||
|
||||
md.core.ruler.push('emoji', emoji_replace(md, opts.defs, opts.shortcuts, opts.scanRE, opts.replaceRE));
|
||||
};
|
||||
117
lib/markdown-it-footnote/README.md
Normal file
117
lib/markdown-it-footnote/README.md
Normal file
@@ -0,0 +1,117 @@
|
||||
# markdown-it-footnote
|
||||
|
||||
[](https://travis-ci.org/markdown-it/markdown-it-footnote)
|
||||
[](https://www.npmjs.org/package/markdown-it-footnote)
|
||||
[](https://coveralls.io/r/markdown-it/markdown-it-footnote?branch=master)
|
||||
|
||||
> Footnotes plugin for [markdown-it](https://github.com/markdown-it/markdown-it) markdown parser.
|
||||
|
||||
__v2.+ requires `markdown-it` v5.+, see changelog.__
|
||||
|
||||
Markup is based on [pandoc](http://johnmacfarlane.net/pandoc/README.html#footnotes) definition.
|
||||
|
||||
__Normal footnote__:
|
||||
|
||||
```
|
||||
Here is a footnote reference,[^1] and another.[^longnote]
|
||||
|
||||
[^1]: Here is the footnote.
|
||||
|
||||
[^longnote]: Here's one with multiple blocks.
|
||||
|
||||
Subsequent paragraphs are indented to show that they
|
||||
belong to the previous footnote.
|
||||
```
|
||||
|
||||
html:
|
||||
|
||||
```html
|
||||
<p>Here is a footnote reference,<sup class="footnote-ref"><a href="#fn1" id="fnref1">[1]</a></sup> and another.<sup class="footnote-ref"><a href="#fn2" id="fnref2">[2]</a></sup></p>
|
||||
<p>This paragraph won’t be part of the note, because it
|
||||
isn’t indented.</p>
|
||||
<hr class="footnotes-sep">
|
||||
<section class="footnotes">
|
||||
<ol class="footnotes-list">
|
||||
<li id="fn1" class="footnote-item"><p>Here is the footnote. <a href="#fnref1" class="footnote-backref">↩</a></p>
|
||||
</li>
|
||||
<li id="fn2" class="footnote-item"><p>Here’s one with multiple blocks.</p>
|
||||
<p>Subsequent paragraphs are indented to show that they
|
||||
belong to the previous footnote. <a href="#fnref2" class="footnote-backref">↩</a></p>
|
||||
</li>
|
||||
</ol>
|
||||
</section>
|
||||
```
|
||||
|
||||
__Inline footnote__:
|
||||
|
||||
```
|
||||
Here is an inline note.^[Inlines notes are easier to write, since
|
||||
you don't have to pick an identifier and move down to type the
|
||||
note.]
|
||||
```
|
||||
|
||||
html:
|
||||
|
||||
```html
|
||||
<p>Here is an inline note.<sup class="footnote-ref"><a href="#fn1" id="fnref1">[1]</a></sup></p>
|
||||
<hr class="footnotes-sep">
|
||||
<section class="footnotes">
|
||||
<ol class="footnotes-list">
|
||||
<li id="fn1" class="footnote-item"><p>Inlines notes are easier to write, since
|
||||
you don’t have to pick an identifier and move down to type the
|
||||
note. <a href="#fnref1" class="footnote-backref">↩</a></p>
|
||||
</li>
|
||||
</ol>
|
||||
</section>
|
||||
```
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
node.js, browser:
|
||||
|
||||
```bash
|
||||
npm install markdown-it-footnote --save
|
||||
bower install markdown-it-footnote --save
|
||||
```
|
||||
|
||||
## Use
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')()
|
||||
.use(require('markdown-it-footnote'));
|
||||
|
||||
md.render(/*...*/) // See examples above
|
||||
```
|
||||
|
||||
_Differences in browser._ If you load the script directly into the page without a
package system, the module will add itself globally as `window.markdownitFootnote`.
|
||||
|
||||
|
||||
### Customize
|
||||
|
||||
If you want to customize the output, you'll need to replace the template
|
||||
functions. To see which templates exist and their default implementations,
|
||||
look in [`index.js`](index.js). The API of these template functions is out of
|
||||
scope for this plugin's documentation; you can read more about it [in the
|
||||
markdown-it
|
||||
documentation](https://github.com/markdown-it/markdown-it/blob/master/docs/architecture.md#renderer).
|
||||
|
||||
To demonstrate with an example, here is how you might replace the `<hr>` that
|
||||
this plugin emits by default with an `<h4>` emitted by your own template
|
||||
function override:
|
||||
|
||||
```js
|
||||
const md = require('markdown-it')().use(require('markdown-it-footnote'));
|
||||
|
||||
md.renderer.rules.footnote_block_open = () => (
|
||||
'<h4 class="mt-3">Footnotes</h4>\n' +
|
||||
'<section class="footnotes">\n' +
|
||||
'<ol class="footnotes-list">\n'
|
||||
);
|
||||
```
|
||||
|
||||
|
||||
## License
|
||||
|
||||
[MIT](https://github.com/markdown-it/markdown-it-footnote/blob/master/LICENSE)
|
||||
367
lib/markdown-it-footnote/index.js
Normal file
367
lib/markdown-it-footnote/index.js
Normal file
@@ -0,0 +1,367 @@
|
||||
// Process footnotes
|
||||
//
|
||||
'use strict';
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Renderer partials
|
||||
|
||||
// Build the anchor-id fragment for footnote `idx`: the 1-based footnote
// number, optionally prefixed with "-<docId>-" so ids stay unique when
// several documents are rendered into one page.
function render_footnote_anchor_name(tokens, idx, options, env/*, slf*/) {
  var num = Number(tokens[idx].meta.id + 1).toString();

  if (typeof env.docId === 'string') {
    return '-' + env.docId + '-' + num;
  }

  return num;
}
|
||||
|
||||
// Visible caption for a footnote reference: "[n]", or "[n:sub]" when the
// same footnote is referenced more than once.
function render_footnote_caption(tokens, idx/*, options, env, slf*/) {
  var meta = tokens[idx].meta;
  var label = Number(meta.id + 1).toString();

  if (meta.subId > 0) {
    label += ':' + meta.subId;
  }

  return '[' + label + ']';
}
|
||||
|
||||
// Render an in-text footnote reference as a superscripted link to the
// footnote body ("#fn<id>"); the link's own id ("fnref<refid>") carries a
// ":<subId>" suffix for repeated references to the same footnote.
function render_footnote_ref(tokens, idx, options, env, slf) {
  var anchor = slf.rules.footnote_anchor_name(tokens, idx, options, env, slf);
  var caption = slf.rules.footnote_caption(tokens, idx, options, env, slf);
  var backId = anchor;

  if (tokens[idx].meta.subId > 0) {
    backId = anchor + ':' + tokens[idx].meta.subId;
  }

  return '<sup class="footnote-ref"><a href="#fn' + anchor + '" id="fnref' + backId + '">' + caption + '</a></sup>';
}
|
||||
|
||||
// Open the footnotes block at the end of the document: a separator <hr>
// (self-closing when xhtmlOut is set) followed by the list container.
function render_footnote_block_open(tokens, idx, options) {
  var hr = options.xhtmlOut
    ? '<hr class="footnotes-sep" />\n'
    : '<hr class="footnotes-sep">\n';

  return hr +
         '<section class="footnotes">\n' +
         '<ol class="footnotes-list">\n';
}
|
||||
|
||||
// Close the footnotes block opened by render_footnote_block_open.
function render_footnote_block_close() {
  return [ '</ol>', '</section>', '' ].join('\n');
}
|
||||
|
||||
// Open one footnote body item (<li id="fn<id>">); repeated references
// append ":<subId>" to keep the element id unique.
function render_footnote_open(tokens, idx, options, env, slf) {
  var itemId = slf.rules.footnote_anchor_name(tokens, idx, options, env, slf);
  var subId = tokens[idx].meta.subId;

  if (subId > 0) { itemId += ':' + subId; }

  return '<li id="fn' + itemId + '" class="footnote-item">';
}
|
||||
|
||||
// Close a footnote body item opened by render_footnote_open.
function render_footnote_close() {
  return '</li>\n';
}
|
||||
|
||||
// Render the back-reference arrow that links a footnote body back to the
// in-text reference ("#fnref<id>" or "#fnref<id>:<subId>").
function render_footnote_anchor(tokens, idx, options, env, slf) {
  var target = slf.rules.footnote_anchor_name(tokens, idx, options, env, slf);
  var subId = tokens[idx].meta.subId;

  if (subId > 0) { target += ':' + subId; }

  /* ↩ with escape code to prevent display as Apple Emoji on iOS */
  return ' <a href="#fnref' + target + '" class="footnote-backref">\u21a9\uFE0E</a>';
}
|
||||
|
||||
|
||||
module.exports = function footnote_plugin(md) {
|
||||
var parseLinkLabel = md.helpers.parseLinkLabel,
|
||||
isSpace = md.utils.isSpace;
|
||||
|
||||
md.renderer.rules.footnote_ref = render_footnote_ref;
|
||||
md.renderer.rules.footnote_block_open = render_footnote_block_open;
|
||||
md.renderer.rules.footnote_block_close = render_footnote_block_close;
|
||||
md.renderer.rules.footnote_open = render_footnote_open;
|
||||
md.renderer.rules.footnote_close = render_footnote_close;
|
||||
md.renderer.rules.footnote_anchor = render_footnote_anchor;
|
||||
|
||||
// helpers (only used in other rules, no tokens are attached to those)
|
||||
md.renderer.rules.footnote_caption = render_footnote_caption;
|
||||
md.renderer.rules.footnote_anchor_name = render_footnote_anchor_name;
|
||||
|
||||
// Process footnote block definition
|
||||
function footnote_def(state, startLine, endLine, silent) {
|
||||
var oldBMark, oldTShift, oldSCount, oldParentType, pos, label, token,
|
||||
initial, offset, ch, posAfterColon,
|
||||
start = state.bMarks[startLine] + state.tShift[startLine],
|
||||
max = state.eMarks[startLine];
|
||||
|
||||
// line should be at least 5 chars - "[^x]:"
|
||||
if (start + 4 > max) { return false; }
|
||||
|
||||
if (state.src.charCodeAt(start) !== 0x5B/* [ */) { return false; }
|
||||
if (state.src.charCodeAt(start + 1) !== 0x5E/* ^ */) { return false; }
|
||||
|
||||
for (pos = start + 2; pos < max; pos++) {
|
||||
if (state.src.charCodeAt(pos) === 0x20) { return false; }
|
||||
if (state.src.charCodeAt(pos) === 0x5D /* ] */) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pos === start + 2) { return false; } // no empty footnote labels
|
||||
if (pos + 1 >= max || state.src.charCodeAt(++pos) !== 0x3A /* : */) { return false; }
|
||||
if (silent) { return true; }
|
||||
pos++;
|
||||
|
||||
if (!state.env.footnotes) { state.env.footnotes = {}; }
|
||||
if (!state.env.footnotes.refs) { state.env.footnotes.refs = {}; }
|
||||
label = state.src.slice(start + 2, pos - 2);
|
||||
state.env.footnotes.refs[':' + label] = -1;
|
||||
|
||||
token = new state.Token('footnote_reference_open', '', 1);
|
||||
token.meta = { label: label };
|
||||
token.level = state.level++;
|
||||
state.tokens.push(token);
|
||||
|
||||
oldBMark = state.bMarks[startLine];
|
||||
oldTShift = state.tShift[startLine];
|
||||
oldSCount = state.sCount[startLine];
|
||||
oldParentType = state.parentType;
|
||||
|
||||
posAfterColon = pos;
|
||||
initial = offset = state.sCount[startLine] + pos - (state.bMarks[startLine] + state.tShift[startLine]);
|
||||
|
||||
while (pos < max) {
|
||||
ch = state.src.charCodeAt(pos);
|
||||
|
||||
if (isSpace(ch)) {
|
||||
if (ch === 0x09) {
|
||||
offset += 4 - offset % 4;
|
||||
} else {
|
||||
offset++;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
|
||||
pos++;
|
||||
}
|
||||
|
||||
state.tShift[startLine] = pos - posAfterColon;
|
||||
state.sCount[startLine] = offset - initial;
|
||||
|
||||
state.bMarks[startLine] = posAfterColon;
|
||||
state.blkIndent += 4;
|
||||
state.parentType = 'footnote';
|
||||
|
||||
if (state.sCount[startLine] < state.blkIndent) {
|
||||
state.sCount[startLine] += state.blkIndent;
|
||||
}
|
||||
|
||||
state.md.block.tokenize(state, startLine, endLine, true);
|
||||
|
||||
state.parentType = oldParentType;
|
||||
state.blkIndent -= 4;
|
||||
state.tShift[startLine] = oldTShift;
|
||||
state.sCount[startLine] = oldSCount;
|
||||
state.bMarks[startLine] = oldBMark;
|
||||
|
||||
token = new state.Token('footnote_reference_close', '', -1);
|
||||
token.level = --state.level;
|
||||
state.tokens.push(token);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// Process inline footnotes (^[...])
|
||||
function footnote_inline(state, silent) {
|
||||
var labelStart,
|
||||
labelEnd,
|
||||
footnoteId,
|
||||
token,
|
||||
tokens,
|
||||
max = state.posMax,
|
||||
start = state.pos;
|
||||
|
||||
if (start + 2 >= max) { return false; }
|
||||
if (state.src.charCodeAt(start) !== 0x5E/* ^ */) { return false; }
|
||||
if (state.src.charCodeAt(start + 1) !== 0x5B/* [ */) { return false; }
|
||||
|
||||
labelStart = start + 2;
|
||||
labelEnd = parseLinkLabel(state, start + 1);
|
||||
|
||||
// parser failed to find ']', so it's not a valid note
|
||||
if (labelEnd < 0) { return false; }
|
||||
|
||||
// We found the end of the link, and know for a fact it's a valid link;
|
||||
// so all that's left to do is to call tokenizer.
|
||||
//
|
||||
if (!silent) {
|
||||
if (!state.env.footnotes) { state.env.footnotes = {}; }
|
||||
if (!state.env.footnotes.list) { state.env.footnotes.list = []; }
|
||||
footnoteId = state.env.footnotes.list.length;
|
||||
|
||||
state.md.inline.parse(
|
||||
state.src.slice(labelStart, labelEnd),
|
||||
state.md,
|
||||
state.env,
|
||||
tokens = []
|
||||
);
|
||||
|
||||
token = state.push('footnote_ref', '', 0);
|
||||
token.meta = { id: footnoteId };
|
||||
|
||||
state.env.footnotes.list[footnoteId] = {
|
||||
content: state.src.slice(labelStart, labelEnd),
|
||||
tokens: tokens
|
||||
};
|
||||
}
|
||||
|
||||
state.pos = labelEnd + 1;
|
||||
state.posMax = max;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Process footnote references ([^...])
|
||||
function footnote_ref(state, silent) {
|
||||
var label,
|
||||
pos,
|
||||
footnoteId,
|
||||
footnoteSubId,
|
||||
token,
|
||||
max = state.posMax,
|
||||
start = state.pos;
|
||||
|
||||
// should be at least 4 chars - "[^x]"
|
||||
if (start + 3 > max) { return false; }
|
||||
|
||||
if (!state.env.footnotes || !state.env.footnotes.refs) { return false; }
|
||||
if (state.src.charCodeAt(start) !== 0x5B/* [ */) { return false; }
|
||||
if (state.src.charCodeAt(start + 1) !== 0x5E/* ^ */) { return false; }
|
||||
|
||||
for (pos = start + 2; pos < max; pos++) {
|
||||
if (state.src.charCodeAt(pos) === 0x20) { return false; }
|
||||
if (state.src.charCodeAt(pos) === 0x0A) { return false; }
|
||||
if (state.src.charCodeAt(pos) === 0x5D /* ] */) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pos === start + 2) { return false; } // no empty footnote labels
|
||||
if (pos >= max) { return false; }
|
||||
pos++;
|
||||
|
||||
label = state.src.slice(start + 2, pos - 1);
|
||||
if (typeof state.env.footnotes.refs[':' + label] === 'undefined') { return false; }
|
||||
|
||||
if (!silent) {
|
||||
if (!state.env.footnotes.list) { state.env.footnotes.list = []; }
|
||||
|
||||
if (state.env.footnotes.refs[':' + label] < 0) {
|
||||
footnoteId = state.env.footnotes.list.length;
|
||||
state.env.footnotes.list[footnoteId] = { label: label, count: 0 };
|
||||
state.env.footnotes.refs[':' + label] = footnoteId;
|
||||
} else {
|
||||
footnoteId = state.env.footnotes.refs[':' + label];
|
||||
}
|
||||
|
||||
footnoteSubId = state.env.footnotes.list[footnoteId].count;
|
||||
state.env.footnotes.list[footnoteId].count++;
|
||||
|
||||
token = state.push('footnote_ref', '', 0);
|
||||
token.meta = { id: footnoteId, subId: footnoteSubId, label: label };
|
||||
}
|
||||
|
||||
state.pos = pos;
|
||||
state.posMax = max;
|
||||
return true;
|
||||
}
|
||||
|
||||
  // Glue footnote tokens to end of token stream
  //
  // Core rule: collects all footnote definition tokens that were parsed
  // inline in the document, removes them from the main token stream, and
  // re-emits them as a single footnote block at the end of the document.
  function footnote_tail(state) {
    var i, l, j, t, lastParagraph, list, token, tokens, current, currentLabel,
        insideRef = false,
        refTokens = {};

    // Nothing to do if no footnote syntax was seen in this document.
    if (!state.env.footnotes) { return; }

    // First pass: strip every footnote definition out of the stream,
    // stashing its inner tokens in refTokens keyed by label.
    state.tokens = state.tokens.filter(function (tok) {
      if (tok.type === 'footnote_reference_open') {
        insideRef = true;
        current = [];
        currentLabel = tok.meta.label;
        return false;
      }
      if (tok.type === 'footnote_reference_close') {
        insideRef = false;
        // prepend ':' to avoid conflict with Object.prototype members
        refTokens[':' + currentLabel] = current;
        return false;
      }
      // Tokens between open/close are captured, not kept in the stream.
      if (insideRef) { current.push(tok); }
      return !insideRef;
    });

    // The list only exists if at least one footnote was actually referenced.
    if (!state.env.footnotes.list) { return; }
    list = state.env.footnotes.list;

    token = new state.Token('footnote_block_open', '', 1);
    state.tokens.push(token);

    // Second pass: emit one footnote entry per referenced footnote, in
    // first-reference order.
    for (i = 0, l = list.length; i < l; i++) {
      token = new state.Token('footnote_open', '', 1);
      token.meta = { id: i, label: list[i].label };
      state.tokens.push(token);

      if (list[i].tokens) {
        // Inline footnote (^[...]): wrap its captured inline tokens in a
        // synthetic paragraph.
        tokens = [];

        token = new state.Token('paragraph_open', 'p', 1);
        token.block = true;
        tokens.push(token);

        token = new state.Token('inline', '', 0);
        token.children = list[i].tokens;
        token.content = list[i].content;
        tokens.push(token);

        token = new state.Token('paragraph_close', 'p', -1);
        token.block = true;
        tokens.push(token);

      } else if (list[i].label) {
        // Referenced footnote ([^label]): reuse the tokens captured above.
        tokens = refTokens[':' + list[i].label];
      }

      state.tokens = state.tokens.concat(tokens);
      // Temporarily pop a trailing paragraph_close so the back-reference
      // anchors land inside the last paragraph of the footnote body.
      if (state.tokens[state.tokens.length - 1].type === 'paragraph_close') {
        lastParagraph = state.tokens.pop();
      } else {
        lastParagraph = null;
      }

      // One back-reference anchor per reference to this footnote
      // (at least one, even if count was never incremented).
      t = list[i].count > 0 ? list[i].count : 1;
      for (j = 0; j < t; j++) {
        token = new state.Token('footnote_anchor', '', 0);
        token.meta = { id: i, subId: j, label: list[i].label };
        state.tokens.push(token);
      }

      if (lastParagraph) {
        state.tokens.push(lastParagraph);
      }

      token = new state.Token('footnote_close', '', -1);
      state.tokens.push(token);
    }

    token = new state.Token('footnote_block_close', '', -1);
    state.tokens.push(token);
  }
|
||||
|
||||
md.block.ruler.before('reference', 'footnote_def', footnote_def, { alt: [ 'paragraph', 'reference' ] });
|
||||
md.inline.ruler.after('image', 'footnote_inline', footnote_inline);
|
||||
md.inline.ruler.after('footnote_inline', 'footnote_ref', footnote_ref);
|
||||
md.core.ruler.after('inline', 'footnote_tail', footnote_tail);
|
||||
};
|
||||
40
lib/markdown-it-ins/README.md
Normal file
40
lib/markdown-it-ins/README.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# markdown-it-ins
|
||||
|
||||
[](https://travis-ci.org/markdown-it/markdown-it-ins)
|
||||
[](https://www.npmjs.org/package/markdown-it-ins)
|
||||
[](https://coveralls.io/r/markdown-it/markdown-it-ins?branch=master)
|
||||
|
||||
> `<ins>` tag plugin for [markdown-it](https://github.com/markdown-it/markdown-it) markdown parser.
|
||||
|
||||
__v3.+ requires `markdown-it` v10.+, see changelog.__
|
||||
|
||||
`++inserted++` => `<ins>inserted</ins>`
|
||||
|
||||
Markup uses the same conditions as CommonMark [emphasis](http://spec.commonmark.org/0.15/#emphasis-and-strong-emphasis).
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
node.js, browser:
|
||||
|
||||
```bash
|
||||
npm install markdown-it-ins --save
|
||||
bower install markdown-it-ins --save
|
||||
```
|
||||
|
||||
## Use
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')()
|
||||
.use(require('markdown-it-ins'));
|
||||
|
||||
md.render('++inserted++') // => '<p><ins>inserted</ins></p>'
|
||||
```
|
||||
|
||||
_Differences in browser._ If you load script directly into the page, without
|
||||
package system, module will add itself globally as `window.markdownitIns`.
|
||||
|
||||
|
||||
## License
|
||||
|
||||
[MIT](https://github.com/markdown-it/markdown-it-ins/blob/master/LICENSE)
|
||||
133
lib/markdown-it-ins/index.js
Normal file
133
lib/markdown-it-ins/index.js
Normal file
@@ -0,0 +1,133 @@
|
||||
'use strict';
|
||||
|
||||
|
||||
// Plugin entry point: adds `++inserted++` -> <ins> support to markdown-it.
module.exports = function ins_plugin(md) {
  // Insert each marker as a separate text token, and add it to delimiter list
  //
  // Inline rule (pass 1): scans a run of '+' at state.pos, emits the markers
  // as plain text tokens in pairs, and records each pair in state.delimiters
  // so the generic delimiter balancer can pair them up later.
  function tokenize(state, silent) {
    var i, scanned, token, len, ch,
        start = state.pos,
        marker = state.src.charCodeAt(start);

    if (silent) { return false; }

    if (marker !== 0x2B/* + */) { return false; }

    scanned = state.scanDelims(state.pos, true);
    len = scanned.length;
    ch = String.fromCharCode(marker);

    // A single '+' can never delimit <ins>; need at least '++'.
    if (len < 2) { return false; }

    // Odd-length runs leave one lone '+' as literal text up front.
    if (len % 2) {
      token = state.push('text', '', 0);
      token.content = ch;
      len--;
    }

    for (i = 0; i < len; i += 2) {
      token = state.push('text', '', 0);
      token.content = ch + ch;

      if (!scanned.can_open && !scanned.can_close) { continue; }

      state.delimiters.push({
        marker: marker,
        length: 0, // disable "rule of 3" length checks meant for emphasis
        jump: i,
        token: state.tokens.length - 1,
        end: -1,
        open: scanned.can_open,
        close: scanned.can_close
      });
    }

    state.pos += scanned.length;

    return true;
  }


  // Walk through delimiter list and replace text tokens with tags
  //
  // Pass 2: runs after the balancer has filled in `end` for matched pairs;
  // rewrites the matched text tokens into ins_open/ins_close tokens.
  function postProcess(state, delimiters) {
    var i, j,
        startDelim,
        endDelim,
        token,
        loneMarkers = [],
        max = delimiters.length;

    for (i = 0; i < max; i++) {
      startDelim = delimiters[i];

      // Skip delimiters that belong to other rules (emphasis, strike, ...).
      if (startDelim.marker !== 0x2B/* + */) {
        continue;
      }

      // Skip unmatched openers.
      if (startDelim.end === -1) {
        continue;
      }

      endDelim = delimiters[startDelim.end];

      token = state.tokens[startDelim.token];
      token.type = 'ins_open';
      token.tag = 'ins';
      token.nesting = 1;
      token.markup = '++';
      token.content = '';

      token = state.tokens[endDelim.token];
      token.type = 'ins_close';
      token.tag = 'ins';
      token.nesting = -1;
      token.markup = '++';
      token.content = '';

      // Remember a leftover single '+' sitting right before the closer so it
      // can be relocated below.
      if (state.tokens[endDelim.token - 1].type === 'text' &&
          state.tokens[endDelim.token - 1].content === '+') {

        loneMarkers.push(endDelim.token - 1);
      }
    }

    // If a marker sequence has an odd number of characters, it's split
    // like this: `+++++` -> `+` + `++` + `++`, leaving one marker at the
    // start of the sequence.
    //
    // So, we have to move all those markers after subsequent ins_close tags.
    //
    while (loneMarkers.length) {
      i = loneMarkers.pop();
      j = i + 1;

      while (j < state.tokens.length && state.tokens[j].type === 'ins_close') {
        j++;
      }

      j--;

      if (i !== j) {
        token = state.tokens[j];
        state.tokens[j] = state.tokens[i];
        state.tokens[i] = token;
      }
    }
  }

  // Tokenizer runs with the other inline rules; postProcess runs in the
  // second (balancing) pass, both for top-level and nested delimiter lists.
  md.inline.ruler.before('emphasis', 'ins', tokenize);
  md.inline.ruler2.before('emphasis', 'ins', function (state) {
    var curr,
        tokens_meta = state.tokens_meta,
        max = (state.tokens_meta || []).length;

    postProcess(state, state.delimiters);

    for (curr = 0; curr < max; curr++) {
      if (tokens_meta[curr] && tokens_meta[curr].delimiters) {
        postProcess(state, tokens_meta[curr].delimiters);
      }
    }
  });
};
|
||||
40
lib/markdown-it-mark/README.md
Normal file
40
lib/markdown-it-mark/README.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# markdown-it-mark
|
||||
|
||||
[](https://travis-ci.org/markdown-it/markdown-it-mark)
|
||||
[](https://www.npmjs.org/package/markdown-it-mark)
|
||||
[](https://coveralls.io/r/markdown-it/markdown-it-mark?branch=master)
|
||||
|
||||
> `<mark>` tag plugin for [markdown-it](https://github.com/markdown-it/markdown-it) markdown parser.
|
||||
|
||||
__v3.+ requires `markdown-it` v10.+, see changelog.__
|
||||
|
||||
`==marked==` => `<mark>marked</mark>`
|
||||
|
||||
Markup uses the same conditions as CommonMark [emphasis](http://spec.commonmark.org/0.15/#emphasis-and-strong-emphasis).
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
node.js, browser:
|
||||
|
||||
```bash
|
||||
npm install markdown-it-mark --save
|
||||
bower install markdown-it-mark --save
|
||||
```
|
||||
|
||||
## Use
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')()
|
||||
.use(require('markdown-it-mark'));
|
||||
|
||||
md.render('==marked==') // => '<p><mark>marked</mark></p>'
|
||||
```
|
||||
|
||||
_Differences in browser._ If you load script directly into the page, without
|
||||
package system, module will add itself globally as `window.markdownitMark`.
|
||||
|
||||
|
||||
## License
|
||||
|
||||
[MIT](https://github.com/markdown-it/markdown-it-mark/blob/master/LICENSE)
|
||||
133
lib/markdown-it-mark/index.js
Normal file
133
lib/markdown-it-mark/index.js
Normal file
@@ -0,0 +1,133 @@
|
||||
'use strict';
|
||||
|
||||
|
||||
module.exports = function ins_plugin(md) {
|
||||
// Insert each marker as a separate text token, and add it to delimiter list
|
||||
//
|
||||
function tokenize(state, silent) {
|
||||
var i, scanned, token, len, ch,
|
||||
start = state.pos,
|
||||
marker = state.src.charCodeAt(start);
|
||||
|
||||
if (silent) { return false; }
|
||||
|
||||
if (marker !== 0x3D/* = */) { return false; }
|
||||
|
||||
scanned = state.scanDelims(state.pos, true);
|
||||
len = scanned.length;
|
||||
ch = String.fromCharCode(marker);
|
||||
|
||||
if (len < 2) { return false; }
|
||||
|
||||
if (len % 2) {
|
||||
token = state.push('text', '', 0);
|
||||
token.content = ch;
|
||||
len--;
|
||||
}
|
||||
|
||||
for (i = 0; i < len; i += 2) {
|
||||
token = state.push('text', '', 0);
|
||||
token.content = ch + ch;
|
||||
|
||||
if (!scanned.can_open && !scanned.can_close) { continue; }
|
||||
|
||||
state.delimiters.push({
|
||||
marker: marker,
|
||||
length: 0, // disable "rule of 3" length checks meant for emphasis
|
||||
jump: i,
|
||||
token: state.tokens.length - 1,
|
||||
end: -1,
|
||||
open: scanned.can_open,
|
||||
close: scanned.can_close
|
||||
});
|
||||
}
|
||||
|
||||
state.pos += scanned.length;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
// Walk through delimiter list and replace text tokens with tags
|
||||
//
|
||||
function postProcess(state, delimiters) {
|
||||
var i, j,
|
||||
startDelim,
|
||||
endDelim,
|
||||
token,
|
||||
loneMarkers = [],
|
||||
max = delimiters.length;
|
||||
|
||||
for (i = 0; i < max; i++) {
|
||||
startDelim = delimiters[i];
|
||||
|
||||
if (startDelim.marker !== 0x3D/* = */) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (startDelim.end === -1) {
|
||||
continue;
|
||||
}
|
||||
|
||||
endDelim = delimiters[startDelim.end];
|
||||
|
||||
token = state.tokens[startDelim.token];
|
||||
token.type = 'mark_open';
|
||||
token.tag = 'mark';
|
||||
token.nesting = 1;
|
||||
token.markup = '==';
|
||||
token.content = '';
|
||||
|
||||
token = state.tokens[endDelim.token];
|
||||
token.type = 'mark_close';
|
||||
token.tag = 'mark';
|
||||
token.nesting = -1;
|
||||
token.markup = '==';
|
||||
token.content = '';
|
||||
|
||||
if (state.tokens[endDelim.token - 1].type === 'text' &&
|
||||
state.tokens[endDelim.token - 1].content === '=') {
|
||||
|
||||
loneMarkers.push(endDelim.token - 1);
|
||||
}
|
||||
}
|
||||
|
||||
// If a marker sequence has an odd number of characters, it's splitted
|
||||
// like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the
|
||||
// start of the sequence.
|
||||
//
|
||||
// So, we have to move all those markers after subsequent s_close tags.
|
||||
//
|
||||
while (loneMarkers.length) {
|
||||
i = loneMarkers.pop();
|
||||
j = i + 1;
|
||||
|
||||
while (j < state.tokens.length && state.tokens[j].type === 'mark_close') {
|
||||
j++;
|
||||
}
|
||||
|
||||
j--;
|
||||
|
||||
if (i !== j) {
|
||||
token = state.tokens[j];
|
||||
state.tokens[j] = state.tokens[i];
|
||||
state.tokens[i] = token;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
md.inline.ruler.before('emphasis', 'mark', tokenize);
|
||||
md.inline.ruler2.before('emphasis', 'mark', function (state) {
|
||||
var curr,
|
||||
tokens_meta = state.tokens_meta,
|
||||
max = (state.tokens_meta || []).length;
|
||||
|
||||
postProcess(state, state.delimiters);
|
||||
|
||||
for (curr = 0; curr < max; curr++) {
|
||||
if (tokens_meta[curr] && tokens_meta[curr].delimiters) {
|
||||
postProcess(state, tokens_meta[curr].delimiters);
|
||||
}
|
||||
}
|
||||
});
|
||||
};
|
||||
151
lib/markdown-it-prism/index.js
Normal file
151
lib/markdown-it-prism/index.js
Normal file
@@ -0,0 +1,151 @@
|
||||
import Prism from 'prismjs';
|
||||
import loadLanguages from 'prismjs/components/';
|
||||
|
||||
/**
|
||||
* A callback that can be used to perform custom initialisation of the Prism instance.
|
||||
*
|
||||
* @callback PrismInitialisationCallback
|
||||
* @param {Prism} prism
|
||||
* The Prism instance
|
||||
*/
|
||||
|
||||
/**
|
||||
* The options for the markdown-it-prism plugin
|
||||
*
|
||||
* @typedef {Object} MarkdownItPrismOptions
|
||||
* @property {String[]} plugins
|
||||
* Names of Prism plugins to load
|
||||
* @property {PrismInitialisationCallback} init
|
||||
* Callback for Prism initialisation
|
||||
* @property {String} defaultLanguageForUnknown
|
||||
* The language to use for code blocks that specify a language that Prism does not know
|
||||
* @property {String} defaultLanguageForUnspecified
|
||||
* The language to use for code block that do not specify a language
|
||||
* @property {String} defaultLanguage
|
||||
* Shorthand to set both {@code defaultLanguageForUnknown} and {@code defaultLanguageForUnspecified} to the same value
|
||||
*/
|
||||
// Default plugin options; see the MarkdownItPrismOptions typedef above.
const DEFAULTS = {
  // No Prism plugins are loaded unless explicitly requested.
  plugins: [],
  // Default initialisation callback is a no-op.
  init: () => {
  },
  // By default no fallback language is applied in either case.
  defaultLanguageForUnknown: undefined,
  defaultLanguageForUnspecified: undefined,
  defaultLanguage: undefined
};
|
||||
|
||||
|
||||
/**
 * Resolves the Prism language object for {@code lang}, loading the language
 * component on demand if it has not been registered yet.
 *
 * @param {String} lang
 *        Code of the language to load.
 * @return {Object} The Prism language object for the provided {@code lang} code. {@code undefined} if the language is not known to Prism.
 */
function loadPrismLang(lang) {
  if (!lang) return undefined;
  const cached = Prism.languages[lang];
  if (cached !== undefined) {
    return cached;
  }
  // Not registered yet: pull the component in, then re-check the registry
  // (still undefined for languages Prism does not know).
  loadLanguages([lang]);
  return Prism.languages[lang];
}
|
||||
|
||||
/**
 * Loads the provided Prism plugin.
 *
 * @param name
 *        Name of the plugin to load
 * @throws {Error} If there is no plugin with the provided {@code name}
 */
function loadPrismPlugin(name) {
  const pluginPath = `prismjs/plugins/${name}/prism-${name}`;
  try {
    require(pluginPath);
  } catch (e) {
    // Surface a friendlier message than the raw module-resolution error.
    throw new Error(`Cannot load Prism plugin "${name}". Please check the spelling.`);
  }
}
|
||||
|
||||
|
||||
/**
 * Select the language to use for highlighting, based on the provided options and the specified language.
 *
 * @param {Object} options
 *        The options that were used to initialise the plugin.
 * @param {String} lang
 *        Code of the language to highlight the text in.
 * @return {Array} An array where the first element is the name of the language to use, and the second element is the Prism language object for that language.
 */
function selectLanguage(options, lang) {
  const { defaultLanguageForUnspecified, defaultLanguageForUnknown } = options;

  // Empty language -> fall back to the "unspecified" default, if configured.
  let langToUse = lang;
  if (langToUse === '' && defaultLanguageForUnspecified !== undefined) {
    langToUse = defaultLanguageForUnspecified;
  }

  // Unknown language -> fall back to the "unknown" default, if configured.
  let prismLang = loadPrismLang(langToUse);
  if (prismLang === undefined && defaultLanguageForUnknown !== undefined) {
    langToUse = defaultLanguageForUnknown;
    prismLang = loadPrismLang(langToUse);
  }

  return [langToUse, prismLang];
}
|
||||
|
||||
/**
 * Highlights the provided text using Prism.
 *
 * @param {MarkdownIt} markdownit
 *        Instance of MarkdownIt Class. This argument is bound in markdownItPrism().
 * @param {MarkdownItPrismOptions} options
 *        The options that have been used to initialise the plugin. This argument is bound in markdownItPrism().
 * @param {String} text
 *        The text to highlight.
 * @param {String} lang
 *        Code of the language to highlight the text in.
 * @return {String} {@code text} wrapped in {@code <pre>} and {@code <code>}, both equipped with the appropriate class (markdown-it's langPrefix + lang). If Prism knows {@code lang}, {@code text} will be highlighted by it.
 */
function highlight(markdownit, options, text, lang) {
  const [langToUse, prismLang] = selectLanguage(options, lang);

  // Fall back to plain escaped text when Prism has no matching grammar.
  const code = prismLang
    ? Prism.highlight(text, prismLang)
    : markdownit.utils.escapeHtml(text);

  const classAttribute = langToUse
    ? ` class="${markdownit.options.langPrefix}${langToUse}"`
    : '';
  return `<pre${classAttribute}><code${classAttribute}>${code}</code></pre>`;
}
|
||||
|
||||
/**
 * Checks whether an option represents a valid Prism language
 *
 * @param {MarkdownItPrismOptions} options
 *        The options that have been used to initialise the plugin.
 * @param optionName
 *        The key of the option inside {@code options} that shall be checked.
 * @throws {Error} If the option is not set to a valid Prism language.
 */
function checkLanguageOption(options, optionName) {
  const language = options[optionName];
  // An unset option is always valid.
  if (language === undefined) {
    return;
  }
  if (loadPrismLang(language) === undefined) {
    throw new Error(`Bad option ${optionName}: There is no Prism language '${language}'.`);
  }
}
|
||||
|
||||
/**
 * Initialisation function of the plugin. This function is not called directly by clients, but is rather provided
 * to MarkdownIt's {@link MarkdownIt.use} function.
 *
 * @param {MarkdownIt} markdownit
 *        The markdown it instance the plugin is being registered to.
 * @param {MarkdownItPrismOptions} useroptions
 *        The options this plugin is being initialised with.
 */
export default function markdownItPrism(markdownit, useroptions) {
  const options = { ...DEFAULTS, ...useroptions };

  // Validate language options before resolving the shorthand below.
  checkLanguageOption(options, 'defaultLanguage');
  checkLanguageOption(options, 'defaultLanguageForUnknown');
  checkLanguageOption(options, 'defaultLanguageForUnspecified');
  // `defaultLanguage` is a shorthand for setting both specific defaults.
  options.defaultLanguageForUnknown = options.defaultLanguageForUnknown || options.defaultLanguage;
  options.defaultLanguageForUnspecified = options.defaultLanguageForUnspecified || options.defaultLanguage;

  options.plugins.forEach(loadPrismPlugin);
  options.init(Prism);

  // register ourselves as highlighter
  markdownit.options.highlight = (...args) => highlight(markdownit, options, ...args);
}
|
||||
40
lib/markdown-it-sub/README.md
Normal file
40
lib/markdown-it-sub/README.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# markdown-it-sub
|
||||
|
||||
[](https://travis-ci.org/markdown-it/markdown-it-sub)
|
||||
[](https://www.npmjs.org/package/markdown-it-sub)
|
||||
[](https://coveralls.io/r/markdown-it/markdown-it-sub?branch=master)
|
||||
|
||||
> Subscript (`<sub>`) tag plugin for [markdown-it](https://github.com/markdown-it/markdown-it) markdown parser.
|
||||
|
||||
__v1.+ requires `markdown-it` v4.+, see changelog.__
|
||||
|
||||
`H~2~O` => `H<sub>2</sub>O`
|
||||
|
||||
Markup is based on [pandoc](http://johnmacfarlane.net/pandoc/README.html#superscripts-and-subscripts) definition. But nested markup is currently not supported.
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
node.js, browser:
|
||||
|
||||
```bash
|
||||
npm install markdown-it-sub --save
|
||||
bower install markdown-it-sub --save
|
||||
```
|
||||
|
||||
## Use
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')()
|
||||
.use(require('markdown-it-sub'));
|
||||
|
||||
md.render('H~2~O') // => '<p>H<sub>2</sub>O</p>'
|
||||
```
|
||||
|
||||
_Differences in browser._ If you load script directly into the page, without
|
||||
package system, module will add itself globally as `window.markdownitSub`.
|
||||
|
||||
|
||||
## License
|
||||
|
||||
[MIT](https://github.com/markdown-it/markdown-it-sub/blob/master/LICENSE)
|
||||
66
lib/markdown-it-sub/index.js
Normal file
66
lib/markdown-it-sub/index.js
Normal file
@@ -0,0 +1,66 @@
|
||||
// Process ~subscript~

'use strict';

// same as UNESCAPE_MD_RE plus a space
var UNESCAPE_RE = /\\([ \\!"#$%&'()*+,.\/:;<=>?@[\]^_`{|}~-])/g;


// Inline rule: parses `~text~` into sub_open / text / sub_close tokens.
// Nested markup inside the tilde pair is not supported (content becomes
// one plain-text token after unescaping).
function subscript(state, silent) {
  var found,
      content,
      token,
      max = state.posMax,
      start = state.pos;

  if (state.src.charCodeAt(start) !== 0x7E/* ~ */) { return false; }
  if (silent) { return false; } // don't run any pairs in validation mode
  if (start + 2 >= max) { return false; }

  state.pos = start + 1;

  // Scan forward for the closing '~', skipping over other inline tokens
  // (e.g. escapes) so an escaped tilde does not terminate the run.
  while (state.pos < max) {
    if (state.src.charCodeAt(state.pos) === 0x7E/* ~ */) {
      found = true;
      break;
    }

    state.md.inline.skipToken(state);
  }

  // No closer, or empty `~~` content: restore position and give up.
  if (!found || start + 1 === state.pos) {
    state.pos = start;
    return false;
  }

  content = state.src.slice(start + 1, state.pos);

  // don't allow unescaped spaces/newlines inside
  if (content.match(/(^|[^\\])(\\\\)*\s/)) {
    state.pos = start;
    return false;
  }

  // found!
  state.posMax = state.pos;
  state.pos = start + 1;

  // Earlier we checked !silent, but this implementation does not need it
  token = state.push('sub_open', 'sub', 1);
  token.markup = '~';

  token = state.push('text', '', 0);
  // Strip backslash escapes from the captured content.
  token.content = content.replace(UNESCAPE_RE, '$1');

  token = state.push('sub_close', 'sub', -1);
  token.markup = '~';

  // Resume parsing just past the closing '~'; restore the outer posMax.
  state.pos = state.posMax + 1;
  state.posMax = max;
  return true;
}
|
||||
|
||||
|
||||
// Plugin entry point: registers the subscript rule after emphasis so
// `~...~` pairs are parsed once emphasis markers have been handled.
module.exports = function sub_plugin(md) {
  md.inline.ruler.after('emphasis', 'sub', subscript);
};
|
||||
40
lib/markdown-it-sup/README.md
Normal file
40
lib/markdown-it-sup/README.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# markdown-it-sup
|
||||
|
||||
[](https://travis-ci.org/markdown-it/markdown-it-sup)
|
||||
[](https://www.npmjs.org/package/markdown-it-sup)
|
||||
[](https://coveralls.io/r/markdown-it/markdown-it-sup?branch=master)
|
||||
|
||||
> Superscript (`<sup>`) tag plugin for [markdown-it](https://github.com/markdown-it/markdown-it) markdown parser.
|
||||
|
||||
__v1.+ requires `markdown-it` v4.+, see changelog.__
|
||||
|
||||
`29^th^` => `29<sup>th</sup>`
|
||||
|
||||
Markup is based on [pandoc](http://johnmacfarlane.net/pandoc/README.html#superscripts-and-subscripts) definition. But nested markup is currently not supported.
|
||||
|
||||
|
||||
## Install
|
||||
|
||||
node.js, browser:
|
||||
|
||||
```bash
|
||||
npm install markdown-it-sup --save
|
||||
bower install markdown-it-sup --save
|
||||
```
|
||||
|
||||
## Use
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')()
|
||||
.use(require('markdown-it-sup'));
|
||||
|
||||
md.render('29^th^') // => '<p>29<sup>th</sup></p>'
|
||||
```
|
||||
|
||||
_Differences in browser._ If you load script directly into the page, without
|
||||
package system, module will add itself globally as `window.markdownitSup`.
|
||||
|
||||
|
||||
## License
|
||||
|
||||
[MIT](https://github.com/markdown-it/markdown-it-sup/blob/master/LICENSE)
|
||||
65
lib/markdown-it-sup/index.js
Normal file
65
lib/markdown-it-sup/index.js
Normal file
@@ -0,0 +1,65 @@
|
||||
// Process ^superscript^

'use strict';

// same as UNESCAPE_MD_RE plus a space
var UNESCAPE_RE = /\\([ \\!"#$%&'()*+,.\/:;<=>?@[\]^_`{|}~-])/g;

// Inline rule: parses `^text^` into sup_open / text / sup_close tokens.
// Nested markup inside the caret pair is not supported (content becomes
// one plain-text token after unescaping).
function superscript(state, silent) {
  var found,
      content,
      token,
      max = state.posMax,
      start = state.pos;

  if (state.src.charCodeAt(start) !== 0x5E/* ^ */) { return false; }
  if (silent) { return false; } // don't run any pairs in validation mode
  if (start + 2 >= max) { return false; }

  state.pos = start + 1;

  // Scan forward for the closing '^', skipping over other inline tokens
  // (e.g. escapes) so an escaped caret does not terminate the run.
  while (state.pos < max) {
    if (state.src.charCodeAt(state.pos) === 0x5E/* ^ */) {
      found = true;
      break;
    }

    state.md.inline.skipToken(state);
  }

  // No closer, or empty `^^` content: restore position and give up.
  if (!found || start + 1 === state.pos) {
    state.pos = start;
    return false;
  }

  content = state.src.slice(start + 1, state.pos);

  // don't allow unescaped spaces/newlines inside
  if (content.match(/(^|[^\\])(\\\\)*\s/)) {
    state.pos = start;
    return false;
  }

  // found!
  state.posMax = state.pos;
  state.pos = start + 1;

  // Earlier we checked !silent, but this implementation does not need it
  token = state.push('sup_open', 'sup', 1);
  token.markup = '^';

  token = state.push('text', '', 0);
  // Strip backslash escapes from the captured content.
  token.content = content.replace(UNESCAPE_RE, '$1');

  token = state.push('sup_close', 'sup', -1);
  token.markup = '^';

  // Resume parsing just past the closing '^'; restore the outer posMax.
  state.pos = state.posMax + 1;
  state.posMax = max;
  return true;
}
|
||||
|
||||
|
||||
// Plugin entry point: registers the superscript rule after emphasis so
// `^...^` pairs are parsed once emphasis markers have been handled.
module.exports = function sup_plugin(md) {
  md.inline.ruler.after('emphasis', 'sup', superscript);
};
|
||||
313
lib/markdown-it-toc-and-anchor/index.js
Normal file
313
lib/markdown-it-toc-and-anchor/index.js
Normal file
@@ -0,0 +1,313 @@
|
||||
import clone from "clone";
|
||||
import uslug from "uslug";
|
||||
import Token from "./markdown-it/lib/token";
|
||||
|
||||
// TOC placeholder marker and the regex used to find it in the source.
const TOC = "@[toc]";
const TOC_RE = /^@\[toc\]/im;

// Module-level state shared between the core rule and the renderers:
// a cloned markdown-it instance used to render the TOC markdown,
// the per-document heading-id counters, and the last rendered TOC html.
let markdownItSecondInstance = () => {};
let headingIds = {};
let tocHtml = "";

// Repeats `string` `num` times (used for bullet-list indentation).
const repeat = (string, num) => new Array(num + 1).join(string);
|
||||
|
||||
// Slugifies `string` and makes the result unique within `headingIds` by
// appending "-2", "-3", ... for repeated slugs. Mutates `headingIds` to
// track how many times each slug has been seen.
const makeSafe = (string, headingIds, slugifyFn) => {
  const key = slugifyFn(string); // slugify
  const seen = (headingIds[key] || 0) + 1;
  headingIds[key] = seen;
  return seen > 1 ? `${key}-${seen}` : key;
};
|
||||
|
||||
// Builds a single-space inline text token (plain object copy of a Token),
// used to separate the anchor link from the heading text.
const space = () => {
  return { ...new Token("text", "", 0), content: " " };
};
|
||||
|
||||
// Builds the inline tokens for the anchor link symbol. When
// anchorLinkSymbolClassName is set, the symbol text is wrapped in a
// <span> carrying that class; otherwise only the bare text token is
// returned.
const renderAnchorLinkSymbol = options => {
  const symbolToken = {
    ...new Token("text", "", 0),
    content: options.anchorLinkSymbol
  };

  if (!options.anchorLinkSymbolClassName) {
    return [symbolToken];
  }

  return [
    {
      ...new Token("span_open", "span", 1),
      attrs: [["class", options.anchorLinkSymbolClassName]]
    },
    symbolToken,
    new Token("span_close", "span", -1)
  ];
};
|
||||
|
||||
// Injects an anchor <a href="#anchor"> into the inline token that holds the
// heading's children (tokens[idx + 1]). Either wraps the whole heading text
// in the link, or inserts a separate symbol link before/after the text.
const renderAnchorLink = (anchor, options, tokens, idx) => {
  const attrs = [];

  if (options.anchorClassName != null) {
    attrs.push(["class", options.anchorClassName]);
  }

  attrs.push(["href", `#${anchor}`]);

  const openLinkToken = {
    ...new Token("link_open", "a", 1),
    attrs
  };
  const closeLinkToken = new Token("link_close", "a", -1);

  if (options.wrapHeadingTextInAnchor) {
    // Surround the existing heading children with the link pair.
    tokens[idx + 1].children.unshift(openLinkToken);
    tokens[idx + 1].children.push(closeLinkToken);
  } else {
    const linkTokens = [
      openLinkToken,
      ...renderAnchorLinkSymbol(options),
      closeLinkToken
    ];

    // `push` or `unshift` according to anchorLinkBefore option
    // space is at the opposite side.
    const actionOnArray = {
      false: "push",
      true: "unshift"
    };

    // insert space between anchor link and heading ?
    if (options.anchorLinkSpace) {
      // Note: object keys are strings, so the boolean is coerced to
      // "true"/"false" when selecting the array method.
      linkTokens[actionOnArray[!options.anchorLinkBefore]](space());
    }
    tokens[idx + 1].children[actionOnArray[options.anchorLinkBefore]](
      ...linkTokens
    );
  }
};
|
||||
|
||||
// Recursively serialises the heading tree into a markdown bullet list,
// two spaces of indentation per nesting level. Headings that contain
// markdown links are flattened to their link text to avoid nested links.
const treeToMarkdownBulletList = (tree, indent = 0) =>
  tree
    .map(item => {
      const indentation = "  ";
      let node = `${repeat(indentation, indent)}*`;
      if (item.heading.content) {
        // Strip inline markdown links: [text](url) -> text.
        const contentWithoutAnchor = item.heading.content.replace(
          /\[([^\]]*)\]\([^)]*\)/g,
          "$1"
        );
        node += " " + `[${contentWithoutAnchor}](#${item.heading.anchor})\n`;
      } else {
        // Placeholder node created for a skipped heading level.
        node += "\n";
      }
      if (item.nodes.length) {
        node += treeToMarkdownBulletList(item.nodes, indent + 1);
      }
      return node;
    })
    .join("");
|
||||
|
||||
// Converts the flat list of collected headings into a nested tree (bounded
// by tocFirstLevel/tocLastLevel) and renders it as markdown bullet-list
// text via treeToMarkdownBulletList.
const generateTocMarkdownFromArray = (headings, options) => {
  const tree = { nodes: [] };
  // create an ast
  headings.forEach(heading => {
    // Ignore headings outside the configured level range.
    if (
      heading.level < options.tocFirstLevel ||
      heading.level > options.tocLastLevel
    ) {
      return;
    }

    // Walk down the tree to the node matching this heading's depth,
    // creating empty placeholder nodes for any skipped levels.
    let i = 1;
    let lastItem = tree;
    for (; i < heading.level - options.tocFirstLevel + 1; i++) {
      if (lastItem.nodes.length === 0) {
        lastItem.nodes.push({
          heading: {},
          nodes: []
        });
      }
      lastItem = lastItem.nodes[lastItem.nodes.length - 1];
    }
    lastItem.nodes.push({
      heading: heading,
      nodes: []
    });
  });

  return treeToMarkdownBulletList(tree.nodes);
};
|
||||
|
||||
/**
 * markdown-it plugin: table of contents (`@[toc]`) plus heading anchors.
 *
 * Registers three things on `md`:
 *  - a core rule ("init_toc") that walks the token stream, records every
 *    heading, and renders the generated TOC markdown to HTML with a cloned
 *    markdown-it instance;
 *  - an inline rule ("toc") that recognizes the `@[toc]` marker and emits
 *    toc_open / toc_body / toc_close tokens;
 *  - renderer rules that give headings `id` attributes / anchor links and
 *    expand `toc_body` to the generated HTML.
 *
 * Uses module-level helpers/state declared elsewhere in this file:
 * `markdownItSecondInstance`, `headingIds`, `tocHtml`, `TOC`, `TOC_RE`,
 * `clone`, `uslug`, `makeSafe`, `generateTocMarkdownFromArray`,
 * `renderAnchorLink`.
 *
 * @param {Object} md      markdown-it instance to extend
 * @param {Object} options user options, merged over the defaults below
 */
export default function(md, options) {
  // Fill in defaults; caller-supplied keys win.
  options = {
    toc: true,
    tocClassName: "header-toc",
    tocFirstLevel: 1,
    tocLastLevel: 6,
    tocCallback: null,
    anchorLink: true,
    anchorLinkSymbol: "#",
    anchorLinkBefore: true,
    anchorClassName: "header-anchor",
    resetIds: true,
    anchorLinkSpace: true,
    anchorLinkSymbolClassName: null,
    wrapHeadingTextInAnchor: false,
    ...options
  };

  // Clone of the instance, used below to parse/render the TOC markdown
  // itself (kept in module-level state).
  markdownItSecondInstance = clone(md);

  // initialize key ids for each instance
  headingIds = {};

  md.core.ruler.push("init_toc", function(state) {
    const tokens = state.tokens;

    // reset key ids for each document
    if (options.resetIds) {
      headingIds = {};
    }

    const tocArray = [];
    let tocMarkdown = "";
    let tocTokens = [];

    // Use the caller-provided slugifier when given, otherwise uslug.
    const slugifyFn =
      (typeof options.slugify === "function" && options.slugify) || uslug;

    // Collect every heading: the token right before `heading_close` is the
    // heading's inline content.
    for (let i = 0; i < tokens.length; i++) {
      if (tokens[i].type !== "heading_close") {
        continue;
      }

      const heading = tokens[i - 1];
      const heading_close = tokens[i];

      if (heading.type === "inline") {
        let content;
        if (
          heading.children &&
          heading.children.length > 0 &&
          heading.children[0].type === "link_open"
        ) {
          // headings that contain links have to be processed
          // differently since nested links aren't allowed in markdown
          content = heading.children[1].content;
          heading._tocAnchor = makeSafe(content, headingIds, slugifyFn);
        } else {
          content = heading.content;
          // Slugify the concatenated text of all child tokens.
          heading._tocAnchor = makeSafe(
            heading.children.reduce((acc, t) => acc + t.content, ""),
            headingIds,
            slugifyFn
          );
        }

        if (options.anchorLinkPrefix) {
          heading._tocAnchor = options.anchorLinkPrefix + heading._tocAnchor;
        }

        tocArray.push({
          content,
          anchor: heading._tocAnchor,
          // heading_close.tag is "h1".."h6"; take the digit as the level.
          level: +heading_close.tag.substr(1, 1)
        });
      }
    }

    // Render the collected headings: markdown -> tokens -> HTML.
    tocMarkdown = generateTocMarkdownFromArray(tocArray, options);

    tocTokens = markdownItSecondInstance.parse(tocMarkdown, {});

    // Adding tocClassName to 'ul' element
    if (
      typeof tocTokens[0] === "object" &&
      tocTokens[0].type === "bullet_list_open"
    ) {
      const attrs = (tocTokens[0].attrs = tocTokens[0].attrs || []);

      if (options.tocClassName != null) {
        attrs.push(["class", options.tocClassName]);
      }
    }

    // Stash the HTML in module-level state; the toc_body renderer rule
    // registered below reads it.
    tocHtml = markdownItSecondInstance.renderer.render(
      tocTokens,
      markdownItSecondInstance.options
    );

    // Notify exactly one callback, preferring env over the plugin options
    // over the markdown-it instance options.
    if (typeof state.env.tocCallback === "function") {
      state.env.tocCallback.call(undefined, tocMarkdown, tocArray, tocHtml);
    } else if (typeof options.tocCallback === "function") {
      options.tocCallback.call(undefined, tocMarkdown, tocArray, tocHtml);
    } else if (typeof md.options.tocCallback === "function") {
      md.options.tocCallback.call(undefined, tocMarkdown, tocArray, tocHtml);
    }
  });

  // Inline rule: turn the `@[toc]` marker into toc_* tokens.
  md.inline.ruler.after("emphasis", "toc", (state, silent) => {
    let token;
    let match;

    if (
      // Reject if the token does not start with @[
      state.src.charCodeAt(state.pos) !== 0x40 ||
      state.src.charCodeAt(state.pos + 1) !== 0x5b ||
      // Don’t run any pairs in validation mode
      silent
    ) {
      return false;
    }

    // Detect TOC markdown
    match = TOC_RE.exec(state.src);
    match = !match ? [] : match.filter(m => m);
    if (match.length < 1) {
      return false;
    }

    // Build content
    token = state.push("toc_open", "toc", 1);
    token.markup = TOC;
    token = state.push("toc_body", "", 0);
    token = state.push("toc_close", "toc", -1);

    // Update pos so the parser can continue
    state.pos = state.pos + 6;

    return true;
  });

  // Wrap the previously-installed heading_open renderer (or the default
  // token renderer) so ids/anchors can be injected before delegating.
  const originalHeadingOpen =
    md.renderer.rules.heading_open ||
    function(...args) {
      const [tokens, idx, options, , self] = args;
      return self.renderToken(tokens, idx, options);
    };

  md.renderer.rules.heading_open = function(...args) {
    const [tokens, idx, , ,] = args;

    // The inline token following heading_open carries the slug computed in
    // the core rule above.
    const attrs = (tokens[idx].attrs = tokens[idx].attrs || []);
    const anchor = tokens[idx + 1]._tocAnchor;
    attrs.push(["id", anchor]);

    if (options.anchorLink) {
      renderAnchorLink(anchor, options, ...args);
    }

    return originalHeadingOpen.apply(this, args);
  };

  // toc_open/close render to nothing; toc_body expands to the TOC HTML
  // only when the feature is enabled.
  md.renderer.rules.toc_open = () => "";
  md.renderer.rules.toc_close = () => "";
  md.renderer.rules.toc_body = () => "";

  if (options.toc) {
    md.renderer.rules.toc_body = () => tocHtml;
  }
}
|
||||
302
lib/markdown-it/README.md
Normal file
302
lib/markdown-it/README.md
Normal file
@@ -0,0 +1,302 @@
|
||||
# markdown-it
|
||||
|
||||
[](https://travis-ci.org/markdown-it/markdown-it)
|
||||
[](https://www.npmjs.org/package/markdown-it)
|
||||
[](https://coveralls.io/github/markdown-it/markdown-it?branch=master)
|
||||
[](https://gitter.im/markdown-it/markdown-it)
|
||||
|
||||
> Markdown parser done right. Fast and easy to extend.
|
||||
|
||||
__[Live demo](https://markdown-it.github.io)__
|
||||
|
||||
- Follows the __[CommonMark spec](http://spec.commonmark.org/)__ + adds syntax extensions & sugar (URL autolinking, typographer).
|
||||
- Configurable syntax! You can add new rules and even replace existing ones.
|
||||
- High speed.
|
||||
- [Safe](https://github.com/markdown-it/markdown-it/tree/master/docs/security.md) by default.
|
||||
- Community-written __[plugins](https://www.npmjs.org/browse/keyword/markdown-it-plugin)__ and [other packages](https://www.npmjs.org/browse/keyword/markdown-it) on npm.
|
||||
|
||||
__Table of contents__
|
||||
|
||||
- [markdown-it](#markdown-it)
|
||||
- [Install](#install)
|
||||
- [Usage examples](#usage-examples)
|
||||
- [Simple](#simple)
|
||||
- [Init with presets and options](#init-with-presets-and-options)
|
||||
- [Plugins load](#plugins-load)
|
||||
- [Syntax highlighting](#syntax-highlighting)
|
||||
- [Linkify](#linkify)
|
||||
- [API](#api)
|
||||
- [Syntax extensions](#syntax-extensions)
|
||||
- [Manage rules](#manage-rules)
|
||||
- [Benchmark](#benchmark)
|
||||
- [Support markdown-it](#support-markdown-it)
|
||||
- [Authors](#authors)
|
||||
- [References / Thanks](#references--thanks)
|
||||
|
||||
## Install
|
||||
|
||||
**node.js** & **bower**:
|
||||
|
||||
```bash
|
||||
npm install markdown-it --save
|
||||
bower install markdown-it --save
|
||||
```
|
||||
|
||||
**browser (CDN):**
|
||||
|
||||
- [jsDelivr CDN](http://www.jsdelivr.com/#!markdown-it "jsDelivr CDN")
|
||||
- [cdnjs.com CDN](https://cdnjs.com/libraries/markdown-it "cdnjs.com")
|
||||
|
||||
|
||||
## Usage examples
|
||||
|
||||
See also:
|
||||
|
||||
- __[API documentation](https://markdown-it.github.io/markdown-it/)__ - for more
|
||||
info and examples.
|
||||
- [Development info](https://github.com/markdown-it/markdown-it/tree/master/docs) -
|
||||
for plugins writers.
|
||||
|
||||
|
||||
### Simple
|
||||
|
||||
```js
|
||||
// node.js, "classic" way:
|
||||
var MarkdownIt = require('markdown-it'),
|
||||
md = new MarkdownIt();
|
||||
var result = md.render('# markdown-it rulezz!');
|
||||
|
||||
// node.js, the same, but with sugar:
|
||||
var md = require('markdown-it')();
|
||||
var result = md.render('# markdown-it rulezz!');
|
||||
|
||||
// browser without AMD, added to "window" on script load
|
||||
// Note, there is no dash in "markdownit".
|
||||
var md = window.markdownit();
|
||||
var result = md.render('# markdown-it rulezz!');
|
||||
```
|
||||
|
||||
Single line rendering, without paragraph wrap:
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')();
|
||||
var result = md.renderInline('__markdown-it__ rulezz!');
|
||||
```
|
||||
|
||||
|
||||
### Init with presets and options
|
||||
|
||||
(*) presets define combinations of active rules and options. Can be
|
||||
`"commonmark"`, `"zero"` or `"default"` (if skipped). See
|
||||
[API docs](https://markdown-it.github.io/markdown-it/#MarkdownIt.new) for more details.
|
||||
|
||||
```js
|
||||
// commonmark mode
|
||||
var md = require('markdown-it')('commonmark');
|
||||
|
||||
// default mode
|
||||
var md = require('markdown-it')();
|
||||
|
||||
// enable everything
|
||||
var md = require('markdown-it')({
|
||||
html: true,
|
||||
linkify: true,
|
||||
typographer: true
|
||||
});
|
||||
|
||||
// full options list (defaults)
|
||||
var md = require('markdown-it')({
|
||||
html: false, // Enable HTML tags in source
|
||||
xhtmlOut: false, // Use '/' to close single tags (<br />).
|
||||
// This is only for full CommonMark compatibility.
|
||||
breaks: false, // Convert '\n' in paragraphs into <br>
|
||||
langPrefix: 'language-', // CSS language prefix for fenced blocks. Can be
|
||||
// useful for external highlighters.
|
||||
linkify: false, // Autoconvert URL-like text to links
|
||||
|
||||
// Enable some language-neutral replacement + quotes beautification
|
||||
typographer: false,
|
||||
|
||||
// Double + single quotes replacement pairs, when typographer enabled,
|
||||
// and smartquotes on. Could be either a String or an Array.
|
||||
//
|
||||
// For example, you can use '«»„“' for Russian, '„“‚‘' for German,
|
||||
// and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
|
||||
quotes: '“”‘’',
|
||||
|
||||
// Highlighter function. Should return escaped HTML,
|
||||
// or '' if the source string is not changed and should be escaped externally.
|
||||
// If result starts with <pre... internal wrapper is skipped.
|
||||
highlight: function (/*str, lang*/) { return ''; }
|
||||
});
|
||||
```
|
||||
|
||||
### Plugins load
|
||||
|
||||
```js
|
||||
var md = require('markdown-it')()
|
||||
.use(plugin1)
|
||||
.use(plugin2, opts, ...)
|
||||
.use(plugin3);
|
||||
```
|
||||
|
||||
|
||||
### Syntax highlighting
|
||||
|
||||
Apply syntax highlighting to fenced code blocks with the `highlight` option:
|
||||
|
||||
```js
|
||||
var hljs = require('highlight.js'); // https://highlightjs.org/
|
||||
|
||||
// Actual default values
|
||||
var md = require('markdown-it')({
|
||||
highlight: function (str, lang) {
|
||||
if (lang && hljs.getLanguage(lang)) {
|
||||
try {
|
||||
return hljs.highlight(lang, str).value;
|
||||
} catch (__) {}
|
||||
}
|
||||
|
||||
return ''; // use external default escaping
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
Or with full wrapper override (if you need assign class to `<pre>`):
|
||||
|
||||
```js
|
||||
var hljs = require('highlight.js'); // https://highlightjs.org/
|
||||
|
||||
// Actual default values
|
||||
var md = require('markdown-it')({
|
||||
highlight: function (str, lang) {
|
||||
if (lang && hljs.getLanguage(lang)) {
|
||||
try {
|
||||
return '<pre class="hljs"><code>' +
|
||||
hljs.highlight(lang, str, true).value +
|
||||
'</code></pre>';
|
||||
} catch (__) {}
|
||||
}
|
||||
|
||||
return '<pre class="hljs"><code>' + md.utils.escapeHtml(str) + '</code></pre>';
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Linkify
|
||||
|
||||
`linkify: true` uses [linkify-it](https://github.com/markdown-it/linkify-it). To
|
||||
configure linkify-it, access the linkify instance through `md.linkify`:
|
||||
|
||||
```js
|
||||
md.linkify.tlds('.py', false); // disables .py as top level domain
|
||||
```
|
||||
|
||||
|
||||
## API
|
||||
|
||||
__[API documentation](https://markdown-it.github.io/markdown-it/)__
|
||||
|
||||
If you are going to write plugins - take a look at
|
||||
[Development info](https://github.com/markdown-it/markdown-it/tree/master/docs).
|
||||
|
||||
|
||||
## Syntax extensions
|
||||
|
||||
Embedded (enabled by default):
|
||||
|
||||
- [Tables](https://help.github.com/articles/organizing-information-with-tables/) (GFM)
|
||||
- [Strikethrough](https://help.github.com/articles/basic-writing-and-formatting-syntax/#styling-text) (GFM)
|
||||
|
||||
Via plugins:
|
||||
|
||||
- [subscript](https://github.com/markdown-it/markdown-it-sub)
|
||||
- [superscript](https://github.com/markdown-it/markdown-it-sup)
|
||||
- [footnote](https://github.com/markdown-it/markdown-it-footnote)
|
||||
- [definition list](https://github.com/markdown-it/markdown-it-deflist)
|
||||
- [abbreviation](https://github.com/markdown-it/markdown-it-abbr)
|
||||
- [emoji](https://github.com/markdown-it/markdown-it-emoji)
|
||||
- [custom container](https://github.com/markdown-it/markdown-it-container)
|
||||
- [insert](https://github.com/markdown-it/markdown-it-ins)
|
||||
- [mark](https://github.com/markdown-it/markdown-it-mark)
|
||||
- ... and [others](https://www.npmjs.org/browse/keyword/markdown-it-plugin)
|
||||
|
||||
|
||||
### Manage rules
|
||||
|
||||
By default all rules are enabled, but can be restricted by options. On plugin
|
||||
load all its rules are enabled automatically.
|
||||
|
||||
```js
|
||||
// Activate/deactivate rules, with currying
|
||||
var md = require('markdown-it')()
|
||||
.disable([ 'link', 'image' ])
|
||||
.enable([ 'link' ])
|
||||
.enable('image');
|
||||
|
||||
// Enable everything
|
||||
md = require('markdown-it')({
|
||||
html: true,
|
||||
linkify: true,
|
||||
typographer: true,
|
||||
});
|
||||
```
|
||||
|
||||
|
||||
## Benchmark
|
||||
|
||||
Here is the result of readme parse at MB Pro Retina 2013 (2.4 GHz):
|
||||
|
||||
```bash
|
||||
make benchmark-deps
|
||||
benchmark/benchmark.js readme
|
||||
|
||||
Selected samples: (1 of 28)
|
||||
> README
|
||||
|
||||
Sample: README.md (7774 bytes)
|
||||
> commonmark-reference x 1,222 ops/sec ±0.96% (97 runs sampled)
|
||||
> current x 743 ops/sec ±0.84% (97 runs sampled)
|
||||
> current-commonmark x 1,568 ops/sec ±0.84% (98 runs sampled)
|
||||
> marked x 1,587 ops/sec ±4.31% (93 runs sampled)
|
||||
```
|
||||
|
||||
__Note.__ CommonMark version runs with [simplified link normalizers](https://github.com/markdown-it/markdown-it/blob/master/benchmark/implementations/current-commonmark/index.js)
|
||||
for more "honest" compare. Difference is ~ 1.5x.
|
||||
|
||||
As you can see, `markdown-it` doesn't pay with speed for its flexibility.
|
||||
Slowdown of "full" version caused by additional features not available in
|
||||
other implementations.
|
||||
|
||||
|
||||
Support markdown-it
|
||||
-------------------
|
||||
|
||||
You can support this project via [Tidelift subscription](https://tidelift.com/subscription/pkg/npm-markdown-it?utm_source=npm-markdown-it&utm_medium=referral&utm_campaign=readme).
|
||||
|
||||
|
||||
## Authors
|
||||
|
||||
- Alex Kocharin [github/rlidwka](https://github.com/rlidwka)
|
||||
- Vitaly Puzrin [github/puzrin](https://github.com/puzrin)
|
||||
|
||||
_markdown-it_ is the result of the decision of the authors who contributed to
|
||||
99% of the _Remarkable_ code to move to a project with the same authorship but
|
||||
new leadership (Vitaly and Alex). It's not a fork.
|
||||
|
||||
## References / Thanks
|
||||
|
||||
Big thanks to [John MacFarlane](https://github.com/jgm) for his work on the
|
||||
CommonMark spec and reference implementations. His work saved us a lot of time
|
||||
during this project's development.
|
||||
|
||||
**Related Links:**
|
||||
|
||||
- https://github.com/jgm/CommonMark - reference CommonMark implementations in C & JS,
|
||||
also contains latest spec & online demo.
|
||||
- http://talk.commonmark.org - CommonMark forum, good place to collaborate
|
||||
developers' efforts.
|
||||
|
||||
**Ports**
|
||||
|
||||
- [motion-markdown-it](https://github.com/digitalmoksha/motion-markdown-it) - Ruby/RubyMotion
|
||||
4
lib/markdown-it/index.js
Normal file
4
lib/markdown-it/index.js
Normal file
@@ -0,0 +1,4 @@
|
||||
'use strict';
|
||||
|
||||
|
||||
module.exports = require('./lib/');
|
||||
6
lib/markdown-it/lib/common/entities.js
Normal file
6
lib/markdown-it/lib/common/entities.js
Normal file
@@ -0,0 +1,6 @@
|
||||
// HTML5 entities map: { name -> utf16string }
|
||||
//
|
||||
'use strict';
|
||||
|
||||
/*eslint quotes:0*/
|
||||
module.exports = require('entities/lib/maps/entities.json');
|
||||
71
lib/markdown-it/lib/common/html_blocks.js
Normal file
71
lib/markdown-it/lib/common/html_blocks.js
Normal file
@@ -0,0 +1,71 @@
|
||||
// List of valid html block names, according to the CommonMark spec
|
||||
// http://jgm.github.io/CommonMark/spec.html#html-blocks
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
module.exports = [
|
||||
'address',
|
||||
'article',
|
||||
'aside',
|
||||
'base',
|
||||
'basefont',
|
||||
'blockquote',
|
||||
'body',
|
||||
'caption',
|
||||
'center',
|
||||
'col',
|
||||
'colgroup',
|
||||
'dd',
|
||||
'details',
|
||||
'dialog',
|
||||
'dir',
|
||||
'div',
|
||||
'dl',
|
||||
'dt',
|
||||
'fieldset',
|
||||
'figcaption',
|
||||
'figure',
|
||||
'footer',
|
||||
'form',
|
||||
'frame',
|
||||
'frameset',
|
||||
'h1',
|
||||
'h2',
|
||||
'h3',
|
||||
'h4',
|
||||
'h5',
|
||||
'h6',
|
||||
'head',
|
||||
'header',
|
||||
'hr',
|
||||
'html',
|
||||
'iframe',
|
||||
'legend',
|
||||
'li',
|
||||
'link',
|
||||
'main',
|
||||
'menu',
|
||||
'menuitem',
|
||||
'meta',
|
||||
'nav',
|
||||
'noframes',
|
||||
'ol',
|
||||
'optgroup',
|
||||
'option',
|
||||
'p',
|
||||
'param',
|
||||
'section',
|
||||
'source',
|
||||
'summary',
|
||||
'table',
|
||||
'tbody',
|
||||
'td',
|
||||
'tfoot',
|
||||
'th',
|
||||
'thead',
|
||||
'title',
|
||||
'tr',
|
||||
'track',
|
||||
'ul'
|
||||
];
|
||||
28
lib/markdown-it/lib/common/html_re.js
Normal file
28
lib/markdown-it/lib/common/html_re.js
Normal file
@@ -0,0 +1,28 @@
|
||||
// Regexps to match html elements
//
// Built up piecewise from source fragments (attribute names/values,
// open/close tags, comments, processing instructions, declarations,
// CDATA), then combined into the two anchored regexps exported below.

'use strict';

// Attribute name: letter/underscore/colon start, then word chars and :._-
var attr_name = '[a-zA-Z_:][a-zA-Z0-9:._-]*';

// Attribute values: unquoted, single-quoted, or double-quoted.
var unquoted = '[^"\'=<>`\\x00-\\x20]+';
var single_quoted = "'[^']*'";
var double_quoted = '"[^"]*"';

var attr_value = '(?:' + unquoted + '|' + single_quoted + '|' + double_quoted + ')';

// One attribute, with an optional `= value` part.
var attribute = '(?:\\s+' + attr_name + '(?:\\s*=\\s*' + attr_value + ')?)';

// Opening tag, optionally self-closing (`/>`).
var open_tag = '<[A-Za-z][A-Za-z0-9\\-]*' + attribute + '*\\s*\\/?>';

var close_tag = '<\\/[A-Za-z][A-Za-z0-9\\-]*\\s*>';
var comment = '<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->';
var processing = '<[?].*?[?]>';
var declaration = '<![A-Z]+\\s+[^>]*>';
var cdata = '<!\\[CDATA\\[[\\s\\S]*?\\]\\]>';

// Any complete HTML construct, anchored at the start of the string.
var HTML_TAG_RE = new RegExp('^(?:' + open_tag + '|' + close_tag + '|' + comment +
                        '|' + processing + '|' + declaration + '|' + cdata + ')');
// Open or close tag only (no comments/PIs/declarations/CDATA).
var HTML_OPEN_CLOSE_TAG_RE = new RegExp('^(?:' + open_tag + '|' + close_tag + ')');

module.exports.HTML_TAG_RE = HTML_TAG_RE;
module.exports.HTML_OPEN_CLOSE_TAG_RE = HTML_OPEN_CLOSE_TAG_RE;
|
||||
317
lib/markdown-it/lib/common/utils.js
Normal file
317
lib/markdown-it/lib/common/utils.js
Normal file
@@ -0,0 +1,317 @@
|
||||
// Utilities
|
||||
//
|
||||
'use strict';
|
||||
|
||||
|
||||
function _class(obj) { return Object.prototype.toString.call(obj); }
|
||||
|
||||
function isString(obj) { return _class(obj) === '[object String]'; }
|
||||
|
||||
var _hasOwnProperty = Object.prototype.hasOwnProperty;
|
||||
|
||||
function has(object, key) {
|
||||
return _hasOwnProperty.call(object, key);
|
||||
}
|
||||
|
||||
// Merge objects
//
// Copies the own enumerable properties of every `source` argument into
// `obj` (later sources win on key collisions) and returns `obj`.
// Falsy sources are skipped; a truthy non-object source is a programmer
// error and raises TypeError.
function assign(obj /*from1, from2, from3, ...*/) {
  var sources = Array.prototype.slice.call(arguments, 1);

  sources.forEach(function (source) {
    if (!source) { return; }

    if (typeof source !== 'object') {
      // Fixed: message previously read `source + 'must be object'`,
      // producing e.g. "42must be object" (missing space).
      throw new TypeError(source + ' must be object');
    }

    Object.keys(source).forEach(function (key) {
      obj[key] = source[key];
    });
  });

  return obj;
}
|
||||
|
||||
// Remove the element at `pos` from `src` and splice `newElements` into its
// place, returning a new array. The input array is never mutated.
// Useful for some operations with tokens.
function arrayReplaceAt(src, pos, newElements) {
  var head = src.slice(0, pos);
  var tail = src.slice(pos + 1);
  return head.concat(newElements, tail);
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Decide whether code point `c` may be produced by a numeric character
// reference. Rejects surrogate halves, Unicode noncharacters, C0/C1
// control codes (except \t, \n, \f, \r), and anything past U+10FFFF.
function isValidEntityCode(c) {
  /*eslint no-bitwise:0*/
  var low = c & 0xFFFF;

  var brokenSequence = c >= 0xD800 && c <= 0xDFFF;
  var neverUsed = (c >= 0xFDD0 && c <= 0xFDEF) ||
                  low === 0xFFFF || low === 0xFFFE;
  var controlCode = (c >= 0x00 && c <= 0x08) ||
                    c === 0x0B ||
                    (c >= 0x0E && c <= 0x1F) ||
                    (c >= 0x7F && c <= 0x9F);
  var outOfRange = c > 0x10FFFF;

  return !(brokenSequence || neverUsed || controlCode || outOfRange);
}
|
||||
|
||||
// Convert a Unicode code point into a JS (UTF-16) string.
// Code points above U+FFFF are emitted as a surrogate pair.
function fromCodePoint(c) {
  /*eslint no-bitwise:0*/
  if (c <= 0xffff) {
    return String.fromCharCode(c);
  }

  var offset = c - 0x10000;
  var surrogate1 = 0xd800 + (offset >> 10);
  var surrogate2 = 0xdc00 + (offset & 0x3ff);

  return String.fromCharCode(surrogate1, surrogate2);
}
|
||||
|
||||
|
||||
// Markdown backslash-escape: a backslash followed by one ASCII punctuation
// character; `$1` captures the escaped character.
var UNESCAPE_MD_RE = /\\([!"#$%&'()*+,\-.\/:;<=>?@[\\\]^_`{|}~])/g;
// Named or numeric HTML entity body, e.g. `&amp;` / `&#x26;` (length capped).
var ENTITY_RE = /&([a-z#][a-z0-9]{1,31});/gi;
// Union of the two above: one pass handles both kinds of escape.
var UNESCAPE_ALL_RE = new RegExp(UNESCAPE_MD_RE.source + '|' + ENTITY_RE.source, 'gi');

// Numeric entity payload: `#` + decimal digits, or `#x` + hex digits.
var DIGITAL_ENTITY_TEST_RE = /^#((?:x[a-f0-9]{1,8}|[0-9]{1,8}))/i;

var entities = require('./entities');

// Resolve one entity body (`name`, without the `&`/`;`) to its replacement
// text. Unknown names and invalid code points return `match` unchanged.
function replaceEntityPattern(match, name) {
  var code = 0;

  // Known named entity (e.g. `amp`, `lt`).
  if (has(entities, name)) {
    return entities[name];
  }

  // Numeric entity: decode to a code point and validate it.
  if (name.charCodeAt(0) === 0x23/* # */ && DIGITAL_ENTITY_TEST_RE.test(name)) {
    code = name[1].toLowerCase() === 'x' ?
      parseInt(name.slice(2), 16) : parseInt(name.slice(1), 10);

    if (isValidEntityCode(code)) {
      return fromCodePoint(code);
    }
  }

  return match;
}

/*function replaceEntities(str) {
  if (str.indexOf('&') < 0) { return str; }

  return str.replace(ENTITY_RE, replaceEntityPattern);
}*/

// Strip markdown backslash-escapes only; HTML entities are left alone.
function unescapeMd(str) {
  if (str.indexOf('\\') < 0) { return str; }
  return str.replace(UNESCAPE_MD_RE, '$1');
}

// Strip backslash-escapes AND decode HTML entities in a single pass.
function unescapeAll(str) {
  // Fast path: nothing that could need unescaping.
  if (str.indexOf('\\') < 0 && str.indexOf('&') < 0) { return str; }

  return str.replace(UNESCAPE_ALL_RE, function (match, escaped, entity) {
    // `escaped` is set when the backslash alternative matched.
    if (escaped) { return escaped; }
    return replaceEntityPattern(match, entity);
  });
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
var HTML_ESCAPE_TEST_RE = /[&<>"]/;
var HTML_ESCAPE_REPLACE_RE = /[&<>"]/g;

// Map of HTML-unsafe characters to their entity replacements.
// Fixed: the entity strings had been collapsed to the literal characters
// themselves (an identity map), which made escapeHtml a no-op.
var HTML_REPLACEMENTS = {
  '&': '&amp;',
  '<': '&lt;',
  '>': '&gt;',
  '"': '&quot;'
};

// Replacer callback for escapeHtml below.
function replaceUnsafeChar(ch) {
  return HTML_REPLACEMENTS[ch];
}

// Escape characters that are unsafe to emit into HTML text or attribute
// values. Returns `str` unchanged when there is nothing to escape.
function escapeHtml(str) {
  if (HTML_ESCAPE_TEST_RE.test(str)) {
    return str.replace(HTML_ESCAPE_REPLACE_RE, replaceUnsafeChar);
  }
  return str;
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Characters that carry special meaning inside a regular expression.
var REGEXP_ESCAPE_RE = /[.?*+^$[\]\\(){}|-]/g;

// Backslash-escape `str` so it can be embedded verbatim into a RegExp
// source string.
function escapeRE(str) {
  return str.replace(REGEXP_ESCAPE_RE, '\\$&');
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// True only for the two characters treated as plain "space" here:
// horizontal tab (0x09) and space (0x20).
function isSpace(code) {
  return code === 0x09 || code === 0x20;
}
|
||||
|
||||
// Zs (unicode class) || [\t\f\v\r\n]
function isWhiteSpace(code) {
  // U+2000..U+200A: the contiguous run of fixed-width Zs spaces.
  if (code >= 0x2000 && code <= 0x200A) { return true; }
  // Remaining singletons: ASCII whitespace plus scattered Zs members.
  return code === 0x09 /* \t */ ||
         code === 0x0A /* \n */ ||
         code === 0x0B /* \v */ ||
         code === 0x0C /* \f */ ||
         code === 0x0D /* \r */ ||
         code === 0x20 ||
         code === 0xA0 ||
         code === 0x1680 ||
         code === 0x202F ||
         code === 0x205F ||
         code === 0x3000;
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/*eslint-disable max-len*/
// Regexp covering the Unicode "P" (punctuation) general category.
var UNICODE_PUNCT_RE = require('uc.micro/categories/P/regex');

// Currently without astral characters support.
// Note: `ch` is a one-character string, not a char code.
function isPunctChar(ch) {
  return UNICODE_PUNCT_RE.test(ch);
}
|
||||
|
||||
|
||||
// Markdown ASCII punctuation characters.
//
// !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \, ], ^, _, `, {, |, }, or ~
// http://spec.commonmark.org/0.15/#ascii-punctuation-character
//
// Don't confuse with unicode punctuation !!! It lacks some chars in ascii range.
//
// The set above forms exactly four contiguous ASCII runs, so four range
// checks replace the per-character switch.
function isMdAsciiPunct(ch) {
  return (ch >= 0x21 /* ! */ && ch <= 0x2F /* / */) ||
         (ch >= 0x3A /* : */ && ch <= 0x40 /* @ */) ||
         (ch >= 0x5B /* [ */ && ch <= 0x60 /* ` */) ||
         (ch >= 0x7B /* { */ && ch <= 0x7E /* ~ */);
}
|
||||
|
||||
// Helper to unify [reference labels].
//
// Collapses whitespace and case-folds `str` so that reference labels that
// differ only in spacing or letter case compare equal.
function normalizeReference(str) {
  // Trim and collapse whitespace
  //
  str = str.trim().replace(/\s+/g, ' ');

  // In node v10 'ẞ'.toLowerCase() === 'Ṿ', which is presumed to be a bug
  // fixed in v12 (couldn't find any details).
  //
  // So treat this one as a special case
  // (remove this when node v10 is no longer supported).
  //
  if ('ẞ'.toLowerCase() === 'Ṿ') {
    str = str.replace(/ẞ/g, 'ß');
  }

  // .toLowerCase().toUpperCase() should get rid of all differences
  // between letter variants.
  //
  // Simple .toLowerCase() doesn't normalize 125 code points correctly,
  // and .toUpperCase doesn't normalize 6 of them (list of exceptions:
  // İ, ϴ, ẞ, Ω, K, Å - those are already uppercased, but have differently
  // uppercased versions).
  //
  // Here's an example showing how it happens. Lets take greek letter omega:
  // uppercase U+0398 (Θ), U+03f4 (ϴ) and lowercase U+03b8 (θ), U+03d1 (ϑ)
  //
  // Unicode entries:
  // 0398;GREEK CAPITAL LETTER THETA;Lu;0;L;;;;;N;;;;03B8;
  // 03B8;GREEK SMALL LETTER THETA;Ll;0;L;;;;;N;;;0398;;0398
  // 03D1;GREEK THETA SYMBOL;Ll;0;L;<compat> 03B8;;;;N;GREEK SMALL LETTER SCRIPT THETA;;0398;;0398
  // 03F4;GREEK CAPITAL THETA SYMBOL;Lu;0;L;<compat> 0398;;;;N;;;;03B8;
  //
  // Case-insensitive comparison should treat all of them as equivalent.
  //
  // But .toLowerCase() doesn't change ϑ (it's already lowercase),
  // and .toUpperCase() doesn't change ϴ (already uppercase).
  //
  // Applying first lower then upper case normalizes any character:
  // '\u0398\u03f4\u03b8\u03d1'.toLowerCase().toUpperCase() === '\u0398\u0398\u0398\u0398'
  //
  // Note: this is equivalent to unicode case folding; unicode normalization
  // is a different step that is not required here.
  //
  // Final result should be uppercased, because it's later stored in an object
  // (this avoid a conflict with Object.prototype members,
  // most notably, `__proto__`)
  //
  return str.toLowerCase().toUpperCase();
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Re-export libraries commonly used in both markdown-it and its plugins,
|
||||
// so plugins won't have to depend on them explicitly, which reduces their
|
||||
// bundled size (e.g. a browser build).
|
||||
//
|
||||
exports.lib = {};
|
||||
exports.lib.mdurl = require('mdurl');
|
||||
exports.lib.ucmicro = require('uc.micro');
|
||||
|
||||
exports.assign = assign;
|
||||
exports.isString = isString;
|
||||
exports.has = has;
|
||||
exports.unescapeMd = unescapeMd;
|
||||
exports.unescapeAll = unescapeAll;
|
||||
exports.isValidEntityCode = isValidEntityCode;
|
||||
exports.fromCodePoint = fromCodePoint;
|
||||
// exports.replaceEntities = replaceEntities;
|
||||
exports.escapeHtml = escapeHtml;
|
||||
exports.arrayReplaceAt = arrayReplaceAt;
|
||||
exports.isSpace = isSpace;
|
||||
exports.isWhiteSpace = isWhiteSpace;
|
||||
exports.isMdAsciiPunct = isMdAsciiPunct;
|
||||
exports.isPunctChar = isPunctChar;
|
||||
exports.escapeRE = escapeRE;
|
||||
exports.normalizeReference = normalizeReference;
|
||||
7
lib/markdown-it/lib/helpers/index.js
Normal file
7
lib/markdown-it/lib/helpers/index.js
Normal file
@@ -0,0 +1,7 @@
|
||||
// Just a shortcut for bulk export
|
||||
'use strict';
|
||||
|
||||
|
||||
exports.parseLinkLabel = require('./parse_link_label');
|
||||
exports.parseLinkDestination = require('./parse_link_destination');
|
||||
exports.parseLinkTitle = require('./parse_link_title');
|
||||
79
lib/markdown-it/lib/helpers/parse_link_destination.js
Normal file
79
lib/markdown-it/lib/helpers/parse_link_destination.js
Normal file
@@ -0,0 +1,79 @@
|
||||
// Parse link destination
//
'use strict';


var unescapeAll = require('../common/utils').unescapeAll;


// Parse a link destination beginning at `str[pos]`, scanning no further
// than `max`. Handles both the pointy-bracket form `<...>` and the bare
// form with balanced parentheses.
//
// Returns { ok, pos, lines, str }: `ok` is false on failure; on success
// `pos` is the offset just past the destination and `str` holds the
// unescaped destination text.
module.exports = function parseLinkDestination(str, pos, max) {
  var code, level,
      lines = 0,
      start = pos,
      result = {
        ok: false,
        pos: 0,
        lines: 0,
        str: ''
      };

  // Pointy-bracket destination `<...>`: no raw newlines allowed inside.
  if (str.charCodeAt(pos) === 0x3C /* < */) {
    pos++;
    while (pos < max) {
      code = str.charCodeAt(pos);
      if (code === 0x0A /* \n */) { return result; }
      if (code === 0x3E /* > */) {
        result.pos = pos + 1;
        result.str = unescapeAll(str.slice(start + 1, pos));
        result.ok = true;
        return result;
      }
      // Backslash escapes the next character.
      if (code === 0x5C /* \ */ && pos + 1 < max) {
        pos += 2;
        continue;
      }

      pos++;
    }

    // no closing '>'
    return result;
  }

  // this should be ... } else { ... branch
  // (the early return above makes everything below the bare-form path)

  level = 0;
  while (pos < max) {
    code = str.charCodeAt(pos);

    // An unescaped space terminates the destination.
    if (code === 0x20) { break; }

    // ascii control characters
    if (code < 0x20 || code === 0x7F) { break; }

    // Backslash escapes the next character.
    if (code === 0x5C /* \ */ && pos + 1 < max) {
      pos += 2;
      continue;
    }

    // Track parenthesis nesting; an unmatched ')' ends the destination.
    if (code === 0x28 /* ( */) {
      level++;
    }

    if (code === 0x29 /* ) */) {
      if (level === 0) { break; }
      level--;
    }

    pos++;
  }

  // Empty destination or unbalanced '(' — fail.
  if (start === pos) { return result; }
  if (level !== 0) { return result; }

  result.str = unescapeAll(str.slice(start, pos));
  result.lines = lines;
  result.pos = pos;
  result.ok = true;
  return result;
};
|
||||
48
lib/markdown-it/lib/helpers/parse_link_label.js
Normal file
48
lib/markdown-it/lib/helpers/parse_link_label.js
Normal file
@@ -0,0 +1,48 @@
|
||||
// Parse link label
//
// this function assumes that first character ("[") already matches;
// returns the end of the label
//
'use strict';

// `state` is an inline-parser state; `start` points at the opening `[`.
// When `disableNested` is true, encountering a nested link-label `[`
// aborts with -1. On success returns the offset of the matching `]`;
// `state.pos` is restored before returning in every case.
module.exports = function parseLinkLabel(state, start, disableNested) {
  var level, found, marker, prevPos,
      labelEnd = -1,
      max = state.posMax,
      oldPos = state.pos;

  state.pos = start + 1;
  level = 1;

  while (state.pos < max) {
    marker = state.src.charCodeAt(state.pos);
    if (marker === 0x5D /* ] */) {
      level--;
      if (level === 0) {
        found = true;
        break;
      }
    }

    // Let the inline tokenizer skip over whole tokens (code spans etc.),
    // so brackets inside them don't affect nesting.
    prevPos = state.pos;
    state.md.inline.skipToken(state);
    if (marker === 0x5B /* [ */) {
      if (prevPos === state.pos - 1) {
        // increase level if we find text `[`, which is not a part of any token
        level++;
      } else if (disableNested) {
        state.pos = oldPos;
        return -1;
      }
    }
  }

  if (found) {
    labelEnd = state.pos;
  }

  // restore old state
  state.pos = oldPos;

  return labelEnd;
};
|
||||
53
lib/markdown-it/lib/helpers/parse_link_title.js
Normal file
53
lib/markdown-it/lib/helpers/parse_link_title.js
Normal file
@@ -0,0 +1,53 @@
|
||||
// Parse link title
|
||||
//
|
||||
'use strict';
|
||||
|
||||
|
||||
var unescapeAll = require('../common/utils').unescapeAll;
|
||||
|
||||
|
||||
/**
 * parseLinkTitle(str, pos, max) -> Object
 * - str (String): source string
 * - pos (Number): offset of the expected opening quote/paren
 * - max (Number): upper bound (exclusive) for scanning
 *
 * Parses a link title delimited by `"..."`, `'...'` or `(...)`.
 * Returns `{ ok, pos, lines, str }` where `pos` is the offset just past the
 * closing marker, `lines` counts newlines inside the title, and `str` is the
 * unescaped title text. `ok` stays false on any parse failure.
 */
module.exports = function parseLinkTitle(str, pos, max) {
  var code,
      marker,
      lines = 0,       // number of '\n' seen inside the title
      start = pos,     // offset of the opening marker
      result = {
        ok: false,
        pos: 0,
        lines: 0,
        str: ''
      };

  if (pos >= max) { return result; }

  marker = str.charCodeAt(pos);

  // Title must start with a double quote, single quote, or open paren.
  if (marker !== 0x22 /* " */ && marker !== 0x27 /* ' */ && marker !== 0x28 /* ( */) { return result; }

  pos++;

  // if opening marker is "(", switch it to closing marker ")"
  if (marker === 0x28) { marker = 0x29; }

  while (pos < max) {
    code = str.charCodeAt(pos);
    if (code === marker) {
      // Found the closing marker — success.
      result.pos = pos + 1;
      result.lines = lines;
      result.str = unescapeAll(str.slice(start + 1, pos));
      result.ok = true;
      return result;
    } else if (code === 0x0A) {
      lines++;
    } else if (code === 0x5C /* \ */ && pos + 1 < max) {
      // Backslash escapes the next character (still count escaped newlines).
      pos++;
      if (str.charCodeAt(pos) === 0x0A) {
        lines++;
      }
    }

    pos++;
  }

  // Ran off the end without a closing marker.
  return result;
};
|
||||
581
lib/markdown-it/lib/index.js
Normal file
581
lib/markdown-it/lib/index.js
Normal file
@@ -0,0 +1,581 @@
|
||||
// Main parser class
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
var utils = require('./common/utils');
|
||||
var helpers = require('./helpers');
|
||||
var Renderer = require('./renderer');
|
||||
var ParserCore = require('./parser_core');
|
||||
var ParserBlock = require('./parser_block');
|
||||
var ParserInline = require('./parser_inline');
|
||||
var LinkifyIt = require('linkify-it');
|
||||
var mdurl = require('mdurl');
|
||||
var punycode = require('punycode');
|
||||
|
||||
|
||||
// Named option presets, addressable by the `presetName` constructor argument.
var config = {
  'default': require('./presets/default'),
  zero: require('./presets/zero'),
  commonmark: require('./presets/commonmark')
};
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// This validator can prohibit more than really needed to prevent XSS. It's a
|
||||
// tradeoff to keep code simple and to be secure by default.
|
||||
//
|
||||
// If you need different setup - override validator method as you wish. Or
|
||||
// replace it with dummy function and use external sanitizer.
|
||||
//
|
||||
|
||||
var BAD_PROTO_RE = /^(vbscript|javascript|file|data):/;
var GOOD_DATA_RE = /^data:image\/(gif|png|jpeg|webp);/;

/**
 * validateLink(url) -> Boolean
 *
 * Default link validator. Rejects `vbscript:`, `javascript:`, `file:` and
 * `data:` URLs, except `data:` URLs carrying embedded gif/png/jpeg/webp
 * images. Deliberately stricter than strictly necessary — a tradeoff to
 * keep the code simple and secure by default.
 */
function validateLink(url) {
  // url should be normalized at this point, and existing entities are decoded
  var str = url.trim().toLowerCase();

  // Valid unless the protocol is blacklisted; `data:` is re-allowed only
  // for the whitelisted embedded image types.
  return !BAD_PROTO_RE.test(str) || GOOD_DATA_RE.test(str);
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
||||
var RECODE_HOSTNAME_FOR = [ 'http:', 'https:', 'mailto:' ];

// Encode a link url into machine-readable form (url-encoding + punycode
// for the hostname).
function normalizeLink(url) {
  var result = mdurl.parse(url, true);
  var proto = result.protocol;

  // Only recode hostnames in urls like `http://host/`, `https://host/`,
  // `mailto:user@host`, `//host/`. Unknown schemas are left untouched,
  // because we could easily recode something we shouldn't
  // (e.g. `skype:name` treated as `skype:host`).
  if (result.hostname && (!proto || RECODE_HOSTNAME_FOR.indexOf(proto) >= 0)) {
    try {
      result.hostname = punycode.toASCII(result.hostname);
    } catch (er) { /**/ }
  }

  return mdurl.encode(mdurl.format(result));
}
|
||||
|
||||
// Decode a link url into human-readable form (counterpart of normalizeLink).
function normalizeLinkText(url) {
  var result = mdurl.parse(url, true);
  var proto = result.protocol;

  // Only recode hostnames in urls like `http://host/`, `https://host/`,
  // `mailto:user@host`, `//host/`. Unknown schemas are left untouched,
  // because we could easily recode something we shouldn't
  // (e.g. `skype:name` treated as `skype:host`).
  if (result.hostname && (!proto || RECODE_HOSTNAME_FOR.indexOf(proto) >= 0)) {
    try {
      result.hostname = punycode.toUnicode(result.hostname);
    } catch (er) { /**/ }
  }

  return mdurl.decode(mdurl.format(result));
}
|
||||
|
||||
|
||||
/**
|
||||
* class MarkdownIt
|
||||
*
|
||||
* Main parser/renderer class.
|
||||
*
|
||||
* ##### Usage
|
||||
*
|
||||
* ```javascript
|
||||
* // node.js, "classic" way:
|
||||
* var MarkdownIt = require('markdown-it'),
|
||||
* md = new MarkdownIt();
|
||||
* var result = md.render('# markdown-it rulezz!');
|
||||
*
|
||||
* // node.js, the same, but with sugar:
|
||||
* var md = require('markdown-it')();
|
||||
* var result = md.render('# markdown-it rulezz!');
|
||||
*
|
||||
* // browser without AMD, added to "window" on script load
|
||||
* // Note, there are no dash.
|
||||
* var md = window.markdownit();
|
||||
* var result = md.render('# markdown-it rulezz!');
|
||||
* ```
|
||||
*
|
||||
* Single line rendering, without paragraph wrap:
|
||||
*
|
||||
* ```javascript
|
||||
* var md = require('markdown-it')();
|
||||
* var result = md.renderInline('__markdown-it__ rulezz!');
|
||||
* ```
|
||||
**/
|
||||
|
||||
/**
|
||||
* new MarkdownIt([presetName, options])
|
||||
* - presetName (String): optional, `commonmark` / `zero`
|
||||
* - options (Object)
|
||||
*
|
||||
 * Creates parser instance with given config. Can be called without `new`.
|
||||
*
|
||||
* ##### presetName
|
||||
*
|
||||
* MarkdownIt provides named presets as a convenience to quickly
|
||||
* enable/disable active syntax rules and options for common use cases.
|
||||
*
|
||||
* - ["commonmark"](https://github.com/markdown-it/markdown-it/blob/master/lib/presets/commonmark.js) -
|
||||
* configures parser to strict [CommonMark](http://commonmark.org/) mode.
|
||||
* - [default](https://github.com/markdown-it/markdown-it/blob/master/lib/presets/default.js) -
|
||||
* similar to GFM, used when no preset name given. Enables all available rules,
|
||||
* but still without html, typographer & autolinker.
|
||||
* - ["zero"](https://github.com/markdown-it/markdown-it/blob/master/lib/presets/zero.js) -
|
||||
* all rules disabled. Useful to quickly setup your config via `.enable()`.
|
||||
* For example, when you need only `bold` and `italic` markup and nothing else.
|
||||
*
|
||||
* ##### options:
|
||||
*
|
||||
* - __html__ - `false`. Set `true` to enable HTML tags in source. Be careful!
|
||||
* That's not safe! You may need external sanitizer to protect output from XSS.
|
||||
* It's better to extend features via plugins, instead of enabling HTML.
|
||||
* - __xhtmlOut__ - `false`. Set `true` to add '/' when closing single tags
|
||||
* (`<br />`). This is needed only for full CommonMark compatibility. In real
|
||||
* world you will need HTML output.
|
||||
* - __breaks__ - `false`. Set `true` to convert `\n` in paragraphs into `<br>`.
|
||||
* - __langPrefix__ - `language-`. CSS language class prefix for fenced blocks.
|
||||
* Can be useful for external highlighters.
|
||||
* - __linkify__ - `false`. Set `true` to autoconvert URL-like text to links.
|
||||
* - __typographer__ - `false`. Set `true` to enable [some language-neutral
|
||||
* replacement](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/replacements.js) +
|
||||
* quotes beautification (smartquotes).
|
||||
* - __quotes__ - `“”‘’`, String or Array. Double + single quotes replacement
|
||||
* pairs, when typographer enabled and smartquotes on. For example, you can
|
||||
* use `'«»„“'` for Russian, `'„“‚‘'` for German, and
|
||||
* `['«\xA0', '\xA0»', '‹\xA0', '\xA0›']` for French (including nbsp).
|
||||
* - __highlight__ - `null`. Highlighter function for fenced code blocks.
|
||||
* Highlighter `function (str, lang)` should return escaped HTML. It can also
|
||||
* return empty string if the source was not changed and should be escaped
|
||||
 * externally. If result starts with <pre... internal wrapper is skipped.
|
||||
*
|
||||
* ##### Example
|
||||
*
|
||||
* ```javascript
|
||||
* // commonmark mode
|
||||
* var md = require('markdown-it')('commonmark');
|
||||
*
|
||||
* // default mode
|
||||
* var md = require('markdown-it')();
|
||||
*
|
||||
* // enable everything
|
||||
* var md = require('markdown-it')({
|
||||
* html: true,
|
||||
* linkify: true,
|
||||
* typographer: true
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* ##### Syntax highlighting
|
||||
*
|
||||
* ```js
|
||||
* var hljs = require('highlight.js') // https://highlightjs.org/
|
||||
*
|
||||
* var md = require('markdown-it')({
|
||||
* highlight: function (str, lang) {
|
||||
* if (lang && hljs.getLanguage(lang)) {
|
||||
* try {
|
||||
* return hljs.highlight(lang, str, true).value;
|
||||
* } catch (__) {}
|
||||
* }
|
||||
*
|
||||
* return ''; // use external default escaping
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
* Or with full wrapper override (if you need assign class to `<pre>`):
|
||||
*
|
||||
* ```javascript
|
||||
* var hljs = require('highlight.js') // https://highlightjs.org/
|
||||
*
|
||||
* // Actual default values
|
||||
* var md = require('markdown-it')({
|
||||
* highlight: function (str, lang) {
|
||||
* if (lang && hljs.getLanguage(lang)) {
|
||||
* try {
|
||||
* return '<pre class="hljs"><code>' +
|
||||
* hljs.highlight(lang, str, true).value +
|
||||
* '</code></pre>';
|
||||
* } catch (__) {}
|
||||
* }
|
||||
*
|
||||
* return '<pre class="hljs"><code>' + md.utils.escapeHtml(str) + '</code></pre>';
|
||||
* }
|
||||
* });
|
||||
* ```
|
||||
*
|
||||
**/
|
||||
function MarkdownIt(presetName, options) {
  // Allow calling without `new`.
  if (!(this instanceof MarkdownIt)) {
    return new MarkdownIt(presetName, options);
  }

  // Single-argument form: `MarkdownIt({ ...options })` — the object is the
  // options bag and the preset falls back to 'default'.
  if (!options) {
    if (!utils.isString(presetName)) {
      options = presetName || {};
      presetName = 'default';
    }
  }

  /**
   * MarkdownIt#inline -> ParserInline
   *
   * Instance of [[ParserInline]]. You may need it to add new rules when
   * writing plugins. For simple rules control use [[MarkdownIt.disable]] and
   * [[MarkdownIt.enable]].
   **/
  this.inline = new ParserInline();

  /**
   * MarkdownIt#block -> ParserBlock
   *
   * Instance of [[ParserBlock]]. You may need it to add new rules when
   * writing plugins. For simple rules control use [[MarkdownIt.disable]] and
   * [[MarkdownIt.enable]].
   **/
  this.block = new ParserBlock();

  /**
   * MarkdownIt#core -> Core
   *
   * Instance of [[Core]] chain executor. You may need it to add new rules when
   * writing plugins. For simple rules control use [[MarkdownIt.disable]] and
   * [[MarkdownIt.enable]].
   **/
  this.core = new ParserCore();

  /**
   * MarkdownIt#renderer -> Renderer
   *
   * Instance of [[Renderer]]. Use it to modify output look. Or to add rendering
   * rules for new token types, generated by plugins.
   *
   * ##### Example
   *
   * ```javascript
   * var md = require('markdown-it')();
   *
   * function myToken(tokens, idx, options, env, self) {
   *   //...
   *   return result;
   * };
   *
   * md.renderer.rules['my_token'] = myToken
   * ```
   *
   * See [[Renderer]] docs and [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js).
   **/
  this.renderer = new Renderer();

  /**
   * MarkdownIt#linkify -> LinkifyIt
   *
   * [linkify-it](https://github.com/markdown-it/linkify-it) instance.
   * Used by [linkify](https://github.com/markdown-it/markdown-it/blob/master/lib/rules_core/linkify.js)
   * rule.
   **/
  this.linkify = new LinkifyIt();

  /**
   * MarkdownIt#validateLink(url) -> Boolean
   *
   * Link validation function. CommonMark allows too much in links. By default
   * we disable `javascript:`, `vbscript:`, `file:` schemas, and almost all `data:...` schemas
   * except some embedded image types.
   *
   * You can change this behaviour:
   *
   * ```javascript
   * var md = require('markdown-it')();
   * // enable everything
   * md.validateLink = function () { return true; }
   * ```
   **/
  this.validateLink = validateLink;

  /**
   * MarkdownIt#normalizeLink(url) -> String
   *
   * Function used to encode link url to a machine-readable format,
   * which includes url-encoding, punycode, etc.
   **/
  this.normalizeLink = normalizeLink;

  /**
   * MarkdownIt#normalizeLinkText(url) -> String
   *
   * Function used to decode link url to a human-readable format.
   **/
  this.normalizeLinkText = normalizeLinkText;


  // Expose utils & helpers for easy access from plugins

  /**
   * MarkdownIt#utils -> utils
   *
   * Assorted utility functions, useful to write plugins. See details
   * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/common/utils.js).
   **/
  this.utils = utils;

  /**
   * MarkdownIt#helpers -> helpers
   *
   * Link components parser functions, useful to write plugins. See details
   * [here](https://github.com/markdown-it/markdown-it/blob/master/lib/helpers).
   **/
  this.helpers = utils.assign({}, helpers);


  // Apply the preset first, then layer user options on top.
  this.options = {};
  this.configure(presetName);

  if (options) { this.set(options); }
}
|
||||
|
||||
|
||||
/** chainable
|
||||
* MarkdownIt.set(options)
|
||||
*
|
||||
* Set parser options (in the same format as in constructor). Probably, you
|
||||
* will never need it, but you can change options after constructor call.
|
||||
*
|
||||
* ##### Example
|
||||
*
|
||||
* ```javascript
|
||||
* var md = require('markdown-it')()
|
||||
* .set({ html: true, breaks: true })
|
||||
 *            .set({ typographer: true });
|
||||
* ```
|
||||
*
|
||||
* __Note:__ To achieve the best possible performance, don't modify a
|
||||
* `markdown-it` instance options on the fly. If you need multiple configurations
|
||||
* it's best to create multiple instances and initialize each with separate
|
||||
* config.
|
||||
**/
|
||||
MarkdownIt.prototype.set = function (options) {
  // Shallow-merge `options` into the current option set; chainable.
  utils.assign(this.options, options);
  return this;
};
|
||||
|
||||
|
||||
/** chainable, internal
|
||||
* MarkdownIt.configure(presets)
|
||||
*
|
||||
 * Batch load of all options and component settings. This is an internal method,
|
||||
 * and you probably will not need it. But if you wish — see available presets
|
||||
* and data structure [here](https://github.com/markdown-it/markdown-it/tree/master/lib/presets)
|
||||
*
|
||||
* We strongly recommend to use presets instead of direct config loads. That
|
||||
* will give better compatibility with next versions.
|
||||
**/
|
||||
MarkdownIt.prototype.configure = function (presets) {
  var self = this, presetName;

  // A string argument names one of the built-in presets in `config`.
  if (utils.isString(presets)) {
    presetName = presets;
    presets = config[presetName];
    if (!presets) { throw new Error('Wrong `markdown-it` preset "' + presetName + '", check name'); }
  }

  if (!presets) { throw new Error('Wrong `markdown-it` preset, can\'t be empty'); }

  if (presets.options) { self.set(presets.options); }

  // Each component (core/block/inline) may restrict its active rules;
  // `rules2` targets the inline post-processing ruler.
  if (presets.components) {
    Object.keys(presets.components).forEach(function (name) {
      if (presets.components[name].rules) {
        self[name].ruler.enableOnly(presets.components[name].rules);
      }
      if (presets.components[name].rules2) {
        self[name].ruler2.enableOnly(presets.components[name].rules2);
      }
    });
  }
  return this;
};
|
||||
|
||||
|
||||
/** chainable
|
||||
* MarkdownIt.enable(list, ignoreInvalid)
|
||||
* - list (String|Array): rule name or list of rule names to enable
|
||||
* - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
|
||||
*
|
||||
* Enable list or rules. It will automatically find appropriate components,
|
||||
* containing rules with given names. If rule not found, and `ignoreInvalid`
|
||||
* not set - throws exception.
|
||||
*
|
||||
* ##### Example
|
||||
*
|
||||
* ```javascript
|
||||
* var md = require('markdown-it')()
|
||||
* .enable(['sub', 'sup'])
|
||||
* .disable('smartquotes');
|
||||
* ```
|
||||
**/
|
||||
MarkdownIt.prototype.enable = function (list, ignoreInvalid) {
  var result = [];

  if (!Array.isArray(list)) { list = [ list ]; }

  // Collect the rule names actually enabled across every chain.
  [ 'core', 'block', 'inline' ].forEach(function (chain) {
    result = result.concat(this[chain].ruler.enable(list, true));
  }, this);

  // Inline post-processing rules live in a second ruler.
  result = result.concat(this.inline.ruler2.enable(list, true));

  // Any requested name not found in some chain is reported (unless ignored).
  var missed = list.filter(function (name) { return result.indexOf(name) < 0; });

  if (missed.length && !ignoreInvalid) {
    throw new Error('MarkdownIt. Failed to enable unknown rule(s): ' + missed);
  }

  return this;
};
|
||||
|
||||
|
||||
/** chainable
|
||||
* MarkdownIt.disable(list, ignoreInvalid)
|
||||
* - list (String|Array): rule name or list of rule names to disable.
|
||||
* - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
|
||||
*
|
||||
* The same as [[MarkdownIt.enable]], but turn specified rules off.
|
||||
**/
|
||||
MarkdownIt.prototype.disable = function (list, ignoreInvalid) {
  var result = [];

  if (!Array.isArray(list)) { list = [ list ]; }

  // Collect the rule names actually disabled across every chain.
  [ 'core', 'block', 'inline' ].forEach(function (chain) {
    result = result.concat(this[chain].ruler.disable(list, true));
  }, this);

  // Inline post-processing rules live in a second ruler.
  result = result.concat(this.inline.ruler2.disable(list, true));

  // Any requested name not found in some chain is reported (unless ignored).
  var missed = list.filter(function (name) { return result.indexOf(name) < 0; });

  if (missed.length && !ignoreInvalid) {
    throw new Error('MarkdownIt. Failed to disable unknown rule(s): ' + missed);
  }
  return this;
};
|
||||
|
||||
|
||||
/** chainable
|
||||
* MarkdownIt.use(plugin, params)
|
||||
*
|
||||
* Load specified plugin with given params into current parser instance.
|
||||
* It's just a sugar to call `plugin(md, params)` with curring.
|
||||
*
|
||||
* ##### Example
|
||||
*
|
||||
* ```javascript
|
||||
* var iterator = require('markdown-it-for-inline');
|
||||
* var md = require('markdown-it')()
|
||||
* .use(iterator, 'foo_replace', 'text', function (tokens, idx) {
|
||||
* tokens[idx].content = tokens[idx].content.replace(/foo/g, 'bar');
|
||||
* });
|
||||
* ```
|
||||
**/
|
||||
// Chainable sugar for `plugin(md, ...params)`: loads the plugin into this
// parser instance with any extra arguments forwarded.
MarkdownIt.prototype.use = function (plugin /*, params, ... */) {
  var params = Array.prototype.slice.call(arguments, 1);
  plugin.apply(plugin, [ this ].concat(params));
  return this;
};
|
||||
|
||||
|
||||
/** internal
|
||||
* MarkdownIt.parse(src, env) -> Array
|
||||
* - src (String): source string
|
||||
* - env (Object): environment sandbox
|
||||
*
|
||||
* Parse input string and returns list of block tokens (special token type
|
||||
* "inline" will contain list of inline tokens). You should not call this
|
||||
* method directly, until you write custom renderer (for example, to produce
|
||||
* AST).
|
||||
*
|
||||
* `env` is used to pass data between "distributed" rules and return additional
|
||||
* metadata like reference info, needed for the renderer. It also can be used to
|
||||
* inject data in specific cases. Usually, you will be ok to pass `{}`,
|
||||
* and then pass updated object to renderer.
|
||||
**/
|
||||
MarkdownIt.prototype.parse = function (src, env) {
  // Guard early: everything downstream assumes string input.
  if (typeof src !== 'string') {
    throw new Error('Input data should be a String');
  }

  // Run the full core chain (normalize, block, inline, ...) and return the
  // resulting block token stream.
  var state = new this.core.State(src, this, env);

  this.core.process(state);

  return state.tokens;
};
|
||||
|
||||
|
||||
/**
|
||||
* MarkdownIt.render(src [, env]) -> String
|
||||
* - src (String): source string
|
||||
* - env (Object): environment sandbox
|
||||
*
|
||||
* Render markdown string into html. It does all magic for you :).
|
||||
*
|
||||
* `env` can be used to inject additional metadata (`{}` by default).
|
||||
* But you will not need it with high probability. See also comment
|
||||
* in [[MarkdownIt.parse]].
|
||||
**/
|
||||
// Render a markdown string into html: parse to tokens, then hand the token
// stream to the renderer. `env` (optional) passes metadata between rules.
MarkdownIt.prototype.render = function (src, env) {
  var environment = env || {};
  var tokens = this.parse(src, environment);

  return this.renderer.render(tokens, this.options, environment);
};
|
||||
|
||||
|
||||
/** internal
|
||||
* MarkdownIt.parseInline(src, env) -> Array
|
||||
* - src (String): source string
|
||||
* - env (Object): environment sandbox
|
||||
*
|
||||
* The same as [[MarkdownIt.parse]] but skip all block rules. It returns the
|
||||
* block tokens list with the single `inline` element, containing parsed inline
|
||||
* tokens in `children` property. Also updates `env` object.
|
||||
**/
|
||||
MarkdownIt.prototype.parseInline = function (src, env) {
  var state = new this.core.State(src, this, env);

  // inlineMode tells the core chain to skip block-level tokenization and
  // treat the whole input as a single inline run.
  state.inlineMode = true;
  this.core.process(state);

  return state.tokens;
};
|
||||
|
||||
|
||||
/**
|
||||
* MarkdownIt.renderInline(src [, env]) -> String
|
||||
* - src (String): source string
|
||||
* - env (Object): environment sandbox
|
||||
*
|
||||
* Similar to [[MarkdownIt.render]] but for single paragraph content. Result
|
||||
* will NOT be wrapped into `<p>` tags.
|
||||
**/
|
||||
// Like `render`, but for single-paragraph content: the result is NOT
// wrapped in `<p>` tags.
MarkdownIt.prototype.renderInline = function (src, env) {
  var environment = env || {};
  var tokens = this.parseInline(src, environment);

  return this.renderer.render(tokens, this.options, environment);
};
|
||||
|
||||
|
||||
module.exports = MarkdownIt;
|
||||
122
lib/markdown-it/lib/parser_block.js
Normal file
122
lib/markdown-it/lib/parser_block.js
Normal file
@@ -0,0 +1,122 @@
|
||||
/** internal
|
||||
* class ParserBlock
|
||||
*
|
||||
* Block-level tokenizer.
|
||||
**/
|
||||
'use strict';
|
||||
|
||||
|
||||
var Ruler = require('./ruler');
|
||||
|
||||
|
||||
// Ordered list of block-level rules; order matters — earlier rules get the
// first chance to consume a line.
var _rules = [
  // First 2 params - rule name & source. Secondary array - list of rules,
  // which can be terminated by this one.
  [ 'table',      require('./rules_block/table'),      [ 'paragraph', 'reference' ] ],
  [ 'code',       require('./rules_block/code') ],
  [ 'fence',      require('./rules_block/fence'),      [ 'paragraph', 'reference', 'blockquote', 'list' ] ],
  [ 'blockquote', require('./rules_block/blockquote'), [ 'paragraph', 'reference', 'blockquote', 'list' ] ],
  [ 'hr',         require('./rules_block/hr'),         [ 'paragraph', 'reference', 'blockquote', 'list' ] ],
  [ 'list',       require('./rules_block/list'),       [ 'paragraph', 'reference', 'blockquote' ] ],
  [ 'reference',  require('./rules_block/reference') ],
  [ 'heading',    require('./rules_block/heading'),    [ 'paragraph', 'reference', 'blockquote' ] ],
  [ 'lheading',   require('./rules_block/lheading') ],
  [ 'html_block', require('./rules_block/html_block'), [ 'paragraph', 'reference', 'blockquote' ] ],
  [ 'paragraph',  require('./rules_block/paragraph') ]
];
|
||||
|
||||
|
||||
/**
|
||||
* new ParserBlock()
|
||||
**/
|
||||
function ParserBlock() {
  /**
   * ParserBlock#ruler -> Ruler
   *
   * [[Ruler]] instance. Keep configuration of block rules.
   **/
  this.ruler = new Ruler();

  // Register every block rule, passing along its list of terminator rules.
  for (var i = 0; i < _rules.length; i++) {
    this.ruler.push(_rules[i][0], _rules[i][1], { alt: (_rules[i][2] || []).slice() });
  }
}
|
||||
|
||||
|
||||
// Generate tokens for input range
|
||||
//
|
||||
// Generate tokens for input range
//
// Walks lines [startLine, endLine) of `state`, trying each block rule in
// order on every non-empty line until one consumes it.
//
ParserBlock.prototype.tokenize = function (state, startLine, endLine) {
  var ok, i,
      rules = this.ruler.getRules(''),
      len = rules.length,
      line = startLine,
      hasEmptyLines = false,      // was the previous token followed by a blank line?
      maxNesting = state.md.options.maxNesting;

  while (line < endLine) {
    state.line = line = state.skipEmptyLines(line);
    if (line >= endLine) { break; }

    // Termination condition for nested calls.
    // Nested calls currently used for blockquotes & lists
    if (state.sCount[line] < state.blkIndent) { break; }

    // If nesting level exceeded - skip tail to the end. That's not ordinary
    // situation and we should not care about content.
    if (state.level >= maxNesting) {
      state.line = endLine;
      break;
    }

    // Try all possible rules.
    // On success, rule should:
    //
    // - update `state.line`
    // - update `state.tokens`
    // - return true

    for (i = 0; i < len; i++) {
      ok = rules[i](state, line, endLine, false);
      if (ok) { break; }
    }

    // set state.tight if we had an empty line before current tag
    // i.e. latest empty line should not count
    state.tight = !hasEmptyLines;

    // paragraph might "eat" one newline after it in nested lists
    if (state.isEmpty(state.line - 1)) {
      hasEmptyLines = true;
    }

    line = state.line;

    // Consume (and remember) one trailing empty line after the token.
    if (line < endLine && state.isEmpty(line)) {
      hasEmptyLines = true;
      line++;
      state.line = line;
    }
  }
};
|
||||
|
||||
|
||||
/**
|
||||
* ParserBlock.parse(str, md, env, outTokens)
|
||||
*
|
||||
* Process input string and push block tokens into `outTokens`
|
||||
**/
|
||||
/**
 * ParserBlock.parse(str, md, env, outTokens)
 *
 * Tokenize `src` and push the resulting block tokens into `outTokens`.
 * Empty input is a no-op.
 **/
ParserBlock.prototype.parse = function (src, md, env, outTokens) {
  if (!src) { return; }

  var state = new this.State(src, md, env, outTokens);
  this.tokenize(state, state.line, state.lineMax);
};
|
||||
|
||||
|
||||
ParserBlock.prototype.State = require('./rules_block/state_block');
|
||||
|
||||
|
||||
module.exports = ParserBlock;
|
||||
58
lib/markdown-it/lib/parser_core.js
Normal file
58
lib/markdown-it/lib/parser_core.js
Normal file
@@ -0,0 +1,58 @@
|
||||
/** internal
|
||||
* class Core
|
||||
*
|
||||
* Top-level rules executor. Glues block/inline parsers and does intermediate
|
||||
* transformations.
|
||||
**/
|
||||
'use strict';
|
||||
|
||||
|
||||
var Ruler = require('./ruler');
|
||||
|
||||
|
||||
// Ordered core chain: normalization, then block/inline tokenization, then
// optional text transforms (linkify, replacements, smartquotes).
var _rules = [
  [ 'normalize',      require('./rules_core/normalize')      ],
  [ 'block',          require('./rules_core/block')          ],
  [ 'inline',         require('./rules_core/inline')         ],
  [ 'linkify',        require('./rules_core/linkify')        ],
  [ 'replacements',   require('./rules_core/replacements')   ],
  [ 'smartquotes',    require('./rules_core/smartquotes')    ]
];
|
||||
|
||||
|
||||
/**
|
||||
* new Core()
|
||||
**/
|
||||
function Core() {
  /**
   * Core#ruler -> Ruler
   *
   * [[Ruler]] instance. Keep configuration of core rules.
   **/
  this.ruler = new Ruler();

  // Register every core rule in chain order.
  for (var i = 0; i < _rules.length; i++) {
    this.ruler.push(_rules[i][0], _rules[i][1]);
  }
}
|
||||
|
||||
|
||||
/**
|
||||
* Core.process(state)
|
||||
*
|
||||
* Executes core chain rules.
|
||||
**/
|
||||
/**
 * Core.process(state)
 *
 * Executes every enabled core chain rule against `state`, in order.
 **/
Core.prototype.process = function (state) {
  this.ruler.getRules('').forEach(function (rule) {
    rule(state);
  });
};
|
||||
|
||||
Core.prototype.State = require('./rules_core/state_core');
|
||||
|
||||
|
||||
module.exports = Core;
|
||||
177
lib/markdown-it/lib/parser_inline.js
Normal file
177
lib/markdown-it/lib/parser_inline.js
Normal file
@@ -0,0 +1,177 @@
|
||||
/** internal
|
||||
* class ParserInline
|
||||
*
|
||||
* Tokenizes paragraph content.
|
||||
**/
|
||||
'use strict';
|
||||
|
||||
|
||||
var Ruler = require('./ruler');
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
// Parser rules
|
||||
|
||||
// Inline tokenization rules, tried in order on each position.
var _rules = [
  [ 'text',            require('./rules_inline/text') ],
  [ 'newline',         require('./rules_inline/newline') ],
  [ 'escape',          require('./rules_inline/escape') ],
  [ 'backticks',       require('./rules_inline/backticks') ],
  [ 'strikethrough',   require('./rules_inline/strikethrough').tokenize ],
  [ 'emphasis',        require('./rules_inline/emphasis').tokenize ],
  [ 'link',            require('./rules_inline/link') ],
  [ 'image',           require('./rules_inline/image') ],
  [ 'autolink',        require('./rules_inline/autolink') ],
  [ 'html_inline',     require('./rules_inline/html_inline') ],
  [ 'entity',          require('./rules_inline/entity') ]
];

// Post-processing rules, run once over the whole token stream after
// tokenization (delimiter pairing, token merging).
var _rules2 = [
  [ 'balance_pairs',   require('./rules_inline/balance_pairs') ],
  [ 'strikethrough',   require('./rules_inline/strikethrough').postProcess ],
  [ 'emphasis',        require('./rules_inline/emphasis').postProcess ],
  [ 'text_collapse',   require('./rules_inline/text_collapse') ]
];
||||
|
||||
|
||||
/**
|
||||
* new ParserInline()
|
||||
**/
|
||||
function ParserInline() {
  var i;

  /**
   * ParserInline#ruler -> Ruler
   *
   * [[Ruler]] instance. Keep configuration of inline rules.
   **/
  this.ruler = new Ruler();

  for (i = 0; i < _rules.length; i++) {
    this.ruler.push(_rules[i][0], _rules[i][1]);
  }

  /**
   * ParserInline#ruler2 -> Ruler
   *
   * [[Ruler]] instance. Second ruler used for post-processing
   * (e.g. in emphasis-like rules).
   **/
  this.ruler2 = new Ruler();

  for (i = 0; i < _rules2.length; i++) {
    this.ruler2.push(_rules2[i][0], _rules2[i][1]);
  }
}
|
||||
|
||||
|
||||
// Skip single token by running all rules in validation mode;
|
||||
// returns `true` if any rule reported success
|
||||
//
|
||||
// Skip single token by running all rules in validation mode;
// returns `true` if any rule reported success
//
// Advances `state.pos` past one token without emitting any tokens.
// Results are memoized in `state.cache` keyed by start position.
//
ParserInline.prototype.skipToken = function (state) {
  var ok, i, pos = state.pos,
      rules = this.ruler.getRules(''),
      len = rules.length,
      maxNesting = state.md.options.maxNesting,
      cache = state.cache;

  // Cache hit: we already know where a token starting here ends.
  if (typeof cache[pos] !== 'undefined') {
    state.pos = cache[pos];
    return;
  }

  if (state.level < maxNesting) {
    for (i = 0; i < len; i++) {
      // Increment state.level and decrement it later to limit recursion.
      // It's harmless to do here, because no tokens are created. But ideally,
      // we'd need a separate private state variable for this purpose.
      //
      state.level++;
      ok = rules[i](state, true);
      state.level--;

      if (ok) { break; }
    }
  } else {
    // Too much nesting, just skip until the end of the paragraph.
    //
    // NOTE: this will cause links to behave incorrectly in the following case,
    //       when an amount of `[` is exactly equal to `maxNesting + 1`:
    //
    //       [[[[[[[[[[[[[[[[[[[[[foo]()
    //
    // TODO: remove this workaround when CM standard will allow nested links
    //       (we can replace it by preventing links from being parsed in
    //       validation mode)
    //
    state.pos = state.posMax;
  }

  // No rule matched: fall back to consuming a single character.
  if (!ok) { state.pos++; }
  cache[pos] = state.pos;
};
|
||||
|
||||
|
||||
// Generate tokens for input range
|
||||
//
|
||||
// Generate tokens for input range
//
// Runs the inline rules over `state.src` from `state.pos` to `state.posMax`,
// accumulating unmatched characters into `state.pending`.
//
ParserInline.prototype.tokenize = function (state) {
  var ok, i,
      rules = this.ruler.getRules(''),
      len = rules.length,
      end = state.posMax,
      maxNesting = state.md.options.maxNesting;

  while (state.pos < end) {
    // Try all possible rules.
    // On success, rule should:
    //
    // - update `state.pos`
    // - update `state.tokens`
    // - return true

    if (state.level < maxNesting) {
      for (i = 0; i < len; i++) {
        ok = rules[i](state, false);
        if (ok) { break; }
      }
    }

    if (ok) {
      if (state.pos >= end) { break; }
      continue;
    }

    // No rule matched: buffer the character as pending text.
    state.pending += state.src[state.pos++];
  }

  // Flush any remaining buffered text as a final text token.
  if (state.pending) {
    state.pushPending();
  }
};
|
||||
|
||||
|
||||
/**
 * ParserInline.parse(str, md, env, outTokens)
 *
 * Process input string and push inline tokens into `outTokens`
 **/
ParserInline.prototype.parse = function (str, md, env, outTokens) {
  var state = new this.State(str, md, env, outTokens);

  // First pass: run the main inline rule chain over the whole range.
  this.tokenize(state);

  // Second pass: post-processing rules (pair balancing, text merging, ...)
  // run once each over the finished token stream.
  var postRules = this.ruler2.getRules('');
  var total = postRules.length;

  for (var idx = 0; idx < total; idx++) {
    postRules[idx](state);
  }
};
||||
|
||||
|
||||
ParserInline.prototype.State = require('./rules_inline/state_inline');
|
||||
|
||||
|
||||
module.exports = ParserInline;
|
||||
80
lib/markdown-it/lib/presets/commonmark.js
Normal file
80
lib/markdown-it/lib/presets/commonmark.js
Normal file
@@ -0,0 +1,80 @@
|
||||
// Commonmark default options

'use strict';


module.exports = {
  options: {
    html:         true,         // Enable HTML tags in source
    xhtmlOut:     true,         // Use '/' to close single tags (<br />)
    breaks:       false,        // Convert '\n' in paragraphs into <br>
    langPrefix:   'language-',  // CSS language prefix for fenced blocks
    linkify:      false,        // autoconvert URL-like texts to links

    // Enable some language-neutral replacements + quotes beautification
    typographer:  false,

    // Double + single quotes replacement pairs, when typographer enabled,
    // and smartquotes on. Could be either a String or an Array.
    //
    // For example, you can use '«»„“' for Russian, '„“‚‘' for German,
    // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
    quotes: '\u201c\u201d\u2018\u2019', /* “”‘’ */

    // Highlighter function. Should return escaped HTML,
    // or '' if the source string is not changed and should be escaped externally.
    // If result starts with <pre... internal wrapper is skipped.
    //
    //   function (/*str, lang*/) { return ''; }
    //
    highlight: null,

    // Internal protection, recursion limit. Note: stricter than the
    // default preset (20 vs 100).
    maxNesting:   20
  },

  components: {

    core: {
      rules: [
        'normalize',
        'block',
        'inline'
      ]
    },

    block: {
      rules: [
        'blockquote',
        'code',
        'fence',
        'heading',
        'hr',
        'html_block',
        'lheading',
        'list',
        'reference',
        'paragraph'
      ]
    },

    inline: {
      rules: [
        'autolink',
        'backticks',
        'emphasis',
        'entity',
        'escape',
        'html_inline',
        'image',
        'link',
        'newline',
        'text'
      ],
      // Post-processing inline rules (run after the main chain).
      rules2: [
        'balance_pairs',
        'emphasis',
        'text_collapse'
      ]
    }
  }
};
||||
41
lib/markdown-it/lib/presets/default.js
Normal file
41
lib/markdown-it/lib/presets/default.js
Normal file
@@ -0,0 +1,41 @@
|
||||
// markdown-it default options

'use strict';


module.exports = {
  options: {
    html:         false,        // Enable HTML tags in source
    xhtmlOut:     false,        // Use '/' to close single tags (<br />)
    breaks:       false,        // Convert '\n' in paragraphs into <br>
    langPrefix:   'language-',  // CSS language prefix for fenced blocks
    linkify:      false,        // autoconvert URL-like texts to links

    // Enable some language-neutral replacements + quotes beautification
    typographer:  false,

    // Double + single quotes replacement pairs, when typographer enabled,
    // and smartquotes on. Could be either a String or an Array.
    //
    // For example, you can use '«»„“' for Russian, '„“‚‘' for German,
    // and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
    quotes: '\u201c\u201d\u2018\u2019', /* “”‘’ */

    // Highlighter function. Should return escaped HTML,
    // or '' if the source string is not changed and should be escaped externally.
    // If result starts with <pre... internal wrapper is skipped.
    //
    //   function (/*str, lang*/) { return ''; }
    //
    highlight: null,

    maxNesting:   100           // Internal protection, recursion limit
  },

  components: {

    // NOTE(review): empty component objects presumably mean "no rule
    // whitelist — keep every registered rule enabled"; confirm against
    // the preset consumer in lib/index.js.
    core: {},
    block: {},
    inline: {}
  }
};
||||
62
lib/markdown-it/lib/presets/zero.js
Normal file
62
lib/markdown-it/lib/presets/zero.js
Normal file
@@ -0,0 +1,62 @@
|
||||
// "Zero" preset, with nothing enabled. Useful for manual configuring of simple
|
||||
// modes. For example, to parse bold/italic only.
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
module.exports = {
|
||||
options: {
|
||||
html: false, // Enable HTML tags in source
|
||||
xhtmlOut: false, // Use '/' to close single tags (<br />)
|
||||
breaks: false, // Convert '\n' in paragraphs into <br>
|
||||
langPrefix: 'language-', // CSS language prefix for fenced blocks
|
||||
linkify: false, // autoconvert URL-like texts to links
|
||||
|
||||
// Enable some language-neutral replacements + quotes beautification
|
||||
typographer: false,
|
||||
|
||||
// Double + single quotes replacement pairs, when typographer enabled,
|
||||
// and smartquotes on. Could be either a String or an Array.
|
||||
//
|
||||
// For example, you can use '«»„“' for Russian, '„“‚‘' for German,
|
||||
// and ['«\xA0', '\xA0»', '‹\xA0', '\xA0›'] for French (including nbsp).
|
||||
quotes: '\u201c\u201d\u2018\u2019', /* “”‘’ */
|
||||
|
||||
// Highlighter function. Should return escaped HTML,
|
||||
// or '' if the source string is not changed and should be escaped externaly.
|
||||
// If result starts with <pre... internal wrapper is skipped.
|
||||
//
|
||||
// function (/*str, lang*/) { return ''; }
|
||||
//
|
||||
highlight: null,
|
||||
|
||||
maxNesting: 20 // Internal protection, recursion limit
|
||||
},
|
||||
|
||||
components: {
|
||||
|
||||
core: {
|
||||
rules: [
|
||||
'normalize',
|
||||
'block',
|
||||
'inline'
|
||||
]
|
||||
},
|
||||
|
||||
block: {
|
||||
rules: [
|
||||
'paragraph'
|
||||
]
|
||||
},
|
||||
|
||||
inline: {
|
||||
rules: [
|
||||
'text'
|
||||
],
|
||||
rules2: [
|
||||
'balance_pairs',
|
||||
'text_collapse'
|
||||
]
|
||||
}
|
||||
}
|
||||
};
|
||||
335
lib/markdown-it/lib/renderer.js
Normal file
335
lib/markdown-it/lib/renderer.js
Normal file
@@ -0,0 +1,335 @@
|
||||
/**
|
||||
* class Renderer
|
||||
*
|
||||
* Generates HTML from parsed token stream. Each instance has independent
|
||||
* copy of rules. Those can be rewritten with ease. Also, you can add new
|
||||
* rules if you create plugin and adds new token types.
|
||||
**/
|
||||
'use strict';
|
||||
|
||||
|
||||
var assign = require('./common/utils').assign;
|
||||
var unescapeAll = require('./common/utils').unescapeAll;
|
||||
var escapeHtml = require('./common/utils').escapeHtml;
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Default token renderers, keyed by token type. Copied into every
// Renderer instance so rules can be overridden per-instance.
var default_rules = {};


// Inline code span: `<code ...>escaped content</code>`.
default_rules.code_inline = function (tokens, idx, options, env, slf) {
  var tok = tokens[idx];

  return '<code' + slf.renderAttrs(tok) + '>'
       + escapeHtml(tok.content)
       + '</code>';
};


// Indented code block: `<pre ...><code>escaped content</code></pre>`.
default_rules.code_block = function (tokens, idx, options, env, slf) {
  var tok = tokens[idx];

  return '<pre' + slf.renderAttrs(tok) + '><code>'
       + escapeHtml(tok.content)
       + '</code></pre>\n';
};
||||
|
||||
|
||||
// Fenced code block renderer.
//
// Resolves the language from the fence info string, runs the optional
// `options.highlight` hook, and wraps the result in <pre><code> with a
// `langPrefix + langName` class when a language was given.
default_rules.fence = function (tokens, idx, options, env, slf) {
  var token = tokens[idx],
      info = token.info ? unescapeAll(token.info).trim() : '',
      langName = '',
      highlighted, i, tmpAttrs, tmpToken;

  if (info) {
    // First word of the info string is the language name.
    langName = info.split(/\s+/g)[0];
  }

  if (options.highlight) {
    // Highlighter may return '' to signal "not changed, escape externally".
    highlighted = options.highlight(token.content, langName) || escapeHtml(token.content);
  } else {
    highlighted = escapeHtml(token.content);
  }

  // Highlighter already produced a full <pre> wrapper — use it as-is.
  if (highlighted.indexOf('<pre') === 0) {
    return highlighted + '\n';
  }

  // If language exists, inject class gently, without modifying original token.
  // May be, one day we will add .clone() for token and simplify this part, but
  // now we prefer to keep things local.
  if (info) {
    i        = token.attrIndex('class');
    tmpAttrs = token.attrs ? token.attrs.slice() : [];

    if (i < 0) {
      tmpAttrs.push([ 'class', options.langPrefix + langName ]);
    } else {
      // FIX: `slice()` above is only a shallow copy — the [name, value]
      // pairs are still shared with `token.attrs`, so appending in place
      // mutated the original token (and duplicated the class on every
      // re-render). Clone the pair before modifying it.
      tmpAttrs[i] = tmpAttrs[i].slice();
      tmpAttrs[i][1] += ' ' + options.langPrefix + langName;
    }

    // Fake token just to render attributes
    tmpToken = {
      attrs: tmpAttrs
    };

    return '<pre><code' + slf.renderAttrs(tmpToken) + '>'
      + highlighted
      + '</code></pre>\n';
  }


  return '<pre><code' + slf.renderAttrs(token) + '>'
    + highlighted
    + '</code></pre>\n';
};
||||
|
||||
|
||||
default_rules.image = function (tokens, idx, options, env, slf) {
  var token = tokens[idx];

  // "alt" attr MUST be set, even if empty. Because it's mandatory and
  // should be placed on proper position for tests.
  //
  // Replace content with actual value (children flattened to plain text).
  token.attrs[token.attrIndex('alt')][1] =
    slf.renderInlineAsText(token.children, options, env);

  return slf.renderToken(tokens, idx, options);
};


// Hard line break: always a <br>, XHTML-style when configured.
default_rules.hardbreak = function (tokens, idx, options /*, env */) {
  if (options.xhtmlOut) { return '<br />\n'; }
  return '<br>\n';
};

// Soft line break: plain newline unless `breaks` option promotes it to <br>.
default_rules.softbreak = function (tokens, idx, options /*, env */) {
  if (!options.breaks) { return '\n'; }
  return options.xhtmlOut ? '<br />\n' : '<br>\n';
};


// Plain text is always HTML-escaped.
default_rules.text = function (tokens, idx /*, options, env */) {
  return escapeHtml(tokens[idx].content);
};


// Raw HTML passes through unescaped (such tokens are only produced when
// the `html` option is enabled).
default_rules.html_block = function (tokens, idx /*, options, env */) {
  return tokens[idx].content;
};

default_rules.html_inline = function (tokens, idx /*, options, env */) {
  return tokens[idx].content;
};
||||
|
||||
|
||||
/**
 * new Renderer()
 *
 * Creates new [[Renderer]] instance and fill [[Renderer#rules]] with defaults.
 **/
function Renderer() {

  /**
   * Renderer#rules -> Object
   *
   * Contains render rules for tokens. Can be updated and extended.
   *
   * ##### Example
   *
   * ```javascript
   * var md = require('markdown-it')();
   *
   * md.renderer.rules.strong_open  = function () { return '<b>'; };
   * md.renderer.rules.strong_close = function () { return '</b>'; };
   *
   * var result = md.renderInline(...);
   * ```
   *
   * Each rule is called as independent static function with fixed signature:
   *
   * ```javascript
   * function my_token_render(tokens, idx, options, env, renderer) {
   *   // ...
   *   return renderedHTML;
   * }
   * ```
   *
   * See [source code](https://github.com/markdown-it/markdown-it/blob/master/lib/renderer.js)
   * for more details and examples.
   **/
  // Own shallow copy of the defaults, so per-instance overrides never
  // affect other Renderer instances.
  this.rules = assign({}, default_rules);
}
||||
|
||||
|
||||
/**
 * Renderer.renderAttrs(token) -> String
 *
 * Render token attributes to string (each as ` name="value"`, both
 * HTML-escaped). Returns '' when the token has no attributes.
 **/
Renderer.prototype.renderAttrs = function renderAttrs(token) {
  if (!token.attrs) { return ''; }

  var out = '';
  var count = token.attrs.length;

  for (var idx = 0; idx < count; idx++) {
    var pair = token.attrs[idx];
    out += ' ' + escapeHtml(pair[0]) + '="' + escapeHtml(pair[1]) + '"';
  }

  return out;
};
||||
|
||||
|
||||
/**
 * Renderer.renderToken(tokens, idx, options) -> String
 * - tokens (Array): list of tokens
 * - idx (Number): token index to render
 * - options (Object): params of parser instance
 *
 * Default token renderer. Can be overridden by custom function
 * in [[Renderer#rules]].
 **/
Renderer.prototype.renderToken = function renderToken(tokens, idx, options) {
  var nextToken,
      result = '',
      needLf = false,
      token = tokens[idx];

  // Tight list paragraphs: hidden tokens render as nothing.
  if (token.hidden) {
    return '';
  }

  // Insert a newline between hidden paragraph and subsequent opening
  // block-level tag.
  //
  // For example, here we should insert a newline before blockquote:
  //  - a
  //  >
  //
  if (token.block && token.nesting !== -1 && idx && tokens[idx - 1].hidden) {
    result += '\n';
  }

  // Add token name, e.g. `<img`
  result += (token.nesting === -1 ? '</' : '<') + token.tag;

  // Encode attributes, e.g. `<img src="foo"`
  result += this.renderAttrs(token);

  // Add a slash for self-closing tags, e.g. `<img src="foo" /`
  if (token.nesting === 0 && options.xhtmlOut) {
    result += ' /';
  }

  // Check if we need to add a newline after this tag
  if (token.block) {
    needLf = true;

    if (token.nesting === 1) {
      if (idx + 1 < tokens.length) {
        nextToken = tokens[idx + 1];

        if (nextToken.type === 'inline' || nextToken.hidden) {
          // Block-level tag containing an inline tag.
          //
          needLf = false;

        } else if (nextToken.nesting === -1 && nextToken.tag === token.tag) {
          // Opening tag + closing tag of the same type. E.g. `<li></li>`.
          //
          needLf = false;
        }
      }
    }
  }

  result += needLf ? '>\n' : '>';

  return result;
};
||||
|
||||
|
||||
/**
 * Renderer.renderInline(tokens, options, env) -> String
 * - tokens (Array): list of inline tokens to render
 * - options (Object): params of parser instance
 * - env (Object): additional data from parsed input (references, for example)
 *
 * The same as [[Renderer.render]], but for single token of `inline` type.
 **/
Renderer.prototype.renderInline = function (tokens, options, env) {
  var html = '';
  var rules = this.rules;
  var count = tokens.length;

  for (var idx = 0; idx < count; idx++) {
    var type = tokens[idx].type;

    // Use the registered rule for this token type when one exists,
    // otherwise fall back to the generic tag renderer.
    html += (typeof rules[type] !== 'undefined')
      ? rules[type](tokens, idx, options, env, this)
      : this.renderToken(tokens, idx, options);
  }

  return html;
};
||||
|
||||
|
||||
/** internal
 * Renderer.renderInlineAsText(tokens, options, env) -> String
 * - tokens (Array): list of inline tokens to render
 * - options (Object): params of parser instance
 * - env (Object): additional data from parsed input (references, for example)
 *
 * Special kludge for image `alt` attributes to conform CommonMark spec.
 * Don't try to use it! Spec requires to show `alt` content with stripped markup,
 * instead of simple escaping.
 **/
Renderer.prototype.renderInlineAsText = function (tokens, options, env) {
  var text = '';
  var count = tokens.length;

  for (var idx = 0; idx < count; idx++) {
    var tok = tokens[idx];

    if (tok.type === 'text') {
      text += tok.content;
    } else if (tok.type === 'image') {
      // Recurse so markup nested inside an image's alt flattens to text.
      text += this.renderInlineAsText(tok.children, options, env);
    }
  }

  return text;
};
||||
|
||||
|
||||
/**
 * Renderer.render(tokens, options, env) -> String
 * - tokens (Array): list of block tokens to render
 * - options (Object): params of parser instance
 * - env (Object): additional data from parsed input (references, for example)
 *
 * Takes token stream and generates HTML. Probably, you will never need to call
 * this method directly.
 **/
Renderer.prototype.render = function (tokens, options, env) {
  var html = '';
  var rules = this.rules;
  var count = tokens.length;

  for (var idx = 0; idx < count; idx++) {
    var type = tokens[idx].type;

    if (type === 'inline') {
      // Inline containers carry their content as children; render those.
      html += this.renderInline(tokens[idx].children, options, env);
    } else if (typeof rules[type] !== 'undefined') {
      html += rules[type](tokens, idx, options, env, this);
    } else {
      html += this.renderToken(tokens, idx, options, env);
    }
  }

  return html;
};
||||
|
||||
module.exports = Renderer;
|
||||
352
lib/markdown-it/lib/ruler.js
Normal file
352
lib/markdown-it/lib/ruler.js
Normal file
@@ -0,0 +1,352 @@
|
||||
/**
|
||||
* class Ruler
|
||||
*
|
||||
* Helper class, used by [[MarkdownIt#core]], [[MarkdownIt#block]] and
|
||||
* [[MarkdownIt#inline]] to manage sequences of functions (rules):
|
||||
*
|
||||
* - keep rules in defined order
|
||||
* - assign the name to each rule
|
||||
* - enable/disable rules
|
||||
* - add/replace rules
|
||||
* - allow assign rules to additional named chains (in the same)
|
||||
* - cacheing lists of active rules
|
||||
*
|
||||
* You will not need use this class directly until write plugins. For simple
|
||||
* rules control use [[MarkdownIt.disable]], [[MarkdownIt.enable]] and
|
||||
* [[MarkdownIt.use]].
|
||||
**/
|
||||
'use strict';
|
||||
|
||||
|
||||
/**
 * new Ruler()
 **/
function Ruler() {
  // Registered rules, in execution order. Each entry:
  //
  //   { name: String, enabled: Boolean, fn: Function, alt: [ chainName, ... ] }
  //
  this.__rules__ = [];

  // Compiled chain cache: chain name ('' = default) -> array of enabled rule
  // functions. Reset to null by every mutating method; rebuilt lazily in
  // getRules().
  this.__cache__ = null;
}

////////////////////////////////////////////////////////////////////////////////
// Helper methods, should not be used directly


// Find rule index by name (-1 when not registered).
//
Ruler.prototype.__find__ = function (name) {
  var count = this.__rules__.length;

  for (var idx = 0; idx < count; idx++) {
    if (this.__rules__[idx].name === name) { return idx; }
  }

  return -1;
};


// Build the chain-name -> rule-function lookup cache.
//
Ruler.prototype.__compile__ = function () {
  var rules = this.__rules__;
  var chains = [ '' ];
  var i, j, rule;

  // Collect every chain name referenced by an enabled rule.
  for (i = 0; i < rules.length; i++) {
    rule = rules[i];
    if (!rule.enabled) { continue; }

    for (j = 0; j < rule.alt.length; j++) {
      if (chains.indexOf(rule.alt[j]) < 0) {
        chains.push(rule.alt[j]);
      }
    }
  }

  this.__cache__ = {};

  // For each chain, keep the enabled rules that belong to it. Every enabled
  // rule belongs to the default '' chain; named chains require membership
  // in the rule's `alt` list.
  for (i = 0; i < chains.length; i++) {
    var chain = chains[i];
    var compiled = [];

    for (j = 0; j < rules.length; j++) {
      rule = rules[j];
      if (!rule.enabled) { continue; }
      if (chain && rule.alt.indexOf(chain) < 0) { continue; }
      compiled.push(rule.fn);
    }

    this.__cache__[chain] = compiled;
  }
};


/**
 * Ruler.at(name, fn [, options])
 * - name (String): rule name to replace.
 * - fn (Function): new rule function.
 * - options (Object): new rule options (not mandatory).
 *
 * Replace rule by name with new function & options. Throws error if name not
 * found.
 *
 * ##### Options:
 *
 * - __alt__ - array with names of "alternate" chains.
 *
 * ##### Example
 *
 * ```javascript
 * var md = require('markdown-it')();
 *
 * md.core.ruler.at('replacements', function replace(state) {
 *   //...
 * });
 * ```
 **/
Ruler.prototype.at = function (name, fn, options) {
  var opt = options || {};
  var idx = this.__find__(name);

  if (idx === -1) { throw new Error('Parser rule not found: ' + name); }

  this.__rules__[idx].fn = fn;
  this.__rules__[idx].alt = opt.alt || [];
  this.__cache__ = null;
};


/**
 * Ruler.before(beforeName, ruleName, fn [, options])
 * - beforeName (String): new rule will be added before this one.
 * - ruleName (String): name of added rule.
 * - fn (Function): rule function.
 * - options (Object): rule options (not mandatory).
 *
 * Add new rule to chain before one with given name. Throws if `beforeName`
 * is not registered. See also [[Ruler.after]], [[Ruler.push]].
 *
 * ##### Options:
 *
 * - __alt__ - array with names of "alternate" chains.
 **/
Ruler.prototype.before = function (beforeName, ruleName, fn, options) {
  var opt = options || {};
  var idx = this.__find__(beforeName);

  if (idx === -1) { throw new Error('Parser rule not found: ' + beforeName); }

  var entry = { name: ruleName, enabled: true, fn: fn, alt: opt.alt || [] };
  this.__rules__.splice(idx, 0, entry);
  this.__cache__ = null;
};


/**
 * Ruler.after(afterName, ruleName, fn [, options])
 * - afterName (String): new rule will be added after this one.
 * - ruleName (String): name of added rule.
 * - fn (Function): rule function.
 * - options (Object): rule options (not mandatory).
 *
 * Add new rule to chain after one with given name. Throws if `afterName`
 * is not registered. See also [[Ruler.before]], [[Ruler.push]].
 *
 * ##### Options:
 *
 * - __alt__ - array with names of "alternate" chains.
 **/
Ruler.prototype.after = function (afterName, ruleName, fn, options) {
  var opt = options || {};
  var idx = this.__find__(afterName);

  if (idx === -1) { throw new Error('Parser rule not found: ' + afterName); }

  var entry = { name: ruleName, enabled: true, fn: fn, alt: opt.alt || [] };
  this.__rules__.splice(idx + 1, 0, entry);
  this.__cache__ = null;
};

/**
 * Ruler.push(ruleName, fn [, options])
 * - ruleName (String): name of added rule.
 * - fn (Function): rule function.
 * - options (Object): rule options (not mandatory).
 *
 * Push new rule to the end of chain. See also
 * [[Ruler.before]], [[Ruler.after]].
 *
 * ##### Options:
 *
 * - __alt__ - array with names of "alternate" chains.
 **/
Ruler.prototype.push = function (ruleName, fn, options) {
  var opt = options || {};

  this.__rules__.push({ name: ruleName, enabled: true, fn: fn, alt: opt.alt || [] });
  this.__cache__ = null;
};


/**
 * Ruler.enable(list [, ignoreInvalid]) -> Array
 * - list (String|Array): list of rule names to enable.
 * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
 *
 * Enable rules with given names. If any rule name not found - throw Error.
 * Errors can be disabled by second param.
 *
 * Returns list of found rule names (if no exception happened).
 *
 * See also [[Ruler.disable]], [[Ruler.enableOnly]].
 **/
Ruler.prototype.enable = function (list, ignoreInvalid) {
  var names = Array.isArray(list) ? list : [ list ];
  var result = [];

  for (var i = 0; i < names.length; i++) {
    var name = names[i];
    var idx = this.__find__(name);

    if (idx < 0) {
      if (ignoreInvalid) { continue; }
      throw new Error('Rules manager: invalid rule name ' + name);
    }

    this.__rules__[idx].enabled = true;
    result.push(name);
  }

  this.__cache__ = null;
  return result;
};


/**
 * Ruler.enableOnly(list [, ignoreInvalid])
 * - list (String|Array): list of rule names to enable (whitelist).
 * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
 *
 * Enable rules with given names, and disable everything else. If any rule name
 * not found - throw Error. Errors can be disabled by second param.
 *
 * See also [[Ruler.disable]], [[Ruler.enable]].
 **/
Ruler.prototype.enableOnly = function (list, ignoreInvalid) {
  var names = Array.isArray(list) ? list : [ list ];

  // Disable everything first, then re-enable the whitelist.
  for (var i = 0; i < this.__rules__.length; i++) {
    this.__rules__[i].enabled = false;
  }

  this.enable(names, ignoreInvalid);
};


/**
 * Ruler.disable(list [, ignoreInvalid]) -> Array
 * - list (String|Array): list of rule names to disable.
 * - ignoreInvalid (Boolean): set `true` to ignore errors when rule not found.
 *
 * Disable rules with given names. If any rule name not found - throw Error.
 * Errors can be disabled by second param.
 *
 * Returns list of found rule names (if no exception happened).
 *
 * See also [[Ruler.enable]], [[Ruler.enableOnly]].
 **/
Ruler.prototype.disable = function (list, ignoreInvalid) {
  var names = Array.isArray(list) ? list : [ list ];
  var result = [];

  for (var i = 0; i < names.length; i++) {
    var name = names[i];
    var idx = this.__find__(name);

    if (idx < 0) {
      if (ignoreInvalid) { continue; }
      throw new Error('Rules manager: invalid rule name ' + name);
    }

    this.__rules__[idx].enabled = false;
    result.push(name);
  }

  this.__cache__ = null;
  return result;
};


/**
 * Ruler.getRules(chainName) -> Array
 *
 * Return array of active functions (rules) for given chain name. It analyzes
 * rules configuration, compiles caches if not exists and returns result.
 *
 * Default chain name is `''` (empty string). It can't be skipped. That's
 * done intentionally, to keep signature monomorphic for high speed.
 **/
Ruler.prototype.getRules = function (chainName) {
  if (this.__cache__ === null) {
    this.__compile__();
  }

  // Chain can be empty, if rules disabled. But we still have to return Array.
  return this.__cache__[chainName] || [];
};
||||
|
||||
module.exports = Ruler;
|
||||
285
lib/markdown-it/lib/rules_block/blockquote.js
Normal file
285
lib/markdown-it/lib/rules_block/blockquote.js
Normal file
@@ -0,0 +1,285 @@
|
||||
// Block quotes
|
||||
|
||||
'use strict';
|
||||
|
||||
var isSpace = require('../common/utils').isSpace;
|
||||
|
||||
|
||||
module.exports = function blockquote(state, startLine, endLine, silent) {
|
||||
var adjustTab,
|
||||
ch,
|
||||
i,
|
||||
initial,
|
||||
l,
|
||||
lastLineEmpty,
|
||||
lines,
|
||||
nextLine,
|
||||
offset,
|
||||
oldBMarks,
|
||||
oldBSCount,
|
||||
oldIndent,
|
||||
oldParentType,
|
||||
oldSCount,
|
||||
oldTShift,
|
||||
spaceAfterMarker,
|
||||
terminate,
|
||||
terminatorRules,
|
||||
token,
|
||||
wasOutdented,
|
||||
oldLineMax = state.lineMax,
|
||||
pos = state.bMarks[startLine] + state.tShift[startLine],
|
||||
max = state.eMarks[startLine];
|
||||
|
||||
// if it's indented more than 3 spaces, it should be a code block
|
||||
if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }
|
||||
|
||||
// check the block quote marker
|
||||
if (state.src.charCodeAt(pos++) !== 0x3E/* > */) { return false; }
|
||||
|
||||
// we know that it's going to be a valid blockquote,
|
||||
// so no point trying to find the end of it in silent mode
|
||||
if (silent) { return true; }
|
||||
|
||||
// skip spaces after ">" and re-calculate offset
|
||||
initial = offset = state.sCount[startLine] + pos - (state.bMarks[startLine] + state.tShift[startLine]);
|
||||
|
||||
// skip one optional space after '>'
|
||||
if (state.src.charCodeAt(pos) === 0x20 /* space */) {
|
||||
// ' > test '
|
||||
// ^ -- position start of line here:
|
||||
pos++;
|
||||
initial++;
|
||||
offset++;
|
||||
adjustTab = false;
|
||||
spaceAfterMarker = true;
|
||||
} else if (state.src.charCodeAt(pos) === 0x09 /* tab */) {
|
||||
spaceAfterMarker = true;
|
||||
|
||||
if ((state.bsCount[startLine] + offset) % 4 === 3) {
|
||||
// ' >\t test '
|
||||
// ^ -- position start of line here (tab has width===1)
|
||||
pos++;
|
||||
initial++;
|
||||
offset++;
|
||||
adjustTab = false;
|
||||
} else {
|
||||
// ' >\t test '
|
||||
// ^ -- position start of line here + shift bsCount slightly
|
||||
// to make extra space appear
|
||||
adjustTab = true;
|
||||
}
|
||||
} else {
|
||||
spaceAfterMarker = false;
|
||||
}
|
||||
|
||||
oldBMarks = [ state.bMarks[startLine] ];
|
||||
state.bMarks[startLine] = pos;
|
||||
|
||||
while (pos < max) {
|
||||
ch = state.src.charCodeAt(pos);
|
||||
|
||||
if (isSpace(ch)) {
|
||||
if (ch === 0x09) {
|
||||
offset += 4 - (offset + state.bsCount[startLine] + (adjustTab ? 1 : 0)) % 4;
|
||||
} else {
|
||||
offset++;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
|
||||
pos++;
|
||||
}
|
||||
|
||||
oldBSCount = [ state.bsCount[startLine] ];
|
||||
state.bsCount[startLine] = state.sCount[startLine] + 1 + (spaceAfterMarker ? 1 : 0);
|
||||
|
||||
lastLineEmpty = pos >= max;
|
||||
|
||||
oldSCount = [ state.sCount[startLine] ];
|
||||
state.sCount[startLine] = offset - initial;
|
||||
|
||||
oldTShift = [ state.tShift[startLine] ];
|
||||
state.tShift[startLine] = pos - state.bMarks[startLine];
|
||||
|
||||
terminatorRules = state.md.block.ruler.getRules('blockquote');
|
||||
|
||||
oldParentType = state.parentType;
|
||||
state.parentType = 'blockquote';
|
||||
wasOutdented = false;
|
||||
|
||||
// Search the end of the block
|
||||
//
|
||||
// Block ends with either:
|
||||
// 1. an empty line outside:
|
||||
// ```
|
||||
// > test
|
||||
//
|
||||
// ```
|
||||
// 2. an empty line inside:
|
||||
// ```
|
||||
// >
|
||||
// test
|
||||
// ```
|
||||
// 3. another tag:
|
||||
// ```
|
||||
// > test
|
||||
// - - -
|
||||
// ```
|
||||
for (nextLine = startLine + 1; nextLine < endLine; nextLine++) {
|
||||
// check if it's outdented, i.e. it's inside list item and indented
|
||||
// less than said list item:
|
||||
//
|
||||
// ```
|
||||
// 1. anything
|
||||
// > current blockquote
|
||||
// 2. checking this line
|
||||
// ```
|
||||
if (state.sCount[nextLine] < state.blkIndent) wasOutdented = true;
|
||||
|
||||
pos = state.bMarks[nextLine] + state.tShift[nextLine];
|
||||
max = state.eMarks[nextLine];
|
||||
|
||||
if (pos >= max) {
|
||||
// Case 1: line is not inside the blockquote, and this line is empty.
|
||||
break;
|
||||
}
|
||||
|
||||
if (state.src.charCodeAt(pos++) === 0x3E/* > */ && !wasOutdented) {
|
||||
// This line is inside the blockquote.
|
||||
|
||||
// skip spaces after ">" and re-calculate offset
|
||||
initial = offset = state.sCount[nextLine] + pos - (state.bMarks[nextLine] + state.tShift[nextLine]);
|
||||
|
||||
// skip one optional space after '>'
|
||||
if (state.src.charCodeAt(pos) === 0x20 /* space */) {
|
||||
// ' > test '
|
||||
// ^ -- position start of line here:
|
||||
pos++;
|
||||
initial++;
|
||||
offset++;
|
||||
adjustTab = false;
|
||||
spaceAfterMarker = true;
|
||||
} else if (state.src.charCodeAt(pos) === 0x09 /* tab */) {
|
||||
spaceAfterMarker = true;
|
||||
|
||||
if ((state.bsCount[nextLine] + offset) % 4 === 3) {
|
||||
// ' >\t test '
|
||||
// ^ -- position start of line here (tab has width===1)
|
||||
pos++;
|
||||
initial++;
|
||||
offset++;
|
||||
adjustTab = false;
|
||||
} else {
|
||||
// ' >\t test '
|
||||
// ^ -- position start of line here + shift bsCount slightly
|
||||
// to make extra space appear
|
||||
adjustTab = true;
|
||||
}
|
||||
} else {
|
||||
spaceAfterMarker = false;
|
||||
}
|
||||
|
||||
oldBMarks.push(state.bMarks[nextLine]);
|
||||
state.bMarks[nextLine] = pos;
|
||||
|
||||
while (pos < max) {
|
||||
ch = state.src.charCodeAt(pos);
|
||||
|
||||
if (isSpace(ch)) {
|
||||
if (ch === 0x09) {
|
||||
offset += 4 - (offset + state.bsCount[nextLine] + (adjustTab ? 1 : 0)) % 4;
|
||||
} else {
|
||||
offset++;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
|
||||
pos++;
|
||||
}
|
||||
|
||||
lastLineEmpty = pos >= max;
|
||||
|
||||
oldBSCount.push(state.bsCount[nextLine]);
|
||||
state.bsCount[nextLine] = state.sCount[nextLine] + 1 + (spaceAfterMarker ? 1 : 0);
|
||||
|
||||
oldSCount.push(state.sCount[nextLine]);
|
||||
state.sCount[nextLine] = offset - initial;
|
||||
|
||||
oldTShift.push(state.tShift[nextLine]);
|
||||
state.tShift[nextLine] = pos - state.bMarks[nextLine];
|
||||
continue;
|
||||
}
|
||||
|
||||
// Case 2: line is not inside the blockquote, and the last line was empty.
|
||||
if (lastLineEmpty) { break; }
|
||||
|
||||
// Case 3: another tag found.
|
||||
terminate = false;
|
||||
for (i = 0, l = terminatorRules.length; i < l; i++) {
|
||||
if (terminatorRules[i](state, nextLine, endLine, true)) {
|
||||
terminate = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (terminate) {
|
||||
// Quirk to enforce "hard termination mode" for paragraphs;
|
||||
// normally if you call `tokenize(state, startLine, nextLine)`,
|
||||
// paragraphs will look below nextLine for paragraph continuation,
|
||||
// but if blockquote is terminated by another tag, they shouldn't
|
||||
state.lineMax = nextLine;
|
||||
|
||||
if (state.blkIndent !== 0) {
|
||||
// state.blkIndent was non-zero, we now set it to zero,
|
||||
// so we need to re-calculate all offsets to appear as
|
||||
// if indent wasn't changed
|
||||
oldBMarks.push(state.bMarks[nextLine]);
|
||||
oldBSCount.push(state.bsCount[nextLine]);
|
||||
oldTShift.push(state.tShift[nextLine]);
|
||||
oldSCount.push(state.sCount[nextLine]);
|
||||
state.sCount[nextLine] -= state.blkIndent;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
oldBMarks.push(state.bMarks[nextLine]);
|
||||
oldBSCount.push(state.bsCount[nextLine]);
|
||||
oldTShift.push(state.tShift[nextLine]);
|
||||
oldSCount.push(state.sCount[nextLine]);
|
||||
|
||||
// A negative indentation means that this is a paragraph continuation
|
||||
//
|
||||
state.sCount[nextLine] = -1;
|
||||
}
|
||||
|
||||
oldIndent = state.blkIndent;
|
||||
state.blkIndent = 0;
|
||||
|
||||
token = state.push('blockquote_open', 'blockquote', 1);
|
||||
token.markup = '>';
|
||||
token.map = lines = [ startLine, 0 ];
|
||||
|
||||
state.md.block.tokenize(state, startLine, nextLine);
|
||||
|
||||
token = state.push('blockquote_close', 'blockquote', -1);
|
||||
token.markup = '>';
|
||||
|
||||
state.lineMax = oldLineMax;
|
||||
state.parentType = oldParentType;
|
||||
lines[1] = state.line;
|
||||
|
||||
// Restore original tShift; this might not be necessary since the parser
|
||||
// has already been here, but just to make sure we can do that.
|
||||
for (i = 0; i < oldTShift.length; i++) {
|
||||
state.bMarks[i + startLine] = oldBMarks[i];
|
||||
state.tShift[i + startLine] = oldTShift[i];
|
||||
state.sCount[i + startLine] = oldSCount[i];
|
||||
state.bsCount[i + startLine] = oldBSCount[i];
|
||||
}
|
||||
state.blkIndent = oldIndent;
|
||||
|
||||
return true;
|
||||
};
|
||||
34
lib/markdown-it/lib/rules_block/code.js
Normal file
34
lib/markdown-it/lib/rules_block/code.js
Normal file
@@ -0,0 +1,34 @@
|
||||
// Code block (4 spaces padded)
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
module.exports = function code(state, startLine, endLine/*, silent*/) {
|
||||
var nextLine, last, token;
|
||||
|
||||
if (state.sCount[startLine] - state.blkIndent < 4) { return false; }
|
||||
|
||||
last = nextLine = startLine + 1;
|
||||
|
||||
while (nextLine < endLine) {
|
||||
if (state.isEmpty(nextLine)) {
|
||||
nextLine++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (state.sCount[nextLine] - state.blkIndent >= 4) {
|
||||
nextLine++;
|
||||
last = nextLine;
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
state.line = last;
|
||||
|
||||
token = state.push('code_block', 'code', 0);
|
||||
token.content = state.getLines(startLine, last, 4 + state.blkIndent, true);
|
||||
token.map = [ startLine, state.line ];
|
||||
|
||||
return true;
|
||||
};
|
||||
98
lib/markdown-it/lib/rules_block/fence.js
Normal file
98
lib/markdown-it/lib/rules_block/fence.js
Normal file
@@ -0,0 +1,98 @@
|
||||
// fences (``` lang, ~~~ lang)

'use strict';


// Try to parse a fenced code block opening at `startLine`.
//
// Pushes a single 'fence' token (with `info` = the text after the opening
// marker run) and returns true on success; returns false when the line is
// not a valid fence opening. In `silent` (validation) mode no tokens are
// produced.
module.exports = function fence(state, startLine, endLine, silent) {
  var marker, len, params, nextLine, mem, token, markup,
      haveEndMarker = false,
      pos = state.bMarks[startLine] + state.tShift[startLine],
      max = state.eMarks[startLine];

  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }

  // Need room for at least three marker characters.
  if (pos + 3 > max) { return false; }

  marker = state.src.charCodeAt(pos);

  if (marker !== 0x7E/* ~ */ && marker !== 0x60 /* ` */) {
    return false;
  }

  // scan marker length
  mem = pos;
  pos = state.skipChars(pos, marker);

  len = pos - mem;

  if (len < 3) { return false; }

  markup = state.src.slice(mem, pos);
  params = state.src.slice(pos, max);

  // The info string of a backtick fence must not itself contain backticks.
  if (marker === 0x60 /* ` */) {
    if (params.indexOf(String.fromCharCode(marker)) >= 0) {
      return false;
    }
  }

  // Since start is found, we can report success here in validation mode
  if (silent) { return true; }

  // search end of block
  nextLine = startLine;

  for (;;) {
    nextLine++;
    if (nextLine >= endLine) {
      // unclosed block should be autoclosed by end of document.
      // also block seems to be autoclosed by end of parent
      break;
    }

    pos = mem = state.bMarks[nextLine] + state.tShift[nextLine];
    max = state.eMarks[nextLine];

    if (pos < max && state.sCount[nextLine] < state.blkIndent) {
      // non-empty line with negative indent should stop the list:
      // - ```
      //  test
      break;
    }

    if (state.src.charCodeAt(pos) !== marker) { continue; }

    if (state.sCount[nextLine] - state.blkIndent >= 4) {
      // closing fence should be indented less than 4 spaces
      continue;
    }

    pos = state.skipChars(pos, marker);

    // closing code fence must be at least as long as the opening one
    if (pos - mem < len) { continue; }

    // make sure tail has spaces only
    pos = state.skipSpaces(pos);

    if (pos < max) { continue; }

    haveEndMarker = true;
    // found!
    break;
  }

  // If a fence has heading spaces, they should be removed from its inner block
  len = state.sCount[startLine];

  // Without a closing marker the fence ends at `nextLine` (EOF / parent end);
  // with one, the closing line itself is consumed as well.
  state.line = nextLine + (haveEndMarker ? 1 : 0);

  token         = state.push('fence', 'code', 0);
  token.info    = params;
  token.content = state.getLines(startLine + 1, nextLine, len, true);
  token.markup  = markup;
  token.map     = [ startLine, state.line ];

  return true;
};
|
||||
55
lib/markdown-it/lib/rules_block/heading.js
Normal file
55
lib/markdown-it/lib/rules_block/heading.js
Normal file
@@ -0,0 +1,55 @@
|
||||
// heading (#, ##, ...)
|
||||
|
||||
'use strict';
|
||||
|
||||
var isSpace = require('../common/utils').isSpace;
|
||||
|
||||
|
||||
module.exports = function heading(state, startLine, endLine, silent) {
|
||||
var ch, level, tmp, token,
|
||||
pos = state.bMarks[startLine] + state.tShift[startLine],
|
||||
max = state.eMarks[startLine];
|
||||
|
||||
// if it's indented more than 3 spaces, it should be a code block
|
||||
if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }
|
||||
|
||||
ch = state.src.charCodeAt(pos);
|
||||
|
||||
if (ch !== 0x23/* # */ || pos >= max) { return false; }
|
||||
|
||||
// count heading level
|
||||
level = 1;
|
||||
ch = state.src.charCodeAt(++pos);
|
||||
while (ch === 0x23/* # */ && pos < max && level <= 6) {
|
||||
level++;
|
||||
ch = state.src.charCodeAt(++pos);
|
||||
}
|
||||
|
||||
if (level > 6 || (pos < max && !isSpace(ch))) { return false; }
|
||||
|
||||
if (silent) { return true; }
|
||||
|
||||
// Let's cut tails like ' ### ' from the end of string
|
||||
|
||||
max = state.skipSpacesBack(max, pos);
|
||||
tmp = state.skipCharsBack(max, 0x23, pos); // #
|
||||
if (tmp > pos && isSpace(state.src.charCodeAt(tmp - 1))) {
|
||||
max = tmp;
|
||||
}
|
||||
|
||||
state.line = startLine + 1;
|
||||
|
||||
token = state.push('heading_open', 'h' + String(level), 1);
|
||||
token.markup = '########'.slice(0, level);
|
||||
token.map = [ startLine, state.line ];
|
||||
|
||||
token = state.push('inline', '', 0);
|
||||
token.content = state.src.slice(pos, max).trim();
|
||||
token.map = [ startLine, state.line ];
|
||||
token.children = [];
|
||||
|
||||
token = state.push('heading_close', 'h' + String(level), -1);
|
||||
token.markup = '########'.slice(0, level);
|
||||
|
||||
return true;
|
||||
};
|
||||
45
lib/markdown-it/lib/rules_block/hr.js
Normal file
45
lib/markdown-it/lib/rules_block/hr.js
Normal file
@@ -0,0 +1,45 @@
|
||||
// Horizontal rule
|
||||
|
||||
'use strict';
|
||||
|
||||
var isSpace = require('../common/utils').isSpace;
|
||||
|
||||
|
||||
module.exports = function hr(state, startLine, endLine, silent) {
|
||||
var marker, cnt, ch, token,
|
||||
pos = state.bMarks[startLine] + state.tShift[startLine],
|
||||
max = state.eMarks[startLine];
|
||||
|
||||
// if it's indented more than 3 spaces, it should be a code block
|
||||
if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }
|
||||
|
||||
marker = state.src.charCodeAt(pos++);
|
||||
|
||||
// Check hr marker
|
||||
if (marker !== 0x2A/* * */ &&
|
||||
marker !== 0x2D/* - */ &&
|
||||
marker !== 0x5F/* _ */) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// markers can be mixed with spaces, but there should be at least 3 of them
|
||||
|
||||
cnt = 1;
|
||||
while (pos < max) {
|
||||
ch = state.src.charCodeAt(pos++);
|
||||
if (ch !== marker && !isSpace(ch)) { return false; }
|
||||
if (ch === marker) { cnt++; }
|
||||
}
|
||||
|
||||
if (cnt < 3) { return false; }
|
||||
|
||||
if (silent) { return true; }
|
||||
|
||||
state.line = startLine + 1;
|
||||
|
||||
token = state.push('hr', 'hr', 0);
|
||||
token.map = [ startLine, state.line ];
|
||||
token.markup = Array(cnt + 1).join(String.fromCharCode(marker));
|
||||
|
||||
return true;
|
||||
};
|
||||
74
lib/markdown-it/lib/rules_block/html_block.js
Normal file
74
lib/markdown-it/lib/rules_block/html_block.js
Normal file
@@ -0,0 +1,74 @@
|
||||
// HTML block
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
var block_names = require('../common/html_blocks');
|
||||
var HTML_OPEN_CLOSE_TAG_RE = require('../common/html_re').HTML_OPEN_CLOSE_TAG_RE;
|
||||
|
||||
// An array of opening and corresponding closing sequences for html tags,
|
||||
// last argument defines whether it can terminate a paragraph or not
|
||||
//
|
||||
var HTML_SEQUENCES = [
|
||||
[ /^<(script|pre|style)(?=(\s|>|$))/i, /<\/(script|pre|style)>/i, true ],
|
||||
[ /^<!--/, /-->/, true ],
|
||||
[ /^<\?/, /\?>/, true ],
|
||||
[ /^<![A-Z]/, />/, true ],
|
||||
[ /^<!\[CDATA\[/, /\]\]>/, true ],
|
||||
[ new RegExp('^</?(' + block_names.join('|') + ')(?=(\\s|/?>|$))', 'i'), /^$/, true ],
|
||||
[ new RegExp(HTML_OPEN_CLOSE_TAG_RE.source + '\\s*$'), /^$/, false ]
|
||||
];
|
||||
|
||||
|
||||
module.exports = function html_block(state, startLine, endLine, silent) {
|
||||
var i, nextLine, token, lineText,
|
||||
pos = state.bMarks[startLine] + state.tShift[startLine],
|
||||
max = state.eMarks[startLine];
|
||||
|
||||
// if it's indented more than 3 spaces, it should be a code block
|
||||
if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }
|
||||
|
||||
if (!state.md.options.html) { return false; }
|
||||
|
||||
if (state.src.charCodeAt(pos) !== 0x3C/* < */) { return false; }
|
||||
|
||||
lineText = state.src.slice(pos, max);
|
||||
|
||||
for (i = 0; i < HTML_SEQUENCES.length; i++) {
|
||||
if (HTML_SEQUENCES[i][0].test(lineText)) { break; }
|
||||
}
|
||||
|
||||
if (i === HTML_SEQUENCES.length) { return false; }
|
||||
|
||||
if (silent) {
|
||||
// true if this sequence can be a terminator, false otherwise
|
||||
return HTML_SEQUENCES[i][2];
|
||||
}
|
||||
|
||||
nextLine = startLine + 1;
|
||||
|
||||
// If we are here - we detected HTML block.
|
||||
// Let's roll down till block end.
|
||||
if (!HTML_SEQUENCES[i][1].test(lineText)) {
|
||||
for (; nextLine < endLine; nextLine++) {
|
||||
if (state.sCount[nextLine] < state.blkIndent) { break; }
|
||||
|
||||
pos = state.bMarks[nextLine] + state.tShift[nextLine];
|
||||
max = state.eMarks[nextLine];
|
||||
lineText = state.src.slice(pos, max);
|
||||
|
||||
if (HTML_SEQUENCES[i][1].test(lineText)) {
|
||||
if (lineText.length !== 0) { nextLine++; }
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
state.line = nextLine;
|
||||
|
||||
token = state.push('html_block', '', 0);
|
||||
token.map = [ startLine, nextLine ];
|
||||
token.content = state.getLines(startLine, nextLine, state.blkIndent, true);
|
||||
|
||||
return true;
|
||||
};
|
||||
83
lib/markdown-it/lib/rules_block/lheading.js
Normal file
83
lib/markdown-it/lib/rules_block/lheading.js
Normal file
@@ -0,0 +1,83 @@
|
||||
// lheading (---, ===)

'use strict';


// Try to parse a setext heading: one or more paragraph-like lines followed
// by an underline made of '=' (level 1) or '-' (level 2).
//
// Pushes heading_open / inline / heading_close tokens and returns true on
// success; returns false when no valid underline is found.
module.exports = function lheading(state, startLine, endLine/*, silent*/) {
  var content, terminate, i, l, token, pos, max, level, marker,
      nextLine = startLine + 1, oldParentType,
      terminatorRules = state.md.block.ruler.getRules('paragraph');

  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }

  oldParentType = state.parentType;
  state.parentType = 'paragraph'; // use paragraph to match terminatorRules

  // jump line-by-line until empty one or EOF
  for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {
    // this would be a code block normally, but after paragraph
    // it's considered a lazy continuation regardless of what's there
    if (state.sCount[nextLine] - state.blkIndent > 3) { continue; }

    //
    // Check for underline in setext header
    //
    if (state.sCount[nextLine] >= state.blkIndent) {
      pos = state.bMarks[nextLine] + state.tShift[nextLine];
      max = state.eMarks[nextLine];

      if (pos < max) {
        marker = state.src.charCodeAt(pos);

        if (marker === 0x2D/* - */ || marker === 0x3D/* = */) {
          pos = state.skipChars(pos, marker);
          pos = state.skipSpaces(pos);

          // The whole line is marker chars plus optional trailing spaces:
          // this is the underline. '=' gives <h1>, '-' gives <h2>.
          if (pos >= max) {
            level = (marker === 0x3D/* = */ ? 1 : 2);
            break;
          }
        }
      }
    }

    // quirk for blockquotes, this line should already be checked by that rule
    if (state.sCount[nextLine] < 0) { continue; }

    // Some tags can terminate paragraph without empty line.
    terminate = false;
    for (i = 0, l = terminatorRules.length; i < l; i++) {
      if (terminatorRules[i](state, nextLine, endLine, true)) {
        terminate = true;
        break;
      }
    }
    if (terminate) { break; }
  }

  // `level` is only assigned when an underline was found above.
  if (!level) {
    // Didn't find valid underline
    return false;
  }

  content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();

  // Consume the underline line as well.
  state.line = nextLine + 1;

  token          = state.push('heading_open', 'h' + String(level), 1);
  token.markup   = String.fromCharCode(marker);
  token.map      = [ startLine, state.line ];

  token          = state.push('inline', '', 0);
  token.content  = content;
  token.map      = [ startLine, state.line - 1 ];
  token.children = [];

  token          = state.push('heading_close', 'h' + String(level), -1);
  token.markup   = String.fromCharCode(marker);

  state.parentType = oldParentType;

  return true;
};
|
||||
360
lib/markdown-it/lib/rules_block/list.js
Normal file
360
lib/markdown-it/lib/rules_block/list.js
Normal file
@@ -0,0 +1,360 @@
|
||||
// Lists
|
||||
|
||||
'use strict';
|
||||
|
||||
var isSpace = require('../common/utils').isSpace;
|
||||
|
||||
|
||||
// Search `[-+*][\n ]`, returns next pos after marker on success
// or -1 on fail.
function skipBulletListMarker(state, startLine) {
  var marker,
      pos = state.bMarks[startLine] + state.tShift[startLine],
      max = state.eMarks[startLine];

  marker = state.src.charCodeAt(pos++);

  // A bullet is exactly one of '*', '-' or '+'.
  if (marker !== 0x2A/* * */ &&
      marker !== 0x2D/* - */ &&
      marker !== 0x2B/* + */) {
    return -1;
  }

  // The marker must be followed by whitespace (or end of line);
  // " -test " is not a list item.
  if (pos < max && !isSpace(state.src.charCodeAt(pos))) {
    return -1;
  }

  return pos;
}
|
||||
|
||||
// Search `\d+[.)][\n ]`, returns next pos after marker on success
// or -1 on fail.
function skipOrderedListMarker(state, startLine) {
  var ch,
      start = state.bMarks[startLine] + state.tShift[startLine],
      pos = start,
      max = state.eMarks[startLine];

  // A marker needs at least one digit plus a delimiter.
  if (pos + 1 >= max) { return -1; }

  ch = state.src.charCodeAt(pos++);

  if (ch < 0x30/* 0 */ || ch > 0x39/* 9 */) { return -1; }

  while (pos < max) {
    ch = state.src.charCodeAt(pos++);

    if (ch >= 0x30/* 0 */ && ch <= 0x39/* 9 */) {
      // Cap the number at 9 digits to prevent integer overflow in browsers.
      if (pos - start >= 10) { return -1; }
      continue;
    }

    if (ch === 0x29/* ) */ || ch === 0x2e/* . */) {
      // Delimiter found; it must be followed by whitespace (or end of line):
      // " 1.test " is not a list item.
      if (pos < max && !isSpace(state.src.charCodeAt(pos))) {
        return -1;
      }
      return pos;
    }

    // Any other character invalidates the marker.
    return -1;
  }

  // End of line reached before the delimiter.
  return -1;
}
|
||||
|
||||
// Hide the paragraph_open/paragraph_close token pairs emitted directly
// inside the items of a tight list, so the renderer skips the <p> wrappers.
function markTightParagraphs(state, idx) {
  var pos,
      level = state.level + 2,
      last = state.tokens.length - 2;

  for (pos = idx + 2; pos < last; pos++) {
    if (state.tokens[pos].level === level && state.tokens[pos].type === 'paragraph_open') {
      // paragraph_close sits two tokens after paragraph_open
      // (open, inline, close).
      state.tokens[pos + 2].hidden = true;
      state.tokens[pos].hidden = true;
      pos += 2;
    }
  }
}
|
||||
|
||||
|
||||
// Try to parse an ordered or bullet list starting at `startLine`.
//
// Emits (ordered|bullet)_list_open, per-item list_item_open/close pairs
// (item contents tokenized via a recursive state.md.block.tokenize call),
// and the matching list_close token. Returns false when the line is not a
// valid list item. Loose/tight detection is done while iterating items;
// tight lists get their inner paragraph tokens hidden at the end.
module.exports = function list(state, startLine, endLine, silent) {
  var ch,
      contentStart,
      i,
      indent,
      indentAfterMarker,
      initial,
      isOrdered,
      itemLines,
      l,
      listLines,
      listTokIdx,
      markerCharCode,
      markerValue,
      max,
      nextLine,
      offset,
      oldListIndent,
      oldParentType,
      oldSCount,
      oldTShift,
      oldTight,
      pos,
      posAfterMarker,
      prevEmptyEnd,
      start,
      terminate,
      terminatorRules,
      token,
      isTerminatingParagraph = false,
      tight = true;

  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }

  // Special case:
  //  - item 1
  //   - item 2
  //    - item 3
  //     - item 4
  //      - this one is a paragraph continuation
  if (state.listIndent >= 0 &&
      state.sCount[startLine] - state.listIndent >= 4 &&
      state.sCount[startLine] < state.blkIndent) {
    return false;
  }

  // limit conditions when list can interrupt
  // a paragraph (validation mode only)
  if (silent && state.parentType === 'paragraph') {
    // Next list item should still terminate previous list item;
    //
    // This code can fail if plugins use blkIndent as well as lists,
    // but I hope the spec gets fixed long before that happens.
    //
    if (state.tShift[startLine] >= state.blkIndent) {
      isTerminatingParagraph = true;
    }
  }

  // Detect list type and position after marker
  if ((posAfterMarker = skipOrderedListMarker(state, startLine)) >= 0) {
    isOrdered = true;
    start = state.bMarks[startLine] + state.tShift[startLine];
    markerValue = Number(state.src.substr(start, posAfterMarker - start - 1));

    // If we're starting a new ordered list right after
    // a paragraph, it should start with 1.
    if (isTerminatingParagraph && markerValue !== 1) return false;

  } else if ((posAfterMarker = skipBulletListMarker(state, startLine)) >= 0) {
    isOrdered = false;

  } else {
    return false;
  }

  // If we're starting a new unordered list right after
  // a paragraph, first line should not be empty.
  if (isTerminatingParagraph) {
    if (state.skipSpaces(posAfterMarker) >= state.eMarks[startLine]) return false;
  }

  // We should terminate list on style change. Remember first one to compare.
  markerCharCode = state.src.charCodeAt(posAfterMarker - 1);

  // For validation mode we can terminate immediately
  if (silent) { return true; }

  // Start list
  listTokIdx = state.tokens.length;

  if (isOrdered) {
    token       = state.push('ordered_list_open', 'ol', 1);
    // Only emit a start attribute when the list doesn't begin at 1.
    if (markerValue !== 1) {
      token.attrs = [ [ 'start', markerValue ] ];
    }

  } else {
    token       = state.push('bullet_list_open', 'ul', 1);
  }

  token.map    = listLines = [ startLine, 0 ];
  token.markup = String.fromCharCode(markerCharCode);

  //
  // Iterate list items
  //

  nextLine = startLine;
  prevEmptyEnd = false;
  terminatorRules = state.md.block.ruler.getRules('list');

  oldParentType = state.parentType;
  state.parentType = 'list';

  while (nextLine < endLine) {
    pos = posAfterMarker;
    max = state.eMarks[nextLine];

    // Measure whitespace after the marker, expanding tabs to a multiple
    // of 4 columns relative to bsCount.
    initial = offset = state.sCount[nextLine] + posAfterMarker - (state.bMarks[startLine] + state.tShift[startLine]);

    while (pos < max) {
      ch = state.src.charCodeAt(pos);

      if (ch === 0x09) {
        offset += 4 - (offset + state.bsCount[nextLine]) % 4;
      } else if (ch === 0x20) {
        offset++;
      } else {
        break;
      }

      pos++;
    }

    contentStart = pos;

    if (contentStart >= max) {
      // trimming space in "-    \n  3" case, indent is 1 here
      indentAfterMarker = 1;
    } else {
      indentAfterMarker = offset - initial;
    }

    // If we have more than 4 spaces, the indent is 1
    // (the rest is just indented code block)
    if (indentAfterMarker > 4) { indentAfterMarker = 1; }

    // "  -  test"
    //  ^^^^^ - calculating total length of this thing
    indent = initial + indentAfterMarker;

    // Run subparser & write tokens
    token        = state.push('list_item_open', 'li', 1);
    token.markup = String.fromCharCode(markerCharCode);
    token.map    = itemLines = [ startLine, 0 ];

    // change current state, then restore it after parser subcall
    oldTight = state.tight;
    oldTShift = state.tShift[startLine];
    oldSCount = state.sCount[startLine];

    //  - example list
    // ^ listIndent position will be here
    //   ^ blkIndent position will be here
    //
    oldListIndent = state.listIndent;
    state.listIndent = state.blkIndent;
    state.blkIndent = indent;

    state.tight = true;
    state.tShift[startLine] = contentStart - state.bMarks[startLine];
    state.sCount[startLine] = offset;

    if (contentStart >= max && state.isEmpty(startLine + 1)) {
      // workaround for this case
      // (list item is empty, list terminates before "foo"):
      // ~~~~~~~~
      //   -
      //
      //     foo
      // ~~~~~~~~
      state.line = Math.min(state.line + 2, endLine);
    } else {
      state.md.block.tokenize(state, startLine, endLine, true);
    }

    // If any of list item is tight, mark list as tight
    if (!state.tight || prevEmptyEnd) {
      tight = false;
    }
    // Item become loose if finish with empty line,
    // but we should filter last element, because it means list finish
    prevEmptyEnd = (state.line - startLine) > 1 && state.isEmpty(state.line - 1);

    // restore the saved parser state
    state.blkIndent = state.listIndent;
    state.listIndent = oldListIndent;
    state.tShift[startLine] = oldTShift;
    state.sCount[startLine] = oldSCount;
    state.tight = oldTight;

    token        = state.push('list_item_close', 'li', -1);
    token.markup = String.fromCharCode(markerCharCode);

    nextLine = startLine = state.line;
    itemLines[1] = nextLine;
    contentStart = state.bMarks[startLine];

    if (nextLine >= endLine) { break; }

    //
    // Try to check if list is terminated or continued.
    //
    if (state.sCount[nextLine] < state.blkIndent) { break; }

    // if it's indented more than 3 spaces, it should be a code block
    if (state.sCount[startLine] - state.blkIndent >= 4) { break; }

    // fail if terminating block found
    terminate = false;
    for (i = 0, l = terminatorRules.length; i < l; i++) {
      if (terminatorRules[i](state, nextLine, endLine, true)) {
        terminate = true;
        break;
      }
    }
    if (terminate) { break; }

    // fail if list has another type
    if (isOrdered) {
      posAfterMarker = skipOrderedListMarker(state, nextLine);
      if (posAfterMarker < 0) { break; }
    } else {
      posAfterMarker = skipBulletListMarker(state, nextLine);
      if (posAfterMarker < 0) { break; }
    }

    if (markerCharCode !== state.src.charCodeAt(posAfterMarker - 1)) { break; }
  }

  // Finalize list
  if (isOrdered) {
    token = state.push('ordered_list_close', 'ol', -1);
  } else {
    token = state.push('bullet_list_close', 'ul', -1);
  }
  token.markup = String.fromCharCode(markerCharCode);

  listLines[1] = nextLine;
  state.line = nextLine;

  state.parentType = oldParentType;

  // mark paragraphs tight if needed
  if (tight) {
    markTightParagraphs(state, listTokIdx);
  }

  return true;
};
|
||||
52
lib/markdown-it/lib/rules_block/paragraph.js
Normal file
52
lib/markdown-it/lib/rules_block/paragraph.js
Normal file
@@ -0,0 +1,52 @@
|
||||
// Paragraph
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
module.exports = function paragraph(state, startLine/*, endLine*/) {
|
||||
var content, terminate, i, l, token, oldParentType,
|
||||
nextLine = startLine + 1,
|
||||
terminatorRules = state.md.block.ruler.getRules('paragraph'),
|
||||
endLine = state.lineMax;
|
||||
|
||||
oldParentType = state.parentType;
|
||||
state.parentType = 'paragraph';
|
||||
|
||||
// jump line-by-line until empty one or EOF
|
||||
for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {
|
||||
// this would be a code block normally, but after paragraph
|
||||
// it's considered a lazy continuation regardless of what's there
|
||||
if (state.sCount[nextLine] - state.blkIndent > 3) { continue; }
|
||||
|
||||
// quirk for blockquotes, this line should already be checked by that rule
|
||||
if (state.sCount[nextLine] < 0) { continue; }
|
||||
|
||||
// Some tags can terminate paragraph without empty line.
|
||||
terminate = false;
|
||||
for (i = 0, l = terminatorRules.length; i < l; i++) {
|
||||
if (terminatorRules[i](state, nextLine, endLine, true)) {
|
||||
terminate = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (terminate) { break; }
|
||||
}
|
||||
|
||||
content = state.getLines(startLine, nextLine, state.blkIndent, false).trim();
|
||||
|
||||
state.line = nextLine;
|
||||
|
||||
token = state.push('paragraph_open', 'p', 1);
|
||||
token.map = [ startLine, state.line ];
|
||||
|
||||
token = state.push('inline', '', 0);
|
||||
token.content = content;
|
||||
token.map = [ startLine, state.line ];
|
||||
token.children = [];
|
||||
|
||||
token = state.push('paragraph_close', 'p', -1);
|
||||
|
||||
state.parentType = oldParentType;
|
||||
|
||||
return true;
|
||||
};
|
||||
198
lib/markdown-it/lib/rules_block/reference.js
Normal file
198
lib/markdown-it/lib/rules_block/reference.js
Normal file
@@ -0,0 +1,198 @@
|
||||
'use strict';
|
||||
|
||||
|
||||
var normalizeReference = require('../common/utils').normalizeReference;
|
||||
var isSpace = require('../common/utils').isSpace;
|
||||
|
||||
|
||||
module.exports = function reference(state, startLine, _endLine, silent) {
|
||||
var ch,
|
||||
destEndPos,
|
||||
destEndLineNo,
|
||||
endLine,
|
||||
href,
|
||||
i,
|
||||
l,
|
||||
label,
|
||||
labelEnd,
|
||||
oldParentType,
|
||||
res,
|
||||
start,
|
||||
str,
|
||||
terminate,
|
||||
terminatorRules,
|
||||
title,
|
||||
lines = 0,
|
||||
pos = state.bMarks[startLine] + state.tShift[startLine],
|
||||
max = state.eMarks[startLine],
|
||||
nextLine = startLine + 1;
|
||||
|
||||
// if it's indented more than 3 spaces, it should be a code block
|
||||
if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }
|
||||
|
||||
if (state.src.charCodeAt(pos) !== 0x5B/* [ */) { return false; }
|
||||
|
||||
// Simple check to quickly interrupt scan on [link](url) at the start of line.
|
||||
// Can be useful on practice: https://github.com/markdown-it/markdown-it/issues/54
|
||||
while (++pos < max) {
|
||||
if (state.src.charCodeAt(pos) === 0x5D /* ] */ &&
|
||||
state.src.charCodeAt(pos - 1) !== 0x5C/* \ */) {
|
||||
if (pos + 1 === max) { return false; }
|
||||
if (state.src.charCodeAt(pos + 1) !== 0x3A/* : */) { return false; }
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
endLine = state.lineMax;
|
||||
|
||||
// jump line-by-line until empty one or EOF
|
||||
terminatorRules = state.md.block.ruler.getRules('reference');
|
||||
|
||||
oldParentType = state.parentType;
|
||||
state.parentType = 'reference';
|
||||
|
||||
for (; nextLine < endLine && !state.isEmpty(nextLine); nextLine++) {
|
||||
// this would be a code block normally, but after paragraph
|
||||
// it's considered a lazy continuation regardless of what's there
|
||||
if (state.sCount[nextLine] - state.blkIndent > 3) { continue; }
|
||||
|
||||
// quirk for blockquotes, this line should already be checked by that rule
|
||||
if (state.sCount[nextLine] < 0) { continue; }
|
||||
|
||||
// Some tags can terminate paragraph without empty line.
|
||||
terminate = false;
|
||||
for (i = 0, l = terminatorRules.length; i < l; i++) {
|
||||
if (terminatorRules[i](state, nextLine, endLine, true)) {
|
||||
terminate = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (terminate) { break; }
|
||||
}
|
||||
|
||||
str = state.getLines(startLine, nextLine, state.blkIndent, false).trim();
|
||||
max = str.length;
|
||||
|
||||
for (pos = 1; pos < max; pos++) {
|
||||
ch = str.charCodeAt(pos);
|
||||
if (ch === 0x5B /* [ */) {
|
||||
return false;
|
||||
} else if (ch === 0x5D /* ] */) {
|
||||
labelEnd = pos;
|
||||
break;
|
||||
} else if (ch === 0x0A /* \n */) {
|
||||
lines++;
|
||||
} else if (ch === 0x5C /* \ */) {
|
||||
pos++;
|
||||
if (pos < max && str.charCodeAt(pos) === 0x0A) {
|
||||
lines++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (labelEnd < 0 || str.charCodeAt(labelEnd + 1) !== 0x3A/* : */) { return false; }
|
||||
|
||||
// [label]: destination 'title'
|
||||
// ^^^ skip optional whitespace here
|
||||
for (pos = labelEnd + 2; pos < max; pos++) {
|
||||
ch = str.charCodeAt(pos);
|
||||
if (ch === 0x0A) {
|
||||
lines++;
|
||||
} else if (isSpace(ch)) {
|
||||
/*eslint no-empty:0*/
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// [label]: destination 'title'
|
||||
// ^^^^^^^^^^^ parse this
|
||||
res = state.md.helpers.parseLinkDestination(str, pos, max);
|
||||
if (!res.ok) { return false; }
|
||||
|
||||
href = state.md.normalizeLink(res.str);
|
||||
if (!state.md.validateLink(href)) { return false; }
|
||||
|
||||
pos = res.pos;
|
||||
lines += res.lines;
|
||||
|
||||
// save cursor state, we could require to rollback later
|
||||
destEndPos = pos;
|
||||
destEndLineNo = lines;
|
||||
|
||||
// [label]: destination 'title'
|
||||
// ^^^ skipping those spaces
|
||||
start = pos;
|
||||
for (; pos < max; pos++) {
|
||||
ch = str.charCodeAt(pos);
|
||||
if (ch === 0x0A) {
|
||||
lines++;
|
||||
} else if (isSpace(ch)) {
|
||||
/*eslint no-empty:0*/
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// [label]: destination 'title'
|
||||
// ^^^^^^^ parse this
|
||||
res = state.md.helpers.parseLinkTitle(str, pos, max);
|
||||
if (pos < max && start !== pos && res.ok) {
|
||||
title = res.str;
|
||||
pos = res.pos;
|
||||
lines += res.lines;
|
||||
} else {
|
||||
title = '';
|
||||
pos = destEndPos;
|
||||
lines = destEndLineNo;
|
||||
}
|
||||
|
||||
// skip trailing spaces until the rest of the line
|
||||
while (pos < max) {
|
||||
ch = str.charCodeAt(pos);
|
||||
if (!isSpace(ch)) { break; }
|
||||
pos++;
|
||||
}
|
||||
|
||||
if (pos < max && str.charCodeAt(pos) !== 0x0A) {
|
||||
if (title) {
|
||||
// garbage at the end of the line after title,
|
||||
// but it could still be a valid reference if we roll back
|
||||
title = '';
|
||||
pos = destEndPos;
|
||||
lines = destEndLineNo;
|
||||
while (pos < max) {
|
||||
ch = str.charCodeAt(pos);
|
||||
if (!isSpace(ch)) { break; }
|
||||
pos++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (pos < max && str.charCodeAt(pos) !== 0x0A) {
|
||||
// garbage at the end of the line
|
||||
return false;
|
||||
}
|
||||
|
||||
label = normalizeReference(str.slice(1, labelEnd));
|
||||
if (!label) {
|
||||
// CommonMark 0.20 disallows empty labels
|
||||
return false;
|
||||
}
|
||||
|
||||
// Reference can not terminate anything. This check is for safety only.
|
||||
/*istanbul ignore if*/
|
||||
if (silent) { return true; }
|
||||
|
||||
if (typeof state.env.references === 'undefined') {
|
||||
state.env.references = {};
|
||||
}
|
||||
if (typeof state.env.references[label] === 'undefined') {
|
||||
state.env.references[label] = { title: title, href: href };
|
||||
}
|
||||
|
||||
state.parentType = oldParentType;
|
||||
|
||||
state.line = startLine + lines + 1;
|
||||
return true;
|
||||
};
|
||||
231
lib/markdown-it/lib/rules_block/state_block.js
Normal file
231
lib/markdown-it/lib/rules_block/state_block.js
Normal file
@@ -0,0 +1,231 @@
|
||||
// Parser state class
|
||||
|
||||
'use strict';
|
||||
|
||||
var Token = require('../token');
|
||||
var isSpace = require('../common/utils').isSpace;
|
||||
|
||||
|
||||
// Block-level parser state.
//
// Scans `src` once up front to build per-line offset caches (begin/end
// offsets, first-non-space shifts, tab-expanded indent widths) so block
// rules can address the source by line number in O(1).
function StateBlock(src, md, env, tokens) {
  var ch, s, start, pos, len, indent, offset, indent_found;

  this.src = src;

  // link to parser instance
  this.md = md;

  this.env = env;

  //
  // Internal state variables
  //

  this.tokens = tokens;

  this.bMarks = []; // line begin offsets for fast jumps
  this.eMarks = []; // line end offsets for fast jumps
  this.tShift = []; // offsets of the first non-space characters (tabs not expanded)
  this.sCount = []; // indents for each line (tabs expanded)

  // An amount of virtual spaces (tabs expanded) between beginning
  // of each line (bMarks) and real beginning of that line.
  //
  // It exists only as a hack because blockquotes override bMarks
  // losing information in the process.
  //
  // It's used only when expanding tabs, you can think about it as
  // an initial tab length, e.g. bsCount=21 applied to string `\t123`
  // means first tab should be expanded to 4-21%4 === 3 spaces.
  //
  this.bsCount = [];

  // block parser variables
  this.blkIndent = 0; // required block content indent (for example, if we are
                      // inside a list, it would be positioned after list marker)
  this.line = 0; // line index in src
  this.lineMax = 0; // lines count
  this.tight = false; // loose/tight mode for lists
  this.ddIndent = -1; // indent of the current dd block (-1 if there isn't any)
  this.listIndent = -1; // indent of the current list block (-1 if there isn't any)

  // can be 'blockquote', 'list', 'root', 'paragraph' or 'reference'
  // used in lists to determine if they interrupt a paragraph
  this.parentType = 'root';

  this.level = 0;

  // renderer
  this.result = '';

  // Create caches
  // Generate markers.
  s = this.src;
  indent_found = false;

  for (start = pos = indent = offset = 0, len = s.length; pos < len; pos++) {
    ch = s.charCodeAt(pos);

    if (!indent_found) {
      if (isSpace(ch)) {
        indent++;

        // a tab advances the expanded column to the next multiple of 4
        if (ch === 0x09) {
          offset += 4 - offset % 4;
        } else {
          offset++;
        }
        continue;
      } else {
        indent_found = true;
      }
    }

    if (ch === 0x0A || pos === len - 1) {
      // include the final character of an unterminated last line
      if (ch !== 0x0A) { pos++; }
      this.bMarks.push(start);
      this.eMarks.push(pos);
      this.tShift.push(indent);
      this.sCount.push(offset);
      this.bsCount.push(0);

      indent_found = false;
      indent = 0;
      offset = 0;
      start = pos + 1;
    }
  }

  // Push fake entry to simplify cache bounds checks
  this.bMarks.push(s.length);
  this.eMarks.push(s.length);
  this.tShift.push(0);
  this.sCount.push(0);
  this.bsCount.push(0);

  this.lineMax = this.bMarks.length - 1; // don't count last fake line
}

// Push new token to "stream".
// Returns the created token so callers can set content/map/attrs on it.
StateBlock.prototype.push = function (type, tag, nesting) {
  var token = new Token(type, tag, nesting);
  token.block = true;

  if (nesting < 0) this.level--; // closing tag
  token.level = this.level;
  if (nesting > 0) this.level++; // opening tag

  this.tokens.push(token);
  return token;
};

// True if the line contains nothing but whitespace.
StateBlock.prototype.isEmpty = function isEmpty(line) {
  return this.bMarks[line] + this.tShift[line] >= this.eMarks[line];
};

// Return the index of the first non-empty line at or after `from`.
StateBlock.prototype.skipEmptyLines = function skipEmptyLines(from) {
  for (var max = this.lineMax; from < max; from++) {
    if (this.bMarks[from] + this.tShift[from] < this.eMarks[from]) {
      break;
    }
  }
  return from;
};

// Skip spaces from given position; returns the first non-space offset.
StateBlock.prototype.skipSpaces = function skipSpaces(pos) {
  var ch;

  for (var max = this.src.length; pos < max; pos++) {
    ch = this.src.charCodeAt(pos);
    if (!isSpace(ch)) { break; }
  }
  return pos;
};

// Skip spaces from given position in reverse, not going below `min`.
StateBlock.prototype.skipSpacesBack = function skipSpacesBack(pos, min) {
  if (pos <= min) { return pos; }

  while (pos > min) {
    if (!isSpace(this.src.charCodeAt(--pos))) { return pos + 1; }
  }
  return pos;
};

// Skip char codes from given position
StateBlock.prototype.skipChars = function skipChars(pos, code) {
  for (var max = this.src.length; pos < max; pos++) {
    if (this.src.charCodeAt(pos) !== code) { break; }
  }
  return pos;
};

// Skip char codes reverse from given position - 1, not going below `min`.
StateBlock.prototype.skipCharsBack = function skipCharsBack(pos, code, min) {
  if (pos <= min) { return pos; }

  while (pos > min) {
    if (code !== this.src.charCodeAt(--pos)) { return pos + 1; }
  }
  return pos;
};

// cut lines range from source.
//
// begin/end  - line range (end exclusive)
// indent     - number of leading (tab-expanded) columns to strip per line
// keepLastLF - when true, keep the trailing newline of the last line
StateBlock.prototype.getLines = function getLines(begin, end, indent, keepLastLF) {
  var i, lineIndent, ch, first, last, queue, lineStart,
      line = begin;

  if (begin >= end) {
    return '';
  }

  queue = new Array(end - begin);

  for (i = 0; line < end; line++, i++) {
    lineIndent = 0;
    lineStart = first = this.bMarks[line];

    if (line + 1 < end || keepLastLF) {
      // No need for bounds check because we have fake entry on tail.
      last = this.eMarks[line] + 1;
    } else {
      last = this.eMarks[line];
    }

    // advance `first` past up to `indent` expanded columns of leading space
    while (first < last && lineIndent < indent) {
      ch = this.src.charCodeAt(first);

      if (isSpace(ch)) {
        if (ch === 0x09) {
          lineIndent += 4 - (lineIndent + this.bsCount[line]) % 4;
        } else {
          lineIndent++;
        }
      } else if (first - lineStart < this.tShift[line]) {
        // patched tShift masked characters to look like spaces (blockquotes, list markers)
        lineIndent++;
      } else {
        break;
      }

      first++;
    }

    if (lineIndent > indent) {
      // partially expanding tabs in code blocks, e.g '\t\tfoobar'
      // with indent=2 becomes '  \tfoobar'
      queue[i] = new Array(lineIndent - indent + 1).join(' ') + this.src.slice(first, last);
    } else {
      queue[i] = this.src.slice(first, last);
    }
  }

  return queue.join('');
};

// re-export Token class to use in block rules
StateBlock.prototype.Token = Token;


module.exports = StateBlock;
|
||||
196
lib/markdown-it/lib/rules_block/table.js
Normal file
196
lib/markdown-it/lib/rules_block/table.js
Normal file
@@ -0,0 +1,196 @@
|
||||
// GFM table, non-standard
|
||||
|
||||
'use strict';
|
||||
|
||||
var isSpace = require('../common/utils').isSpace;
|
||||
|
||||
|
||||
// Return the text of the given line with the required block indent
// (`state.blkIndent` columns) stripped from the front.
function getLine(state, line) {
  var begin = state.bMarks[line] + state.blkIndent;
  var end = state.eMarks[line];

  return state.src.slice(begin, end);
}
|
||||
|
||||
// Split a table row on `|` delimiters, ignoring pipes that are escaped with
// a backslash or that sit inside an inline code span (`...`). An unclosed
// backtick is re-scanned as a plain character.
function escapedSplit(str) {
  var cells = [];
  var idx = 0;
  var len = str.length;
  var code;
  var backslashes = 0;   // length of the current run of trailing `\`
  var cellStart = 0;     // offset where the cell being collected begins
  var inCode = false;    // currently inside a backtick code span?
  var lastTickAt = 0;    // position of the most recent backtick seen

  code = str.charCodeAt(idx);

  while (idx < len) {
    if (code === 0x60/* ` */) {
      if (inCode) {
        // make \` close code sequence, but not open it;
        // the reason is: `\` is correct code block
        inCode = false;
        lastTickAt = idx;
      } else if (backslashes % 2 === 0) {
        inCode = true;
        lastTickAt = idx;
      }
    } else if (code === 0x7c/* | */ && backslashes % 2 === 0 && !inCode) {
      cells.push(str.substring(cellStart, idx));
      cellStart = idx + 1;
    }

    backslashes = (code === 0x5c/* \ */) ? backslashes + 1 : 0;

    idx++;

    // If there was an un-closed backtick, go back to just after
    // the last backtick, but as if it was a normal character
    if (idx === len && inCode) {
      inCode = false;
      idx = lastTickAt + 1;
    }

    code = str.charCodeAt(idx);
  }

  cells.push(str.substring(cellStart));

  return cells;
}
|
||||
|
||||
|
||||
// GFM table rule. Recognizes a header row, a delimiter row made of
// `---` / `:---:` cells, and any number of body rows, and emits the
// table/thead/tbody/tr/th/td token structure.
module.exports = function table(state, startLine, endLine, silent) {
  var ch, lineText, pos, i, nextLine, columns, columnCount, token,
      aligns, t, tableLines, tbodyLines;

  // should have at least two lines
  if (startLine + 2 > endLine) { return false; }

  nextLine = startLine + 1;

  if (state.sCount[nextLine] < state.blkIndent) { return false; }

  // if it's indented more than 3 spaces, it should be a code block
  if (state.sCount[nextLine] - state.blkIndent >= 4) { return false; }

  // first character of the second line should be '|', '-', ':',
  // and no other characters are allowed but spaces;
  // basically, this is the equivalent of /^[-:|][-:|\s]*$/ regexp

  pos = state.bMarks[nextLine] + state.tShift[nextLine];
  if (pos >= state.eMarks[nextLine]) { return false; }

  ch = state.src.charCodeAt(pos++);
  if (ch !== 0x7C/* | */ && ch !== 0x2D/* - */ && ch !== 0x3A/* : */) { return false; }

  while (pos < state.eMarks[nextLine]) {
    ch = state.src.charCodeAt(pos);

    if (ch !== 0x7C/* | */ && ch !== 0x2D/* - */ && ch !== 0x3A/* : */ && !isSpace(ch)) { return false; }

    pos++;
  }

  lineText = getLine(state, startLine + 1);

  // parse the delimiter row into one alignment string per column
  columns = lineText.split('|');
  aligns = [];
  for (i = 0; i < columns.length; i++) {
    t = columns[i].trim();
    if (!t) {
      // allow empty columns before and after table, but not in between columns;
      // e.g. allow ` |---| `, disallow ` ---||--- `
      if (i === 0 || i === columns.length - 1) {
        continue;
      } else {
        return false;
      }
    }

    if (!/^:?-+:?$/.test(t)) { return false; }
    // trailing ':' = right-aligned; both ends = center; leading only = left
    if (t.charCodeAt(t.length - 1) === 0x3A/* : */) {
      aligns.push(t.charCodeAt(0) === 0x3A/* : */ ? 'center' : 'right');
    } else if (t.charCodeAt(0) === 0x3A/* : */) {
      aligns.push('left');
    } else {
      aligns.push('');
    }
  }

  lineText = getLine(state, startLine).trim();
  if (lineText.indexOf('|') === -1) { return false; }
  if (state.sCount[startLine] - state.blkIndent >= 4) { return false; }
  columns = escapedSplit(lineText.replace(/^\||\|$/g, ''));

  // header row will define an amount of columns in the entire table,
  // and align row shouldn't be smaller than that (the rest of the rows can)
  columnCount = columns.length;
  if (columnCount > aligns.length) { return false; }

  if (silent) { return true; }

  token = state.push('table_open', 'table', 1);
  // end line of the map is patched after the body scan below
  token.map = tableLines = [ startLine, 0 ];

  token = state.push('thead_open', 'thead', 1);
  token.map = [ startLine, startLine + 1 ];

  token = state.push('tr_open', 'tr', 1);
  token.map = [ startLine, startLine + 1 ];

  for (i = 0; i < columns.length; i++) {
    token = state.push('th_open', 'th', 1);
    token.map = [ startLine, startLine + 1 ];
    if (aligns[i]) {
      token.attrs = [ [ 'style', 'text-align:' + aligns[i] ] ];
    }

    token = state.push('inline', '', 0);
    token.content = columns[i].trim();
    token.map = [ startLine, startLine + 1 ];
    token.children = [];

    token = state.push('th_close', 'th', -1);
  }

  token = state.push('tr_close', 'tr', -1);
  token = state.push('thead_close', 'thead', -1);

  token = state.push('tbody_open', 'tbody', 1);
  // end line patched below as well
  token.map = tbodyLines = [ startLine + 2, 0 ];

  // body rows: every following line that still looks like a table row
  for (nextLine = startLine + 2; nextLine < endLine; nextLine++) {
    if (state.sCount[nextLine] < state.blkIndent) { break; }

    lineText = getLine(state, nextLine).trim();
    if (lineText.indexOf('|') === -1) { break; }
    if (state.sCount[nextLine] - state.blkIndent >= 4) { break; }
    columns = escapedSplit(lineText.replace(/^\||\|$/g, ''));

    token = state.push('tr_open', 'tr', 1);
    // always emit columnCount cells: extras are dropped, missing ones are empty
    for (i = 0; i < columnCount; i++) {
      token = state.push('td_open', 'td', 1);
      if (aligns[i]) {
        token.attrs = [ [ 'style', 'text-align:' + aligns[i] ] ];
      }

      token = state.push('inline', '', 0);
      token.content = columns[i] ? columns[i].trim() : '';
      token.children = [];

      token = state.push('td_close', 'td', -1);
    }
    token = state.push('tr_close', 'tr', -1);
  }
  token = state.push('tbody_close', 'tbody', -1);
  token = state.push('table_close', 'table', -1);

  tableLines[1] = tbodyLines[1] = nextLine;
  state.line = nextLine;
  return true;
};
|
||||
16
lib/markdown-it/lib/rules_core/block.js
Normal file
16
lib/markdown-it/lib/rules_core/block.js
Normal file
@@ -0,0 +1,16 @@
|
||||
'use strict';
|
||||
|
||||
|
||||
module.exports = function block(state) {
|
||||
var token;
|
||||
|
||||
if (state.inlineMode) {
|
||||
token = new state.Token('inline', '', 0);
|
||||
token.content = state.src;
|
||||
token.map = [ 0, 1 ];
|
||||
token.children = [];
|
||||
state.tokens.push(token);
|
||||
} else {
|
||||
state.md.block.parse(state.src, state.md, state.env, state.tokens);
|
||||
}
|
||||
};
|
||||
13
lib/markdown-it/lib/rules_core/inline.js
Normal file
13
lib/markdown-it/lib/rules_core/inline.js
Normal file
@@ -0,0 +1,13 @@
|
||||
'use strict';
|
||||
|
||||
module.exports = function inline(state) {
|
||||
var tokens = state.tokens, tok, i, l;
|
||||
|
||||
// Parse inlines
|
||||
for (i = 0, l = tokens.length; i < l; i++) {
|
||||
tok = tokens[i];
|
||||
if (tok.type === 'inline') {
|
||||
state.md.inline.parse(tok.content, state.md, state.env, tok.children);
|
||||
}
|
||||
}
|
||||
};
|
||||
133
lib/markdown-it/lib/rules_core/linkify.js
Normal file
133
lib/markdown-it/lib/rules_core/linkify.js
Normal file
@@ -0,0 +1,133 @@
|
||||
// Replace link-like texts with link nodes.
|
||||
//
|
||||
// Currently restricted by `md.validateLink()` to http/https/ftp
|
||||
//
|
||||
'use strict';
|
||||
|
||||
|
||||
var arrayReplaceAt = require('../common/utils').arrayReplaceAt;
|
||||
|
||||
|
||||
// True if `str` begins with an opening `<a ...>` HTML tag.
function isLinkOpen(str) {
  var OPEN_A_TAG_RE = /^<a[>\s]/i;
  return OPEN_A_TAG_RE.test(str);
}
|
||||
// True if `str` begins with a closing `</a>` HTML tag.
function isLinkClose(str) {
  var CLOSE_A_TAG_RE = /^<\/a\s*>/i;
  return CLOSE_A_TAG_RE.test(str);
}
|
||||
|
||||
|
||||
// Core rule: replace link-like plain text (URLs, e-mails, bare domains) with
// link_open/text/link_close token triples, using `md.linkify`. Text already
// inside markdown links or raw-HTML `<a>` tags is left untouched.
// No-op unless the `linkify` option is enabled.
module.exports = function linkify(state) {
  var i, j, l, tokens, token, currentToken, nodes, ln, text, pos, lastPos,
      level, htmlLinkLevel, url, fullUrl, urlText,
      blockTokens = state.tokens,
      links;

  if (!state.md.options.linkify) { return; }

  for (j = 0, l = blockTokens.length; j < l; j++) {
    // cheap pretest before walking the children
    if (blockTokens[j].type !== 'inline' ||
        !state.md.linkify.pretest(blockTokens[j].content)) {
      continue;
    }

    tokens = blockTokens[j].children;

    htmlLinkLevel = 0;

    // We scan from the end, to keep position when new tags added.
    // Use reversed logic in links start/end match
    for (i = tokens.length - 1; i >= 0; i--) {
      currentToken = tokens[i];

      // Skip content of markdown links
      if (currentToken.type === 'link_close') {
        i--;
        while (tokens[i].level !== currentToken.level && tokens[i].type !== 'link_open') {
          i--;
        }
        continue;
      }

      // Skip content of html tag links
      // (counters are inverted because we iterate backwards)
      if (currentToken.type === 'html_inline') {
        if (isLinkOpen(currentToken.content) && htmlLinkLevel > 0) {
          htmlLinkLevel--;
        }
        if (isLinkClose(currentToken.content)) {
          htmlLinkLevel++;
        }
      }
      if (htmlLinkLevel > 0) { continue; }

      if (currentToken.type === 'text' && state.md.linkify.test(currentToken.content)) {

        text = currentToken.content;
        links = state.md.linkify.match(text);

        // Now split string to nodes
        nodes = [];
        level = currentToken.level;
        lastPos = 0;

        for (ln = 0; ln < links.length; ln++) {

          url = links[ln].url;
          fullUrl = state.md.normalizeLink(url);
          if (!state.md.validateLink(fullUrl)) { continue; }

          urlText = links[ln].text;

          // Linkifier might send raw hostnames like "example.com", where url
          // starts with domain name. So we prepend http:// in those cases,
          // and remove it afterwards.
          //
          if (!links[ln].schema) {
            urlText = state.md.normalizeLinkText('http://' + urlText).replace(/^http:\/\//, '');
          } else if (links[ln].schema === 'mailto:' && !/^mailto:/i.test(urlText)) {
            urlText = state.md.normalizeLinkText('mailto:' + urlText).replace(/^mailto:/, '');
          } else {
            urlText = state.md.normalizeLinkText(urlText);
          }

          pos = links[ln].index;

          // plain text between the previous match and this one
          if (pos > lastPos) {
            token = new state.Token('text', '', 0);
            token.content = text.slice(lastPos, pos);
            token.level = level;
            nodes.push(token);
          }

          token = new state.Token('link_open', 'a', 1);
          token.attrs = [ [ 'href', fullUrl ] ];
          token.level = level++;
          token.markup = 'linkify';
          token.info = 'auto';
          nodes.push(token);

          token = new state.Token('text', '', 0);
          token.content = urlText;
          token.level = level;
          nodes.push(token);

          token = new state.Token('link_close', 'a', -1);
          token.level = --level;
          token.markup = 'linkify';
          token.info = 'auto';
          nodes.push(token);

          lastPos = links[ln].lastIndex;
        }
        // trailing plain text after the last match
        if (lastPos < text.length) {
          token = new state.Token('text', '', 0);
          token.content = text.slice(lastPos);
          token.level = level;
          nodes.push(token);
        }

        // replace current node
        blockTokens[j].children = tokens = arrayReplaceAt(tokens, i, nodes);
      }
    }
  }
};
|
||||
21
lib/markdown-it/lib/rules_core/normalize.js
Normal file
21
lib/markdown-it/lib/rules_core/normalize.js
Normal file
@@ -0,0 +1,21 @@
|
||||
// Normalize input string
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
// https://spec.commonmark.org/0.29/#line-ending
|
||||
var NEWLINES_RE = /\r\n?|\n/g;
|
||||
var NULL_RE = /\0/g;
|
||||
|
||||
|
||||
module.exports = function normalize(state) {
|
||||
var str;
|
||||
|
||||
// Normalize newlines
|
||||
str = state.src.replace(NEWLINES_RE, '\n');
|
||||
|
||||
// Replace NULL characters
|
||||
str = str.replace(NULL_RE, '\uFFFD');
|
||||
|
||||
state.src = str;
|
||||
};
|
||||
107
lib/markdown-it/lib/rules_core/replacements.js
Normal file
107
lib/markdown-it/lib/rules_core/replacements.js
Normal file
@@ -0,0 +1,107 @@
|
||||
// Simple typographic replacements
|
||||
//
|
||||
// (c) (C) → ©
|
||||
// (tm) (TM) → ™
|
||||
// (r) (R) → ®
|
||||
// +- → ±
|
||||
// (p) (P) -> §
|
||||
// ... → … (also ?.... → ?.., !.... → !..)
|
||||
// ???????? → ???, !!!!! → !!!, `,,` → `,`
|
||||
// -- → –, --- → —
|
||||
//
|
||||
'use strict';
|
||||
|
||||
// TODO:
|
||||
// - fractionals 1/2, 1/4, 3/4 -> ½, ¼, ¾
|
||||
// - miltiplication 2 x 4 -> 2 × 4
|
||||
|
||||
var RARE_RE = /\+-|\.\.|\?\?\?\?|!!!!|,,|--/;

// Workaround for phantomjs - need regex without /g flag,
// or root check will fail every second time
var SCOPED_ABBR_TEST_RE = /\((c|tm|r|p)\)/i;

var SCOPED_ABBR_RE = /\((c|tm|r|p)\)/ig;
var SCOPED_ABBR = {
  c: '©',
  r: '®',
  p: '§',
  tm: '™'
};

// Map a matched `(c)`/`(tm)`/… abbreviation to its symbol.
function replaceFn(match, name) {
  return SCOPED_ABBR[name.toLowerCase()];
}

// Walk `inlineTokens` backwards and apply `fn` to every text token that is
// NOT inside an autolink (autolink text must stay untouched). Counters are
// inverted because the scan runs from the end.
function eachReplaceableText(inlineTokens, fn) {
  var i, token, inside_autolink = 0;

  for (i = inlineTokens.length - 1; i >= 0; i--) {
    token = inlineTokens[i];

    if (token.type === 'text' && !inside_autolink) {
      fn(token);
    }

    if (token.type === 'link_open' && token.info === 'auto') {
      inside_autolink--;
    }

    if (token.type === 'link_close' && token.info === 'auto') {
      inside_autolink++;
    }
  }
}

// (c)/(tm)/(r)/(p) → ©/™/®/§ in all non-autolink text tokens.
function replace_scoped(inlineTokens) {
  eachReplaceableText(inlineTokens, function (token) {
    token.content = token.content.replace(SCOPED_ABBR_RE, replaceFn);
  });
}

// Punctuation replacements (±, …, dashes, collapsed ?/!/,) in all
// non-autolink text tokens.
function replace_rare(inlineTokens) {
  eachReplaceableText(inlineTokens, function (token) {
    if (RARE_RE.test(token.content)) {
      token.content = token.content
        .replace(/\+-/g, '±')
        // .., ..., ....... -> …
        // but ?..... & !..... -> ?.. & !..
        .replace(/\.{2,}/g, '…').replace(/([?!])…/g, '$1..')
        .replace(/([?!]){4,}/g, '$1$1$1').replace(/,{2,}/g, ',')
        // em-dash
        .replace(/(^|[^-])---([^-]|$)/mg, '$1\u2014$2')
        // en-dash
        .replace(/(^|\s)--(\s|$)/mg, '$1\u2013$2')
        .replace(/(^|[^-\s])--([^-\s]|$)/mg, '$1\u2013$2');
    }
  });
}
|
||||
|
||||
|
||||
module.exports = function replace(state) {
|
||||
var blkIdx;
|
||||
|
||||
if (!state.md.options.typographer) { return; }
|
||||
|
||||
for (blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {
|
||||
|
||||
if (state.tokens[blkIdx].type !== 'inline') { continue; }
|
||||
|
||||
if (SCOPED_ABBR_TEST_RE.test(state.tokens[blkIdx].content)) {
|
||||
replace_scoped(state.tokens[blkIdx].children);
|
||||
}
|
||||
|
||||
if (RARE_RE.test(state.tokens[blkIdx].content)) {
|
||||
replace_rare(state.tokens[blkIdx].children);
|
||||
}
|
||||
|
||||
}
|
||||
};
|
||||
195
lib/markdown-it/lib/rules_core/smartquotes.js
Normal file
195
lib/markdown-it/lib/rules_core/smartquotes.js
Normal file
@@ -0,0 +1,195 @@
|
||||
// Convert straight quotation marks to typographic ones
|
||||
//
|
||||
'use strict';
|
||||
|
||||
|
||||
var isWhiteSpace = require('../common/utils').isWhiteSpace;
|
||||
var isPunctChar = require('../common/utils').isPunctChar;
|
||||
var isMdAsciiPunct = require('../common/utils').isMdAsciiPunct;
|
||||
|
||||
var QUOTE_TEST_RE = /['"]/;
|
||||
var QUOTE_RE = /['"]/g;
|
||||
var APOSTROPHE = '\u2019'; /* ’ */
|
||||
|
||||
|
||||
// Return `str` with the single character at `index` replaced by `ch`
// (which may be longer than one character, e.g. a typographic quote).
function replaceAt(str, index, ch) {
  return str.slice(0, index) + ch + str.slice(index + 1);
}
|
||||
|
||||
// Replace straight quotes with typographic ones inside one inline token
// stream. Open-quote candidates are pushed on a stack; when a matching
// close candidate at the same nesting level is found, both characters are
// rewritten in place using `state.md.options.quotes`. Unpaired single
// quotes in word-middle positions become apostrophes.
function process_inlines(tokens, state) {
  var i, token, text, t, pos, max, thisLevel, item, lastChar, nextChar,
      isLastPunctChar, isNextPunctChar, isLastWhiteSpace, isNextWhiteSpace,
      canOpen, canClose, j, isSingle, stack, openQuote, closeQuote;

  stack = [];

  for (i = 0; i < tokens.length; i++) {
    token = tokens[i];

    thisLevel = tokens[i].level;

    // drop stack entries opened at deeper nesting levels —
    // they can no longer be closed from here
    for (j = stack.length - 1; j >= 0; j--) {
      if (stack[j].level <= thisLevel) { break; }
    }
    stack.length = j + 1;

    if (token.type !== 'text') { continue; }

    text = token.content;
    pos = 0;
    max = text.length;

    /*eslint no-labels:0,block-scoped-var:0*/
    OUTER:
    while (pos < max) {
      // QUOTE_RE is /g: seek from the current position
      QUOTE_RE.lastIndex = pos;
      t = QUOTE_RE.exec(text);
      if (!t) { break; }

      canOpen = canClose = true;
      pos = t.index + 1;
      isSingle = (t[0] === "'");

      // Find previous character,
      // default to space if it's the beginning of the line
      //
      lastChar = 0x20;

      if (t.index - 1 >= 0) {
        lastChar = text.charCodeAt(t.index - 1);
      } else {
        // quote is at the token start: look back through earlier tokens
        for (j = i - 1; j >= 0; j--) {
          if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // lastChar defaults to 0x20
          if (tokens[j].type !== 'text') continue;

          lastChar = tokens[j].content.charCodeAt(tokens[j].content.length - 1);
          break;
        }
      }

      // Find next character,
      // default to space if it's the end of the line
      //
      nextChar = 0x20;

      if (pos < max) {
        nextChar = text.charCodeAt(pos);
      } else {
        // quote is at the token end: look ahead through later tokens
        for (j = i + 1; j < tokens.length; j++) {
          if (tokens[j].type === 'softbreak' || tokens[j].type === 'hardbreak') break; // nextChar defaults to 0x20
          if (tokens[j].type !== 'text') continue;

          nextChar = tokens[j].content.charCodeAt(0);
          break;
        }
      }

      isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar));
      isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar));

      isLastWhiteSpace = isWhiteSpace(lastChar);
      isNextWhiteSpace = isWhiteSpace(nextChar);

      // flanking rules: decide whether the quote can open and/or close
      if (isNextWhiteSpace) {
        canOpen = false;
      } else if (isNextPunctChar) {
        if (!(isLastWhiteSpace || isLastPunctChar)) {
          canOpen = false;
        }
      }

      if (isLastWhiteSpace) {
        canClose = false;
      } else if (isLastPunctChar) {
        if (!(isNextWhiteSpace || isNextPunctChar)) {
          canClose = false;
        }
      }

      if (nextChar === 0x22 /* " */ && t[0] === '"') {
        if (lastChar >= 0x30 /* 0 */ && lastChar <= 0x39 /* 9 */) {
          // special case: 1"" - count first quote as an inch
          canClose = canOpen = false;
        }
      }

      if (canOpen && canClose) {
        // treat this as the middle of the word
        canOpen = false;
        canClose = isNextPunctChar;
      }

      if (!canOpen && !canClose) {
        // middle of word
        if (isSingle) {
          token.content = replaceAt(token.content, t.index, APOSTROPHE);
        }
        continue;
      }

      if (canClose) {
        // this could be a closing quote, rewind the stack to get a match
        for (j = stack.length - 1; j >= 0; j--) {
          item = stack[j];
          if (stack[j].level < thisLevel) { break; }
          if (item.single === isSingle && stack[j].level === thisLevel) {
            item = stack[j];

            if (isSingle) {
              openQuote = state.md.options.quotes[2];
              closeQuote = state.md.options.quotes[3];
            } else {
              openQuote = state.md.options.quotes[0];
              closeQuote = state.md.options.quotes[1];
            }

            // replace token.content *before* tokens[item.token].content,
            // because, if they are pointing at the same token, replaceAt
            // could mess up indices when quote length != 1
            token.content = replaceAt(token.content, t.index, closeQuote);
            tokens[item.token].content = replaceAt(
              tokens[item.token].content, item.pos, openQuote);

            // adjust scan position for replacement strings longer than 1 char
            pos += closeQuote.length - 1;
            if (item.token === i) { pos += openQuote.length - 1; }

            text = token.content;
            max = text.length;

            stack.length = j;
            continue OUTER;
          }
        }
      }

      if (canOpen) {
        stack.push({
          token: i,
          pos: t.index,
          single: isSingle,
          level: thisLevel
        });
      } else if (canClose && isSingle) {
        token.content = replaceAt(token.content, t.index, APOSTROPHE);
      }
    }
  }
}
|
||||
|
||||
|
||||
module.exports = function smartquotes(state) {
|
||||
/*eslint max-depth:0*/
|
||||
var blkIdx;
|
||||
|
||||
if (!state.md.options.typographer) { return; }
|
||||
|
||||
for (blkIdx = state.tokens.length - 1; blkIdx >= 0; blkIdx--) {
|
||||
|
||||
if (state.tokens[blkIdx].type !== 'inline' ||
|
||||
!QUOTE_TEST_RE.test(state.tokens[blkIdx].content)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
process_inlines(state.tokens[blkIdx].children, state);
|
||||
}
|
||||
};
|
||||
20
lib/markdown-it/lib/rules_core/state_core.js
Normal file
20
lib/markdown-it/lib/rules_core/state_core.js
Normal file
@@ -0,0 +1,20 @@
|
||||
// Core state object
|
||||
//
|
||||
'use strict';
|
||||
|
||||
var Token = require('../token');
|
||||
|
||||
|
||||
/**
 * Core parser state, threaded through all rules of the `core` chain.
 *
 * @param {String} src - markdown source to parse
 * @param {Object} md  - parser instance; rules read options/helpers off it
 * @param {Object} env - sandbox object for cross-rule data (e.g. references)
 */
function StateCore(src, md, env) {
  this.src = src;
  this.env = env;
  this.tokens = [];        // output token stream, filled by core rules
  this.inlineMode = false; // when true, source is parsed as a single inline
  this.md = md; // link to parser instance
}

// re-export Token class to use in core rules
StateCore.prototype.Token = Token;


module.exports = StateCore;
|
||||
72
lib/markdown-it/lib/rules_inline/autolink.js
Normal file
72
lib/markdown-it/lib/rules_inline/autolink.js
Normal file
@@ -0,0 +1,72 @@
|
||||
// Process autolinks '<protocol:...>'
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
/*eslint max-len:0*/
|
||||
var EMAIL_RE = /^<([a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*)>/;
|
||||
var AUTOLINK_RE = /^<([a-zA-Z][a-zA-Z0-9+.\-]{1,31}):([^<>\x00-\x20]*)>/;
|
||||
|
||||
|
||||
module.exports = function autolink(state, silent) {
|
||||
var tail, linkMatch, emailMatch, url, fullUrl, token,
|
||||
pos = state.pos;
|
||||
|
||||
if (state.src.charCodeAt(pos) !== 0x3C/* < */) { return false; }
|
||||
|
||||
tail = state.src.slice(pos);
|
||||
|
||||
if (tail.indexOf('>') < 0) { return false; }
|
||||
|
||||
if (AUTOLINK_RE.test(tail)) {
|
||||
linkMatch = tail.match(AUTOLINK_RE);
|
||||
|
||||
url = linkMatch[0].slice(1, -1);
|
||||
fullUrl = state.md.normalizeLink(url);
|
||||
if (!state.md.validateLink(fullUrl)) { return false; }
|
||||
|
||||
if (!silent) {
|
||||
token = state.push('link_open', 'a', 1);
|
||||
token.attrs = [ [ 'href', fullUrl ] ];
|
||||
token.markup = 'autolink';
|
||||
token.info = 'auto';
|
||||
|
||||
token = state.push('text', '', 0);
|
||||
token.content = state.md.normalizeLinkText(url);
|
||||
|
||||
token = state.push('link_close', 'a', -1);
|
||||
token.markup = 'autolink';
|
||||
token.info = 'auto';
|
||||
}
|
||||
|
||||
state.pos += linkMatch[0].length;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (EMAIL_RE.test(tail)) {
|
||||
emailMatch = tail.match(EMAIL_RE);
|
||||
|
||||
url = emailMatch[0].slice(1, -1);
|
||||
fullUrl = state.md.normalizeLink('mailto:' + url);
|
||||
if (!state.md.validateLink(fullUrl)) { return false; }
|
||||
|
||||
if (!silent) {
|
||||
token = state.push('link_open', 'a', 1);
|
||||
token.attrs = [ [ 'href', fullUrl ] ];
|
||||
token.markup = 'autolink';
|
||||
token.info = 'auto';
|
||||
|
||||
token = state.push('text', '', 0);
|
||||
token.content = state.md.normalizeLinkText(url);
|
||||
|
||||
token = state.push('link_close', 'a', -1);
|
||||
token.markup = 'autolink';
|
||||
token.info = 'auto';
|
||||
}
|
||||
|
||||
state.pos += emailMatch[0].length;
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
};
|
||||
43
lib/markdown-it/lib/rules_inline/backticks.js
Normal file
43
lib/markdown-it/lib/rules_inline/backticks.js
Normal file
@@ -0,0 +1,43 @@
|
||||
// Parse backticks
|
||||
|
||||
'use strict';
|
||||
|
||||
// Inline rule: parse a code span delimited by runs of backticks.
// The closing run must contain exactly as many backticks as the opener.
module.exports = function backtick(state, silent) {
  var start, max, marker, matchStart, matchEnd, token,
      pos = state.pos,
      ch = state.src.charCodeAt(pos);

  if (ch !== 0x60/* ` */) { return false; }

  // Scan the full run of opening backticks; the run itself is the marker.
  start = pos;
  pos++;
  max = state.posMax;

  while (pos < max && state.src.charCodeAt(pos) === 0x60/* ` */) { pos++; }

  marker = state.src.slice(start, pos);

  matchStart = matchEnd = pos;

  // Look for a closing run of the same length anywhere to the right.
  // Shorter/longer runs are skipped over and the search continues.
  while ((matchStart = state.src.indexOf('`', matchEnd)) !== -1) {
    matchEnd = matchStart + 1;

    while (matchEnd < max && state.src.charCodeAt(matchEnd) === 0x60/* ` */) { matchEnd++; }

    if (matchEnd - matchStart === marker.length) {
      if (!silent) {
        token = state.push('code_inline', 'code', 0);
        token.markup = marker;
        // Inside a code span newlines count as spaces, and one leading plus
        // one trailing space are stripped when both are present.
        token.content = state.src.slice(pos, matchStart)
          .replace(/\n/g, ' ')
          .replace(/^ (.+) $/, '$1');
      }
      state.pos = matchEnd;
      return true;
    }
  }

  // No matching closer: the opening run becomes literal text.
  if (!silent) { state.pending += marker; }
  state.pos += marker.length;
  return true;
};
|
||||
108
lib/markdown-it/lib/rules_inline/balance_pairs.js
Normal file
108
lib/markdown-it/lib/rules_inline/balance_pairs.js
Normal file
@@ -0,0 +1,108 @@
|
||||
// For each opening emphasis-like marker find a matching closing one
|
||||
//
|
||||
'use strict';
|
||||
|
||||
|
||||
// Pair up openers and closers in one delimiter list (as produced by
// emphasis/strikethrough tokenizers). Mutates delimiter entries in place:
// a matched opener gets `end` set, both sides get open/close flags cleared.
function processDelimiters(state, delimiters) {
  var closerIdx, openerIdx, closer, opener, minOpenerIdx, newMinOpenerIdx,
      isOddMatch, lastJump,
      openersBottom = {},
      max = delimiters.length;

  for (closerIdx = 0; closerIdx < max; closerIdx++) {
    closer = delimiters[closerIdx];

    // Length is only used for emphasis-specific "rule of 3",
    // if it's not defined (in strikethrough or 3rd party plugins),
    // we can default it to 0 to disable those checks.
    //
    closer.length = closer.length || 0;

    if (!closer.close) continue;

    // Previously calculated lower bounds (previous fails)
    // for each marker and each delimiter length modulo 3.
    if (!openersBottom.hasOwnProperty(closer.marker)) {
      openersBottom[closer.marker] = [ -1, -1, -1 ];
    }

    minOpenerIdx = openersBottom[closer.marker][closer.length % 3];
    newMinOpenerIdx = -1;

    openerIdx = closerIdx - closer.jump - 1;

    // NOTE: `opener.jump` in the update expression is only read after the
    // body has assigned `opener`, so the first iteration is safe.
    for (; openerIdx > minOpenerIdx; openerIdx -= opener.jump + 1) {
      opener = delimiters[openerIdx];

      if (opener.marker !== closer.marker) continue;

      if (newMinOpenerIdx === -1) newMinOpenerIdx = openerIdx;

      if (opener.open &&
          opener.end < 0 &&
          opener.level === closer.level) {

        isOddMatch = false;

        // from spec:
        //
        // If one of the delimiters can both open and close emphasis, then the
        // sum of the lengths of the delimiter runs containing the opening and
        // closing delimiters must not be a multiple of 3 unless both lengths
        // are multiples of 3.
        //
        if (opener.close || closer.open) {
          if ((opener.length + closer.length) % 3 === 0) {
            if (opener.length % 3 !== 0 || closer.length % 3 !== 0) {
              isOddMatch = true;
            }
          }
        }

        if (!isOddMatch) {
          // If previous delimiter cannot be an opener, we can safely skip
          // the entire sequence in future checks. This is required to make
          // sure algorithm has linear complexity (see *_*_*_*_*_... case).
          //
          lastJump = openerIdx > 0 && !delimiters[openerIdx - 1].open ?
            delimiters[openerIdx - 1].jump + 1 :
            0;

          closer.jump = closerIdx - openerIdx + lastJump;
          closer.open = false;
          opener.end = closerIdx;
          opener.jump = lastJump;
          opener.close = false;
          newMinOpenerIdx = -1;
          break;
        }
      }
    }

    if (newMinOpenerIdx !== -1) {
      // If match for this delimiter run failed, we want to set lower bound for
      // future lookups. This is required to make sure algorithm has linear
      // complexity.
      //
      // See details here:
      // https://github.com/commonmark/cmark/issues/178#issuecomment-270417442
      //
      openersBottom[closer.marker][(closer.length || 0) % 3] = newMinOpenerIdx;
    }
  }
}
|
||||
|
||||
|
||||
module.exports = function link_pairs(state) {
|
||||
var curr,
|
||||
tokens_meta = state.tokens_meta,
|
||||
max = state.tokens_meta.length;
|
||||
|
||||
processDelimiters(state, state.delimiters);
|
||||
|
||||
for (curr = 0; curr < max; curr++) {
|
||||
if (tokens_meta[curr] && tokens_meta[curr].delimiters) {
|
||||
processDelimiters(state, tokens_meta[curr].delimiters);
|
||||
}
|
||||
}
|
||||
};
|
||||
137
lib/markdown-it/lib/rules_inline/emphasis.js
Normal file
137
lib/markdown-it/lib/rules_inline/emphasis.js
Normal file
@@ -0,0 +1,137 @@
|
||||
// Process *this* and _that_
|
||||
//
|
||||
'use strict';
|
||||
|
||||
|
||||
// Insert each marker as a separate text token, and add it to delimiter list
|
||||
//
|
||||
// Tokenizer half of the emphasis rule: push one text token per marker
// character and record each as a delimiter candidate for balance_pairs.
module.exports.tokenize = function emphasis(state, silent) {
  var i, scanned, token,
      start = state.pos,
      marker = state.src.charCodeAt(start);

  // Nothing to validate in silent mode: markers are plain text until paired.
  if (silent) { return false; }

  if (marker !== 0x5F /* _ */ && marker !== 0x2A /* * */) { return false; }

  // '*' may split words; '_' may not (canSplitWord flag).
  scanned = state.scanDelims(state.pos, marker === 0x2A);

  for (i = 0; i < scanned.length; i++) {
    token = state.push('text', '', 0);
    token.content = String.fromCharCode(marker);

    state.delimiters.push({
      // Char code of the starting marker (number).
      //
      marker: marker,

      // Total length of these series of delimiters.
      //
      length: scanned.length,

      // An amount of characters before this one that's equivalent to
      // current one. In plain English: if this delimiter does not open
      // an emphasis, neither do previous `jump` characters.
      //
      // Used to skip sequences like "*****" in one step, for 1st asterisk
      // value will be 0, for 2nd it's 1 and so on.
      //
      jump: i,

      // A position of the token this delimiter corresponds to.
      //
      token: state.tokens.length - 1,

      // If this delimiter is matched as a valid opener, `end` will be
      // equal to its position, otherwise it's `-1`.
      //
      end: -1,

      // Boolean flags that determine if this delimiter could open or close
      // an emphasis.
      //
      open: scanned.can_open,
      close: scanned.can_close
    });
  }

  state.pos += scanned.length;

  return true;
};
|
||||
|
||||
|
||||
// Rewrite the paired delimiter text tokens into em/strong open/close tags.
// Walks backwards so that merging adjacent pairs into <strong> (which
// consumes delimiter i-1) does not disturb indices still to be visited.
function postProcess(state, delimiters) {
  var i,
      startDelim,
      endDelim,
      token,
      ch,
      isStrong,
      max = delimiters.length;

  for (i = max - 1; i >= 0; i--) {
    startDelim = delimiters[i];

    // This list is shared with other rules (e.g. strikethrough);
    // only handle our own markers.
    if (startDelim.marker !== 0x5F/* _ */ && startDelim.marker !== 0x2A/* * */) {
      continue;
    }

    // Process only opening markers
    if (startDelim.end === -1) {
      continue;
    }

    endDelim = delimiters[startDelim.end];

    // If the previous delimiter has the same marker and is adjacent to this one,
    // merge those into one strong delimiter.
    //
    // `<em><em>whatever</em></em>` -> `<strong>whatever</strong>`
    //
    isStrong = i > 0 &&
               delimiters[i - 1].end === startDelim.end + 1 &&
               delimiters[i - 1].token === startDelim.token - 1 &&
               delimiters[startDelim.end + 1].token === endDelim.token + 1 &&
               delimiters[i - 1].marker === startDelim.marker;

    ch = String.fromCharCode(startDelim.marker);

    token = state.tokens[startDelim.token];
    token.type = isStrong ? 'strong_open' : 'em_open';
    token.tag = isStrong ? 'strong' : 'em';
    token.nesting = 1;
    token.markup = isStrong ? ch + ch : ch;
    token.content = '';

    token = state.tokens[endDelim.token];
    token.type = isStrong ? 'strong_close' : 'em_close';
    token.tag = isStrong ? 'strong' : 'em';
    token.nesting = -1;
    token.markup = isStrong ? ch + ch : ch;
    token.content = '';

    if (isStrong) {
      // Blank out the inner pair's marker tokens and skip over them.
      state.tokens[delimiters[i - 1].token].content = '';
      state.tokens[delimiters[startDelim.end + 1].token].content = '';
      i--;
    }
  }
}
|
||||
|
||||
|
||||
// Walk through delimiter list and replace text tokens with tags
|
||||
//
|
||||
// Convert matched delimiter pairs into em/strong tokens, first for the
// top-level list and then for every nested delimiter list.
module.exports.postProcess = function emphasis(state) {
  var idx, nested,
      meta = state.tokens_meta,
      count = state.tokens_meta.length;

  postProcess(state, state.delimiters);

  for (idx = 0; idx < count; idx++) {
    nested = meta[idx] && meta[idx].delimiters;
    if (nested) { postProcess(state, nested); }
  }
};
|
||||
48
lib/markdown-it/lib/rules_inline/entity.js
Normal file
48
lib/markdown-it/lib/rules_inline/entity.js
Normal file
@@ -0,0 +1,48 @@
|
||||
// Process html entity - &#123;, &#xAF;, &quot;, ...
|
||||
|
||||
'use strict';
|
||||
|
||||
var entities = require('../common/entities');
|
||||
var has = require('../common/utils').has;
|
||||
var isValidEntityCode = require('../common/utils').isValidEntityCode;
|
||||
var fromCodePoint = require('../common/utils').fromCodePoint;
|
||||
|
||||
|
||||
var DIGITAL_RE = /^&#((?:x[a-f0-9]{1,6}|[0-9]{1,7}));/i;
|
||||
var NAMED_RE = /^&([a-z][a-z0-9]{1,31});/i;
|
||||
|
||||
|
||||
// Inline rule: decode a numeric (&#...;) or named (&name;) HTML entity
// starting at '&'. On failure the '&' is consumed as literal text.
module.exports = function entity(state, silent) {
  var ch, code, match, pos = state.pos, max = state.posMax;

  if (state.src.charCodeAt(pos) !== 0x26/* & */) { return false; }

  if (pos + 1 < max) {
    ch = state.src.charCodeAt(pos + 1);

    if (ch === 0x23 /* # */) {
      // Numeric entity: decimal or hex; invalid code points map to U+FFFD.
      match = state.src.slice(pos).match(DIGITAL_RE);
      if (match) {
        if (!silent) {
          code = match[1][0].toLowerCase() === 'x' ? parseInt(match[1].slice(1), 16) : parseInt(match[1], 10);
          state.pending += isValidEntityCode(code) ? fromCodePoint(code) : fromCodePoint(0xFFFD);
        }
        state.pos += match[0].length;
        return true;
      }
    } else {
      // Named entity: only names present in the entities table decode.
      match = state.src.slice(pos).match(NAMED_RE);
      if (match) {
        if (has(entities, match[1])) {
          if (!silent) { state.pending += entities[match[1]]; }
          state.pos += match[0].length;
          return true;
        }
      }
    }
  }

  // Not a recognized entity: emit the ampersand literally.
  if (!silent) { state.pending += '&'; }
  state.pos++;
  return true;
};
|
||||
52
lib/markdown-it/lib/rules_inline/escape.js
Normal file
52
lib/markdown-it/lib/rules_inline/escape.js
Normal file
@@ -0,0 +1,52 @@
|
||||
// Process escaped chars and hardbreaks
|
||||
|
||||
'use strict';
|
||||
|
||||
var isSpace = require('../common/utils').isSpace;
|
||||
|
||||
// Lookup table: ESCAPED[code] === 1 for ASCII punctuation that may be
// backslash-escaped per CommonMark.
var ESCAPED = [];

for (var i = 0; i < 256; i++) { ESCAPED.push(0); }

'\\!"#$%&\'()*+,./:;<=>?@[]^_`{|}~-'
  .split('').forEach(function (ch) { ESCAPED[ch.charCodeAt(0)] = 1; });


// Inline rule: handle '\X' escapes and backslash-at-end-of-line hardbreaks.
module.exports = function escape(state, silent) {
  var ch, pos = state.pos, max = state.posMax;

  if (state.src.charCodeAt(pos) !== 0x5C/* \ */) { return false; }

  pos++;

  if (pos < max) {
    ch = state.src.charCodeAt(pos);

    // Escapable punctuation: emit the character itself, drop the backslash.
    if (ch < 256 && ESCAPED[ch] !== 0) {
      if (!silent) { state.pending += state.src[pos]; }
      state.pos += 2;
      return true;
    }

    // Backslash before a newline is a hard line break.
    if (ch === 0x0A) {
      if (!silent) {
        state.push('hardbreak', 'br', 0);
      }

      pos++;
      // skip leading whitespaces from next line
      while (pos < max) {
        ch = state.src.charCodeAt(pos);
        if (!isSpace(ch)) { break; }
        pos++;
      }

      state.pos = pos;
      return true;
    }
  }

  // Anything else: the backslash is literal text.
  if (!silent) { state.pending += '\\'; }
  state.pos++;
  return true;
};
|
||||
47
lib/markdown-it/lib/rules_inline/html_inline.js
Normal file
47
lib/markdown-it/lib/rules_inline/html_inline.js
Normal file
@@ -0,0 +1,47 @@
|
||||
// Process html tags
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
var HTML_TAG_RE = require('../common/html_re').HTML_TAG_RE;
|
||||
|
||||
|
||||
// True if `ch` (a char code) is an ASCII letter of either case.
function isLetter(ch) {
  /*eslint no-bitwise:0*/
  // Setting bit 0x20 folds 'A'-'Z' onto 'a'-'z' before the range check.
  var folded = ch | 0x20;
  return folded >= 0x61/* a */ && folded <= 0x7a/* z */;
}
|
||||
|
||||
|
||||
// Inline rule: pass a raw HTML tag/comment/PI through as an html_inline
// token. Disabled unless the `html` option is set.
module.exports = function html_inline(state, silent) {
  var ch, match, max, token,
      pos = state.pos;

  if (!state.md.options.html) { return false; }

  // Check start
  max = state.posMax;
  if (state.src.charCodeAt(pos) !== 0x3C/* < */ ||
      pos + 2 >= max) {
    return false;
  }

  // Quick fail on second char: must look like '<!', '<?', '</' or '<letter'
  // before paying for the full regex match.
  ch = state.src.charCodeAt(pos + 1);
  if (ch !== 0x21/* ! */ &&
      ch !== 0x3F/* ? */ &&
      ch !== 0x2F/* / */ &&
      !isLetter(ch)) {
    return false;
  }

  match = state.src.slice(pos).match(HTML_TAG_RE);
  if (!match) { return false; }

  if (!silent) {
    token = state.push('html_inline', '', 0);
    // Content is the raw tag text, copied verbatim.
    token.content = state.src.slice(pos, pos + match[0].length);
  }
  state.pos += match[0].length;
  return true;
};
|
||||
152
lib/markdown-it/lib/rules_inline/image.js
Normal file
152
lib/markdown-it/lib/rules_inline/image.js
Normal file
@@ -0,0 +1,152 @@
|
||||
// Process 
|
||||
|
||||
'use strict';
|
||||
|
||||
var normalizeReference = require('../common/utils').normalizeReference;
|
||||
var isSpace = require('../common/utils').isSpace;
|
||||
|
||||
|
||||
// Inline rule: parse an image — inline form ,
// or reference form ![alt][label] resolved via state.env.references.
module.exports = function image(state, silent) {
  var attrs,
      code,
      content,
      label,
      labelEnd,
      labelStart,
      pos,
      ref,
      res,
      title,
      token,
      tokens,
      start,
      href = '',
      oldPos = state.pos,
      max = state.posMax;

  if (state.src.charCodeAt(state.pos) !== 0x21/* ! */) { return false; }
  if (state.src.charCodeAt(state.pos + 1) !== 0x5B/* [ */) { return false; }

  labelStart = state.pos + 2;
  labelEnd = state.md.helpers.parseLinkLabel(state, state.pos + 1, false);

  // parser failed to find ']', so it's not a valid link
  if (labelEnd < 0) { return false; }

  pos = labelEnd + 1;
  if (pos < max && state.src.charCodeAt(pos) === 0x28/* ( */) {
    //
    // Inline link
    //

    // [link]( <href> "title" )
    // ^^ skipping these spaces
    pos++;
    for (; pos < max; pos++) {
      code = state.src.charCodeAt(pos);
      if (!isSpace(code) && code !== 0x0A) { break; }
    }
    if (pos >= max) { return false; }

    // [link]( <href> "title" )
    // ^^^^^^ parsing link destination
    start = pos;
    res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax);
    if (res.ok) {
      href = state.md.normalizeLink(res.str);
      if (state.md.validateLink(href)) {
        pos = res.pos;
      } else {
        // Invalid destination: keep position, emit an empty src.
        href = '';
      }
    }

    // [link]( <href> "title" )
    // ^^ skipping these spaces
    start = pos;
    for (; pos < max; pos++) {
      code = state.src.charCodeAt(pos);
      if (!isSpace(code) && code !== 0x0A) { break; }
    }

    // [link]( <href> "title" )
    // ^^^^^^^ parsing link title
    // `start !== pos` requires whitespace between destination and title.
    res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax);
    if (pos < max && start !== pos && res.ok) {
      title = res.str;
      pos = res.pos;

      // [link]( <href> "title" )
      // ^^ skipping these spaces
      for (; pos < max; pos++) {
        code = state.src.charCodeAt(pos);
        if (!isSpace(code) && code !== 0x0A) { break; }
      }
    } else {
      title = '';
    }

    if (pos >= max || state.src.charCodeAt(pos) !== 0x29/* ) */) {
      state.pos = oldPos;
      return false;
    }
    pos++;
  } else {
    //
    // Link reference
    //
    if (typeof state.env.references === 'undefined') { return false; }

    if (pos < max && state.src.charCodeAt(pos) === 0x5B/* [ */) {
      start = pos + 1;
      pos = state.md.helpers.parseLinkLabel(state, pos);
      if (pos >= 0) {
        label = state.src.slice(start, pos++);
      } else {
        pos = labelEnd + 1;
      }
    } else {
      pos = labelEnd + 1;
    }

    // covers label === '' and label === undefined
    // (collapsed reference link and shortcut reference link respectively)
    if (!label) { label = state.src.slice(labelStart, labelEnd); }

    ref = state.env.references[normalizeReference(label)];
    if (!ref) {
      state.pos = oldPos;
      return false;
    }
    href = ref.href;
    title = ref.title;
  }

  //
  // We found the end of the link, and know for a fact it's a valid link;
  // so all that's left to do is to call tokenizer.
  //
  if (!silent) {
    content = state.src.slice(labelStart, labelEnd);

    // Alt text is parsed as inlines into `tokens` (children of the image).
    state.md.inline.parse(
      content,
      state.md,
      state.env,
      tokens = []
    );

    token = state.push('image', 'img', 0);
    token.attrs = attrs = [ [ 'src', href ], [ 'alt', '' ] ];
    token.children = tokens;
    token.content = content;

    if (title) {
      attrs.push([ 'title', title ]);
    }
  }

  state.pos = pos;
  state.posMax = max;
  return true;
};
|
||||
150
lib/markdown-it/lib/rules_inline/link.js
Normal file
150
lib/markdown-it/lib/rules_inline/link.js
Normal file
@@ -0,0 +1,150 @@
|
||||
// Process [link](<to> "stuff")
|
||||
|
||||
'use strict';
|
||||
|
||||
var normalizeReference = require('../common/utils').normalizeReference;
|
||||
var isSpace = require('../common/utils').isSpace;
|
||||
|
||||
|
||||
// Inline rule: parse a link — inline form [text](<to> "stuff"), or
// reference/collapsed/shortcut form resolved via state.env.references.
module.exports = function link(state, silent) {
  var attrs,
      code,
      label,
      labelEnd,
      labelStart,
      pos,
      res,
      ref,
      title,
      token,
      href = '',
      oldPos = state.pos,
      max = state.posMax,
      start = state.pos,
      parseReference = true;

  if (state.src.charCodeAt(state.pos) !== 0x5B/* [ */) { return false; }

  labelStart = state.pos + 1;
  labelEnd = state.md.helpers.parseLinkLabel(state, state.pos, true);

  // parser failed to find ']', so it's not a valid link
  if (labelEnd < 0) { return false; }

  pos = labelEnd + 1;
  if (pos < max && state.src.charCodeAt(pos) === 0x28/* ( */) {
    //
    // Inline link
    //

    // might have found a valid shortcut link, disable reference parsing
    parseReference = false;

    // [link]( <href> "title" )
    // ^^ skipping these spaces
    pos++;
    for (; pos < max; pos++) {
      code = state.src.charCodeAt(pos);
      if (!isSpace(code) && code !== 0x0A) { break; }
    }
    if (pos >= max) { return false; }

    // [link]( <href> "title" )
    // ^^^^^^ parsing link destination
    start = pos;
    res = state.md.helpers.parseLinkDestination(state.src, pos, state.posMax);
    if (res.ok) {
      href = state.md.normalizeLink(res.str);
      if (state.md.validateLink(href)) {
        pos = res.pos;
      } else {
        href = '';
      }
    }

    // [link]( <href> "title" )
    // ^^ skipping these spaces
    start = pos;
    for (; pos < max; pos++) {
      code = state.src.charCodeAt(pos);
      if (!isSpace(code) && code !== 0x0A) { break; }
    }

    // [link]( <href> "title" )
    // ^^^^^^^ parsing link title
    // `start !== pos` requires whitespace between destination and title.
    res = state.md.helpers.parseLinkTitle(state.src, pos, state.posMax);
    if (pos < max && start !== pos && res.ok) {
      title = res.str;
      pos = res.pos;

      // [link]( <href> "title" )
      // ^^ skipping these spaces
      for (; pos < max; pos++) {
        code = state.src.charCodeAt(pos);
        if (!isSpace(code) && code !== 0x0A) { break; }
      }
    } else {
      title = '';
    }

    if (pos >= max || state.src.charCodeAt(pos) !== 0x29/* ) */) {
      // parsing a valid shortcut link failed, fallback to reference
      parseReference = true;
    }
    pos++;
  }

  if (parseReference) {
    //
    // Link reference
    //
    if (typeof state.env.references === 'undefined') { return false; }

    if (pos < max && state.src.charCodeAt(pos) === 0x5B/* [ */) {
      start = pos + 1;
      pos = state.md.helpers.parseLinkLabel(state, pos);
      if (pos >= 0) {
        label = state.src.slice(start, pos++);
      } else {
        pos = labelEnd + 1;
      }
    } else {
      pos = labelEnd + 1;
    }

    // covers label === '' and label === undefined
    // (collapsed reference link and shortcut reference link respectively)
    if (!label) { label = state.src.slice(labelStart, labelEnd); }

    ref = state.env.references[normalizeReference(label)];
    if (!ref) {
      state.pos = oldPos;
      return false;
    }
    href = ref.href;
    title = ref.title;
  }

  //
  // We found the end of the link, and know for a fact it's a valid link;
  // so all that's left to do is to call tokenizer.
  //
  if (!silent) {
    // Temporarily narrow the window to the label and tokenize it as inlines.
    state.pos = labelStart;
    state.posMax = labelEnd;

    token = state.push('link_open', 'a', 1);
    token.attrs = attrs = [ [ 'href', href ] ];
    if (title) {
      attrs.push([ 'title', title ]);
    }

    state.md.inline.tokenize(state);

    token = state.push('link_close', 'a', -1);
  }

  state.pos = pos;
  state.posMax = max;
  return true;
};
|
||||
42
lib/markdown-it/lib/rules_inline/newline.js
Normal file
42
lib/markdown-it/lib/rules_inline/newline.js
Normal file
@@ -0,0 +1,42 @@
|
||||
// Process '\n'
|
||||
|
||||
'use strict';
|
||||
|
||||
var isSpace = require('../common/utils').isSpace;
|
||||
|
||||
|
||||
// Inline rule: handle '\n' — two trailing spaces produce a hardbreak,
// otherwise a softbreak; leading spaces of the next line are skipped.
module.exports = function newline(state, silent) {
  var pmax, max, pos = state.pos;

  if (state.src.charCodeAt(pos) !== 0x0A/* \n */) { return false; }

  pmax = state.pending.length - 1;
  max = state.posMax;

  // '  \n' -> hardbreak
  // Lookup in pending chars is bad practice! Don't copy to other rules!
  // Pending string is stored in concat mode, indexed lookups will cause
  // conversion to flat mode.
  if (!silent) {
    if (pmax >= 0 && state.pending.charCodeAt(pmax) === 0x20) {
      if (pmax >= 1 && state.pending.charCodeAt(pmax - 1) === 0x20) {
        // Two or more trailing spaces: strip them all, emit hardbreak.
        state.pending = state.pending.replace(/ +$/, '');
        state.push('hardbreak', 'br', 0);
      } else {
        // Single trailing space: drop it, emit softbreak.
        state.pending = state.pending.slice(0, -1);
        state.push('softbreak', 'br', 0);
      }

    } else {
      state.push('softbreak', 'br', 0);
    }
  }

  pos++;

  // skip heading spaces for next line
  while (pos < max && isSpace(state.src.charCodeAt(pos))) { pos++; }

  state.pos = pos;
  return true;
};
|
||||
150
lib/markdown-it/lib/rules_inline/state_inline.js
Normal file
150
lib/markdown-it/lib/rules_inline/state_inline.js
Normal file
@@ -0,0 +1,150 @@
|
||||
// Inline parser state
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
var Token = require('../token');
|
||||
var isWhiteSpace = require('../common/utils').isWhiteSpace;
|
||||
var isPunctChar = require('../common/utils').isPunctChar;
|
||||
var isMdAsciiPunct = require('../common/utils').isMdAsciiPunct;
|
||||
|
||||
|
||||
/**
 * Inline parser state, threaded through all `inline` chain rules.
 *
 * @param {String} src      - inline source fragment to parse
 * @param {Object} md       - parser instance
 * @param {Object} env      - sandbox object shared across rules
 * @param {Array} outTokens - array that receives the produced tokens
 */
function StateInline(src, md, env, outTokens) {
  this.src = src;
  this.env = env;
  this.md = md;
  this.tokens = outTokens;
  // Per-token metadata, kept index-aligned with `tokens` (see push()).
  this.tokens_meta = Array(outTokens.length);

  this.pos = 0;              // current scan position in src
  this.posMax = this.src.length;
  this.level = 0;            // current tag nesting level
  this.pending = '';         // accumulated plain text, flushed by pushPending()
  this.pendingLevel = 0;     // level the pending text belongs to

  // Stores { start: end } pairs. Useful for backtrack
  // optimization of pairs parse (emphasis, strikes).
  this.cache = {};

  // List of emphasis-like delimiters for current tag
  this.delimiters = [];

  // Stack of delimiter lists for upper level tags
  this._prev_delimiters = [];
}
|
||||
|
||||
|
||||
// Flush pending text
|
||||
//
|
||||
StateInline.prototype.pushPending = function () {
  // Turn the accumulated plain-text buffer into a 'text' token at the
  // level the text was collected at, then reset the buffer.
  var textToken = new Token('text', '', 0);

  textToken.content = this.pending;
  textToken.level = this.pendingLevel;

  this.tokens.push(textToken);
  this.pending = '';

  return textToken;
};
|
||||
|
||||
|
||||
// Push new token to "stream".
|
||||
// If pending text exists - flush it as text token
|
||||
//
|
||||
// Push a new token to the stream, flushing any pending text first.
// Also maintains the nesting level and the per-tag delimiter-list stack
// used by balance_pairs/emphasis post-processing.
StateInline.prototype.push = function (type, tag, nesting) {
  if (this.pending) {
    this.pushPending();
  }

  var token = new Token(type, tag, nesting);
  var token_meta = null;

  if (nesting < 0) {
    // closing tag: restore the enclosing tag's delimiter list
    this.level--;
    this.delimiters = this._prev_delimiters.pop();
  }

  // Level is decremented before / incremented after, so open and close
  // tokens of the same tag share the same level.
  token.level = this.level;

  if (nesting > 0) {
    // opening tag: start a fresh delimiter list for the nested content
    this.level++;
    this._prev_delimiters.push(this.delimiters);
    this.delimiters = [];
    token_meta = { delimiters: this.delimiters };
  }

  this.pendingLevel = this.level;
  this.tokens.push(token);
  // Keep tokens_meta index-aligned with tokens (null for non-opening tokens).
  this.tokens_meta.push(token_meta);
  return token;
};
|
||||
|
||||
|
||||
// Scan a sequence of emphasis-like markers, and determine whether
|
||||
// it can start an emphasis sequence or end an emphasis sequence.
|
||||
//
|
||||
// - start - position to scan from (it should point at a valid marker);
|
||||
// - canSplitWord - determine if these markers can be found inside a word
|
||||
//
|
||||
StateInline.prototype.scanDelims = function (start, canSplitWord) {
  // Returns { can_open, can_close, length } for the marker run at `start`,
  // per CommonMark's left-/right-flanking delimiter-run rules.
  var pos = start, lastChar, nextChar, count, can_open, can_close,
      isLastWhiteSpace, isLastPunctChar,
      isNextWhiteSpace, isNextPunctChar,
      left_flanking = true,
      right_flanking = true,
      max = this.posMax,
      marker = this.src.charCodeAt(start);

  // treat beginning of the line as a whitespace
  lastChar = start > 0 ? this.src.charCodeAt(start - 1) : 0x20;

  // Measure the full run of identical marker characters.
  while (pos < max && this.src.charCodeAt(pos) === marker) { pos++; }

  count = pos - start;

  // treat end of the line as a whitespace
  nextChar = pos < max ? this.src.charCodeAt(pos) : 0x20;

  // Punctuation test covers both ASCII markdown punctuation and the
  // Unicode punctuation class.
  isLastPunctChar = isMdAsciiPunct(lastChar) || isPunctChar(String.fromCharCode(lastChar));
  isNextPunctChar = isMdAsciiPunct(nextChar) || isPunctChar(String.fromCharCode(nextChar));

  isLastWhiteSpace = isWhiteSpace(lastChar);
  isNextWhiteSpace = isWhiteSpace(nextChar);

  if (isNextWhiteSpace) {
    left_flanking = false;
  } else if (isNextPunctChar) {
    if (!(isLastWhiteSpace || isLastPunctChar)) {
      left_flanking = false;
    }
  }

  if (isLastWhiteSpace) {
    right_flanking = false;
  } else if (isLastPunctChar) {
    if (!(isNextWhiteSpace || isNextPunctChar)) {
      right_flanking = false;
    }
  }

  if (!canSplitWord) {
    // Intraword restriction (e.g. '_'): flanking alone is not enough,
    // the adjacent character on the other side must be punctuation.
    can_open = left_flanking && (!right_flanking || isLastPunctChar);
    can_close = right_flanking && (!left_flanking || isNextPunctChar);
  } else {
    can_open = left_flanking;
    can_close = right_flanking;
  }

  return {
    can_open: can_open,
    can_close: can_close,
    length: count
  };
};
|
||||
|
||||
|
||||
// re-export Token class to use in block rules
|
||||
StateInline.prototype.Token = Token;
|
||||
|
||||
|
||||
module.exports = StateInline;
|
||||
131
lib/markdown-it/lib/rules_inline/strikethrough.js
Normal file
131
lib/markdown-it/lib/rules_inline/strikethrough.js
Normal file
@@ -0,0 +1,131 @@
|
||||
// ~~strike through~~
|
||||
//
|
||||
'use strict';
|
||||
|
||||
|
||||
// Insert each marker as a separate text token, and add it to delimiter list
|
||||
//
|
||||
// Insert each `~~` pair as its own text token and register it in the
// delimiter list, to be paired up later by the balance-pairs postprocess.
//
module.exports.tokenize = function strikethrough(state, silent) {
  const start  = state.pos;
  const marker = state.src.charCodeAt(start);

  if (silent) { return false; }

  if (marker !== 0x7E/* ~ */) { return false; }

  const scanned = state.scanDelims(state.pos, true);
  const ch = String.fromCharCode(marker);
  let len = scanned.length;

  // A single tilde is never a strikethrough delimiter.
  if (len < 2) { return false; }

  // An odd-length run leaves one lone marker behind as plain text.
  if (len % 2) {
    const lone = state.push('text', '', 0);
    lone.content = ch;
    len--;
  }

  for (let i = 0; i < len; i += 2) {
    const token = state.push('text', '', 0);
    token.content = ch + ch;

    state.delimiters.push({
      marker: marker,
      length: 0,     // disable "rule of 3" length checks meant for emphasis
      // NOTE(review): later upstream markdown-it uses `i / 2` here (one
      // delimiter entry covers two characters) — keep in sync with the
      // balance_pairs rule bundled in this copy before changing it.
      jump:   i,
      token:  state.tokens.length - 1,
      end:    -1,
      open:   scanned.can_open,
      close:  scanned.can_close
    });
  }

  state.pos += scanned.length;

  return true;
};
|
||||
|
||||
|
||||
// Convert paired `~~` text tokens into s_open / s_close tags, then fix up
// the placement of any leftover lone `~` markers.
//
function postProcess(state, delimiters) {
  let token;
  const loneMarkers = [];
  const max = delimiters.length;

  for (let i = 0; i < max; i++) {
    const startDelim = delimiters[i];

    // Only tilde delimiters that found a matching closer are of interest.
    if (startDelim.marker !== 0x7E/* ~ */ || startDelim.end === -1) {
      continue;
    }

    const endDelim = delimiters[startDelim.end];

    token         = state.tokens[startDelim.token];
    token.type    = 's_open';
    token.tag     = 's';
    token.nesting = 1;
    token.markup  = '~~';
    token.content = '';

    token         = state.tokens[endDelim.token];
    token.type    = 's_close';
    token.tag     = 's';
    token.nesting = -1;
    token.markup  = '~~';
    token.content = '';

    const prev = state.tokens[endDelim.token - 1];
    if (prev.type === 'text' && prev.content === '~') {
      loneMarkers.push(endDelim.token - 1);
    }
  }

  // If a marker sequence has an odd number of characters, it's split
  // like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the
  // start of the sequence.
  //
  // So, we have to move all those markers after subsequent s_close tags.
  //
  while (loneMarkers.length) {
    const i = loneMarkers.pop();
    let j = i + 1;

    // Skip over the run of s_close tokens that follows the lone marker.
    while (j < state.tokens.length && state.tokens[j].type === 's_close') {
      j++;
    }

    j--;

    if (i !== j) {
      token = state.tokens[j];
      state.tokens[j] = state.tokens[i];
      state.tokens[i] = token;
    }
  }
}
|
||||
|
||||
|
||||
// Walk through delimiter list and replace text tokens with tags
|
||||
//
|
||||
// Walk every delimiter list — the top-level one plus each nested list
// stored in tokens_meta — and replace paired text tokens with tags.
//
module.exports.postProcess = function strikethrough(state) {
  const tokens_meta = state.tokens_meta;
  const max = tokens_meta.length;

  postProcess(state, state.delimiters);

  for (let curr = 0; curr < max; curr++) {
    const meta = tokens_meta[curr];
    if (meta && meta.delimiters) {
      postProcess(state, meta.delimiters);
    }
  }
};
|
||||
89
lib/markdown-it/lib/rules_inline/text.js
Normal file
89
lib/markdown-it/lib/rules_inline/text.js
Normal file
@@ -0,0 +1,89 @@
|
||||
// Skip text characters for text token, place those to pending buffer
|
||||
// and increment current pos
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
// Rule to skip pure text
|
||||
// '{}$%@~+=:' reserved for extensions
|
||||
|
||||
// !, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, :, ;, <, =, >, ?, @, [, \, ], ^, _, `, {, |, }, or ~
|
||||
|
||||
// !!!! Don't confuse with "Markdown ASCII Punctuation" chars
|
||||
// http://spec.commonmark.org/0.15/#ascii-punctuation-character
|
||||
// Code points that terminate a plain-text run, precomputed once at load
// time for O(1) membership checks.
//
// !!!! Don't confuse with "Markdown ASCII Punctuation" chars
// http://spec.commonmark.org/0.15/#ascii-punctuation-character
var TERMINATOR_CODES = new Set([
  0x0A/* \n */, 0x21/* ! */, 0x23/* # */, 0x24/* $ */, 0x25/* % */,
  0x26/* & */,  0x2A/* * */, 0x2B/* + */, 0x2D/* - */, 0x3A/* : */,
  0x3C/* < */,  0x3D/* = */, 0x3E/* > */, 0x40/* @ */, 0x5B/* [ */,
  0x5C/* \ */,  0x5D/* ] */, 0x5E/* ^ */, 0x5F/* _ */, 0x60/* ` */,
  0x7B/* { */,  0x7D/* } */, 0x7E/* ~ */
]);

// True when `ch` (a char code) stops the plain-text rule.
function isTerminatorChar(ch) {
  return TERMINATOR_CODES.has(ch);
}
|
||||
|
||||
module.exports = function text(state, silent) {
|
||||
var pos = state.pos;
|
||||
|
||||
while (pos < state.posMax && !isTerminatorChar(state.src.charCodeAt(pos))) {
|
||||
pos++;
|
||||
}
|
||||
|
||||
if (pos === state.pos) { return false; }
|
||||
|
||||
if (!silent) { state.pending += state.src.slice(state.pos, pos); }
|
||||
|
||||
state.pos = pos;
|
||||
|
||||
return true;
|
||||
};
|
||||
|
||||
// Alternative implementation, kept for reference.
//
// It costs about 10% of performance, but allows extending the terminator
// list if it is placed on a `ParserInline` property. We will probably
// switch to it at some point, if such flexibility turns out to be required.
|
||||
|
||||
/*
|
||||
var TERMINATOR_RE = /[\n!#$%&*+\-:<=>@[\\\]^_`{}~]/;
|
||||
|
||||
module.exports = function text(state, silent) {
|
||||
var pos = state.pos,
|
||||
idx = state.src.slice(pos).search(TERMINATOR_RE);
|
||||
|
||||
// first char is terminator -> empty text
|
||||
if (idx === 0) { return false; }
|
||||
|
||||
// no terminator -> text till end of string
|
||||
if (idx < 0) {
|
||||
if (!silent) { state.pending += state.src.slice(pos); }
|
||||
state.pos = state.src.length;
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!silent) { state.pending += state.src.slice(pos, pos + idx); }
|
||||
|
||||
state.pos += idx;
|
||||
|
||||
return true;
|
||||
};*/
|
||||
41
lib/markdown-it/lib/rules_inline/text_collapse.js
Normal file
41
lib/markdown-it/lib/rules_inline/text_collapse.js
Normal file
@@ -0,0 +1,41 @@
|
||||
// Clean up tokens after emphasis and strikethrough postprocessing:
|
||||
// merge adjacent text nodes into one and re-calculate all token levels
|
||||
//
|
||||
// This is necessary because initially emphasis delimiter markers (*, _, ~)
|
||||
// are treated as their own separate text tokens. Then emphasis rule either
|
||||
// leaves them as text (needed to merge with adjacent text) or turns them
|
||||
// into opening/closing tags (which messes up levels inside).
|
||||
//
|
||||
'use strict';
|
||||
|
||||
|
||||
module.exports = function text_collapse(state) {
|
||||
var curr, last,
|
||||
level = 0,
|
||||
tokens = state.tokens,
|
||||
max = state.tokens.length;
|
||||
|
||||
for (curr = last = 0; curr < max; curr++) {
|
||||
// re-calculate levels after emphasis/strikethrough turns some text nodes
|
||||
// into opening/closing tags
|
||||
if (tokens[curr].nesting < 0) level--; // closing tag
|
||||
tokens[curr].level = level;
|
||||
if (tokens[curr].nesting > 0) level++; // opening tag
|
||||
|
||||
if (tokens[curr].type === 'text' &&
|
||||
curr + 1 < max &&
|
||||
tokens[curr + 1].type === 'text') {
|
||||
|
||||
// collapse two adjacent text nodes
|
||||
tokens[curr + 1].content = tokens[curr].content + tokens[curr + 1].content;
|
||||
} else {
|
||||
if (curr !== last) { tokens[last] = tokens[curr]; }
|
||||
|
||||
last++;
|
||||
}
|
||||
}
|
||||
|
||||
if (curr !== last) {
|
||||
tokens.length = last;
|
||||
}
|
||||
};
|
||||
197
lib/markdown-it/lib/token.js
Normal file
197
lib/markdown-it/lib/token.js
Normal file
@@ -0,0 +1,197 @@
|
||||
// Token class
|
||||
|
||||
'use strict';
|
||||
|
||||
|
||||
/**
|
||||
* class Token
|
||||
**/
|
||||
|
||||
/**
|
||||
* new Token(type, tag, nesting)
|
||||
*
|
||||
* Create new token and fill passed properties.
|
||||
**/
|
||||
/**
 * class Token
 *
 * A single node of the parsed document: produced by block/inline rules,
 * consumed by the renderer.
 **/

/**
 * new Token(type, tag, nesting)
 *
 * Create a new token and fill the passed properties.
 **/
function Token(type, tag, nesting) {
  // Type of the token, e.g. "paragraph_open".
  this.type = type;

  // HTML tag name, e.g. "p".
  this.tag = tag;

  // HTML attributes as `[ [ name1, value1 ], [ name2, value2 ] ]`, or null.
  this.attrs = null;

  // Source map info as `[ line_begin, line_end ]`, or null.
  this.map = null;

  // Level change: 1 = opening tag, 0 = self-closing, -1 = closing tag.
  this.nesting = nesting;

  // Nesting level, the same as `state.level`.
  this.level = 0;

  // Array of child nodes (inline and img tokens), or null.
  this.children = null;

  // Contents of self-closing tags (code, html, fence, etc.).
  this.content = '';

  // '*' or '_' for emphasis, fence string for fence, etc.
  this.markup = '';

  // Fence info string.
  this.info = '';

  // A place for plugins to store arbitrary data.
  this.meta = null;

  // True for block-level tokens, false for inline tokens.
  // Used in the renderer to calculate line breaks.
  this.block = false;

  // If true, ignore this element when rendering.
  // Used for tight lists to hide paragraphs.
  this.hidden = false;
}


/**
 * Token.attrIndex(name) -> Number
 *
 * Search attribute index by name; returns -1 when not found.
 **/
Token.prototype.attrIndex = function attrIndex(name) {
  if (!this.attrs) { return -1; }

  const attrs = this.attrs;

  for (let idx = 0, count = attrs.length; idx < count; idx++) {
    if (attrs[idx][0] === name) { return idx; }
  }
  return -1;
};


/**
 * Token.attrPush(attrData)
 *
 * Add a `[ name, value ]` attribute to the list. Init attrs if necessary.
 **/
Token.prototype.attrPush = function attrPush(attrData) {
  if (!this.attrs) {
    this.attrs = [ attrData ];
  } else {
    this.attrs.push(attrData);
  }
};


/**
 * Token.attrSet(name, value)
 *
 * Set `name` attribute to `value`. Overrides the old value if it exists.
 **/
Token.prototype.attrSet = function attrSet(name, value) {
  const idx = this.attrIndex(name);

  if (idx < 0) {
    this.attrPush([ name, value ]);
  } else {
    this.attrs[idx] = [ name, value ];
  }
};


/**
 * Token.attrGet(name)
 *
 * Get the value of attribute `name`, or null if it does not exist.
 **/
Token.prototype.attrGet = function attrGet(name) {
  const idx = this.attrIndex(name);
  return idx >= 0 ? this.attrs[idx][1] : null;
};


/**
 * Token.attrJoin(name, value)
 *
 * Join value to an existing attribute via a space, or create a new
 * attribute if it does not exist. Useful to operate with token classes.
 **/
Token.prototype.attrJoin = function attrJoin(name, value) {
  const idx = this.attrIndex(name);

  if (idx < 0) {
    this.attrPush([ name, value ]);
  } else {
    this.attrs[idx][1] += ' ' + value;
  }
};
|
||||
|
||||
|
||||
module.exports = Token;
|
||||
77
lib/renderer.js
Normal file
77
lib/renderer.js
Normal file
@@ -0,0 +1,77 @@
|
||||
import utils from "./markdown-it/lib/common/utils";
|
||||
|
||||
|
||||
// Default plugin load order. Entries may be overridden by user-supplied
// config; unknown user plugins are appended after these.
const def_pugs_lst = [
  'markdown-it-abbr',
  'markdown-it-cjk-breaks',
  'markdown-it-container',
  'markdown-it-deflist',
  'markdown-it-emoji',
  'markdown-it-footnote',
  'markdown-it-ins',
  'markdown-it-mark',
  'markdown-it-prism',
  'markdown-it-sub',
  'markdown-it-sup',
  // 'markdown-it-katex',
  'markdown-it-toc-and-anchor'
];

/**
 * Merge the user plugin config with the defaults.
 *
 * Every default plugin starts as `{ name, enable: true }`. A user entry
 * (shape: `{ plugin: { name, enable, options? } }`) replaces the default
 * with the same name, keeping the default ordering; entries with unknown
 * names are appended after the defaults. `enable` is normalized so that
 * anything other than (loosely) `true` becomes `false`.
 *
 * @param {Array} pugs user plugin list; non-array input is treated as empty.
 * @return {Array} merged plugin list.
 */
function checkPlugins(pugs) {
  // Prototype-less map so plugin names like "constructor" can't collide
  // with Object.prototype members.
  const def_pugs_obj = Object.create(null);
  for (const name of def_pugs_lst) {
    def_pugs_obj[name] = { name: name, enable: true };
  }

  // Guard: `config.markdown` may be a preset string, in which case the
  // caller's `opt.plugins` is undefined. Treat anything that is not an
  // array as "no user plugins" instead of crashing on `.length`.
  const list = Array.isArray(pugs) ? pugs : [];
  const extras = [];

  for (const entry of list) {
    if (!(entry instanceof Object) || !(entry.plugin instanceof Object)) continue;

    const plugin = entry.plugin;
    if (!plugin.name) continue;

    // Normalize `enable`; the loose comparison is intentional so truthy
    // legacy values such as 1 still count as enabled.
    if (plugin.enable != true) plugin.enable = false;

    if (def_pugs_obj[plugin.name]) {
      def_pugs_obj[plugin.name] = plugin;
    } else {
      extras.push(plugin);
    }
  }

  // Defaults in their canonical order, then any unknown user plugins.
  return def_pugs_lst.map((name) => def_pugs_obj[name]).concat(extras);
}
|
||||
|
||||
module.exports = function(data, options) {
|
||||
const MdIt = require('./markdown-it');
|
||||
const cfg = this.config.markdown;
|
||||
const opt = cfg ? cfg : 'default';
|
||||
let parser = opt === 'default' || opt === 'commonmark' || opt === 'zero'
|
||||
? new MdIt(opt)
|
||||
: new MdIt(opt.render);
|
||||
|
||||
|
||||
let plugins = checkPlugins(opt.plugins);
|
||||
|
||||
parser = plugins.reduce((parser, pugs) => {
|
||||
if (pugs.enable) {
|
||||
let plugin = require('./'+pugs.name);
|
||||
if(typeof plugin !== 'function' && typeof plugin.default === 'function') {
|
||||
plugin = plugin.default;
|
||||
}
|
||||
|
||||
if(pugs.options) {
|
||||
return parser.use(plugin, pugs.options);
|
||||
} else {
|
||||
return parser.use(plugin);
|
||||
}
|
||||
}
|
||||
else return parser;
|
||||
|
||||
}, parser);
|
||||
|
||||
|
||||
return parser.render(data.text);
|
||||
}
|
||||
39
package.json
Normal file
39
package.json
Normal file
@@ -0,0 +1,39 @@
|
||||
{
|
||||
"name": "hexo-renderer-markdown-it-ex",
|
||||
"version": "1.0",
|
||||
"description": "A Markdown parser for Hexo.",
|
||||
"main": "index.js",
|
||||
"repository": "https://github.com/amehime/hexo-renderer-markdown-it-ex.git",
|
||||
"keywords": [
|
||||
"hexo",
|
||||
"renderer",
|
||||
"markdown",
|
||||
"markdown-it",
|
||||
"hexo-renderer"
|
||||
],
|
||||
"directories": {
|
||||
"lib": "./lib"
|
||||
},
|
||||
"files": [
|
||||
"index.js",
|
||||
"lib/"
|
||||
],
|
||||
"author": "Ruri Shimotsuki",
|
||||
"license": "MIT",
|
||||
"bugs": "https://github.com/amehime/hexo-renderer-markdown-it-ex/issues",
|
||||
"homepage": "https://github.com/amehime/hexo-renderer-markdown-it-ex",
|
||||
"dependencies": {
|
||||
"hexo-util": "^1.7.0",
|
||||
"clone": "^2.1.0",
|
||||
"uslug": "^1.0.4",
|
||||
"prismjs": "1.20.0",
|
||||
"argparse": "^1.0.7",
|
||||
"entities": "~2.0.0",
|
||||
"linkify-it": "^2.0.0",
|
||||
"mdurl": "^1.0.1",
|
||||
"uc.micro": "^1.0.5"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8.6.0"
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user