chore: Move editor into codebase (#2930)
shared/editor/rules/breaks.ts (new file, 56 lines)
@@ -0,0 +1,56 @@
import MarkdownIt from "markdown-it";
import Token from "markdown-it/lib/token";

function isHardbreak(token: Token) {
  return (
    token.type === "hardbreak" ||
    (token.type === "text" && token.content === "\\")
  );
}

export default function markdownBreakToParagraphs(md: MarkdownIt) {
  // insert a new rule after the "inline" rules are parsed
  md.core.ruler.after("inline", "breaks", (state) => {
    const { Token } = state;
    const tokens = state.tokens;

    // work backwards through the tokens and find text that looks like a br
    for (let i = tokens.length - 1; i > 0; i--) {
      const tokenChildren = tokens[i].children || [];
      const matches = tokenChildren.filter(isHardbreak);

      if (matches.length) {
        let token;
        const nodes: Token[] = [];
        const children = tokenChildren.filter((child) => !isHardbreak(child));

        let count = matches.length;
        if (children.length) count++;

        for (let i = 0; i < count; i++) {
          const isLast = i === count - 1;

          token = new Token("paragraph_open", "p", 1);
          nodes.push(token);

          const text = new Token("text", "", 0);
          text.content = "";

          token = new Token("inline", "", 0);
          token.level = 1;
          token.children = isLast ? [text, ...children] : [text];
          token.content = "";
          nodes.push(token);

          token = new Token("paragraph_close", "p", -1);
          nodes.push(token);
        }

        tokens.splice(i - 1, 3, ...nodes);
      }
    }

    return false;
  });
}
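The rule above rewrites trailing-backslash hard breaks into separate paragraphs so they map onto a paragraph-based editor schema. A minimal usage sketch, not part of this diff (the relative import path is an assumption):

// Illustrative only – wires the rule into a markdown-it instance.
import MarkdownIt from "markdown-it";
import markdownBreakToParagraphs from "./breaks";

const md = new MarkdownIt().use(markdownBreakToParagraphs);

// A trailing backslash parses as a hard break; after this rule runs, the
// surrounding inline token is split into one paragraph per break instead.
md.parse("first line\\\nsecond line", {});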
shared/editor/rules/checkboxes.ts (new file, 105 lines)
@@ -0,0 +1,105 @@
import MarkdownIt from "markdown-it";
import Token from "markdown-it/lib/token";

const CHECKBOX_REGEX = /\[(X|\s|_|-)\]\s(.*)?/i;

function matches(token: Token | void) {
  return token && token.content.match(CHECKBOX_REGEX);
}

function isInline(token: Token | void): boolean {
  return !!token && token.type === "inline";
}

function isParagraph(token: Token | void): boolean {
  return !!token && token.type === "paragraph_open";
}

function isListItem(token: Token | void): boolean {
  return (
    !!token &&
    (token.type === "list_item_open" || token.type === "checkbox_item_open")
  );
}

function looksLikeChecklist(tokens: Token[], index: number) {
  return (
    isInline(tokens[index]) &&
    isListItem(tokens[index - 2]) &&
    isParagraph(tokens[index - 1]) &&
    matches(tokens[index])
  );
}

export default function markdownItCheckbox(md: MarkdownIt): void {
  function render(tokens: Token[], idx: number) {
    const token = tokens[idx];
    const checked = !!token.attrGet("checked");

    if (token.nesting === 1) {
      // opening tag
      return `<li class="checkbox-list-item"><span class="checkbox ${
        checked ? "checked" : ""
      }">${checked ? "[x]" : "[ ]"}</span>`;
    } else {
      // closing tag
      return "</li>\n";
    }
  }

  md.renderer.rules.checkbox_item_open = render;
  md.renderer.rules.checkbox_item_close = render;

  // insert a new rule after the "inline" rules are parsed
  md.core.ruler.after("inline", "checkboxes", (state) => {
    const tokens = state.tokens;

    // work backwards through the tokens and find text that looks like a checkbox
    for (let i = tokens.length - 1; i > 0; i--) {
      const matches = looksLikeChecklist(tokens, i);
      if (matches) {
        const value = matches[1];
        const checked = value.toLowerCase() === "x";

        // convert surrounding list tokens
        if (tokens[i - 3].type === "bullet_list_open") {
          tokens[i - 3].type = "checkbox_list_open";
        }

        if (tokens[i + 3].type === "bullet_list_close") {
          tokens[i + 3].type = "checkbox_list_close";
        }

        // remove [ ] [x] from list item label – must use the content from the
        // child for escaped characters to be unescaped correctly.
        const tokenChildren = tokens[i].children;
        if (tokenChildren) {
          const contentMatches = tokenChildren[0].content.match(CHECKBOX_REGEX);

          if (contentMatches) {
            const label = contentMatches[2];

            tokens[i].content = label;
            tokenChildren[0].content = label;
          }
        }

        // open list item and ensure checked state is transferred
        tokens[i - 2].type = "checkbox_item_open";

        if (checked === true) {
          tokens[i - 2].attrs = [["checked", "true"]];
        }

        // close the list item
        let j = i;
        while (tokens[j].type !== "list_item_close") {
          j++;
        }
        tokens[j].type = "checkbox_item_close";
      }
    }

    return false;
  });
}
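A sketch of exercising the checkbox rule directly with markdown-it, illustrative only (the import path is assumed):

import MarkdownIt from "markdown-it";
import markdownItCheckbox from "./checkboxes";

const md = new MarkdownIt().use(markdownItCheckbox);

// List items whose text starts with [ ] or [x] are rewritten into
// checkbox_* tokens and rendered with the custom <li> markup above.
md.render("- [x] done\n- [ ] todo\n");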
shared/editor/rules/embeds.ts (new file, 91 lines)
@@ -0,0 +1,91 @@
import MarkdownIt from "markdown-it";
import Token from "markdown-it/lib/token";
import { EmbedDescriptor } from "../types";

function isParagraph(token: Token) {
  return token.type === "paragraph_open";
}

function isInline(token: Token) {
  return token.type === "inline" && token.level === 1;
}

function isLinkOpen(token: Token) {
  return token.type === "link_open";
}

function isLinkClose(token: Token) {
  return token.type === "link_close";
}

export default function (embeds: EmbedDescriptor[]) {
  function isEmbed(token: Token, link: Token) {
    const href = link.attrs ? link.attrs[0][1] : "";
    const simpleLink = href === token.content;

    if (!simpleLink) return false;
    if (!embeds) return false;

    for (const embed of embeds) {
      const matches = embed.matcher(href);
      if (matches) {
        return {
          ...embed,
          matches,
        };
      }
    }

    return false;
  }

  return function markdownEmbeds(md: MarkdownIt) {
    md.core.ruler.after("inline", "embeds", (state) => {
      const tokens = state.tokens;
      let insideLink;

      for (let i = 0; i < tokens.length - 1; i++) {
        // once we find an inline token, look through its children for links
        if (isInline(tokens[i]) && isParagraph(tokens[i - 1])) {
          const tokenChildren = tokens[i].children || [];

          for (let j = 0; j < tokenChildren.length - 1; j++) {
            const current = tokenChildren[j];
            if (!current) continue;

            if (isLinkOpen(current)) {
              insideLink = current;
              continue;
            }

            if (isLinkClose(current)) {
              insideLink = null;
              continue;
            }

            // oh hey, we found a link – let's check to see if it should be
            // considered an embed
            if (insideLink) {
              const result = isEmbed(current, insideLink);
              if (result) {
                const { content } = current;

                // convert to embed token
                const token = new Token("embed", "iframe", 0);
                token.attrSet("href", content);

                // delete the inline link – this makes the assumption that the
                // embed is the only thing in the paragraph.
                // TODO: double check this
                tokens.splice(i - 1, 3, token);
                break;
              }
            }
          }
        }
      }

      return false;
    });
  };
}
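This rule is a factory: it takes the configured EmbedDescriptor list and returns the plugin. A hedged sketch, assuming a descriptor exposes at least the matcher function used by isEmbed above (the descriptor shape, cast, and URL are illustrative, not taken from this diff):

import MarkdownIt from "markdown-it";
import embedsToTokens from "./embeds";

// Hypothetical descriptor – only `matcher` is relied on by isEmbed above.
const embeds = [
  { matcher: (href: string) => /^https:\/\/www\.youtube\.com\/watch/.test(href) },
];

const md = new MarkdownIt().use(embedsToTokens(embeds as any));

// A paragraph containing only a bare link whose text equals its href becomes
// a single "embed" token instead of paragraph + link tokens.
md.parse(
  "[https://www.youtube.com/watch?v=123](https://www.youtube.com/watch?v=123)",
  {}
);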
shared/editor/rules/emoji.ts (new file, 10 lines)
@@ -0,0 +1,10 @@
import nameToEmoji from "gemoji/name-to-emoji.json";
import MarkdownIt from "markdown-it";
import emojiPlugin from "markdown-it-emoji";

export default function emoji(md: MarkdownIt) {
  return emojiPlugin(md, {
    defs: nameToEmoji,
    shortcuts: {},
  });
}
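A minimal sketch of wiring the emoji rule in (illustrative; import path assumed):

import MarkdownIt from "markdown-it";
import emoji from "./emoji";

const md = new MarkdownIt().use(emoji);

// ":smile:" should be replaced with the matching character from the gemoji
// name-to-emoji table; shortcut forms like ":)" are left alone because
// `shortcuts` is set to an empty object.
md.render("Hello :smile:");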
shared/editor/rules/mark.ts (new file, 156 lines)
@@ -0,0 +1,156 @@
// Adapted from:
// https://github.com/markdown-it/markdown-it-mark/blob/master/index.js

import MarkdownIt from "markdown-it";
import StateInline from "markdown-it/lib/rules_inline/state_inline";

export default function (options: { delim: string; mark: string }) {
  const delimCharCode = options.delim.charCodeAt(0);

  return function emphasisPlugin(md: MarkdownIt) {
    function tokenize(state: StateInline, silent: boolean) {
      let i, token;

      const start = state.pos,
        marker = state.src.charCodeAt(start);

      if (silent) {
        return false;
      }

      if (marker !== delimCharCode) {
        return false;
      }

      const scanned = state.scanDelims(state.pos, true);
      const ch = String.fromCharCode(marker);
      let len = scanned.length;

      if (len < 2) {
        return false;
      }

      if (len % 2) {
        token = state.push("text", "", 0);
        token.content = ch;
        len--;
      }

      for (i = 0; i < len; i += 2) {
        token = state.push("text", "", 0);
        token.content = ch + ch;

        if (!scanned.can_open && !scanned.can_close) {
          continue;
        }

        state.delimiters.push({
          marker,
          length: 0, // disable "rule of 3" length checks meant for emphasis
          jump: i,
          token: state.tokens.length - 1,
          end: -1,
          open: scanned.can_open,
          close: scanned.can_close,
        });
      }

      state.pos += scanned.length;
      return true;
    }

    // Walk through delimiter list and replace text tokens with tags
    //
    function postProcess(
      state: StateInline,
      delimiters: StateInline.Delimiter[]
    ) {
      let i = 0,
        j,
        startDelim,
        endDelim,
        token;
      const loneMarkers: number[] = [],
        max = delimiters.length;

      for (i = 0; i < max; i++) {
        startDelim = delimiters[i];

        if (startDelim.marker !== delimCharCode) {
          continue;
        }

        if (startDelim.end === -1) {
          continue;
        }

        endDelim = delimiters[startDelim.end];

        token = state.tokens[startDelim.token];
        token.type = `${options.mark}_open`;
        token.tag = "span";
        token.attrs = [["class", options.mark]];
        token.nesting = 1;
        token.markup = options.delim;
        token.content = "";

        token = state.tokens[endDelim.token];
        token.type = `${options.mark}_close`;
        token.tag = "span";
        token.nesting = -1;
        token.markup = options.delim;
        token.content = "";

        if (
          state.tokens[endDelim.token - 1].type === "text" &&
          state.tokens[endDelim.token - 1].content === options.delim[0]
        ) {
          loneMarkers.push(endDelim.token - 1);
        }
      }

      // If a marker sequence has an odd number of characters, it's split
      // like this: `~~~~~` -> `~` + `~~` + `~~`, leaving one marker at the
      // start of the sequence.
      //
      // So, we have to move all those markers after subsequent s_close tags.
      while (loneMarkers.length) {
        i = loneMarkers.pop() as number;
        j = i + 1;

        while (
          j < state.tokens.length &&
          state.tokens[j].type === `${options.mark}_close`
        ) {
          j++;
        }

        j--;

        if (i !== j) {
          token = state.tokens[j];
          state.tokens[j] = state.tokens[i];
          state.tokens[i] = token;
        }
      }
    }

    md.inline.ruler.before("emphasis", options.mark, tokenize);
    md.inline.ruler2.before("emphasis", options.mark, function (state) {
      let curr;
      const tokensMeta = state.tokens_meta,
        max = (state.tokens_meta || []).length;

      postProcess(state, state.delimiters);

      for (curr = 0; curr < max; curr++) {
        const delimiters = tokensMeta[curr]?.delimiters;
        if (tokensMeta[curr] && delimiters) {
          postProcess(state, delimiters);
        }
      }

      return false;
    });
  };
}
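The plugin is parameterised by a two-character delimiter and a mark name. A hedged sketch using a double-equals highlight mark purely as an example (the options and import path are illustrative, not taken from this diff):

import MarkdownIt from "markdown-it";
import markRule from "./mark";

// Example options only – any two-character delimiter works the same way.
const md = new MarkdownIt().use(markRule({ delim: "==", mark: "highlight" }));

// "==text==" is tokenized into highlight_open / highlight_close span tokens
// wrapping the inner text.
md.parse("==text==", {});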
shared/editor/rules/notices.ts (new file, 21 lines)
@@ -0,0 +1,21 @@
import MarkdownIt from "markdown-it";
import customFence from "markdown-it-container";
import Token from "markdown-it/lib/token";

export default function notice(md: MarkdownIt): void {
  return customFence(md, "notice", {
    marker: ":",
    validate: () => true,
    render: function (tokens: Token[], idx: number) {
      const { info } = tokens[idx];

      if (tokens[idx].nesting === 1) {
        // opening tag
        return `<div class="notice notice-${md.utils.escapeHtml(info)}">\n`;
      } else {
        // closing tag
        return "</div>\n";
      }
    },
  });
}
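markdown-it-container is configured with ":" as the fence marker, so a notice is written as a triple-colon block. A small usage sketch (illustrative only; the notice type is an example):

import MarkdownIt from "markdown-it";
import notice from "./notices";

const md = new MarkdownIt().use(notice);

// The info string after the opening fence becomes part of the class name,
// e.g. <div class="notice notice-warning">…</div>
md.render(":::warning\nBack up your data first.\n:::\n");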
shared/editor/rules/tables.ts (new file, 76 lines)
@@ -0,0 +1,76 @@
import MarkdownIt from "markdown-it";
import Token from "markdown-it/lib/token";

const BREAK_REGEX = /(?:^|[^\\])\\n/;

export default function markdownTables(md: MarkdownIt): void {
  // insert a new rule after the "inline" rules are parsed
  md.core.ruler.after("inline", "tables-pm", (state) => {
    const tokens = state.tokens;
    let inside = false;

    for (let i = tokens.length - 1; i > 0; i--) {
      if (inside) {
        tokens[i].level--;
      }

      // convert unescaped \n in the text into real br tag
      if (tokens[i].type === "inline" && tokens[i].content.match(BREAK_REGEX)) {
        const existing = tokens[i].children || [];
        tokens[i].children = [];

        existing.forEach((child) => {
          const breakParts = child.content.split(BREAK_REGEX);

          // a schema agnostic way to know if a node is inline code would be
          // great, for now we are stuck checking the node type.
          if (breakParts.length > 1 && child.type !== "code_inline") {
            breakParts.forEach((part, index) => {
              const token = new Token("text", "", 1);
              token.content = part.trim();
              tokens[i].children?.push(token);

              if (index < breakParts.length - 1) {
                const brToken = new Token("br", "br", 1);
                tokens[i].children?.push(brToken);
              }
            });
          } else {
            tokens[i].children?.push(child);
          }
        });
      }

      // filter out incompatible tokens from markdown-it that we don't need
      // in prosemirror. thead/tbody do nothing.
      if (
        ["thead_open", "thead_close", "tbody_open", "tbody_close"].includes(
          tokens[i].type
        )
      ) {
        inside = !inside;
        tokens.splice(i, 1);
      }

      if (["th_open", "td_open"].includes(tokens[i].type)) {
        // markdown-it table parser does not return paragraphs inside the cells
        // but prosemirror requires them, so we add 'em in here.
        tokens.splice(i + 1, 0, new Token("paragraph_open", "p", 1));

        // markdown-it table parser stores alignment as html styles, convert
        // to a simple string here
        const tokenAttrs = tokens[i].attrs;
        if (tokenAttrs) {
          const style = tokenAttrs[0][1];
          tokens[i].info = style.split(":")[1];
        }
      }

      if (["th_close", "td_close"].includes(tokens[i].type)) {
        tokens.splice(i, 0, new Token("paragraph_close", "p", -1));
      }
    }

    return false;
  });
}
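A sketch of running the table rule over a simple GFM table (illustrative; the table source is just an example):

import MarkdownIt from "markdown-it";
import markdownTables from "./tables";

const md = new MarkdownIt().use(markdownTables);

// thead/tbody tokens are stripped and each cell gains paragraph_open /
// paragraph_close tokens so the result maps onto a ProseMirror-style schema.
md.parse("| a | b |\n| --- | --- |\n| 1 | 2 |\n", {});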
shared/editor/rules/underlines.ts (new file, 24 lines)
@@ -0,0 +1,24 @@
import MarkdownIt from "markdown-it";

export default function markdownUnderlines(md: MarkdownIt) {
  md.inline.ruler2.after("emphasis", "underline", (state) => {
    const tokens = state.tokens;

    for (let i = tokens.length - 1; i > 0; i--) {
      const token = tokens[i];

      if (token.markup === "__") {
        if (token.type === "strong_open") {
          tokens[i].tag = "underline";
          tokens[i].type = "underline_open";
        }
        if (token.type === "strong_close") {
          tokens[i].tag = "underline";
          tokens[i].type = "underline_close";
        }
      }
    }

    return false;
  });
}
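A final sketch showing the underline rule in isolation (illustrative only; import path assumed):

import MarkdownIt from "markdown-it";
import markdownUnderlines from "./underlines";

const md = new MarkdownIt().use(markdownUnderlines);

// "__text__" normally produces strong_open/strong_close tokens with "__"
// markup; this rule renames them to underline_open/underline_close so the
// editor can map them to an underline mark instead of bold.
md.parse("__text__", {});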