feat: Upgrade editor (#1227)
* WIP
* document migration
* fix: Handle clashing keyboard events
* fix: convert getSummary
* fix: parseDocumentIds
* lint
* fix: Remove unused plugin
* Move editor version to header; add editor version check for API endpoints
* fix: Editor update auto-reload; bump RME
* test
* bump rme
* Remove slate flow types, improve theming, bump rme
* bump rme
* fix: parseDocumentIds returning duplicate IDs, improved regression tests
* test
* fix: Missing code styles
* lint
* chore: Upgrade v2 migration to use AST
* Bump RME
* Update welcome doc
* add highlight to keyboard shortcuts ref
* theming improvements
* fix: Code comments show as headings, closes #1255
* loop
* fix: TOC highlighting
* lint
* add: Automated backup of docs before migration
* Update embeds to new format
* fix: React warning
* bump to final editor version 10.0.0
* test
@@ -1,28 +0,0 @@
-// @flow
-import { filter } from 'lodash';
-import slugify from 'shared/utils/slugify';
-import unescape from 'shared/utils/unescape';
-
-export default function getHeadingsForText(
-  text: string
-): { level: number, title: string, slug: string }[] {
-  const regex = /^(#{1,6})\s(.*)$/gm;
-
-  let match;
-  let output = [];
-  while ((match = regex.exec(text)) !== null) {
-    if (!match) continue;
-
-    const level = match[1].length;
-    const title = unescape(match[2]);
-
-    let slug = slugify(title);
-    const existing = filter(output, { slug });
-    if (existing.length) {
-      slug = `${slug}-${existing.length}`;
-    }
-    output.push({ level, title, slug });
-  }
-
-  return output;
-}
@@ -1,23 +0,0 @@
-/* eslint-disable flowtype/require-valid-file-annotation */
-import getHeadingsForText from './getHeadingsForText';
-
-it('should return an array of document headings', () => {
-  const response = getHeadingsForText(`
-# Header
-
-## Subheading
-`);
-
-  expect(response.length).toBe(2);
-  expect(response[0].level).toBe(1);
-  expect(response[0].title).toBe('Header');
-  expect(response[1].level).toBe(2);
-  expect(response[1].title).toBe('Subheading');
-});
-
-it('should unescape special characters', () => {
-  const response = getHeadingsForText(`# Header <\\>`);
-
-  expect(response.length).toBe(1);
-  expect(response[0].title).toBe('Header <>');
-});
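For reference, the removed `getHeadingsForText` helper de-duplicated repeated heading slugs by counting prior occurrences with lodash's `filter`. A minimal sketch of that behavior, based on the deleted code above; the input is hypothetical (not from the deleted tests), and it assumes `slugify` lowercases titles:

```js
import getHeadingsForText from './getHeadingsForText';

// Hypothetical input: two identical subheadings.
const headings = getHeadingsForText(`# Setup
## Usage
## Usage`);

// Per the `filter(output, { slug })` logic above, the second
// duplicate gets a numeric suffix so anchors stay unique
// (assuming slugify('Usage') === 'usage'):
// [
//   { level: 1, title: 'Setup', slug: 'setup' },
//   { level: 2, title: 'Usage', slug: 'usage' },
//   { level: 2, title: 'Usage', slug: 'usage-1' },
// ]
```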
@@ -1,29 +1,39 @@
 // @flow
-import MarkdownSerializer from 'slate-md-serializer';
-
-const Markdown = new MarkdownSerializer();
+import { parser } from 'rich-markdown-editor';
 
 export default function parseDocumentIds(text: string): string[] {
-  const value = Markdown.deserialize(text);
+  const value = parser.parse(text);
   let links = [];
 
   function findLinks(node) {
-    if (node.type === 'link') {
-      const href = node.data.get('href');
-      if (href.startsWith('/doc')) {
-        const tokens = href.replace(/\/$/, '').split('/');
-        const lastToken = tokens[tokens.length - 1];
-        links.push(lastToken);
-      }
+    // get text nodes
+    if (node.type.name === 'text') {
+      // get marks for text nodes
+      node.marks.forEach(mark => {
+        // any of the marks links?
+        if (mark.type.name === 'link') {
+          const { href } = mark.attrs;
+          // any of the links to other docs?
+          if (href.startsWith('/doc')) {
+            const tokens = href.replace(/\/$/, '').split('/');
+            const lastToken = tokens[tokens.length - 1];
+
+            // don't return the same link more than once
+            if (!links.includes(lastToken)) {
+              links.push(lastToken);
+            }
+          }
+        }
+      });
     }
 
-    if (!node.nodes) {
+    if (!node.content.size) {
       return;
     }
 
-    node.nodes.forEach(findLinks);
+    node.content.descendants(findLinks);
   }
 
-  findLinks(value.document);
+  findLinks(value);
   return links;
 }
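The rewrite above swaps Slate's tree walk (`node.nodes`, `node.data.get('href')`) for the ProseMirror-style document returned by `rich-markdown-editor`'s `parser`: links are now marks on text nodes, traversal uses `node.content.descendants`, and duplicate ids are filtered out. A minimal usage sketch, with the expected output taken from the regression tests below:

```js
import parseDocumentIds from './parseDocumentIds';

// Only relative /doc links are collected, and each id only once.
const ids = parseDocumentIds(`# Header

[internal](/doc/test-456733)

[same doc again](/doc/test-456733)

[external](http://www.google.com)
`);

console.log(ids); // ['test-456733']
```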
@@ -1,20 +1,38 @@
 /* eslint-disable flowtype/require-valid-file-annotation */
 import parseDocumentIds from './parseDocumentIds';
 
-it('should return an array of document ids', () => {
+it('should not return non links', () => {
   expect(parseDocumentIds(`# Header`).length).toBe(0);
-  expect(
-    parseDocumentIds(`# Header
+});
+
+it('should return an array of document ids', () => {
+  const result = parseDocumentIds(`# Header
 
-[title](/doc/test-456733)
-`)[0]
-  ).toBe('test-456733');
+[internal](/doc/test-456733)
+`);
+
+  expect(result.length).toBe(1);
+  expect(result[0]).toBe('test-456733');
 });
 
+it('should not return duplicate document ids', () => {
+  expect(parseDocumentIds(`# Header`).length).toBe(0);
+
+  const result = parseDocumentIds(`# Header
+
+[internal](/doc/test-456733)
+
+[another link to the same doc](/doc/test-456733)
+`);
+
+  expect(result.length).toBe(1);
+  expect(result[0]).toBe('test-456733');
+});
+
 it('should not return non document links', () => {
-  expect(parseDocumentIds(`[title](http://www.google.com)`).length).toBe(0);
+  expect(parseDocumentIds(`[google](http://www.google.com)`).length).toBe(0);
 });
 
 it('should not return non document relative links', () => {
-  expect(parseDocumentIds(`[title](/developers)`).length).toBe(0);
+  expect(parseDocumentIds(`[relative](/developers)`).length).toBe(0);
 });