From 0bc30729440cc61bbaddf9c84f1f9431eba7300d Mon Sep 17 00:00:00 2001 From: Mechiel Lukkien Date: Wed, 10 Jan 2024 16:48:53 +0100 Subject: [PATCH] new website for www.xmox.nl most content is in markdown files in website/, some is taken out of the repo README and rfc/index.txt. a Go file generates html. static files are kept in a separate repo due to size. --- .gitignore | 2 + Makefile | 3 + README.md | 143 +- config/doc.go | 39 +- develop.txt | 25 + doc.go | 20 +- gendoc.sh | 61 +- genwebsite.sh | 56 + go.mod | 1 + go.sum | 2 + rfc/Makefile | 4 +- rfc/index.txt | 803 +++--- rfc/xr.go | 4 +- .../russross/blackfriday/v2/.gitignore | 8 + .../russross/blackfriday/v2/.travis.yml | 17 + .../russross/blackfriday/v2/LICENSE.txt | 29 + .../russross/blackfriday/v2/README.md | 335 +++ .../russross/blackfriday/v2/block.go | 1612 ++++++++++++ .../github.com/russross/blackfriday/v2/doc.go | 46 + .../russross/blackfriday/v2/entities.go | 2236 +++++++++++++++++ .../github.com/russross/blackfriday/v2/esc.go | 70 + .../russross/blackfriday/v2/html.go | 952 +++++++ .../russross/blackfriday/v2/inline.go | 1228 +++++++++ .../russross/blackfriday/v2/markdown.go | 950 +++++++ .../russross/blackfriday/v2/node.go | 360 +++ .../russross/blackfriday/v2/smartypants.go | 457 ++++ vendor/modules.txt | 3 + webaccount/account.js | 2 +- webaccount/account.ts | 2 +- webadmin/admin.js | 2 +- webadmin/admin.ts | 2 +- website/features/index.md | 504 ++++ website/index.md | 64 + website/install/index.md | 99 + website/protocols/summary.md | 43 + website/screenshots/index.md | 43 + website/website.go | 552 ++++ 37 files changed, 10274 insertions(+), 505 deletions(-) create mode 100755 genwebsite.sh create mode 100644 vendor/github.com/russross/blackfriday/v2/.gitignore create mode 100644 vendor/github.com/russross/blackfriday/v2/.travis.yml create mode 100644 vendor/github.com/russross/blackfriday/v2/LICENSE.txt create mode 100644 vendor/github.com/russross/blackfriday/v2/README.md create mode 100644 
vendor/github.com/russross/blackfriday/v2/block.go create mode 100644 vendor/github.com/russross/blackfriday/v2/doc.go create mode 100644 vendor/github.com/russross/blackfriday/v2/entities.go create mode 100644 vendor/github.com/russross/blackfriday/v2/esc.go create mode 100644 vendor/github.com/russross/blackfriday/v2/html.go create mode 100644 vendor/github.com/russross/blackfriday/v2/inline.go create mode 100644 vendor/github.com/russross/blackfriday/v2/markdown.go create mode 100644 vendor/github.com/russross/blackfriday/v2/node.go create mode 100644 vendor/github.com/russross/blackfriday/v2/smartypants.go create mode 100644 website/features/index.md create mode 100644 website/index.md create mode 100644 website/install/index.md create mode 100644 website/protocols/summary.md create mode 100644 website/screenshots/index.md create mode 100644 website/website.go diff --git a/.gitignore b/.gitignore index 3ba63aa..88d33e8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ /mox /mox.exe /rfc/[0-9][0-9]* +/rfc/xr/ /local/ /testdata/check/ /testdata/*/data/ @@ -24,3 +25,4 @@ /node_modules/ /upgrade*-verifydata.*.pprof /upgrade*-openaccounts.*.pprof +/website/html/ diff --git a/Makefile b/Makefile index d9ffd64..6e7514d 100644 --- a/Makefile +++ b/Makefile @@ -118,6 +118,9 @@ docker: docker-release: ./docker-release.sh +genwebsite: + ./genwebsite.sh + buildall: GOOS=linux GOARCH=arm go build GOOS=linux GOARCH=arm64 go build diff --git a/README.md b/README.md index 544a92e..49124be 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ Mox is a modern full-featured open source secure mail server for low-maintenance self-hosted email. +For more details, see the mox website, https://www.xmox.nl. + See Quickstart below to get started. ## Features @@ -48,12 +50,12 @@ proton.me. The code is heavily cross-referenced with the RFCs for readability/maintainability. 
- # Quickstart The easiest way to get started with serving email for your domain is to get a -(virtual) machine dedicated to serving email, name it [host].[domain] (e.g. -mail.example.com), login as root, and run: +(virtual) machine dedicated to serving email, name it `[host].[domain]` (e.g. +mail.example.com). Having a DNSSEC-verifying resolver installed, such as +unbound, is highly recommended. Run as root: # Create mox user and homedir (or pick another name or homedir): useradd -m -d /home/mox mox @@ -64,48 +66,62 @@ mail.example.com), login as root, and run: # Generate config files for your address/domain: ./mox quickstart you@example.com -The quickstart creates configuration files for the domain and account, -generates an admin and account password, prints the DNS records you need to add -and prints commands to start mox and optionally install mox as a service. +The quickstart: + +- Creates configuration files mox.conf and domains.conf. +- Adds the domain and an account for the email address to domains.conf +- Generates an admin and account password. +- Prints the DNS records you need to add, for the machine and domain. +- Prints commands to start mox, and optionally install mox as a service. A machine that doesn't already run a webserver is highly recommended because -modern email requires HTTPS, and mox currently needs it for automatic TLS. You -could combine mox with an existing webserver, but it requires a lot more -configuration. If you want to serve websites on the same machine, consider using -the webserver built into mox. It's pretty good! If you want to run an existing -webserver on port 443/80, see "mox help quickstart". +modern email requires HTTPS, and mox currently needs to run a webserver for +automatic TLS with ACME. You could combine mox with an existing webserver, but +it requires a lot more configuration. If you want to serve websites on the same +machine, consider using the webserver built into mox. It's pretty good! 
If you +want to run an existing webserver on port 443/80, see `mox help quickstart`. After starting, you can access the admin web interface on internal IPs. # Download -You can easily (cross) compile mox if you have a recent Go toolchain installed -(see "go version", it must be >= 1.20; otherwise, see https://go.dev/dl/ or -https://go.dev/doc/manage-install and $HOME/go/bin): +Download a mox binary from +https://beta.gobuilds.org/github.com/mjl-/mox@latest/linux-amd64-latest/. + +Symlink or rename it to "mox". + +The URL above always resolves to the latest release for linux/amd64 built with +the latest Go toolchain. See the links at the bottom of that page for binaries +for other platforms. + +# Compiling + +You can easily (cross) compile mox yourself. You need a recent Go toolchain +installed. Run `go version`, it must be >= 1.20. Download the latest version +from https://go.dev/dl/ or see https://go.dev/doc/manage-install. + +To download the source code of the latest release, and compile it to binary "mox": GOBIN=$PWD CGO_ENABLED=0 go install github.com/mjl-/mox@latest -Or you can download a binary built with the latest Go toolchain from -https://beta.gobuilds.org/github.com/mjl-/mox@latest/linux-amd64-latest/, and -symlink or rename it to "mox". - -Verify you have a working mox binary: - - ./mox version - Mox only compiles for and fully works on unix systems. Mox also compiles for Windows, but "mox serve" does not yet work, though "mox localserve" (for a local test instance) and most other subcommands do. Mox does not compile for Plan 9. -You can also run mox with docker image `r.xmox.nl/mox`, with tags like `v0.0.1` -and `v0.0.1-go1.20.1-alpine3.17.2`, see https://r.xmox.nl/r/mox/. Though new -docker images aren't (automatically) generated for new Go runtime/compile -releases. See docker-compose.yml in this repository for instructions on -starting. 
It is important to run with docker host networking, so mox can use -the public IPs and has correct remote IP information for incoming connections -(important for junk filtering and rate-limiting). Given these caveats, it's -recommended to run mox without docker. +# Docker + +Although not recommended, you can also run mox with docker image +`r.xmox.nl/mox`, with tags like `v0.0.1` and `v0.0.1-go1.20.1-alpine3.17.2`, see +https://r.xmox.nl/r/mox/. See +https://github.com/mjl-/mox/blob/main/docker-compose.yml to get started. + +New docker images aren't (automatically) generated for new Go runtime/compile +releases. + +It is important to run with docker host networking, so mox can use the public +IPs and has correct remote IP information for incoming connections (important +for junk filtering and rate-limiting). # Future/development @@ -115,7 +131,6 @@ https://nlnet.nl/project/Mox/. ## Roadmap -- Improve documentation - Improve SMTP delivery from queue - Webmail improvements - HTTP-based API for sending messages and receiving delivery feedback @@ -143,14 +158,15 @@ https://nlnet.nl/project/Mox/. new deliveries) - Improve support for mobile clients with extensions: IMAP URLAUTH, SMTP CHUNKING and BINARYMIME, IMAP CATENATE +- Mailing list manager There are many smaller improvements to make as well, search for "todo" in the code. ## Not supported/planned -But perhaps in the future... +There is currently no plan to implement the following. Though this may +change in the future. -- Mailing list manager - Functioning as SMTP relay - POP3 - Delivery to (unix) OS system users @@ -175,11 +191,15 @@ make that easy. ## Where is the documentation? -See all commands and help output at https://pkg.go.dev/github.com/mjl-/mox/. +To keep mox as a project maintainable, documentation is integrated into, and +generated from the code. -See the commented example config files at -https://pkg.go.dev/github.com/mjl-/mox/config/. 
They often contain enough -documentation about a feature and how to configure it. +A list of mox commands, and their help output, are at +https://www.xmox.nl/commands/. + +Mox is configured through configuration files, and each field comes with +documentation. See https://www.xmox.nl/config/ for config files containing all +fields and their documentation. You can get the same information by running "mox" without arguments to list its subcommands and usage, and "mox help [subcommand]" for more details. @@ -187,9 +207,8 @@ subcommands and usage, and "mox help [subcommand]" for more details. The example config files are printed by "mox config describe-static" and "mox config describe-dynamic". -Mox is still in early stages, and documentation is still limited. Please create -an issue describing what is unclear or confusing, and we'll try to improve the -documentation. +If you're missing some documentation, please create an issue describing what is +unclear or confusing, and we'll try to improve the documentation. ## Is Mox affected by SMTP smuggling? @@ -272,16 +291,16 @@ For bug reports, please file an issue at https://github.com/mjl-/mox/issues/new. ## How do I change my password? Regular users (doing IMAP/SMTP with authentication) can change their password -at the account page, e.g. http://localhost/. Or you can set a password with "mox +at the account page, e.g. `http://localhost/`. Or you can set a password with "mox setaccountpassword". The admin can change the password of any account through the admin page, at -http://localhost/admin/ by default (leave username empty when logging in). +`http://localhost/admin/` by default (leave username empty when logging in). The account and admin pages are served on localhost for configs created with the quickstart. To access these from your browser, run `ssh -L 8080:localhost:80 you@yourmachine` locally and open -http://localhost:8080/[...]. +`http://localhost:8080/[...]`. 
The admin password can be changed with "mox setadminpassword". @@ -371,19 +390,6 @@ should account for the size of the email messages (no compression currently), an additional 15% overhead for the meta data, and add some more headroom. Expand as necessary. -## Can I see some screenshots? - -Yes, see https://www.xmox.nl/screenshots/. - -Mox has a webmail for reading/writing messages. - -Mox also has an "account" web interface where users can view their account and -manage their address configuration, such as rules for automatically delivering -certain incoming messages to a specific mailbox. - -And mox has an "admin" web interface where the administrator can make changes, -e.g. add/remove/modify domains/accounts/addresses. - ## Won't the big email providers block my email? It is a common misconception that it is impossible to run your own email server @@ -417,8 +423,8 @@ domain. Sending messages with content that resembles known spam messages. Should your email be rejected, you will typically get an error message during the SMTP transaction that explains why. In the case of big email providers the -error message often has instructions on how to prove to them you are a legimate -sender. +error message often has instructions on how to prove to them you are a +legitimate sender. ## Can I use existing TLS certificates/keys? @@ -426,18 +432,19 @@ Yes. The quickstart command creates a config that uses ACME with Let's Encrypt, but you can change the config file to use existing certificate and key files. You'll see "ACME: letsencrypt" in the "TLS" section of the "public" Listener. -Remove or comment out the ACME-line, and add a "KeyCerts" section like in the -example config file in -https://pkg.go.dev/github.com/mjl-/mox/config#hdr-mox_conf. You can have -multiple certificates and keys: The line with the "-" (dash) is the start of a -list item. Duplicate that line up to and including the line with KeyFile for -each certificate/key you have. 
Mox makes a TLS config that holds all specified -certificates/keys, and uses it for all services for that Listener (including a -webserver), choosing the correct certificate for incoming requests. +Remove or comment out the ACME-line, and add a "KeyCerts" section, see +https://www.xmox.nl/config/#cfg-mox-conf-Listeners-x-TLS-KeyCerts + +You can have multiple certificates and keys: The line with the "-" (dash) is +the start of a list item. Duplicate that line up to and including the line with +KeyFile for each certificate/key you have. Mox makes a TLS config that holds +all specified certificates/keys, and uses it for all services for that Listener +(including a webserver), choosing the correct certificate for incoming +requests. Keep in mind that for each email domain you host, you will need a certificate -for `mta-sts.` and `autoconfig.`, unless you disable MTA-STS -and autoconfig for that domain. +for `mta-sts.`, `autoconfig.` and `mail.`, unless you +disable MTA-STS, autoconfig and the client-settings-domain for that domain. Mox opens the key and certificate files during initial startup, as root (and passes file descriptors to the unprivileged process). No special permissions diff --git a/config/doc.go b/config/doc.go index a1508c8..bb3caf9 100644 --- a/config/doc.go +++ b/config/doc.go @@ -1,15 +1,36 @@ /* -Package config holds the configuration file definitions for mox.conf (Static) -and domains.conf (Dynamic). +Package config holds the configuration file definitions. -These config files are in "sconf" format. Summarized: Indent with tabs, "#" as -first non-whitespace character makes the line a comment (you cannot have a line -with both a value and a comment), strings are not quoted/escaped and can never -span multiple lines. See https://pkg.go.dev/github.com/mjl-/sconf for details. 
+Mox uses two config files: -Annotated empty/default configuration files you could use as a starting point -for your mox.conf and domains.conf, as generated by "mox config -describe-static" and "mox config describe-domains": +1. mox.conf, also called the static configuration file. +2. domains.conf, also called the dynamic configuration file. + +The static configuration file is never reloaded during the lifetime of a +running mox instance. After changes to mox.conf, mox must be restarted for the +changes to take effect. + +The dynamic configuration file is reloaded automatically when it changes. +If the file contains an error after the change, the reload is aborted and the +previous version remains active. + +Below are "empty" config files, generated from the config file definitions in +the source code, along with comments explaining the fields. Fields named "x" are +placeholders for user-chosen map keys. + +# sconf + +The config files are in "sconf" format. Properties of sconf files: + + - Indentation with tabs only. + - "#" as first non-whitespace character makes the line a comment. Lines with a + value cannot also have a comment. + - Values don't have syntax indicating their type. For example, strings are + not quoted/escaped and can never span multiple lines. + - Fields that are optional can be left out completely. But the value of an + optional field may itself have required fields. + +See https://pkg.go.dev/github.com/mjl-/sconf for details. # mox.conf diff --git a/develop.txt b/develop.txt index 37170d3..2615227 100644 --- a/develop.txt +++ b/develop.txt @@ -19,6 +19,7 @@ This file has notes useful for mox developers. for reuse do use mlog as it is more convenient. Internally, we always use mlog.Log to do the logging, wrapping an slog.Logger. + # Reusable packages Most non-server Go packages are meant to be reusable. This means internal @@ -28,6 +29,7 @@ with bad API. Third party users aren't affected too seriously due to Go's minimal version selection. 
The reusable packages are in apidiff/packages.txt. We generate the incompatible changes with each release. + # Web interfaces/frontend The web interface frontends (for webmail/, webadmin/ and webaccount/) are @@ -54,6 +56,28 @@ TypeScript interface exposing a "root" HTMLElement that is added to the DOM, and functions for accessing/changing the internal state, keeping the UI managable. + +# Website + +The content of the public website at https://www.xmox.nl is in website/, as +markdown files. The website HTML is generated by website/website.go. The FAQ +is taken from README.md, the protocol support table is generated from +rfc/index.txt. The website is kept in this repository so a commit can change +both the implementation and the documentation on the website. Some of the info +in README.md is duplicated on the website, often more elaborate and possibly +with a slightly less technical audience. The website should also mostly be +readable through the markdown in the git repo. + +Large files (images/videos) are in https://github.com/mjl-/mox-website-files to +keep the repository reasonably sized. + +The public website serves the content from the "website" branch. After a +release, the main branch (with latest development code and +corresponding changes to the website about new features) is merged into the +website branch. Commits to the website branch (e.g. for a news item, or any +other change unrelated to a new release) are merged back into the main branch. + + # TLS certificates https://github.com/cloudflare/cfssl is useful for testing with TLS @@ -134,6 +158,7 @@ Listeners: KeyFile: ../../cfssl/wildcard.$domain-key.pem ``` + # ACME https://github.com/letsencrypt/pebble is useful for testing with ACME. Start a diff --git a/doc.go b/doc.go index 5c0b68c..ecf8dad 100644 --- a/doc.go +++ b/doc.go @@ -2,7 +2,21 @@ Command mox is a modern, secure, full-featured, open source mail server for low-maintenance self-hosted email. 
-# Commands +Mox is started with the "serve" subcommand, but mox also has many other +subcommands. + +Many of those commands talk to a running mox instance, through the ctl file in +the data directory. Specify the configuration file (that holds the path to the +data directory) through the -config flag or MOXCONF environment variable. + +Commands that don't talk to a running mox instance are often for +testing/debugging email functionality. For example for parsing an email message, +or looking up SPF/DKIM/DMARC records. + +Below is the usage information as printed by the command when started without +any parameters. Followed by the help and usage information for each command. + +# Usage mox [-config config/mox.conf] [-pedantic] ... mox serve @@ -76,10 +90,6 @@ low-maintenance self-hosted email. mox message parse message.eml mox reassignthreads [account] -Many commands talk to a running mox instance, through the ctl file in the data -directory. Specify the configuration file (that holds the path to the data -directory) through the -config flag or MOXCONF environment variable. - # mox serve Start mox, serving SMTP/IMAP/HTTPS. diff --git a/gendoc.sh b/gendoc.sh index 0bd5c06..f851acd 100755 --- a/gendoc.sh +++ b/gendoc.sh @@ -6,7 +6,22 @@ cat </dev/null +rm -r website/html/* 2>/dev/null + +set -euo pipefail + +commithash=$(git rev-parse --short HEAD) +commitdate=$(git log -1 --date=format:"%Y-%m-%d" --format="%ad") +export commithash +export commitdate + +# Link to static files and cross-references. 
+ln -sf ../../../mox-website-files/files website/html/files +ln -sf ../../rfc/xr website/html/xr + + +# All commands below are executed relative to ./website/ +cd website + +go run website.go -root -title 'Mox: modern, secure, all-in-one mail server' 'Mox' < index.md >html/index.html + +mkdir html/features +( + cat features/index.md + echo + sed -n -e '/# FAQ/q' -e '/## Roadmap/,/# FAQ/p' < ../README.md + echo + echo 'Also see the [Protocols](../protocols/) page for implementation status, and (non)-plans.' +) | go run website.go 'Features' >html/features/index.html + +mkdir html/screenshots +go run website.go 'Screenshots' < screenshots/index.md >html/screenshots/index.html + +mkdir html/install +go run website.go 'Install' < install/index.md >html/install/index.html + +mkdir html/faq +sed -n '/# FAQ/,//p' < ../README.md | go run website.go 'FAQ' >html/faq/index.html + +mkdir html/config +( + echo '# Config reference' + echo + sed -n '/^Package config holds /,/\*\//p' < ../config/doc.go | grep -v -E '^(Package config holds |\*/)' | sed 's/^# /## /' +) | go run website.go 'Config reference' >html/config/index.html + +mkdir html/commands +( + echo '# Command reference' + echo + sed -n '/^Mox is started /,/\*\//p' < ../doc.go | grep -v '\*/' | sed 's/^# /## /' +) | go run website.go 'Command reference' >html/commands/index.html + +mkdir html/protocols +go run website.go -protocols 'Protocols' <../rfc/index.txt >html/protocols/index.html diff --git a/go.mod b/go.mod index 077efe4..5b38b2c 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/mjl-/sherpaprom v0.0.2 github.com/mjl-/sherpats v0.0.5 github.com/prometheus/client_golang v1.18.0 + github.com/russross/blackfriday/v2 v2.1.0 go.etcd.io/bbolt v1.3.8 golang.org/x/crypto v0.17.0 golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc diff --git a/go.sum b/go.sum index 7ce8759..8b29ac7 100644 --- a/go.sum +++ b/go.sum @@ -60,6 +60,8 @@ github.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190503130316-740c07785007/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/rfc/Makefile b/rfc/Makefile index 354c56e..2f4cc92 100644 --- a/rfc/Makefile +++ b/rfc/Makefile @@ -6,5 +6,5 @@ fetch: link: go run -tags link link.go -- ../*.go ../*/*.go -xr: - go run xr.go -- xr-dev $$(git rev-parse --short HEAD) $$(git log -1 --date=format:"%Y-%m-%d" --format="%ad") $$(git tag | tail -n1) ../*.go ../*/*.go +genxr: + go run xr.go -- xr/dev $$(git rev-parse --short HEAD) $$(git log -1 --date=format:"%Y-%m-%d" --format="%ad") $$(git tag | tail -n1) ../*.go ../*/*.go diff --git a/rfc/index.txt b/rfc/index.txt index 574d6f5..7198ea5 100644 --- a/rfc/index.txt +++ b/rfc/index.txt @@ -1,448 +1,449 @@ -This file lists RFC's by number and title. "make" fetches the RFC's and adds -references back to the source code where they are referenced. Not all RFC's in -this list have been implemented yet. +This file lists RFC's that are relevant for email, along with implementation +status. "make" fetches the RFC's and adds references back to the source code +where they are referenced. The protocol support page on the website is +generated from this information as well. 
+ +Each tab-separated row has: +- RFC number +- Support/implementation status +- RFC status (e.g. Obs for obsolete) +- RFC title + +If the support status column value starts with a minus, it isn't included on +the protocol page on the website. Valid words for implementation status: +- Yes, support is deemed complete +- Partial, support is partial, more work can be done +- Roadmap, no support, but it is planned +- No, not supported and no plans +- ?, implementation status unknown Also see IANA assignments, https://www.iana.org/protocols -# Mail, message format, MIME -822 Standard for ARPA Internet Text Messages -1847 Security Multiparts for MIME: Multipart/Signed and Multipart/Encrypted -2045 Multipurpose Internet Mail Extensions (MIME) Part One: Format of Internet Message Bodies -2046 Multipurpose Internet Mail Extensions (MIME) Part Two: Media Types -2047 MIME (Multipurpose Internet Mail Extensions) Part Three: Message Header Extensions for Non-ASCII Text -2049 Multipurpose Internet Mail Extensions (MIME) Part Five: Conformance Criteria and Examples -2183 Communicating Presentation Information in Internet Messages: The Content-Disposition Header Field -2231 MIME Parameter Value and Encoded Word Extensions: Character Sets, Languages, and Continuations -3629 UTF-8, a transformation format of ISO 10646 -3676 The Text/Plain Format and DelSp Parameters -3834 Recommendations for Automatic Responses to Electronic Mail -5234 Augmented BNF for Syntax Specifications: ABNF -5322 Internet Message Format -5598 Internet Mail Architecture -6854 Update to Internet Message Format to Allow Group Syntax in the "From:" and "Sender:" Header Fields -7405 Case-Sensitive String Support in ABNF -9228 Delivered-To Email Header Field + +# Internet Message Format +822 Yes Obs Standard for ARPA Internet Text Messages +1847 No - Security Multiparts for MIME: Multipart/Signed and Multipart/Encrypted +2045 Yes - Multipurpose Internet Mail Extensions (MIME) Part One: Format of Internet Message 
Bodies +2046 Yes - Multipurpose Internet Mail Extensions (MIME) Part Two: Media Types +2047 Yes - MIME (Multipurpose Internet Mail Extensions) Part Three: Message Header Extensions for Non-ASCII Text +2049 - - Multipurpose Internet Mail Extensions (MIME) Part Five: Conformance Criteria and Examples +2183 Yes - Communicating Presentation Information in Internet Messages: The Content-Disposition Header Field +2231 Yes - MIME Parameter Value and Encoded Word Extensions: Character Sets, Languages, and Continuations +3629 - - UTF-8, a transformation format of ISO 10646 +3676 No - The Text/Plain Format and DelSp Parameters +3834 Roadmap - Recommendations for Automatic Responses to Electronic Mail +5234 - - Augmented BNF for Syntax Specifications: ABNF +5322 Yes - Internet Message Format +5598 - - Internet Mail Architecture +6854 - - Update to Internet Message Format to Allow Group Syntax in the "From:" and "Sender:" Header Fields +7405 - - Case-Sensitive String Support in ABNF +9228 Yes - Delivered-To Email Header Field https://www.iana.org/assignments/message-headers/message-headers.xhtml # SMTP +821 Yes Obs (RFC 2821) SIMPLE MAIL TRANSFER PROTOCOL +2821 Yes Obs (RFC 5321) Simple Mail Transfer Protocol +5321 Yes - Simple Mail Transfer Protocol -821 (obsoleted by RFC 2821) SIMPLE MAIL TRANSFER PROTOCOL -2821 (obsoleted by RFC 5321) Simple Mail Transfer Protocol -5321 Simple Mail Transfer Protocol - -1870 SMTP Service Extension for Message Size Declaration -1985 SMTP Service Extension for Remote Message Queue Starting -2034 SMTP Service Extension for Returning Enhanced Error Codes -2852 Deliver By SMTP Service Extension -2920 SMTP Service Extension for Command Pipelining -2505 Anti-Spam Recommendations for SMTP MTAs -2852 Deliver By SMTP Service Extension -3207 SMTP Service Extension for Secure SMTP over Transport Layer Security (STARTTLS) -3030 SMTP Service Extensions for Transmission of Large and Binary MIME Messages -3461 Simple Mail Transfer Protocol (SMTP) Service 
Extension for Delivery Status Notifications (DSNs) -3462 (obsoleted by RFC 6522) The Multipart/Report Content Type for the Reporting of Mail System Administrative Messages -3463 Enhanced Mail System Status Codes -3464 An Extensible Message Format for Delivery Status Notifications -3798 (obsoleted by RFC 8098) Message Disposition Notification -3848 ESMTP and LMTP Transmission Types Registration -3865 A No Soliciting Simple Mail Transfer Protocol (SMTP) Service Extension -3885 SMTP Service Extension for Message Tracking -3974 SMTP Operational Experience in Mixed IPv4/v6 Environments -4409 (obsoleted by RFC 6409) Message Submission for Mail -4468 Message Submission BURL Extension -4865 SMTP Submission Service Extension for Future Message Release -4954 SMTP Service Extension for Authentication -5068 Email Submission Operations: Access and Accountability Requirements -5248 A Registry for SMTP Enhanced Mail System Status Codes -5335 (obsoleted by RFC 6532) Internationalized Email Headers -5336 (obsoleted by RFC 6531) SMTP Extension for Internationalized Email Addresses -5337 (obsoleted by RFC 6533) Internationalized Delivery Status and Disposition Notifications -6008 Authentication-Results Registration for Differentiating among Cryptographic Results -6152 SMTP Service Extension for 8-bit MIME Transport -6409 Message Submission for Mail -6522 The Multipart/Report Media Type for the Reporting of Mail System Administrative Messages -6530 Overview and Framework for Internationalized Email -6531 SMTP Extension for Internationalized Email -6532 Internationalized Email Headers -6533 Internationalized Delivery Status and Disposition Notifications -6729 Indicating Email Handling States in Trace Fields -6857 Post-Delivery Message Downgrading for Internationalized Email Messages -7293 The Require-Recipient-Valid-Since Header Field and SMTP Service Extension -7372 Email Authentication Status Codes -7435 Opportunistic Security: Some Protection Most of the Time -7504 SMTP 521 and 556 
Reply Codes -7505 A "Null MX" No Service Resource Record for Domains That Accept No Mail -8098 Message Disposition Notification -8601 Message Header Field for Indicating Message Authentication Status -8689 SMTP Require TLS Option +1870 Yes - SMTP Service Extension for Message Size Declaration +1985 No - SMTP Service Extension for Remote Message Queue Starting +2034 Yes - SMTP Service Extension for Returning Enhanced Error Codes +2852 No - Deliver By SMTP Service Extension +2920 Yes - SMTP Service Extension for Command Pipelining +2505 - - Anti-Spam Recommendations for SMTP MTAs +3207 Yes - SMTP Service Extension for Secure SMTP over Transport Layer Security (STARTTLS) +3030 Roadmap - SMTP Service Extensions for Transmission of Large and Binary MIME Messages +3461 Roadmap - Simple Mail Transfer Protocol (SMTP) Service Extension for Delivery Status Notifications (DSNs) +3462 - Obs (RFC 6522) The Multipart/Report Content Type for the Reporting of Mail System Administrative Messages +3463 Yes - Enhanced Mail System Status Codes +3464 Yes - An Extensible Message Format for Delivery Status Notifications +3798 ? 
Obs (RFC 8098) Message Disposition Notification +3848 - - ESMTP and LMTP Transmission Types Registration +3865 No - A No Soliciting Simple Mail Transfer Protocol (SMTP) Service Extension +3885 No - SMTP Service Extension for Message Tracking +3974 - - SMTP Operational Experience in Mixed IPv4/v6 Environments +4409 - Obs (RFC 6409) Message Submission for Mail +4468 Roadmap - Message Submission BURL Extension +4865 Roadmap - SMTP Submission Service Extension for Future Message Release +4954 Yes - SMTP Service Extension for Authentication +5068 - - Email Submission Operations: Access and Accountability Requirements +5248 - - A Registry for SMTP Enhanced Mail System Status Codes +5335 - Obs (RFC 6532) Internationalized Email Headers +5336 - Obs (RFC 6531) SMTP Extension for Internationalized Email Addresses +5337 - Obs (RFC 6533) Internationalized Delivery Status and Disposition Notifications +5782 Yes - DNS Blacklists and Whitelists +6008 Yes - Authentication-Results Registration for Differentiating among Cryptographic Results +6152 Yes - SMTP Service Extension for 8-bit MIME Transport +6409 Yes - Message Submission for Mail +6522 Yes - The Multipart/Report Media Type for the Reporting of Mail System Administrative Messages +6530 Yes - Overview and Framework for Internationalized Email +6531 Yes - SMTP Extension for Internationalized Email +6532 Yes - Internationalized Email Headers +6533 Yes - Internationalized Delivery Status and Disposition Notifications +6647 Partial - Email Greylisting: An Applicability Statement for SMTP +6729 No - Indicating Email Handling States in Trace Fields +6857 No - Post-Delivery Message Downgrading for Internationalized Email Messages +7293 No - The Require-Recipient-Valid-Since Header Field and SMTP Service Extension +7372 Yes - Email Authentication Status Codes +7435 Yes - Opportunistic Security: Some Protection Most of the Time +7504 Yes - SMTP 521 and 556 Reply Codes +7505 Yes - A "Null MX" No Service Resource Record for Domains 
That Accept No Mail +8098 ? - Message Disposition Notification +8601 Yes - Message Header Field for Indicating Message Authentication Status +8689 Yes - SMTP Require TLS Option +8904 No - DNS Whitelist (DNSWL) Email Authentication Method Extension # SPF -4408 (obsoleted by RFC 7208) Sender Policy Framework (SPF) for Authorizing Use of Domains in E-Mail, Version 1 -6652 Sender Policy Framework (SPF) Authentication Failure Reporting Using the Abuse Reporting Format -7208 Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1 -7208-eid5436 errata: header-field FWS -7208-eid6721 errata: corrected smtp example response -7208-eid4751 errata (not verified): ptr mechanism -7208-eid5227 errata (not verified): ptr lookup order -7208-eid6595 errata (not verified): 2 void lookups vs exists -7208-eid6216 errata (not verified): ptr in multiple requirements example from appendix A.4 +4408 Yes Obs (by RFC 7208) Sender Policy Framework (SPF) for Authorizing Use of Domains in E-Mail, Version 1 +6652 ? 
- Sender Policy Framework (SPF) Authentication Failure Reporting Using the Abuse Reporting Format +7208 Yes - Sender Policy Framework (SPF) for Authorizing Use of Domains in Email, Version 1 +7208-eid5436 - - errata: header-field FWS +7208-eid6721 - - errata: corrected smtp example response +7208-eid4751 - - errata (not verified): ptr mechanism +7208-eid5227 - - errata (not verified): ptr lookup order +7208-eid6595 - - errata (not verified): 2 void lookups vs exists +7208-eid6216 - - errata (not verified): ptr in multiple requirements example from appendix A.4 # DKIM -6376 DomainKeys Identified Mail (DKIM) Signatures -6376-eid4810 errata: q= qp-hdr-value -6376-eid5070 errata: tag-spec +6376 Yes - DomainKeys Identified Mail (DKIM) Signatures +6376-eid4810 - - errata: q= qp-hdr-value +6376-eid5070 - - errata: tag-spec -4686 Analysis of Threats Motivating DomainKeys Identified Mail (DKIM) -4871 (obsoleted by RFC 6376) DomainKeys Identified Mail (DKIM) Signatures -5016 Requirements for a DomainKeys Identified Mail (DKIM) Signing Practices Protocol -5585 DomainKeys Identified Mail (DKIM) Service Overview -5672 (obsoleted by RFC 6376) DomainKeys Identified Mail (DKIM) Signatures -- Update -5863 DomainKeys Identified Mail (DKIM) Development, Deployment, and Operations -6377 DomainKeys Identified Mail (DKIM) and Mailing Lists -8032 Edwards-Curve Digital Signature Algorithm (EdDSA) -8301 Cryptographic Algorithm and Key Usage Update to DomainKeys Identified Mail (DKIM) -8463 A New Cryptographic Signature Method for DomainKeys Identified Mail (DKIM) +4686 - - Analysis of Threats Motivating DomainKeys Identified Mail (DKIM) +4871 Yes Obs (RFC 6376) DomainKeys Identified Mail (DKIM) Signatures +5016 -Yes - Requirements for a DomainKeys Identified Mail (DKIM) Signing Practices Protocol +5585 -Yes - DomainKeys Identified Mail (DKIM) Service Overview +5672 -Yes Obs (by RFC 6376) DomainKeys Identified Mail (DKIM) Signatures -- Update +5863 -Yes - DomainKeys Identified Mail (DKIM) 
Development, Deployment, and Operations +6377 ? - DomainKeys Identified Mail (DKIM) and Mailing Lists +8032 - - Edwards-Curve Digital Signature Algorithm (EdDSA) +8301 Yes - Cryptographic Algorithm and Key Usage Update to DomainKeys Identified Mail (DKIM) +8463 Yes - A New Cryptographic Signature Method for DomainKeys Identified Mail (DKIM) # DMARC -7489 Domain-based Message Authentication, Reporting, and Conformance (DMARC) -7489-eid5440 errata: valid dmarc records with(out) semicolon -7489-eid6729 errata (not verified): publicsuffix list only for ICANN DOMAINS -7960 Interoperability Issues between Domain-based Message Authentication, Reporting, and Conformance (DMARC) and Indirect Email Flows -9091 Experimental Domain-Based Message Authentication, Reporting, and Conformance (DMARC) Extension for Public Suffix Domains - -# DKIM/SPF/DMARC -8616 Email Authentication for Internationalized Mail - -# Greylisting -6647 Email Greylisting: An Applicability Statement for SMTP - -# DNSBL/DNSWL -5782 DNS Blacklists and Whitelists -8904 DNS Whitelist (DNSWL) Email Authentication Method Extension - -# DANE -6394 Use Cases and Requirements for DNS-Based Authentication of Named Entities (DANE) -6698 The DNS-Based Authentication of Named Entities (DANE) Transport Layer Security (TLS) Protocol: TLSA -7218 Adding Acronyms to Simplify Conversations about DNS-Based Authentication of Named Entities (DANE) -7671 The DNS-Based Authentication of Named Entities (DANE) Protocol: Updates and Operational Guidance -7672 SMTP Security via Opportunistic DNS-Based Authentication of Named Entities (DANE) Transport Layer Security (TLS) -7673 Using DNS-Based Authentication of Named Entities (DANE) TLSA Records with SRV Records -7929 DNS-Based Authentication of Named Entities (DANE) Bindings for OpenPGP -8162 Using Secure DNS to Associate Certificates with Domain Names for S/MIME - -# TLS-RPT -8460 SMTP TLS Reporting -8460-eid6241 Wrong example for JSON field "mx-host". 
- -# MTA-STS -8461 SMTP MTA Strict Transport Security (MTA-STS) +7489 Yes - Domain-based Message Authentication, Reporting, and Conformance (DMARC) +7489-eid5440 - - errata: valid dmarc records with(out) semicolon +7489-eid6729 - - errata (not verified): publicsuffix list only for ICANN DOMAINS +7960 Yes - Interoperability Issues between Domain-based Message Authentication, Reporting, and Conformance (DMARC) and Indirect Email Flows +9091 Roadmap - Experimental Domain-Based Message Authentication, Reporting, and Conformance (DMARC) Extension for Public Suffix Domains # ARC -8617 The Authenticated Received Chain (ARC) Protocol +8617 Roadmap - The Authenticated Received Chain (ARC) Protocol + +# DANE +6394 -Yes - Use Cases and Requirements for DNS-Based Authentication of Named Entities (DANE) +6698 Yes - The DNS-Based Authentication of Named Entities (DANE) Transport Layer Security (TLS) Protocol: TLSA +7218 -Yes - Adding Acronyms to Simplify Conversations about DNS-Based Authentication of Named Entities (DANE) +7671 -Yes - The DNS-Based Authentication of Named Entities (DANE) Protocol: Updates and Operational Guidance +7672 Yes - SMTP Security via Opportunistic DNS-Based Authentication of Named Entities (DANE) Transport Layer Security (TLS) +7673 Roadmap - Using DNS-Based Authentication of Named Entities (DANE) TLSA Records with SRV Records +7929 No - DNS-Based Authentication of Named Entities (DANE) Bindings for OpenPGP +8162 No - Using Secure DNS to Associate Certificates with Domain Names for S/MIME + +# MTA-STS +8461 Yes - SMTP MTA Strict Transport Security (MTA-STS) + +# TLS Reporting +8460 Yes - SMTP TLS Reporting +8460-eid6241 - - Wrong example for JSON field "mx-host". 
# ARF -5965 An Extensible Format for Email Feedback Reports -6650 Creation and Use of Email Feedback Reports: An Applicability Statement for the Abuse Reporting Format (ARF) -6591 Authentication Failure Reporting Using the Abuse Reporting Format -6692 Source Ports in Abuse Reporting Format (ARF) Reports +5965 Roadmap - An Extensible Format for Email Feedback Reports +6650 Roadmap - Creation and Use of Email Feedback Reports: An Applicability Statement for the Abuse Reporting Format (ARF) +6591 ? - Authentication Failure Reporting Using the Abuse Reporting Format +6692 Roadmap - Source Ports in Abuse Reporting Format (ARF) Reports +9477 Roadmap - Complaint Feedback Loop Address Header # IMAP +1730 Yes Obs (RFC 2060) INTERNET MESSAGE ACCESS PROTOCOL - VERSION 4 +2060 Yes Obs (RFC 3501) INTERNET MESSAGE ACCESS PROTOCOL - VERSION 4rev1 +3501 Yes Obs (RFC 9051) INTERNET MESSAGE ACCESS PROTOCOL - VERSION 4rev1 +9051 Yes - Internet Message Access Protocol (IMAP) - Version 4rev2 -1730 (obsoleted by RFC 2060) INTERNET MESSAGE ACCESS PROTOCOL - VERSION 4 -2060 (obsoleted by RFC 3501) INTERNET MESSAGE ACCESS PROTOCOL - VERSION 4rev1 -3501 (obsoleted by RFC 9051) INTERNET MESSAGE ACCESS PROTOCOL - VERSION 4rev1 -9051 Internet Message Access Protocol (IMAP) - Version 4rev2 +1733 -Yes - DISTRIBUTED ELECTRONIC MAIL MODELS IN IMAP4 +2087 Roadmap - IMAP4 QUOTA extension +2088 - Obs (RFC 7888) IMAP4 non-synchronizing literals +2152 Yes - UTF-7 A Mail-Safe Transformation Format of Unicode +2177 Yes - IMAP4 IDLE command +2180 Yes - IMAP4 Multi-Accessed Mailbox Practice +2193 No - IMAP4 Mailbox Referrals +2342 Yes - IMAP4 Namespace +2683 Yes - IMAP4 Implementation Recommendations +2971 Yes - IMAP4 ID extension +3348 Yes Obs (RFC 5258) The Internet Message Action Protocol (IMAP4) Child Mailbox Extension +3502 Roadmap - Internet Message Access Protocol (IMAP) - MULTIAPPEND Extension +3503 ? 
- Message Disposition Notification (MDN) profile for Internet Message Access Protocol (IMAP) +3516 Yes	-	IMAP4 Binary Content Extension +3691 Yes	-	Internet Message Access Protocol (IMAP) UNSELECT command +4314 Roadmap	-	IMAP4 Access Control List (ACL) Extension +4315 Yes	-	Internet Message Access Protocol (IMAP) - UIDPLUS extension +4466 -Yes	-	Collected Extensions to IMAP4 ABNF +4467 Roadmap	-	Internet Message Access Protocol (IMAP) - URLAUTH Extension +4469 Roadmap	-	Internet Message Access Protocol (IMAP) CATENATE Extension +4549 -Yes	-	Synchronization Operations for Disconnected IMAP4 Clients +4551 Yes	Obs (RFC 7162)	IMAP Extension for Conditional STORE Operation or Quick Flag Changes Resynchronization +4731 Yes	-	IMAP4 Extension to SEARCH Command for Controlling What Kind of Information Is Returned +4959 Yes	-	IMAP Extension for Simple Authentication and Security Layer (SASL) Initial Client Response +4978 Roadmap	-	The IMAP COMPRESS Extension +5032 Roadmap	-	WITHIN Search Extension to the IMAP Protocol +5092 Roadmap	-	IMAP URL Scheme +5161 Yes	-	The IMAP ENABLE Extension +5162 Yes	Obs (RFC 7162)	IMAP4 Extensions for Quick Mailbox Resynchronization +5182 Yes	-	IMAP Extension for Referencing the Last SEARCH Result +5255 No	-	Internet Message Access Protocol Internationalization +5256 Roadmap	-	Internet Message Access Protocol - SORT and THREAD Extensions +5257 No	-	Internet Message Access Protocol - ANNOTATE Extension +5258 Yes	-	Internet Message Access Protocol version 4 - LIST Command Extensions +5259 No	-	Internet Message Access Protocol - CONVERT Extension +5267 Roadmap	-	Contexts for IMAP4 +5464 Roadmap	-	The IMAP METADATA Extension +5465 Roadmap	-	The IMAP NOTIFY Extension +5466 Roadmap	-	IMAP4 Extension for Named Searches (Filters) +5524 No	-	Extended URLFETCH for Binary and Converted Parts +5530 Yes	-	IMAP Response Codes +5738 Partial	Obs (RFC 6855)	IMAP Support for UTF-8 +5788 -Yes	-	IMAP4 Keyword Registry +5819 Yes	-	IMAP4 Extension for Returning STATUS 
Information in Extended LIST +5957 Roadmap - Display-Based Address Sorting for the IMAP4 SORT Extension +6154 Yes - IMAP LIST Extension for Special-Use Mailboxes +6203 No - IMAP4 Extension for Fuzzy Search +6237 Roadmap Obs (RFC 7377) IMAP4 Multimailbox SEARCH Extension +6851 Yes - Internet Message Access Protocol (IMAP) - MOVE Extension +6855 Yes - IMAP Support for UTF-8 +6858 No - Simplified POP and IMAP Downgrading for Internationalized Email +7162 Yes - IMAP Extensions: Quick Flag Changes Resynchronization (CONDSTORE) and Quick Mailbox Resynchronization (QRESYNC) +7162-eid5055 - - errata: space after untagged OK +7377 Roadmap - IMAP4 Multimailbox SEARCH Extension +7888 Yes - IMAP4 Non-synchronizing Literals +7889 Yes - The IMAP APPENDLIMIT Extension +8437 Roadmap - IMAP UNAUTHENTICATE Extension for Connection Reuse +8438 Yes - IMAP Extension for STATUS=SIZE +8440 ? - IMAP4 Extension for Returning MYRIGHTS Information in Extended LIST +8457 Roadmap - IMAP "$Important" Keyword and "\Important" Special-Use Attribute +8474 Roadmap - IMAP Extension for Object Identifiers +8508 Roadmap - IMAP REPLACE Extension +8514 Roadmap - Internet Message Access Protocol (IMAP) - SAVEDATE Extension +8970 Roadmap - IMAP4 Extension: Message Preview Generation +9208 Roadmap - IMAP QUOTA Extension +9394 Roadmap - IMAP PARTIAL Extension for Paged SEARCH and FETCH -1733 DISTRIBUTED ELECTRONIC MAIL MODELS IN IMAP4 -2087 IMAP4 QUOTA extension -2088 (obsoleted by RFC 7888) IMAP4 non-synchronizing literals -2152 UTF-7 A Mail-Safe Transformation Format of Unicode -2177 IMAP4 IDLE command -2180 IMAP4 Multi-Accessed Mailbox Practice -2193 IMAP4 Mailbox Referrals -2342 IMAP4 Namespace -2683 IMAP4 Implementation Recommendations -2971 IMAP4 ID extension -3348 (obsoleted by RFC 5258) The Internet Message Action Protocol (IMAP4) Child Mailbox Extension -3502 Internet Message Access Protocol (IMAP) - MULTIAPPEND Extension -3503 Message Disposition Notification (MDN) profile for Internet Message 
Access Protocol (IMAP) -3516 IMAP4 Binary Content Extension -3691 Internet Message Access Protocol (IMAP) UNSELECT command -4314 IMAP4 Access Control List (ACL) Extension -4315 Internet Message Access Protocol (IMAP) - UIDPLUS extension -4466 Collected Extensions to IMAP4 ABNF -4467 Internet Message Access Protocol (IMAP) - URLAUTH Extension -4469 Internet Message Access Protocol (IMAP) CATENATE Extension -4549 Synchronization Operations for Disconnected IMAP4 Clients -4551 (obsoleted by RFC 7162) IMAP Extension for Conditional STORE Operation or Quick Flag Changes Resynchronization -4731 IMAP4 Extension to SEARCH Command for Controlling What Kind of Information Is Returned -4959 IMAP Extension for Simple Authentication and Security Layer (SASL) Initial Client Response -4978 The IMAP COMPRESS Extension -5032 WITHIN Search Extension to the IMAP Protocol -5092 IMAP URL Scheme -5161 The IMAP ENABLE Extension -5162 (obsoleted by RFC 7162) IMAP4 Extensions for Quick Mailbox Resynchronization -5182 IMAP Extension for Referencing the Last SEARCH Result -5255 Internet Message Access Protocol Internationalization -5256 Internet Message Access Protocol - SORT and THREAD Extensions -5257 Internet Message Access Protocol - ANNOTATE Extension -5258 Internet Message Access Protocol version 4 - LIST Command Extensions -5259 Internet Message Access Protocol - CONVERT Extension -5267 Contexts for IMAP4 -5464 The IMAP METADATA Extension -5465 The IMAP NOTIFY Extension -5466 IMAP4 Extension for Named Searches (Filters) -5524 Extended URLFETCH for Binary and Converted Parts -5530 IMAP Response Codes -5738 (obsoleted by RFC 6855) IMAP Support for UTF-8 -5788 IMAP4 Keyword Registry -5819 IMAP4 Extension for Returning STATUS Information in Extended LIST -5957 Display-Based Address Sorting for the IMAP4 SORT Extension -6154 IMAP LIST Extension for Special-Use Mailboxes -6203 IMAP4 Extension for Fuzzy Search -6237 (obsoleted by RFC 7377) IMAP4 Multimailbox SEARCH Extension -6851 Internet 
Message Access Protocol (IMAP) - MOVE Extension -6855 IMAP Support for UTF-8 -6858 Simplified POP and IMAP Downgrading for Internationalized Email -7162 IMAP Extensions: Quick Flag Changes Resynchronization (CONDSTORE) and Quick Mailbox Resynchronization (QRESYNC) -7162-eid5055 errata: space after untagged OK -7377 IMAP4 Multimailbox SEARCH Extension -7888 IMAP4 Non-synchronizing Literals -7889 The IMAP APPENDLIMIT Extension -8437 IMAP UNAUTHENTICATE Extension for Connection Reuse -8438 IMAP Extension for STATUS=SIZE -8440 IMAP4 Extension for Returning MYRIGHTS Information in Extended LIST -8457 IMAP "$Important" Keyword and "\Important" Special-Use Attribute -8474 IMAP Extension for Object Identifiers -8508 IMAP REPLACE Extension -8514 Internet Message Access Protocol (IMAP) - SAVEDATE Extension -8970 IMAP4 Extension: Message Preview Generation -9208 IMAP QUOTA Extension -9394 IMAP PARTIAL Extension for Paged SEARCH and FETCH - -5198 Unicode Format for Network Interchange +5198 -? - Unicode Format for Network Interchange # Lemonade profile -4550 (obsoleted by RFC 5550) Internet Email to Support Diverse Service Environments (Lemonade) Profile -5383 Deployment Considerations for Lemonade-Compliant Mobile Email -5423 Internet Message Store Events -5442 LEMONADE Architecture - Supporting Open Mobile Alliance (OMA) Mobile Email (MEM) Using Internet Mail -5550 The Internet Email to Support Diverse Service Environments (Lemonade) Profile -5551 Lemonade Notifications Architecture +4550 -? Obs (RFC 5550) Internet Email to Support Diverse Service Environments (Lemonade) Profile +5383 -? - Deployment Considerations for Lemonade-Compliant Mobile Email +5423 -? - Internet Message Store Events +5442 -? - LEMONADE Architecture - Supporting Open Mobile Alliance (OMA) Mobile Email (MEM) Using Internet Mail +5550 -? - The Internet Email to Support Diverse Service Environments (Lemonade) Profile +5551 -? 
- Lemonade Notifications Architecture # Mailing list -2369 The Use of URLs as Meta-Syntax for Core Mail List Commands and their Transport through Message Header Fields -2919 List-Id: A Structured Field and Namespace for the Identification of Mailing Lists +2369 ? - The Use of URLs as Meta-Syntax for Core Mail List Commands and their Transport through Message Header Fields +2919 ? - List-Id: A Structured Field and Namespace for the Identification of Mailing Lists # Sieve -3028 (obsoleted by RFC 5228) Sieve: A Mail Filtering Language -5228 Sieve: An Email Filtering Language -5804 A Protocol for Remotely Managing Sieve Scripts +3028 Roadmap Obs (RFC 5228) Sieve: A Mail Filtering Language +5228 Roadmap - Sieve: An Email Filtering Language +5804 Roadmap - A Protocol for Remotely Managing Sieve Scripts -3894 Sieve Extension: Copying Without Side Effects -5173 Sieve Email Filtering: Body Extension -5183 Sieve Email Filtering: Environment Extension -5229 Sieve Email Filtering: Variables Extension -5230 Sieve Email Filtering: Vacation Extension -5231 Sieve Email Filtering: Relational Extension -5232 Sieve Email Filtering: Imap4flags Extension -5233 Sieve Email Filtering: Subaddress Extension -5235 Sieve Email Filtering: Spamtest and Virustest Extensions -5260 Sieve Email Filtering: Date and Index Extensions -5293 Sieve Email Filtering: Editheader Extension -5429 Sieve Email Filtering: Reject and Extended Reject Extensions -5435 Sieve Email Filtering: Extension for Notifications -5437 Sieve Notification Mechanism: Extensible Messaging and Presence Protocol (XMPP) -5463 Sieve Email Filtering: Ihave Extension -5490 The Sieve Mail-Filtering Language -- Extensions for Checking Mailbox Status and Accessing Mailbox Metadata -5703 Sieve Email Filtering: MIME Part Tests, Iteration, Extraction, Replacement, and Enclosure -5784 Sieve Email Filtering: Sieves and Display Directives in XML -6131 Sieve Vacation Extension: "Seconds" Parameter -6558 Sieve Extension for Converting Messages 
before Delivery -6609 Sieve Email Filtering: Include Extension -6785 Support for Internet Message Access Protocol (IMAP) Events in Sieve -8579 Sieve Email Filtering: Delivering to Special-Use Mailboxes -8580 Sieve Extension: File Carbon Copy (FCC) -9042 Sieve Email Filtering: Delivery by MAILBOXID +3894 No - Sieve Extension: Copying Without Side Effects +5173 No - Sieve Email Filtering: Body Extension +5183 Roadmap - Sieve Email Filtering: Environment Extension +5229 Roadmap - Sieve Email Filtering: Variables Extension +5230 Roadmap - Sieve Email Filtering: Vacation Extension +5231 Roadmap - Sieve Email Filtering: Relational Extension +5232 Roadmap - Sieve Email Filtering: Imap4flags Extension +5233 Roadmap - Sieve Email Filtering: Subaddress Extension +5235 No - Sieve Email Filtering: Spamtest and Virustest Extensions +5260 No - Sieve Email Filtering: Date and Index Extensions +5293 No - Sieve Email Filtering: Editheader Extension +5429 Roadmap - Sieve Email Filtering: Reject and Extended Reject Extensions +5435 No - Sieve Email Filtering: Extension for Notifications +5437 No - Sieve Notification Mechanism: Extensible Messaging and Presence Protocol (XMPP) +5463 Roadmap - Sieve Email Filtering: Ihave Extension +5490 No - The Sieve Mail-Filtering Language -- Extensions for Checking Mailbox Status and Accessing Mailbox Metadata +5703 No - Sieve Email Filtering: MIME Part Tests, Iteration, Extraction, Replacement, and Enclosure +5784 No - Sieve Email Filtering: Sieves and Display Directives in XML +6131 ? 
- Sieve Vacation Extension: "Seconds" Parameter +6558 No - Sieve Extension for Converting Messages before Delivery +6609 No - Sieve Email Filtering: Include Extension +6785 Roadmap - Support for Internet Message Access Protocol (IMAP) Events in Sieve +8579 Roadmap - Sieve Email Filtering: Delivering to Special-Use Mailboxes +8580 No - Sieve Extension: File Carbon Copy (FCC) +9042 No - Sieve Email Filtering: Delivery by MAILBOXID -3431 (obsoleted by RFC 5231) Relational Extension -3598 (obsoleted by RFC 5233) Subaddress Extension -3685 (obsoleted by RFC 5235) Spamtest and VirusTest Extensions +3431 Roadmap Obs (RFC 5231) Relational Extension +3598 Roadmap Obs (RFC 5233) Subaddress Extension +3685 No Obs (RFC 5235) Spamtest and VirusTest Extensions Also see http://sieve.info/documents # JMAP -8620 The JSON Meta Application Protocol (JMAP) -8621 The JSON Meta Application Protocol (JMAP) for Mail -8887 A JSON Meta Application Protocol (JMAP) Subprotocol for WebSocket -9007 Handling Message Disposition Notification with the JSON Meta Application Protocol (JMAP) -9219 S/MIME Signature Verification Extension to the JSON Meta Application Protocol (JMAP) -9425 JSON Meta Application Protocol (JMAP) for Quotas +8620 Roadmap - The JSON Meta Application Protocol (JMAP) +8621 Roadmap - The JSON Meta Application Protocol (JMAP) for Mail +8887 Roadmap - A JSON Meta Application Protocol (JMAP) Subprotocol for WebSocket +9007 ? 
- Handling Message Disposition Notification with the JSON Meta Application Protocol (JMAP) +9219 No - S/MIME Signature Verification Extension to the JSON Meta Application Protocol (JMAP) +9425 No - JSON Meta Application Protocol (JMAP) for Quotas See implementation guide, https://jmap.io/server.html -# Vouch by reference -5518 Vouch By Reference +# CalDAV/iCal +4791 Roadmap - Calendaring Extensions to WebDAV (CalDAV) +5689 Roadmap - Extended MKCOL for Web Distributed Authoring and Versioning (WebDAV) +6638 Roadmap - Scheduling Extensions to CalDAV +6764 Roadmap - Locating Services for Calendaring Extensions to WebDAV (CalDAV) and vCard Extensions to WebDAV (CardDAV) +7809 Roadmap - Calendaring Extensions to WebDAV (CalDAV): Time Zones by Reference +7953 Roadmap - Calendar Availability -# TLS -5056 On the Use of Channel Bindings to Secure Channels -5705 Keying Material Exporters for Transport Layer Security (TLS) -5929 Channel Bindings for TLS -6125 Representation and Verification of Domain-Based Application Service Identity within Internet Public Key Infrastructure Using X.509 (PKIX) Certificates in the Context of Transport Layer Security (TLS) -7250 Using Raw Public Keys in Transport Layer Security (TLS) and Datagram Transport Layer Security (DTLS) -7525 Recommendations for Secure Use of Transport Layer Security (TLS) and Datagram Transport Layer Security (DTLS) -7627 Transport Layer Security (TLS) Session Hash and Extended Master Secret Extension -8314 Cleartext Considered Obsolete: Use of Transport Layer Security (TLS) for Email Submission and Access -8446 The Transport Layer Security (TLS) Protocol Version 1.3 -8996 Deprecating TLS 1.0 and TLS 1.1 -8997 Deprecation of TLS 1.1 for Email Submission and Access -9266 Channel Bindings for TLS 1.3 +5545 Roadmap - Internet Calendaring and Scheduling Core Object Specification (iCalendar) +5546 Roadmap - iCalendar Transport-Independent Interoperability Protocol (iTIP) +6047 Roadmap - iCalendar Message-Based 
Interoperability Protocol (iMIP) +6868 Roadmap - Parameter Value Encoding in iCalendar and vCard +7529 ? - Non-Gregorian Recurrence Rules in the Internet Calendaring and Scheduling Core Object Specification (iCalendar) +7986 ? - New Properties for iCalendar +9073 ? - Event Publishing Extensions to iCalendar +9074 ? - "VALARM" Extensions for iCalendar +9253 ? - Support for iCalendar Relationships +6321 ? - xCal: The XML Format for iCalendar +7265 ? - jCal: The JSON Format for iCalendar -# SASL +# CardDAV/vCard +6352 Roadmap - CardDAV: vCard Extensions to Web Distributed Authoring and Versioning (WebDAV) -2104 HMAC: Keyed-Hashing for Message Authentication -2195 IMAP/POP AUTHorize Extension for Simple Challenge/Response -4013 (obsoleted by RFC 7613) SASLprep: Stringprep Profile for User Names and Passwords -4422 Simple Authentication and Security Layer (SASL) -4505 Anonymous Simple Authentication and Security Layer (SASL) Mechanism -4616 The PLAIN Simple Authentication and Security Layer (SASL) Mechanism -5802 Salted Challenge Response Authentication Mechanism (SCRAM) SASL and GSS-API Mechanisms -6331 Moving DIGEST-MD5 to Historic -7613 (obsoleted by RFC 8265) Preparation, Enforcement, and Comparison of Internationalized Strings Representing Usernames and Passwords -7677 SCRAM-SHA-256 and SCRAM-SHA-256-PLUS Simple Authentication and Security Layer (SASL) Mechanisms -8265 Preparation, Enforcement, and Comparison of Internationalized Strings Representing Usernames and Passwords - -# IDNA -3492 Punycode: A Bootstring encoding of Unicode for Internationalized Domain Names in Applications (IDNA) -5890 Internationalized Domain Names for Applications (IDNA): Definitions and Document Framework -5891 Internationalized Domain Names in Applications (IDNA): Protocol -5892 The Unicode Code Points and Internationalized Domain Names for Applications (IDNA) -5893 Right-to-Left Scripts for Internationalized Domain Names for Applications (IDNA) -5894 Internationalized Domain Names for 
Applications (IDNA): Background, Explanation, and Rationale - -# ACME -8555 Automatic Certificate Management Environment (ACME) -8737 Automated Certificate Management Environment (ACME) TLS Application-Layer Protocol Negotiation (ALPN) Challenge Extension - -# CAA -8657 Certification Authority Authorization (CAA) Record Extensions for Account URI and Automatic Certificate Management Environment (ACME) Method Binding -8659 DNS Certification Authority Authorization (CAA) Resource Record - -# DNS -1034 DOMAIN NAMES - CONCEPTS AND FACILITIES -1035 DOMAIN NAMES - IMPLEMENTATION AND SPECIFICATION -1101 DNS Encoding of Network Names and Other Types -1536 Common DNS Implementation Errors and Suggested Fixes -2181 Clarifications to the DNS Specification -2308 Negative Caching of DNS Queries (DNS NCACHE) -2672 (obsoleted by RFC 6672) Non-Terminal DNS Name Redirection -3226 DNSSEC and IPv6 A6 aware server/resolver message size requirements -3363 Representing Internet Protocol version 6 (IPv6) Addresses in the Domain Name System (DNS) -3596 DNS Extensions to Support IP Version 6 -3597 Handling of Unknown DNS Resource Record (RR) Types -3833 Threat Analysis of the Domain Name System (DNS) -4343 Domain Name System (DNS) Case Insensitivity Clarification -4592 The Role of Wildcards in the Domain Name System -5001 DNS Name Server Identifier (NSID) Option -5452 Measures for Making DNS More Resilient against Forged Answers -6604 xNAME RCODE and Status Bits Clarification -6672 DNAME Redirection in the DNS -6891 Extension Mechanisms for DNS (EDNS(0)) -6895 Domain Name System (DNS) IANA Considerations -7686 The ".onion" Special-Use Domain Name -7766 DNS Transport over TCP - Implementation Requirements -7828 The edns-tcp-keepalive EDNS0 Option -7873 Domain Name System (DNS) Cookies -8020 NXDOMAIN: There Really Is Nothing Underneath -8482 Providing Minimal-Sized Responses to DNS Queries That Have QTYPE=ANY -8490 DNS Stateful Operations -8499 DNS Terminology -8767 Serving Stale Data to 
Improve DNS Resiliency -8914 Extended DNS Errors -9018 Interoperable Domain Name System (DNS) Server Cookies -9210 DNS Transport over TCP - Operational Requirements - -# DNSSEC -3225 Indicating Resolver Support of DNSSEC -3658 Delegation Signer (DS) Resource Record (RR) -4033 DNS Security Introduction and Requirements -4034 Resource Records for the DNS Security Extensions -4035 Protocol Modifications for the DNS Security Extensions -4470 Minimally Covering NSEC Records and DNSSEC On-line Signing -4956 DNS Security (DNSSEC) Opt-In -5155 DNS Security (DNSSEC) Hashed Authenticated Denial of Existence -5702 Use of SHA-2 Algorithms with RSA in DNSKEY and RRSIG Resource Records for DNSSEC -5933 Use of GOST Signature Algorithms in DNSKEY and RRSIG Resource Records for DNSSEC -6014 Cryptographic Algorithm Identifier Allocation for DNSSEC -6781 DNSSEC Operational Practices, Version 2 -6840 Clarifications and Implementation Notes for DNS Security (DNSSEC) -7901 CHAIN Query Requests in DNS -8198 Aggressive Use of DNSSEC-Validated Cache -8624 Algorithm Implementation Requirements and Usage Guidance for DNSSEC -8749 Moving DNSSEC Lookaside Validation (DLV) to Historic Status -9077 NSEC and NSEC3: TTLs and Aggressive Use -9157 Revised IANA Considerations for DNSSEC -9276 Guidance for NSEC3 Parameter Settings - -# HTTP - -2616 Hypertext Transfer Protocol -- HTTP/1.1 -7230 Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing -9110 HTTP Semantics - -# Websockets - -6455 The WebSocket Protocol - -# More - -3339 Date and Time on the Internet: Timestamps -3986 Uniform Resource Identifier (URI): Generic Syntax -5617 (Historic) DomainKeys Identified Mail (DKIM) Author Domain Signing Practices (ADSP) -6186 (not used in practice) Use of SRV Records for Locating Email Submission/Access Services -7817 Updated Transport Layer Security (TLS) Server Identity Check Procedure for Email-Related Protocols +2425 Roadmap - A MIME Content-Type for Directory Information +2426 ? 
- vCard MIME Directory Profile +6350 Roadmap - vCard Format Specification +6351 ? - xCard: vCard XML Representation +6473 ? - vCard KIND:application +6474 ? - vCard Format Extensions: Place of Birth, Place and Date of Death +6715 ? - vCard Format Extensions: Representing vCard Extensions Defined by the Open Mobile Alliance (OMA) Converged Address Book (CAB) Group +6869 ? - vCard KIND:device +7095 ? - jCard: The JSON Format for vCard # WebDAV -4918 HTTP Extensions for Web Distributed Authoring and Versioning (WebDAV) -3253 Versioning Extensions to WebDAV (Web Distributed Authoring and Versioning) -3648 Web Distributed Authoring and Versioning (WebDAV) Ordered Collections Protocol -3744 Web Distributed Authoring and Versioning (WebDAV) Access Control Protocol -4437 Web Distributed Authoring and Versioning (WebDAV) Redirect Reference Resources -5323 Web Distributed Authoring and Versioning (WebDAV) SEARCH -6578 Collection Synchronization for Web Distributed Authoring and Versioning (WebDAV) +4918 Roadmap - HTTP Extensions for Web Distributed Authoring and Versioning (WebDAV) +3253 ? - Versioning Extensions to WebDAV (Web Distributed Authoring and Versioning) +3648 ? - Web Distributed Authoring and Versioning (WebDAV) Ordered Collections Protocol +3744 ? - Web Distributed Authoring and Versioning (WebDAV) Access Control Protocol +4437 ? - Web Distributed Authoring and Versioning (WebDAV) Redirect Reference Resources +5323 ? - Web Distributed Authoring and Versioning (WebDAV) SEARCH +6578 ? 
- Collection Synchronization for Web Distributed Authoring and Versioning (WebDAV) -# CalDAV -4791 Calendaring Extensions to WebDAV (CalDAV) -5689 Extended MKCOL for Web Distributed Authoring and Versioning (WebDAV) -6638 Scheduling Extensions to CalDAV -6764 Locating Services for Calendaring Extensions to WebDAV (CalDAV) and vCard Extensions to WebDAV (CardDAV) -7809 Calendaring Extensions to WebDAV (CalDAV): Time Zones by Reference -7953 Calendar Availability +# SASL +2104 - - HMAC: Keyed-Hashing for Message Authentication +2195 Yes - IMAP/POP AUTHorize Extension for Simple Challenge/Response +4013 Yes Obs (RFC 7613) SASLprep: Stringprep Profile for User Names and Passwords +4422 Yes - Simple Authentication and Security Layer (SASL) +4505 No - Anonymous Simple Authentication and Security Layer (SASL) Mechanism +4616 Yes - The PLAIN Simple Authentication and Security Layer (SASL) Mechanism +5802 Yes - Salted Challenge Response Authentication Mechanism (SCRAM) SASL and GSS-API Mechanisms +6331 -No - Moving DIGEST-MD5 to Historic +7613 Yes Obs (RFC 8265) Preparation, Enforcement, and Comparison of Internationalized Strings Representing Usernames and Passwords +7677 Yes - SCRAM-SHA-256 and SCRAM-SHA-256-PLUS Simple Authentication and Security Layer (SASL) Mechanisms +8265 Yes - Preparation, Enforcement, and Comparison of Internationalized Strings Representing Usernames and Passwords -# iCal -5545 Internet Calendaring and Scheduling Core Object Specification (iCalendar) -5546 iCalendar Transport-Independent Interoperability Protocol (iTIP) -6047 iCalendar Message-Based Interoperability Protocol (iMIP) -6868 Parameter Value Encoding in iCalendar and vCard -7529 Non-Gregorian Recurrence Rules in the Internet Calendaring and Scheduling Core Object Specification (iCalendar) -7986 New Properties for iCalendar -9073 Event Publishing Extensions to iCalendar -9074 "VALARM" Extensions for iCalendar -9253 Support for iCalendar Relationships -6321 xCal: The XML Format for 
iCalendar -7265 jCal: The JSON Format for iCalendar +# Internationalization +3492 Yes - Punycode: A Bootstring encoding of Unicode for Internationalized Domain Names in Applications (IDNA) +5890 Yes - Internationalized Domain Names for Applications (IDNA): Definitions and Document Framework +5891 Yes - Internationalized Domain Names in Applications (IDNA): Protocol +5892 ? - The Unicode Code Points and Internationalized Domain Names for Applications (IDNA) +5893 ? - Right-to-Left Scripts for Internationalized Domain Names for Applications (IDNA) +5894 ? - Internationalized Domain Names for Applications (IDNA): Background, Explanation, and Rationale +8616 Yes - Email Authentication for Internationalized Mail -# CardDAV -6352 CardDAV: vCard Extensions to Web Distributed Authoring and Versioning (WebDAV) +# TLS +5056 Yes - On the Use of Channel Bindings to Secure Channels +5705 Yes - Keying Material Exporters for Transport Layer Security (TLS) +5929 Yes - Channel Bindings for TLS +6125 -? - Representation and Verification of Domain-Based Application Service Identity within Internet Public Key Infrastructure Using X.509 (PKIX) Certificates in the Context of Transport Layer Security (TLS) +7250 -No - Using Raw Public Keys in Transport Layer Security (TLS) and Datagram Transport Layer Security (DTLS) +7525 -? - Recommendations for Secure Use of Transport Layer Security (TLS) and Datagram Transport Layer Security (DTLS) +7627 -? 
- Transport Layer Security (TLS) Session Hash and Extended Master Secret Extension +8314 Yes - Cleartext Considered Obsolete: Use of Transport Layer Security (TLS) for Email Submission and Access +8446 Yes - The Transport Layer Security (TLS) Protocol Version 1.3 +8996 Yes - Deprecating TLS 1.0 and TLS 1.1 +8997 Yes - Deprecation of TLS 1.1 for Email Submission and Access +9266 Yes - Channel Bindings for TLS 1.3 -# vCard -2425 A MIME Content-Type for Directory Information -2426 vCard MIME Directory Profile -6350 vCard Format Specification -6351 xCard: vCard XML Representation -6473 vCard KIND:application -6474 vCard Format Extensions: Place of Birth, Place and Date of Death -6715 vCard Format Extensions: Representing vCard Extensions Defined by the Open Mobile Alliance (OMA) Converged Address Book (CAB) Group -6869 vCard KIND:device -7095 jCard: The JSON Format for vCard +# ACME +8555 Yes - Automatic Certificate Management Environment (ACME) +8737 Yes - Automated Certificate Management Environment (ACME) TLS Application-Layer Protocol Negotiation (ALPN) Challenge Extension + +# CAA +8659 Yes - DNS Certification Authority Authorization (CAA) Resource Record +8657 Yes - Certification Authority Authorization (CAA) Record Extensions for Account URI and Automatic Certificate Management Environment (ACME) Method Binding + +# Vouch by reference +5518 -? - Vouch By Reference + +# HTTP +2616 Yes Obs (RFC 7230) Hypertext Transfer Protocol -- HTTP/1.1 +6455 Yes - The WebSocket Protocol +7230 Yes Obs (RFC 9110) Hypertext Transfer Protocol (HTTP/1.1): Message Syntax and Routing +9110 Yes - HTTP Semantics + + +# More +3339 -? - Date and Time on the Internet: Timestamps +3986 -? - Uniform Resource Identifier (URI): Generic Syntax +5617 -? - (Historic) DomainKeys Identified Mail (DKIM) Author Domain Signing Practices (ADSP) +6186 -? - (not used in practice) Use of SRV Records for Locating Email Submission/Access Services +7817 -? 
- Updated Transport Layer Security (TLS) Server Identity Check Procedure for Email-Related Protocols + +# DNS +1034 -? - DOMAIN NAMES - CONCEPTS AND FACILITIES +1035 -? - DOMAIN NAMES - IMPLEMENTATION AND SPECIFICATION +1101 -? - DNS Encoding of Network Names and Other Types +1536 -? - Common DNS Implementation Errors and Suggested Fixes +2181 -? - Clarifications to the DNS Specification +2308 -? - Negative Caching of DNS Queries (DNS NCACHE) +2672 -? - (obsoleted by RFC 6672) Non-Terminal DNS Name Redirection +3226 -? - DNSSEC and IPv6 A6 aware server/resolver message size requirements +3363 -? - Representing Internet Protocol version 6 (IPv6) Addresses in the Domain Name System (DNS) +3596 -? - DNS Extensions to Support IP Version 6 +3597 -? - Handling of Unknown DNS Resource Record (RR) Types +3833 -? - Threat Analysis of the Domain Name System (DNS) +4343 -? - Domain Name System (DNS) Case Insensitivity Clarification +4592 -? - The Role of Wildcards in the Domain Name System +5001 -? - DNS Name Server Identifier (NSID) Option +5452 -? - Measures for Making DNS More Resilient against Forged Answers +6604 -? - xNAME RCODE and Status Bits Clarification +6672 -? - DNAME Redirection in the DNS +6891 -? - Extension Mechanisms for DNS (EDNS(0)) +6895 -? - Domain Name System (DNS) IANA Considerations +7686 -? - The ".onion" Special-Use Domain Name +7766 -? - DNS Transport over TCP - Implementation Requirements +7828 -? - The edns-tcp-keepalive EDNS0 Option +7873 -? - Domain Name System (DNS) Cookies +8020 -? - NXDOMAIN: There Really Is Nothing Underneath +8482 -? - Providing Minimal-Sized Responses to DNS Queries That Have QTYPE=ANY +8490 -? - DNS Stateful Operations +8499 -? - DNS Terminology +8767 -? - Serving Stale Data to Improve DNS Resiliency +8914 -? - Extended DNS Errors +9018 -? - Interoperable Domain Name System (DNS) Server Cookies +9210 -? - DNS Transport over TCP - Operational Requirements + +# DNSSEC +3225 -? 
- Indicating Resolver Support of DNSSEC +3658 -? - Delegation Signer (DS) Resource Record (RR) +4033 -? - DNS Security Introduction and Requirements +4034 -? - Resource Records for the DNS Security Extensions +4035 -? - Protocol Modifications for the DNS Security Extensions +4470 -? - Minimally Covering NSEC Records and DNSSEC On-line Signing +4956 -? - DNS Security (DNSSEC) Opt-In +5155 -? - DNS Security (DNSSEC) Hashed Authenticated Denial of Existence +5702 -? - Use of SHA-2 Algorithms with RSA in DNSKEY and RRSIG Resource Records for DNSSEC +5933 -? - Use of GOST Signature Algorithms in DNSKEY and RRSIG Resource Records for DNSSEC +6014 -? - Cryptographic Algorithm Identifier Allocation for DNSSEC +6781 -? - DNSSEC Operational Practices, Version 2 +6840 -? - Clarifications and Implementation Notes for DNS Security (DNSSEC) +7901 -? - CHAIN Query Requests in DNS +8198 -? - Aggressive Use of DNSSEC-Validated Cache +8624 -? - Algorithm Implementation Requirements and Usage Guidance for DNSSEC +8749 -? - Moving DNSSEC Lookaside Validation (DLV) to Historic Status +9077 -? - NSEC and NSEC3: TTLs and Aggressive Use +9157 -? - Revised IANA Considerations for DNSSEC +9276 -? 
- Guidance for NSEC3 Parameter Settings diff --git a/rfc/xr.go b/rfc/xr.go index 6088b11..5da6b2b 100644 --- a/rfc/xr.go +++ b/rfc/xr.go @@ -165,10 +165,10 @@ for (const a of document.querySelectorAll('a')) { continue } t := strings.Split(line, "\t") - if len(t) != 2 { + if len(t) != 4 { continue } - topics[topic] = append(topics[topic], rfc{strings.TrimSpace(t[0]), t[1]}) + topics[topic] = append(topics[topic], rfc{strings.TrimSpace(t[0]), t[3]}) } for _, l := range topics { sort.Slice(l, func(i, j int) bool { diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore new file mode 100644 index 0000000..75623dc --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.gitignore @@ -0,0 +1,8 @@ +*.out +*.swp +*.8 +*.6 +_obj +_test* +markdown +tags diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml new file mode 100644 index 0000000..b0b525a --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - "1.10.x" + - "1.11.x" + - tip +matrix: + fast_finish: true + allow_failures: + - go: tip +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v ./... diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt new file mode 100644 index 0000000..2885af3 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. 
+> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md new file mode 100644 index 0000000..d9c08a2 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/README.md @@ -0,0 +1,335 @@ +Blackfriday +[![Build Status][BuildV2SVG]][BuildV2URL] +[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. 
+ +HTML output is currently supported, along with Smartypants +extensions. + +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with modern Go releases in module mode. +With Go installed: + + go get github.com/russross/blackfriday/v2 + +will resolve and add the package to the current development module, +then build and install it. Alternatively, you can achieve the same +if you import it in a package: + + import "github.com/russross/blackfriday/v2" + +and `go get` without parameters. + +Legacy GOPATH mode is unsupported. + + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the +documentation is available at +https://pkg.go.dev/github.com/russross/blackfriday/v2. + +It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. + +Version 2 offers a number of improvements over v1: + +* Cleaned up API +* A separate call to [`Parse`][4], which produces an abstract syntax tree for + the document +* Latest bug fixes +* Flexibility to easily add your own rendering extensions + +Potential drawbacks: + +* Our benchmarks show v2 to be slightly slower than v1. Currently in the + ballpark of around 15%. +* API breakage. If you can't afford modifying your code to adhere to the new API + and don't care too much about the new features, v2 is probably not for you. +* Several bug fixes are trailing behind and still need to be forward-ported to + v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for + tracking. + +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found +here: https://pkg.go.dev/github.com/russross/blackfriday. 
+ + +Usage +----- + +For the most sensible markdown processing, it is as simple as getting your input +into a byte slice and calling: + +```go +output := blackfriday.Run(input) +``` + +Your input will be parsed and the output rendered with a set of most popular +extensions enabled. If you want the most basic feature set, corresponding with +the bare Markdown specification, use: + +```go +output := blackfriday.Run(input, blackfriday.WithNoExtensions()) +``` + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running Blackfriday's output +through HTML sanitizer such as [Bluemonday][5]. + +Here's an example of simple usage of Blackfriday together with Bluemonday: + +```go +import ( + "github.com/microcosm-cc/bluemonday" + "github.com/russross/blackfriday/v2" +) + +// ... +unsafe := blackfriday.Run(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options + +If you want to customize the set of options, use `blackfriday.WithExtensions`, +`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. + +### `blackfriday-tool` + +You can also check out `blackfriday-tool` for a more complete example +of how to use it. Download and install it using: + + go get github.com/russross/blackfriday-tool + +This is a simple command-line tool that allows you to process a +markdown file using a standalone program. You can also browse the +source directly on github if you are just looking for some example +code: + +* + +Note that if you have not already done so, installing +`blackfriday-tool` will be sufficient to download and install +blackfriday in addition to the tool itself. The tool binary will be +installed in `$GOPATH/bin`. This is a statically-linked binary that +can be copied to wherever you need it without worrying about +dependencies and library versions. 
+ +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `AutoHeadingIDs` extension is enabled. The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. +This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. + + +Features +-------- + +All features of Sundown are supported, including: + +* **Compatibility**. The Markdown v1.0.3 test suite passes with + the `--tidy` option. Without `--tidy`, the differences are + mostly in whitespace and entity escaping, where blackfriday is + more consistent and cleaner. + +* **Common extensions**, including table support, fenced code + blocks, autolinks, strikethroughs, non-strict emphasis, etc. + +* **Safety**. Blackfriday is paranoid when parsing, making it safe + to feed untrusted user input without fear of bad things + happening. The test suite stress tests this and there are no + known inputs that make it crash. If you find one, please let me + know and send me the input that does it. + + NOTE: "safety" in this context means *runtime safety only*. In order to + protect yourself against JavaScript injection in untrusted content, see + [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). + +* **Fast processing**. 
It is fast enough to render on-demand in + most web applications without having to cache the output. + +* **Thread safety**. You can run multiple parsers in different + goroutines without ill effect. There is no dependence on global + shared state. + +* **Minimal dependencies**. Blackfriday only depends on standard + library packages in Go. The source code is pretty + self-contained, so it is easy to add to any project, including + Google App Engine projects. + +* **Standards compliant**. Output successfully validates using the + W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. + + +Extensions +---------- + +In addition to the standard markdown syntax, this package +implements the following extensions: + +* **Intra-word emphasis supression**. The `_` character is + commonly used inside words when discussing code, so having + markdown interpret it as an emphasis command is usually the + wrong thing. Blackfriday lets you treat all emphasis markers as + normal characters when they occur inside a word. + +* **Tables**. Tables can be created by drawing them in the input + using a simple syntax: + + ``` + Name | Age + --------|------ + Bob | 27 + Alice | 23 + ``` + +* **Fenced code blocks**. In addition to the normal 4-space + indentation to mark code blocks, you can explicitly mark them + and supply a language (to make syntax highlighting simple). Just + mark it like this: + + ```go + func getTrue() bool { + return true + } + ``` + + You can use 3 or more backticks to mark the beginning of the + block, and the same number to mark the end of the block. + + To preserve classes of fenced code blocks while using the bluemonday + HTML sanitizer, use the following policy: + + ```go + p := bluemonday.UGCPolicy() + p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") + html := p.SanitizeBytes(unsafe) + ``` + +* **Definition lists**. 
A simple definition list is made of a single-line + term followed by a colon and the definition for that term. + + Cat + : Fluffy animal everyone likes + + Internet + : Vector of transmission for pictures of cats + + Terms must be separated from the previous definition by a blank line. + +* **Footnotes**. A marker in the text that will become a superscript number; + a footnote definition that will be placed in a list of footnotes at the + end of the document. A footnote looks like this: + + This is a footnote.[^1] + + [^1]: the footnote text. + +* **Autolinking**. Blackfriday can find URLs that have not been + explicitly marked as links and turn them into links. + +* **Strikethrough**. Use two tildes (`~~`) to mark text that + should be crossed out. + +* **Hard line breaks**. With this extension enabled newlines in the input + translate into line breaks in the output. This extension is off by default. + +* **Smart quotes**. Smartypants-style punctuation substitution is + supported, turning normal double- and single-quote marks into + curly quotes, etc. + +* **LaTeX-style dash parsing** is an additional option, where `--` + is translated into `–`, and `---` is translated into + `—`. This differs from most smartypants processors, which + turn a single hyphen into an ndash and a double hyphen into an + mdash. + +* **Smart fractions**, where anything that looks like a fraction + is translated into suitable HTML (instead of just a few special + cases like most smartypant processors). For example, `4/5` + becomes `45`, which renders as + 45. + + +Other renderers +--------------- + +Blackfriday is structured to allow alternative rendering engines. Here +are a few of note: + +* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): + provides a GitHub Flavored Markdown renderer with fenced code block + highlighting, clickable heading anchor links. 
+ + It's not customizable, and its goal is to produce HTML output + equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), + except the rendering is performed locally. + +* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, + but for markdown. + +* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): + renders output as LaTeX. + +* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience + integration with the [Chroma](https://github.com/alecthomas/chroma) code + highlighting library. bfchroma is only compatible with v2 of Blackfriday and + provides a drop-in renderer ready to use with Blackfriday, as well as + options and means for further customization. + +* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. + +* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style + + +TODO +---- + +* More unit testing +* Improve Unicode support. It does not understand all Unicode + rules (about what constitutes a letter, a punctuation symbol, + etc.), so it may fail to detect word boundaries correctly in + some instances. It is safe on all UTF-8 input. 
+ + +License +------- + +[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) + + + [1]: https://daringfireball.net/projects/markdown/ "Markdown" + [2]: https://golang.org/ "Go Language" + [3]: https://github.com/vmg/sundown "Sundown" + [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" + [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" + + [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 + [BuildV2URL]: https://travis-ci.org/russross/blackfriday + [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 + [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go new file mode 100644 index 0000000..dcd61e6 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/block.go @@ -0,0 +1,1612 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + "html" + "regexp" + "strings" + "unicode" +) + +const ( + charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" + escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" +) + +var ( + reBackslashOrAmp = regexp.MustCompile("[\\&]") + reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. 
+func (p *Markdown) block(data []byte) { + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed heading: + // + // # Heading 1 + // ## Heading 2 + // ... + // ###### Heading 6 + if p.isPrefixHeading(data) { + data = data[p.prefixHeading(data):] + continue + } + + // block of preformatted HTML: + // + //
+ // ... + //
+ if data[0] == '<' { + if i := p.html(data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.extensions&Titleblock != 0 { + if data[0] == '%' { + if i := p.titleBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.addBlock(HorizontalRule, nil) + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.extensions&Tables != 0 { + if i := p.table(data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(data, ListTypeOrdered):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(data, ListTypeDefinition):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headings, too + data = data[p.paragraph(data):] + } + + p.nesting-- +} + +func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { + p.closeUnmatchedBlocks() + container := p.addChild(typ, 0) + container.content = content + return container +} + +func (p *Markdown) isPrefixHeading(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.extensions&SpaceHeadings != 0 { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + if level == len(data) || data[level] != ' ' { + return false + } + } + return true +} + +func (p *Markdown) prefixHeading(data []byte) int { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.extensions&HeadingIDs != 0 { + j, k := 0, 0 + // find start/end of heading id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract heading id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + block := p.addBlock(Heading, data[i:end]) + block.HeadingID = id + block.Level = level + } 
+ return skip +} + +func (p *Markdown) isUnderlinedHeading(data []byte) int { + // test of level 1 heading + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 1 + } + return 0 + } + + // test of level 2 heading + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 2 + } + return 0 + } + + return 0 +} + +func (p *Markdown) titleBlock(data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + consumed := len(data) + data = bytes.TrimPrefix(data, []byte("% ")) + data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) + block := p.addBlock(Heading, data) + block.Level = 1 + block.IsTitleblock = true + + return consumed +} + +func (p *Markdown) html(data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(data, doRender); size > 0 { + return size + } + + // check for an
tag + if size := p.htmlHr(data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + + return i +} + +func finalizeHTMLBlock(block *Node) { + block.Literal = block.content + block.content = nil +} + +// HTML comment, lax form +func (p *Markdown) htmlComment(data []byte, doRender bool) int { + i := p.inlineHTMLComment(data) + // needs to end with a blank line + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + block := p.addBlock(HTMLBlock, data[:end]) + finalizeHTMLBlock(block) + } + return size + } + return 0 +} + +// 
HR, which is the only self-closing block tag considered +func (p *Markdown) htmlHr(data []byte, doRender bool) int { + if len(data) < 4 { + return 0 + } + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + i := 3 + for i < len(data) && data[i] != '>' && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '>' { + i++ + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + return size + } + } + return 0 +} + +func (p *Markdown) htmlFindTag(data []byte) (string, bool) { + i := 0 + for i < len(data) && isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *Markdown) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + if tag == "hr" { + return 2 + } + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.extensions&LaxHTMLBlocks != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*Markdown) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + if i < len(data) && data[i] == '\n' { + i++ + } + return i +} + +func (*Markdown) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for i < len(data) && data[i] != '\n' { + switch { + 
case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. +// If info is not nil, it gets set to the syntax specified in the fence line. +func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the info string, and discard it if the caller doesn't care. 
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + i++ + i = skipChar(data, i, ' ') + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + if i == len(data) { + return i, marker + } + if i > len(data) || data[i] != '\n' { + return 0, "" + } + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { + var info string + beg, marker := isFenceLine(data, &info, "") + if beg == 0 || beg >= len(data) { + return 0 + } + fenceLength := beg - 1 + + var work bytes.Buffer + work.Write([]byte(info)) + work.WriteByte('\n') + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + fenceEnd, _ := isFenceLine(data[beg:], nil, marker) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = true + block.FenceLength = fenceLength + finalizeCodeBlock(block) + } + + return beg +} + +func unescapeChar(str []byte) []byte { + if str[0] == '\\' { + return []byte{str[1]} + } + return []byte(html.UnescapeString(string(str))) +} + +func unescapeString(str []byte) []byte { + if reBackslashOrAmp.Match(str) { + return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) + } + return str +} + +func finalizeCodeBlock(block *Node) { + if block.IsFenced { + newlinePos := bytes.IndexByte(block.content, '\n') + firstLine := block.content[:newlinePos] + rest := block.content[newlinePos+1:] + block.Info = unescapeString(bytes.Trim(firstLine, "\n")) + block.Literal = rest + } else { + block.Literal = block.content + } + block.content = nil +} + +func (p *Markdown) table(data []byte) int { + table := p.addBlock(Table, nil) + i, columns := p.tableHeader(data) + if i == 0 { + p.tip = table.Parent + table.Unlink() + return 0 + } + + p.addBlock(TableBody, nil) + + for i < len(data) { + pipes, rowStart := 0, i + for ; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + if i < len(data) && data[i] == '\n' { + i++ + } + p.tableRow(data[rowStart:i], columns, false) + } + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { + i := 0 + colCount := 1 + for i = 0; i < len(data) && data[i] != 
'\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + j := i + if j < len(data) && data[j] == '\n' { + j++ + } + header := data[:j] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]CellAlignFlags, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for i < len(data) && data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TableAlignmentLeft + dashes++ + } + for i < len(data) && data[i] == '-' { + i++ + dashes++ + } + if i < len(data) && data[i] == ':' { + i++ + columns[col] |= TableAlignmentRight + dashes++ + } + for i < len(data) && data[i] == ' ' { + i++ + } + if i == len(data) { + return + } + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for i < len(data) && data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && i < len(data) && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.addBlock(TableHead, nil) + p.tableRow(header, columns, 
true) + size = i + if size < len(data) && data[size] == '\n' { + size++ + } + return +} + +func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { + p.addBlock(TableRow, nil) + i, col := 0, 0 + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for i < len(data) && data[i] == ' ' { + i++ + } + + cellStart := i + + for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { + cellEnd-- + } + + cell := p.addBlock(TableCell, data[cellStart:cellEnd]) + cell.IsHeader = header + cell.Align = columns[col] + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + cell := p.addBlock(TableCell, nil) + cell.IsHeader = header + cell.Align = columns[col] + } + + // silently ignore rows with too many cells +} + +// returns blockquote prefix length +func (p *Markdown) quotePrefix(data []byte) int { + i := 0 + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + if i < len(data) && data[i] == '>' { + if i+1 < len(data) && data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *Markdown) quote(data []byte) int { + block := p.addBlock(BlockQuote, nil) + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for end < len(data) && data[end] != '\n' { + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + if end < len(data) && data[end] == '\n' { + end++ + } + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + p.block(raw.Bytes()) + p.finalize(block) + return end +} + +// returns prefix length for block code +func (p *Markdown) codePrefix(data []byte) int { + if len(data) >= 1 && data[0] == '\t' { + return 1 + } + if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *Markdown) code(data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for i < len(data) && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '\n' { + i++ + } + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffer + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = false + finalizeCodeBlock(block) + + return i +} + +// returns unordered list item prefix +func (p *Markdown) uliPrefix(data []byte) 
int { + i := 0 + // start with up to 3 spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + if i >= len(data)-1 { + return 0 + } + // need one of {'*', '+', '-'} followed by a space or a tab + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + (data[i+1] != ' ' && data[i+1] != '\t') { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *Markdown) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for i < len(data) && data[i] >= '0' && data[i] <= '9' { + i++ + } + if start == i || i >= len(data)-1 { + return 0 + } + + // we need >= 1 digits followed by a dot and a space or a tab + if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *Markdown) dliPrefix(data []byte) int { + if len(data) < 2 { + return 0 + } + i := 0 + // need a ':' followed by a space or a tab + if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + for i < len(data) && data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *Markdown) list(data []byte, flags ListType) int { + i := 0 + flags |= ListItemBeginningOfList + block := p.addBlock(List, nil) + block.ListFlags = flags + block.Tight = true + + for i < len(data) { + skip := p.listItem(data[i:], &flags) + if flags&ListItemContainsBlock != 0 { + block.ListData.Tight = false + } + i += skip + if skip == 0 || flags&ListItemEndOfList != 0 { + break + } + flags &= ^ListItemBeginningOfList + } + + above := block.Parent + finalizeList(block) + p.tip = above + return i +} + +// Returns true if the list item is not the same type as its parent list +func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool { + if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 { + return true + } else if p.oliPrefix(data) 
> 0 && *flags&ListTypeOrdered == 0 { + return true + } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) { + return true + } + return false +} + +// Returns true if block ends with a blank line, descending if needed +// into lists and sublists. +func endsWithBlankLine(block *Node) bool { + // TODO: figure this out. Always false now. + for block != nil { + //if block.lastLineBlank { + //return true + //} + t := block.Type + if t == List || t == Item { + block = block.LastChild + } else { + break + } + } + return false +} + +func finalizeList(block *Node) { + block.open = false + item := block.FirstChild + for item != nil { + // check for non-final list item ending with blank line: + if endsWithBlankLine(item) && item.Next != nil { + block.ListData.Tight = false + break + } + // recurse into children of list item, to see if there are spaces + // between any of them: + subItem := item.FirstChild + for subItem != nil { + if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { + block.ListData.Tight = false + break + } + subItem = subItem.Next + } + item = item.Next + } +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *Markdown) listItem(data []byte, flags *ListType) int { + // keep track of the indentation of the first line + itemIndent := 0 + if data[0] == '\t' { + itemIndent += 4 + } else { + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + } + + var bulletChar byte = '*' + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } else { + bulletChar = data[i-2] + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^ListTypeTerm + } + } + if i == 0 { + // if in definition list, set term flag and continue + if *flags&ListTypeDefinition != 0 { + *flags |= ListTypeTerm + } else { + return 0 + } + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + line = i + continue + } + + // calculate the indentation + indent := 0 + indentIndex := 0 + if data[line] == '\t' { + indentIndex++ + indent += 4 + } else { + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + indentIndex++ + } + } + + chunk := data[line+indentIndex : i] + + if p.extensions&FencedCode != 0 { + // determine if in or out of codeblock + // if in codeblock, ignore normal list processing + _, marker := isFenceLine(chunk, nil, codeBlockMarker) + if marker != "" { + if codeBlockMarker == "" { + // start of codeblock + codeBlockMarker = marker + } 
else { + // end of codeblock. + codeBlockMarker = "" + } + } + // we are in a codeblock, write line, and continue + if codeBlockMarker != "" || marker != "" { + raw.Write(data[line+indentIndex : i]) + line = i + continue gatherlines + } + } + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + // to be a nested list, it must be indented more + // if not, it is either a different kind of list + // or the next item in the same list + if indent <= itemIndent { + if p.listTypeChanged(chunk, flags) { + *flags |= ListItemEndOfList + } else if containsBlankLine { + *flags |= ListItemContainsBlock + } + + break gatherlines + } + + if containsBlankLine { + *flags |= ListItemContainsBlock + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix heading? + case p.isPrefixHeading(chunk): + // if the heading is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= ListItemEndOfList + break gatherlines + } + *flags |= ListItemContainsBlock + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&ListTypeDefinition != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
+ next := i + for next < len(data) && data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= ListItemEndOfList + } + } else { + *flags |= ListItemEndOfList + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + raw.WriteByte('\n') + *flags |= ListItemContainsBlock + } + + // if this line was preceded by one or more blanks, + // re-introduce the blank into the buffer + if containsBlankLine { + containsBlankLine = false + raw.WriteByte('\n') + } + + // add the line into the working buffer without prefix + raw.Write(data[line+indentIndex : i]) + + line = i + } + + rawBytes := raw.Bytes() + + block := p.addBlock(Item, nil) + block.ListFlags = *flags + block.Tight = false + block.BulletChar = bulletChar + block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark + + // render the contents of the list item + if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(rawBytes[:sublist]) + p.block(rawBytes[sublist:]) + } else { + p.block(rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + child := p.addChild(Paragraph, 0) + child.content = rawBytes[:sublist] + p.block(rawBytes[sublist:]) + } else { + child := p.addChild(Paragraph, 0) + child.content = rawBytes + } + } + return line +} + +// render a single paragraph that has already been parsed out +func (p *Markdown) renderParagraph(data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + end := len(data) + // trim trailing newline + if data[len(data)-1] == '\n' { + end-- + } + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + p.addBlock(Paragraph, data[beg:end]) +} + 
+func (p *Markdown) paragraph(data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + tabSize := TabSizeDefault + if p.extensions&TabSizeEight != 0 { + tabSize = TabSizeDouble + } + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a reference or a footnote? If so, end a paragraph + // preceding it and report that we have consumed up to the end of that + // reference: + if refEnd := isReference(p, current, tabSize); refEnd > 0 { + p.renderParagraph(data[:i]) + return i + refEnd + } + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? + if p.extensions&DefinitionLists != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(data[prev:], ListTypeDefinition) + } + } + + p.renderParagraph(data[:i]) + return i + n + } + + // an underline under some text marks a heading, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeading(current); level > 0 { + // render the paragraph + p.renderParagraph(data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + id := "" + if p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + block := p.addBlock(Heading, data[prev:eol]) + block.Level = level + block.HeadingID = id + + // find the end of the underline + for i < len(data) && data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.extensions&LaxHTMLBlocks != 0 { + if data[i] == '<' && p.html(current, false) > 0 { + // rewind to 
before the HTML block + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a prefixed heading or a horizontal rule after this, paragraph is over + if p.isPrefixHeading(current) || p.isHRule(current) { + p.renderParagraph(data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.extensions&FencedCode != 0 { + if p.fencedCodeBlock(current, false) > 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(current) != 0 { + ret := p.list(data[prev:], ListTypeDefinition) + return ret + } + } + + // if there's a list after this, paragraph is over + if p.extensions&NoEmptyLineBeforeBlock != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + nl := bytes.IndexByte(data[i:], '\n') + if nl >= 0 { + i += nl + 1 + } else { + i += len(data[i:]) + } + } + + p.renderParagraph(data[:i]) + return i +} + +func skipChar(data []byte, start int, char byte) int { + i := start + for i < len(data) && data[i] == char { + i++ + } + return i +} + +func skipUntilChar(text []byte, start int, char byte) int { + i := start + for i < len(text) && text[i] != char { + i++ + } + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. 
+func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go new file mode 100644 index 0000000..57ff152 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/doc.go @@ -0,0 +1,46 @@ +// Package blackfriday is a markdown processor. +// +// It translates plain text with simple formatting rules into an AST, which can +// then be further processed to HTML (provided by Blackfriday itself) or other +// formats (provided by the community). +// +// The simplest way to invoke Blackfriday is to call the Run function. It will +// take a text input and produce a text output in HTML (or other format). +// +// A slightly more sophisticated way to use Blackfriday is to create a Markdown +// processor and to call Parse, which returns a syntax tree for the input +// document. You can leverage Blackfriday's parsing for content extraction from +// markdown documents. You can assign a custom renderer and set various options +// to the Markdown processor. +// +// If you're interested in calling Blackfriday from command line, see +// https://github.com/russross/blackfriday-tool. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. This algorithm is used to create +// anchors for headings when AutoHeadingIDs extension is enabled. The +// algorithm is specified below, so that other packages can create +// compatible anchor names and links to those anchors. 
+// +// The algorithm iterates over the input text, interpreted as UTF-8, +// one Unicode code point (rune) at a time. All runes that are letters (category L) +// or numbers (category N) are considered valid characters. They are mapped to +// lower case, and included in the output. All other runes are considered +// invalid characters. Invalid characters that precede the first valid character, +// as well as invalid character that follow the last valid character +// are dropped completely. All other sequences of invalid characters +// between two valid characters are replaced with a single dash character '-'. +// +// SanitizedAnchorName exposes this functionality, and can be used to +// create compatible links to the anchor names generated by blackfriday. +// This algorithm is also implemented in a small standalone package at +// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients +// that want a small package and don't need full functionality of blackfriday. +package blackfriday + +// NOTE: Keep Sanitized Anchor Name algorithm in sync with package +// github.com/shurcooL/sanitized_anchor_name. +// Otherwise, users of sanitized_anchor_name will get anchor names +// that are incompatible with those generated by blackfriday. 
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go new file mode 100644 index 0000000..a2c3edb --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/entities.go @@ -0,0 +1,2236 @@ +package blackfriday + +// Extracted from https://html.spec.whatwg.org/multipage/entities.json +var entities = map[string]bool{ + "Æ": true, + "Æ": true, + "&": true, + "&": true, + "Á": true, + "Á": true, + "Ă": true, + "Â": true, + "Â": true, + "А": true, + "𝔄": true, + "À": true, + "À": true, + "Α": true, + "Ā": true, + "⩓": true, + "Ą": true, + "𝔸": true, + "⁡": true, + "Å": true, + "Å": true, + "𝒜": true, + "≔": true, + "Ã": true, + "Ã": true, + "Ä": true, + "Ä": true, + "∖": true, + "⫧": true, + "⌆": true, + "Б": true, + "∵": true, + "ℬ": true, + "Β": true, + "𝔅": true, + "𝔹": true, + "˘": true, + "ℬ": true, + "≎": true, + "Ч": true, + "©": true, + "©": true, + "Ć": true, + "⋒": true, + "ⅅ": true, + "ℭ": true, + "Č": true, + "Ç": true, + "Ç": true, + "Ĉ": true, + "∰": true, + "Ċ": true, + "¸": true, + "·": true, + "ℭ": true, + "Χ": true, + "⊙": true, + "⊖": true, + "⊕": true, + "⊗": true, + "∲": true, + "”": true, + "’": true, + "∷": true, + "⩴": true, + "≡": true, + "∯": true, + "∮": true, + "ℂ": true, + "∐": true, + "∳": true, + "⨯": true, + "𝒞": true, + "⋓": true, + "≍": true, + "ⅅ": true, + "⤑": true, + "Ђ": true, + "Ѕ": true, + "Џ": true, + "‡": true, + "↡": true, + "⫤": true, + "Ď": true, + "Д": true, + "∇": true, + "Δ": true, + "𝔇": true, + "´": true, + "˙": true, + "˝": true, + "`": true, + "˜": true, + "⋄": true, + "ⅆ": true, + "𝔻": true, + "¨": true, + "⃜": true, + "≐": true, + "∯": true, + "¨": true, + "⇓": true, + "⇐": true, + "⇔": true, + "⫤": true, + "⟸": true, + "⟺": true, + "⟹": true, + "⇒": true, + "⊨": true, + "⇑": true, + "⇕": true, + "∥": true, + "↓": true, + "⤓": true, + "⇵": true, + "̑": true, + "⥐": true, + "⥞": true, + "↽": true, + "⥖": true, + "⥟": true, + "⇁": true, + "⥗": true, 
+ "⊤": true, + "↧": true, + "⇓": true, + "𝒟": true, + "Đ": true, + "Ŋ": true, + "Ð": true, + "Ð": true, + "É": true, + "É": true, + "Ě": true, + "Ê": true, + "Ê": true, + "Э": true, + "Ė": true, + "𝔈": true, + "È": true, + "È": true, + "∈": true, + "Ē": true, + "◻": true, + "▫": true, + "Ę": true, + "𝔼": true, + "Ε": true, + "⩵": true, + "≂": true, + "⇌": true, + "ℰ": true, + "⩳": true, + "Η": true, + "Ë": true, + "Ë": true, + "∃": true, + "ⅇ": true, + "Ф": true, + "𝔉": true, + "◼": true, + "▪": true, + "𝔽": true, + "∀": true, + "ℱ": true, + "ℱ": true, + "Ѓ": true, + ">": true, + ">": true, + "Γ": true, + "Ϝ": true, + "Ğ": true, + "Ģ": true, + "Ĝ": true, + "Г": true, + "Ġ": true, + "𝔊": true, + "⋙": true, + "𝔾": true, + "≥": true, + "⋛": true, + "≧": true, + "⪢": true, + "≷": true, + "⩾": true, + "≳": true, + "𝒢": true, + "≫": true, + "Ъ": true, + "ˇ": true, + "^": true, + "Ĥ": true, + "ℌ": true, + "ℋ": true, + "ℍ": true, + "─": true, + "ℋ": true, + "Ħ": true, + "≎": true, + "≏": true, + "Е": true, + "IJ": true, + "Ё": true, + "Í": true, + "Í": true, + "Î": true, + "Î": true, + "И": true, + "İ": true, + "ℑ": true, + "Ì": true, + "Ì": true, + "ℑ": true, + "Ī": true, + "ⅈ": true, + "⇒": true, + "∬": true, + "∫": true, + "⋂": true, + "⁣": true, + "⁢": true, + "Į": true, + "𝕀": true, + "Ι": true, + "ℐ": true, + "Ĩ": true, + "І": true, + "Ï": true, + "Ï": true, + "Ĵ": true, + "Й": true, + "𝔍": true, + "𝕁": true, + "𝒥": true, + "Ј": true, + "Є": true, + "Х": true, + "Ќ": true, + "Κ": true, + "Ķ": true, + "К": true, + "𝔎": true, + "𝕂": true, + "𝒦": true, + "Љ": true, + "<": true, + "<": true, + "Ĺ": true, + "Λ": true, + "⟪": true, + "ℒ": true, + "↞": true, + "Ľ": true, + "Ļ": true, + "Л": true, + "⟨": true, + "←": true, + "⇤": true, + "⇆": true, + "⌈": true, + "⟦": true, + "⥡": true, + "⇃": true, + "⥙": true, + "⌊": true, + "↔": true, + "⥎": true, + "⊣": true, + "↤": true, + "⥚": true, + "⊲": true, + "⧏": true, + "⊴": true, + "⥑": true, + "⥠": true, + "↿": true, + "⥘": 
true, + "↼": true, + "⥒": true, + "⇐": true, + "⇔": true, + "⋚": true, + "≦": true, + "≶": true, + "⪡": true, + "⩽": true, + "≲": true, + "𝔏": true, + "⋘": true, + "⇚": true, + "Ŀ": true, + "⟵": true, + "⟷": true, + "⟶": true, + "⟸": true, + "⟺": true, + "⟹": true, + "𝕃": true, + "↙": true, + "↘": true, + "ℒ": true, + "↰": true, + "Ł": true, + "≪": true, + "⤅": true, + "М": true, + " ": true, + "ℳ": true, + "𝔐": true, + "∓": true, + "𝕄": true, + "ℳ": true, + "Μ": true, + "Њ": true, + "Ń": true, + "Ň": true, + "Ņ": true, + "Н": true, + "​": true, + "​": true, + "​": true, + "​": true, + "≫": true, + "≪": true, + " ": true, + "𝔑": true, + "⁠": true, + " ": true, + "ℕ": true, + "⫬": true, + "≢": true, + "≭": true, + "∦": true, + "∉": true, + "≠": true, + "≂̸": true, + "∄": true, + "≯": true, + "≱": true, + "≧̸": true, + "≫̸": true, + "≹": true, + "⩾̸": true, + "≵": true, + "≎̸": true, + "≏̸": true, + "⋪": true, + "⧏̸": true, + "⋬": true, + "≮": true, + "≰": true, + "≸": true, + "≪̸": true, + "⩽̸": true, + "≴": true, + "⪢̸": true, + "⪡̸": true, + "⊀": true, + "⪯̸": true, + "⋠": true, + "∌": true, + "⋫": true, + "⧐̸": true, + "⋭": true, + "⊏̸": true, + "⋢": true, + "⊐̸": true, + "⋣": true, + "⊂⃒": true, + "⊈": true, + "⊁": true, + "⪰̸": true, + "⋡": true, + "≿̸": true, + "⊃⃒": true, + "⊉": true, + "≁": true, + "≄": true, + "≇": true, + "≉": true, + "∤": true, + "𝒩": true, + "Ñ": true, + "Ñ": true, + "Ν": true, + "Œ": true, + "Ó": true, + "Ó": true, + "Ô": true, + "Ô": true, + "О": true, + "Ő": true, + "𝔒": true, + "Ò": true, + "Ò": true, + "Ō": true, + "Ω": true, + "Ο": true, + "𝕆": true, + "“": true, + "‘": true, + "⩔": true, + "𝒪": true, + "Ø": true, + "Ø": true, + "Õ": true, + "Õ": true, + "⨷": true, + "Ö": true, + "Ö": true, + "‾": true, + "⏞": true, + "⎴": true, + "⏜": true, + "∂": true, + "П": true, + "𝔓": true, + "Φ": true, + "Π": true, + "±": true, + "ℌ": true, + "ℙ": true, + "⪻": true, + "≺": true, + "⪯": true, + "≼": true, + "≾": true, + "″": true, + "∏": 
true, + "∷": true, + "∝": true, + "𝒫": true, + "Ψ": true, + """: true, + """: true, + "𝔔": true, + "ℚ": true, + "𝒬": true, + "⤐": true, + "®": true, + "®": true, + "Ŕ": true, + "⟫": true, + "↠": true, + "⤖": true, + "Ř": true, + "Ŗ": true, + "Р": true, + "ℜ": true, + "∋": true, + "⇋": true, + "⥯": true, + "ℜ": true, + "Ρ": true, + "⟩": true, + "→": true, + "⇥": true, + "⇄": true, + "⌉": true, + "⟧": true, + "⥝": true, + "⇂": true, + "⥕": true, + "⌋": true, + "⊢": true, + "↦": true, + "⥛": true, + "⊳": true, + "⧐": true, + "⊵": true, + "⥏": true, + "⥜": true, + "↾": true, + "⥔": true, + "⇀": true, + "⥓": true, + "⇒": true, + "ℝ": true, + "⥰": true, + "⇛": true, + "ℛ": true, + "↱": true, + "⧴": true, + "Щ": true, + "Ш": true, + "Ь": true, + "Ś": true, + "⪼": true, + "Š": true, + "Ş": true, + "Ŝ": true, + "С": true, + "𝔖": true, + "↓": true, + "←": true, + "→": true, + "↑": true, + "Σ": true, + "∘": true, + "𝕊": true, + "√": true, + "□": true, + "⊓": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊔": true, + "𝒮": true, + "⋆": true, + "⋐": true, + "⋐": true, + "⊆": true, + "≻": true, + "⪰": true, + "≽": true, + "≿": true, + "∋": true, + "∑": true, + "⋑": true, + "⊃": true, + "⊇": true, + "⋑": true, + "Þ": true, + "Þ": true, + "™": true, + "Ћ": true, + "Ц": true, + " ": true, + "Τ": true, + "Ť": true, + "Ţ": true, + "Т": true, + "𝔗": true, + "∴": true, + "Θ": true, + "  ": true, + " ": true, + "∼": true, + "≃": true, + "≅": true, + "≈": true, + "𝕋": true, + "⃛": true, + "𝒯": true, + "Ŧ": true, + "Ú": true, + "Ú": true, + "↟": true, + "⥉": true, + "Ў": true, + "Ŭ": true, + "Û": true, + "Û": true, + "У": true, + "Ű": true, + "𝔘": true, + "Ù": true, + "Ù": true, + "Ū": true, + "_": true, + "⏟": true, + "⎵": true, + "⏝": true, + "⋃": true, + "⊎": true, + "Ų": true, + "𝕌": true, + "↑": true, + "⤒": true, + "⇅": true, + "↕": true, + "⥮": true, + "⊥": true, + "↥": true, + "⇑": true, + "⇕": true, + "↖": true, + "↗": true, + "ϒ": true, + "Υ": true, + "Ů": true, + 
"𝒰": true, + "Ũ": true, + "Ü": true, + "Ü": true, + "⊫": true, + "⫫": true, + "В": true, + "⊩": true, + "⫦": true, + "⋁": true, + "‖": true, + "‖": true, + "∣": true, + "|": true, + "❘": true, + "≀": true, + " ": true, + "𝔙": true, + "𝕍": true, + "𝒱": true, + "⊪": true, + "Ŵ": true, + "⋀": true, + "𝔚": true, + "𝕎": true, + "𝒲": true, + "𝔛": true, + "Ξ": true, + "𝕏": true, + "𝒳": true, + "Я": true, + "Ї": true, + "Ю": true, + "Ý": true, + "Ý": true, + "Ŷ": true, + "Ы": true, + "𝔜": true, + "𝕐": true, + "𝒴": true, + "Ÿ": true, + "Ж": true, + "Ź": true, + "Ž": true, + "З": true, + "Ż": true, + "​": true, + "Ζ": true, + "ℨ": true, + "ℤ": true, + "𝒵": true, + "á": true, + "á": true, + "ă": true, + "∾": true, + "∾̳": true, + "∿": true, + "â": true, + "â": true, + "´": true, + "´": true, + "а": true, + "æ": true, + "æ": true, + "⁡": true, + "𝔞": true, + "à": true, + "à": true, + "ℵ": true, + "ℵ": true, + "α": true, + "ā": true, + "⨿": true, + "&": true, + "&": true, + "∧": true, + "⩕": true, + "⩜": true, + "⩘": true, + "⩚": true, + "∠": true, + "⦤": true, + "∠": true, + "∡": true, + "⦨": true, + "⦩": true, + "⦪": true, + "⦫": true, + "⦬": true, + "⦭": true, + "⦮": true, + "⦯": true, + "∟": true, + "⊾": true, + "⦝": true, + "∢": true, + "Å": true, + "⍼": true, + "ą": true, + "𝕒": true, + "≈": true, + "⩰": true, + "⩯": true, + "≊": true, + "≋": true, + "'": true, + "≈": true, + "≊": true, + "å": true, + "å": true, + "𝒶": true, + "*": true, + "≈": true, + "≍": true, + "ã": true, + "ã": true, + "ä": true, + "ä": true, + "∳": true, + "⨑": true, + "⫭": true, + "≌": true, + "϶": true, + "‵": true, + "∽": true, + "⋍": true, + "⊽": true, + "⌅": true, + "⌅": true, + "⎵": true, + "⎶": true, + "≌": true, + "б": true, + "„": true, + "∵": true, + "∵": true, + "⦰": true, + "϶": true, + "ℬ": true, + "β": true, + "ℶ": true, + "≬": true, + "𝔟": true, + "⋂": true, + "◯": true, + "⋃": true, + "⨀": true, + "⨁": true, + "⨂": true, + "⨆": true, + "★": true, + "▽": true, + "△": true, + "⨄": 
true, + "⋁": true, + "⋀": true, + "⤍": true, + "⧫": true, + "▪": true, + "▴": true, + "▾": true, + "◂": true, + "▸": true, + "␣": true, + "▒": true, + "░": true, + "▓": true, + "█": true, + "=⃥": true, + "≡⃥": true, + "⌐": true, + "𝕓": true, + "⊥": true, + "⊥": true, + "⋈": true, + "╗": true, + "╔": true, + "╖": true, + "╓": true, + "═": true, + "╦": true, + "╩": true, + "╤": true, + "╧": true, + "╝": true, + "╚": true, + "╜": true, + "╙": true, + "║": true, + "╬": true, + "╣": true, + "╠": true, + "╫": true, + "╢": true, + "╟": true, + "⧉": true, + "╕": true, + "╒": true, + "┐": true, + "┌": true, + "─": true, + "╥": true, + "╨": true, + "┬": true, + "┴": true, + "⊟": true, + "⊞": true, + "⊠": true, + "╛": true, + "╘": true, + "┘": true, + "└": true, + "│": true, + "╪": true, + "╡": true, + "╞": true, + "┼": true, + "┤": true, + "├": true, + "‵": true, + "˘": true, + "¦": true, + "¦": true, + "𝒷": true, + "⁏": true, + "∽": true, + "⋍": true, + "\": true, + "⧅": true, + "⟈": true, + "•": true, + "•": true, + "≎": true, + "⪮": true, + "≏": true, + "≏": true, + "ć": true, + "∩": true, + "⩄": true, + "⩉": true, + "⩋": true, + "⩇": true, + "⩀": true, + "∩︀": true, + "⁁": true, + "ˇ": true, + "⩍": true, + "č": true, + "ç": true, + "ç": true, + "ĉ": true, + "⩌": true, + "⩐": true, + "ċ": true, + "¸": true, + "¸": true, + "⦲": true, + "¢": true, + "¢": true, + "·": true, + "𝔠": true, + "ч": true, + "✓": true, + "✓": true, + "χ": true, + "○": true, + "⧃": true, + "ˆ": true, + "≗": true, + "↺": true, + "↻": true, + "®": true, + "Ⓢ": true, + "⊛": true, + "⊚": true, + "⊝": true, + "≗": true, + "⨐": true, + "⫯": true, + "⧂": true, + "♣": true, + "♣": true, + ":": true, + "≔": true, + "≔": true, + ",": true, + "@": true, + "∁": true, + "∘": true, + "∁": true, + "ℂ": true, + "≅": true, + "⩭": true, + "∮": true, + "𝕔": true, + "∐": true, + "©": true, + "©": true, + "℗": true, + "↵": true, + "✗": true, + "𝒸": true, + "⫏": true, + "⫑": true, + "⫐": true, + "⫒": true, + "⋯": true, + 
"⤸": true, + "⤵": true, + "⋞": true, + "⋟": true, + "↶": true, + "⤽": true, + "∪": true, + "⩈": true, + "⩆": true, + "⩊": true, + "⊍": true, + "⩅": true, + "∪︀": true, + "↷": true, + "⤼": true, + "⋞": true, + "⋟": true, + "⋎": true, + "⋏": true, + "¤": true, + "¤": true, + "↶": true, + "↷": true, + "⋎": true, + "⋏": true, + "∲": true, + "∱": true, + "⌭": true, + "⇓": true, + "⥥": true, + "†": true, + "ℸ": true, + "↓": true, + "‐": true, + "⊣": true, + "⤏": true, + "˝": true, + "ď": true, + "д": true, + "ⅆ": true, + "‡": true, + "⇊": true, + "⩷": true, + "°": true, + "°": true, + "δ": true, + "⦱": true, + "⥿": true, + "𝔡": true, + "⇃": true, + "⇂": true, + "⋄": true, + "⋄": true, + "♦": true, + "♦": true, + "¨": true, + "ϝ": true, + "⋲": true, + "÷": true, + "÷": true, + "÷": true, + "⋇": true, + "⋇": true, + "ђ": true, + "⌞": true, + "⌍": true, + "$": true, + "𝕕": true, + "˙": true, + "≐": true, + "≑": true, + "∸": true, + "∔": true, + "⊡": true, + "⌆": true, + "↓": true, + "⇊": true, + "⇃": true, + "⇂": true, + "⤐": true, + "⌟": true, + "⌌": true, + "𝒹": true, + "ѕ": true, + "⧶": true, + "đ": true, + "⋱": true, + "▿": true, + "▾": true, + "⇵": true, + "⥯": true, + "⦦": true, + "џ": true, + "⟿": true, + "⩷": true, + "≑": true, + "é": true, + "é": true, + "⩮": true, + "ě": true, + "≖": true, + "ê": true, + "ê": true, + "≕": true, + "э": true, + "ė": true, + "ⅇ": true, + "≒": true, + "𝔢": true, + "⪚": true, + "è": true, + "è": true, + "⪖": true, + "⪘": true, + "⪙": true, + "⏧": true, + "ℓ": true, + "⪕": true, + "⪗": true, + "ē": true, + "∅": true, + "∅": true, + "∅": true, + " ": true, + " ": true, + " ": true, + "ŋ": true, + " ": true, + "ę": true, + "𝕖": true, + "⋕": true, + "⧣": true, + "⩱": true, + "ε": true, + "ε": true, + "ϵ": true, + "≖": true, + "≕": true, + "≂": true, + "⪖": true, + "⪕": true, + "=": true, + "≟": true, + "≡": true, + "⩸": true, + "⧥": true, + "≓": true, + "⥱": true, + "ℯ": true, + "≐": true, + "≂": true, + "η": true, + "ð": true, + "ð": 
true, + "ë": true, + "ë": true, + "€": true, + "!": true, + "∃": true, + "ℰ": true, + "ⅇ": true, + "≒": true, + "ф": true, + "♀": true, + "ffi": true, + "ff": true, + "ffl": true, + "𝔣": true, + "fi": true, + "fj": true, + "♭": true, + "fl": true, + "▱": true, + "ƒ": true, + "𝕗": true, + "∀": true, + "⋔": true, + "⫙": true, + "⨍": true, + "½": true, + "½": true, + "⅓": true, + "¼": true, + "¼": true, + "⅕": true, + "⅙": true, + "⅛": true, + "⅔": true, + "⅖": true, + "¾": true, + "¾": true, + "⅗": true, + "⅜": true, + "⅘": true, + "⅚": true, + "⅝": true, + "⅞": true, + "⁄": true, + "⌢": true, + "𝒻": true, + "≧": true, + "⪌": true, + "ǵ": true, + "γ": true, + "ϝ": true, + "⪆": true, + "ğ": true, + "ĝ": true, + "г": true, + "ġ": true, + "≥": true, + "⋛": true, + "≥": true, + "≧": true, + "⩾": true, + "⩾": true, + "⪩": true, + "⪀": true, + "⪂": true, + "⪄": true, + "⋛︀": true, + "⪔": true, + "𝔤": true, + "≫": true, + "⋙": true, + "ℷ": true, + "ѓ": true, + "≷": true, + "⪒": true, + "⪥": true, + "⪤": true, + "≩": true, + "⪊": true, + "⪊": true, + "⪈": true, + "⪈": true, + "≩": true, + "⋧": true, + "𝕘": true, + "`": true, + "ℊ": true, + "≳": true, + "⪎": true, + "⪐": true, + ">": true, + ">": true, + "⪧": true, + "⩺": true, + "⋗": true, + "⦕": true, + "⩼": true, + "⪆": true, + "⥸": true, + "⋗": true, + "⋛": true, + "⪌": true, + "≷": true, + "≳": true, + "≩︀": true, + "≩︀": true, + "⇔": true, + " ": true, + "½": true, + "ℋ": true, + "ъ": true, + "↔": true, + "⥈": true, + "↭": true, + "ℏ": true, + "ĥ": true, + "♥": true, + "♥": true, + "…": true, + "⊹": true, + "𝔥": true, + "⤥": true, + "⤦": true, + "⇿": true, + "∻": true, + "↩": true, + "↪": true, + "𝕙": true, + "―": true, + "𝒽": true, + "ℏ": true, + "ħ": true, + "⁃": true, + "‐": true, + "í": true, + "í": true, + "⁣": true, + "î": true, + "î": true, + "и": true, + "е": true, + "¡": true, + "¡": true, + "⇔": true, + "𝔦": true, + "ì": true, + "ì": true, + "ⅈ": true, + "⨌": true, + "∭": true, + "⧜": true, + "℩": true, + 
"ij": true, + "ī": true, + "ℑ": true, + "ℐ": true, + "ℑ": true, + "ı": true, + "⊷": true, + "Ƶ": true, + "∈": true, + "℅": true, + "∞": true, + "⧝": true, + "ı": true, + "∫": true, + "⊺": true, + "ℤ": true, + "⊺": true, + "⨗": true, + "⨼": true, + "ё": true, + "į": true, + "𝕚": true, + "ι": true, + "⨼": true, + "¿": true, + "¿": true, + "𝒾": true, + "∈": true, + "⋹": true, + "⋵": true, + "⋴": true, + "⋳": true, + "∈": true, + "⁢": true, + "ĩ": true, + "і": true, + "ï": true, + "ï": true, + "ĵ": true, + "й": true, + "𝔧": true, + "ȷ": true, + "𝕛": true, + "𝒿": true, + "ј": true, + "є": true, + "κ": true, + "ϰ": true, + "ķ": true, + "к": true, + "𝔨": true, + "ĸ": true, + "х": true, + "ќ": true, + "𝕜": true, + "𝓀": true, + "⇚": true, + "⇐": true, + "⤛": true, + "⤎": true, + "≦": true, + "⪋": true, + "⥢": true, + "ĺ": true, + "⦴": true, + "ℒ": true, + "λ": true, + "⟨": true, + "⦑": true, + "⟨": true, + "⪅": true, + "«": true, + "«": true, + "←": true, + "⇤": true, + "⤟": true, + "⤝": true, + "↩": true, + "↫": true, + "⤹": true, + "⥳": true, + "↢": true, + "⪫": true, + "⤙": true, + "⪭": true, + "⪭︀": true, + "⤌": true, + "❲": true, + "{": true, + "[": true, + "⦋": true, + "⦏": true, + "⦍": true, + "ľ": true, + "ļ": true, + "⌈": true, + "{": true, + "л": true, + "⤶": true, + "“": true, + "„": true, + "⥧": true, + "⥋": true, + "↲": true, + "≤": true, + "←": true, + "↢": true, + "↽": true, + "↼": true, + "⇇": true, + "↔": true, + "⇆": true, + "⇋": true, + "↭": true, + "⋋": true, + "⋚": true, + "≤": true, + "≦": true, + "⩽": true, + "⩽": true, + "⪨": true, + "⩿": true, + "⪁": true, + "⪃": true, + "⋚︀": true, + "⪓": true, + "⪅": true, + "⋖": true, + "⋚": true, + "⪋": true, + "≶": true, + "≲": true, + "⥼": true, + "⌊": true, + "𝔩": true, + "≶": true, + "⪑": true, + "↽": true, + "↼": true, + "⥪": true, + "▄": true, + "љ": true, + "≪": true, + "⇇": true, + "⌞": true, + "⥫": true, + "◺": true, + "ŀ": true, + "⎰": true, + "⎰": true, + "≨": true, + "⪉": true, + "⪉": true, + "⪇": 
true, + "⪇": true, + "≨": true, + "⋦": true, + "⟬": true, + "⇽": true, + "⟦": true, + "⟵": true, + "⟷": true, + "⟼": true, + "⟶": true, + "↫": true, + "↬": true, + "⦅": true, + "𝕝": true, + "⨭": true, + "⨴": true, + "∗": true, + "_": true, + "◊": true, + "◊": true, + "⧫": true, + "(": true, + "⦓": true, + "⇆": true, + "⌟": true, + "⇋": true, + "⥭": true, + "‎": true, + "⊿": true, + "‹": true, + "𝓁": true, + "↰": true, + "≲": true, + "⪍": true, + "⪏": true, + "[": true, + "‘": true, + "‚": true, + "ł": true, + "<": true, + "<": true, + "⪦": true, + "⩹": true, + "⋖": true, + "⋋": true, + "⋉": true, + "⥶": true, + "⩻": true, + "⦖": true, + "◃": true, + "⊴": true, + "◂": true, + "⥊": true, + "⥦": true, + "≨︀": true, + "≨︀": true, + "∺": true, + "¯": true, + "¯": true, + "♂": true, + "✠": true, + "✠": true, + "↦": true, + "↦": true, + "↧": true, + "↤": true, + "↥": true, + "▮": true, + "⨩": true, + "м": true, + "—": true, + "∡": true, + "𝔪": true, + "℧": true, + "µ": true, + "µ": true, + "∣": true, + "*": true, + "⫰": true, + "·": true, + "·": true, + "−": true, + "⊟": true, + "∸": true, + "⨪": true, + "⫛": true, + "…": true, + "∓": true, + "⊧": true, + "𝕞": true, + "∓": true, + "𝓂": true, + "∾": true, + "μ": true, + "⊸": true, + "⊸": true, + "⋙̸": true, + "≫⃒": true, + "≫̸": true, + "⇍": true, + "⇎": true, + "⋘̸": true, + "≪⃒": true, + "≪̸": true, + "⇏": true, + "⊯": true, + "⊮": true, + "∇": true, + "ń": true, + "∠⃒": true, + "≉": true, + "⩰̸": true, + "≋̸": true, + "ʼn": true, + "≉": true, + "♮": true, + "♮": true, + "ℕ": true, + " ": true, + " ": true, + "≎̸": true, + "≏̸": true, + "⩃": true, + "ň": true, + "ņ": true, + "≇": true, + "⩭̸": true, + "⩂": true, + "н": true, + "–": true, + "≠": true, + "⇗": true, + "⤤": true, + "↗": true, + "↗": true, + "≐̸": true, + "≢": true, + "⤨": true, + "≂̸": true, + "∄": true, + "∄": true, + "𝔫": true, + "≧̸": true, + "≱": true, + "≱": true, + "≧̸": true, + "⩾̸": true, + "⩾̸": true, + "≵": true, + "≯": true, + "≯": true, + "⇎": 
true, + "↮": true, + "⫲": true, + "∋": true, + "⋼": true, + "⋺": true, + "∋": true, + "њ": true, + "⇍": true, + "≦̸": true, + "↚": true, + "‥": true, + "≰": true, + "↚": true, + "↮": true, + "≰": true, + "≦̸": true, + "⩽̸": true, + "⩽̸": true, + "≮": true, + "≴": true, + "≮": true, + "⋪": true, + "⋬": true, + "∤": true, + "𝕟": true, + "¬": true, + "¬": true, + "∉": true, + "⋹̸": true, + "⋵̸": true, + "∉": true, + "⋷": true, + "⋶": true, + "∌": true, + "∌": true, + "⋾": true, + "⋽": true, + "∦": true, + "∦": true, + "⫽⃥": true, + "∂̸": true, + "⨔": true, + "⊀": true, + "⋠": true, + "⪯̸": true, + "⊀": true, + "⪯̸": true, + "⇏": true, + "↛": true, + "⤳̸": true, + "↝̸": true, + "↛": true, + "⋫": true, + "⋭": true, + "⊁": true, + "⋡": true, + "⪰̸": true, + "𝓃": true, + "∤": true, + "∦": true, + "≁": true, + "≄": true, + "≄": true, + "∤": true, + "∦": true, + "⋢": true, + "⋣": true, + "⊄": true, + "⫅̸": true, + "⊈": true, + "⊂⃒": true, + "⊈": true, + "⫅̸": true, + "⊁": true, + "⪰̸": true, + "⊅": true, + "⫆̸": true, + "⊉": true, + "⊃⃒": true, + "⊉": true, + "⫆̸": true, + "≹": true, + "ñ": true, + "ñ": true, + "≸": true, + "⋪": true, + "⋬": true, + "⋫": true, + "⋭": true, + "ν": true, + "#": true, + "№": true, + " ": true, + "⊭": true, + "⤄": true, + "≍⃒": true, + "⊬": true, + "≥⃒": true, + ">⃒": true, + "⧞": true, + "⤂": true, + "≤⃒": true, + "<⃒": true, + "⊴⃒": true, + "⤃": true, + "⊵⃒": true, + "∼⃒": true, + "⇖": true, + "⤣": true, + "↖": true, + "↖": true, + "⤧": true, + "Ⓢ": true, + "ó": true, + "ó": true, + "⊛": true, + "⊚": true, + "ô": true, + "ô": true, + "о": true, + "⊝": true, + "ő": true, + "⨸": true, + "⊙": true, + "⦼": true, + "œ": true, + "⦿": true, + "𝔬": true, + "˛": true, + "ò": true, + "ò": true, + "⧁": true, + "⦵": true, + "Ω": true, + "∮": true, + "↺": true, + "⦾": true, + "⦻": true, + "‾": true, + "⧀": true, + "ō": true, + "ω": true, + "ο": true, + "⦶": true, + "⊖": true, + "𝕠": true, + "⦷": true, + "⦹": true, + "⊕": true, + "∨": true, + "↻": true, + 
"⩝": true, + "ℴ": true, + "ℴ": true, + "ª": true, + "ª": true, + "º": true, + "º": true, + "⊶": true, + "⩖": true, + "⩗": true, + "⩛": true, + "ℴ": true, + "ø": true, + "ø": true, + "⊘": true, + "õ": true, + "õ": true, + "⊗": true, + "⨶": true, + "ö": true, + "ö": true, + "⌽": true, + "∥": true, + "¶": true, + "¶": true, + "∥": true, + "⫳": true, + "⫽": true, + "∂": true, + "п": true, + "%": true, + ".": true, + "‰": true, + "⊥": true, + "‱": true, + "𝔭": true, + "φ": true, + "ϕ": true, + "ℳ": true, + "☎": true, + "π": true, + "⋔": true, + "ϖ": true, + "ℏ": true, + "ℎ": true, + "ℏ": true, + "+": true, + "⨣": true, + "⊞": true, + "⨢": true, + "∔": true, + "⨥": true, + "⩲": true, + "±": true, + "±": true, + "⨦": true, + "⨧": true, + "±": true, + "⨕": true, + "𝕡": true, + "£": true, + "£": true, + "≺": true, + "⪳": true, + "⪷": true, + "≼": true, + "⪯": true, + "≺": true, + "⪷": true, + "≼": true, + "⪯": true, + "⪹": true, + "⪵": true, + "⋨": true, + "≾": true, + "′": true, + "ℙ": true, + "⪵": true, + "⪹": true, + "⋨": true, + "∏": true, + "⌮": true, + "⌒": true, + "⌓": true, + "∝": true, + "∝": true, + "≾": true, + "⊰": true, + "𝓅": true, + "ψ": true, + " ": true, + "𝔮": true, + "⨌": true, + "𝕢": true, + "⁗": true, + "𝓆": true, + "ℍ": true, + "⨖": true, + "?": true, + "≟": true, + """: true, + """: true, + "⇛": true, + "⇒": true, + "⤜": true, + "⤏": true, + "⥤": true, + "∽̱": true, + "ŕ": true, + "√": true, + "⦳": true, + "⟩": true, + "⦒": true, + "⦥": true, + "⟩": true, + "»": true, + "»": true, + "→": true, + "⥵": true, + "⇥": true, + "⤠": true, + "⤳": true, + "⤞": true, + "↪": true, + "↬": true, + "⥅": true, + "⥴": true, + "↣": true, + "↝": true, + "⤚": true, + "∶": true, + "ℚ": true, + "⤍": true, + "❳": true, + "}": true, + "]": true, + "⦌": true, + "⦎": true, + "⦐": true, + "ř": true, + "ŗ": true, + "⌉": true, + "}": true, + "р": true, + "⤷": true, + "⥩": true, + "”": true, + "”": true, + "↳": true, + "ℜ": true, + "ℛ": true, + "ℜ": true, + "ℝ": true, + "▭": 
true, + "®": true, + "®": true, + "⥽": true, + "⌋": true, + "𝔯": true, + "⇁": true, + "⇀": true, + "⥬": true, + "ρ": true, + "ϱ": true, + "→": true, + "↣": true, + "⇁": true, + "⇀": true, + "⇄": true, + "⇌": true, + "⇉": true, + "↝": true, + "⋌": true, + "˚": true, + "≓": true, + "⇄": true, + "⇌": true, + "‏": true, + "⎱": true, + "⎱": true, + "⫮": true, + "⟭": true, + "⇾": true, + "⟧": true, + "⦆": true, + "𝕣": true, + "⨮": true, + "⨵": true, + ")": true, + "⦔": true, + "⨒": true, + "⇉": true, + "›": true, + "𝓇": true, + "↱": true, + "]": true, + "’": true, + "’": true, + "⋌": true, + "⋊": true, + "▹": true, + "⊵": true, + "▸": true, + "⧎": true, + "⥨": true, + "℞": true, + "ś": true, + "‚": true, + "≻": true, + "⪴": true, + "⪸": true, + "š": true, + "≽": true, + "⪰": true, + "ş": true, + "ŝ": true, + "⪶": true, + "⪺": true, + "⋩": true, + "⨓": true, + "≿": true, + "с": true, + "⋅": true, + "⊡": true, + "⩦": true, + "⇘": true, + "⤥": true, + "↘": true, + "↘": true, + "§": true, + "§": true, + ";": true, + "⤩": true, + "∖": true, + "∖": true, + "✶": true, + "𝔰": true, + "⌢": true, + "♯": true, + "щ": true, + "ш": true, + "∣": true, + "∥": true, + "­": true, + "­": true, + "σ": true, + "ς": true, + "ς": true, + "∼": true, + "⩪": true, + "≃": true, + "≃": true, + "⪞": true, + "⪠": true, + "⪝": true, + "⪟": true, + "≆": true, + "⨤": true, + "⥲": true, + "←": true, + "∖": true, + "⨳": true, + "⧤": true, + "∣": true, + "⌣": true, + "⪪": true, + "⪬": true, + "⪬︀": true, + "ь": true, + "/": true, + "⧄": true, + "⌿": true, + "𝕤": true, + "♠": true, + "♠": true, + "∥": true, + "⊓": true, + "⊓︀": true, + "⊔": true, + "⊔︀": true, + "⊏": true, + "⊑": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊐": true, + "⊒": true, + "□": true, + "□": true, + "▪": true, + "▪": true, + "→": true, + "𝓈": true, + "∖": true, + "⌣": true, + "⋆": true, + "☆": true, + "★": true, + "ϵ": true, + "ϕ": true, + "¯": true, + "⊂": true, + "⫅": true, + "⪽": true, + "⊆": true, + "⫃": true, + 
"⫁": true, + "⫋": true, + "⊊": true, + "⪿": true, + "⥹": true, + "⊂": true, + "⊆": true, + "⫅": true, + "⊊": true, + "⫋": true, + "⫇": true, + "⫕": true, + "⫓": true, + "≻": true, + "⪸": true, + "≽": true, + "⪰": true, + "⪺": true, + "⪶": true, + "⋩": true, + "≿": true, + "∑": true, + "♪": true, + "¹": true, + "¹": true, + "²": true, + "²": true, + "³": true, + "³": true, + "⊃": true, + "⫆": true, + "⪾": true, + "⫘": true, + "⊇": true, + "⫄": true, + "⟉": true, + "⫗": true, + "⥻": true, + "⫂": true, + "⫌": true, + "⊋": true, + "⫀": true, + "⊃": true, + "⊇": true, + "⫆": true, + "⊋": true, + "⫌": true, + "⫈": true, + "⫔": true, + "⫖": true, + "⇙": true, + "⤦": true, + "↙": true, + "↙": true, + "⤪": true, + "ß": true, + "ß": true, + "⌖": true, + "τ": true, + "⎴": true, + "ť": true, + "ţ": true, + "т": true, + "⃛": true, + "⌕": true, + "𝔱": true, + "∴": true, + "∴": true, + "θ": true, + "ϑ": true, + "ϑ": true, + "≈": true, + "∼": true, + " ": true, + "≈": true, + "∼": true, + "þ": true, + "þ": true, + "˜": true, + "×": true, + "×": true, + "⊠": true, + "⨱": true, + "⨰": true, + "∭": true, + "⤨": true, + "⊤": true, + "⌶": true, + "⫱": true, + "𝕥": true, + "⫚": true, + "⤩": true, + "‴": true, + "™": true, + "▵": true, + "▿": true, + "◃": true, + "⊴": true, + "≜": true, + "▹": true, + "⊵": true, + "◬": true, + "≜": true, + "⨺": true, + "⨹": true, + "⧍": true, + "⨻": true, + "⏢": true, + "𝓉": true, + "ц": true, + "ћ": true, + "ŧ": true, + "≬": true, + "↞": true, + "↠": true, + "⇑": true, + "⥣": true, + "ú": true, + "ú": true, + "↑": true, + "ў": true, + "ŭ": true, + "û": true, + "û": true, + "у": true, + "⇅": true, + "ű": true, + "⥮": true, + "⥾": true, + "𝔲": true, + "ù": true, + "ù": true, + "↿": true, + "↾": true, + "▀": true, + "⌜": true, + "⌜": true, + "⌏": true, + "◸": true, + "ū": true, + "¨": true, + "¨": true, + "ų": true, + "𝕦": true, + "↑": true, + "↕": true, + "↿": true, + "↾": true, + "⊎": true, + "υ": true, + "ϒ": true, + "υ": true, + "⇈": true, + "⌝": true, 
+ "⌝": true, + "⌎": true, + "ů": true, + "◹": true, + "𝓊": true, + "⋰": true, + "ũ": true, + "▵": true, + "▴": true, + "⇈": true, + "ü": true, + "ü": true, + "⦧": true, + "⇕": true, + "⫨": true, + "⫩": true, + "⊨": true, + "⦜": true, + "ϵ": true, + "ϰ": true, + "∅": true, + "ϕ": true, + "ϖ": true, + "∝": true, + "↕": true, + "ϱ": true, + "ς": true, + "⊊︀": true, + "⫋︀": true, + "⊋︀": true, + "⫌︀": true, + "ϑ": true, + "⊲": true, + "⊳": true, + "в": true, + "⊢": true, + "∨": true, + "⊻": true, + "≚": true, + "⋮": true, + "|": true, + "|": true, + "𝔳": true, + "⊲": true, + "⊂⃒": true, + "⊃⃒": true, + "𝕧": true, + "∝": true, + "⊳": true, + "𝓋": true, + "⫋︀": true, + "⊊︀": true, + "⫌︀": true, + "⊋︀": true, + "⦚": true, + "ŵ": true, + "⩟": true, + "∧": true, + "≙": true, + "℘": true, + "𝔴": true, + "𝕨": true, + "℘": true, + "≀": true, + "≀": true, + "𝓌": true, + "⋂": true, + "◯": true, + "⋃": true, + "▽": true, + "𝔵": true, + "⟺": true, + "⟷": true, + "ξ": true, + "⟸": true, + "⟵": true, + "⟼": true, + "⋻": true, + "⨀": true, + "𝕩": true, + "⨁": true, + "⨂": true, + "⟹": true, + "⟶": true, + "𝓍": true, + "⨆": true, + "⨄": true, + "△": true, + "⋁": true, + "⋀": true, + "ý": true, + "ý": true, + "я": true, + "ŷ": true, + "ы": true, + "¥": true, + "¥": true, + "𝔶": true, + "ї": true, + "𝕪": true, + "𝓎": true, + "ю": true, + "ÿ": true, + "ÿ": true, + "ź": true, + "ž": true, + "з": true, + "ż": true, + "ℨ": true, + "ζ": true, + "𝔷": true, + "ж": true, + "⇝": true, + "𝕫": true, + "𝓏": true, + "‍": true, + "‌": true, +} diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go new file mode 100644 index 0000000..6ab6010 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/esc.go @@ -0,0 +1,70 @@ +package blackfriday + +import ( + "html" + "io" +) + +var htmlEscaper = [256][]byte{ + '&': []byte("&"), + '<': []byte("<"), + '>': []byte(">"), + '"': []byte("""), +} + +func escapeHTML(w io.Writer, s []byte) { + 
escapeEntities(w, s, false) +} + +func escapeAllHTML(w io.Writer, s []byte) { + escapeEntities(w, s, true) +} + +func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) { + var start, end int + for end < len(s) { + escSeq := htmlEscaper[s[end]] + if escSeq != nil { + isEntity, entityEnd := nodeIsEntity(s, end) + if isEntity && !escapeValidEntities { + w.Write(s[start : entityEnd+1]) + start = entityEnd + 1 + } else { + w.Write(s[start:end]) + w.Write(escSeq) + start = end + 1 + } + } + end++ + } + if start < len(s) && end <= len(s) { + w.Write(s[start:end]) + } +} + +func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) { + isEntity = false + endEntityPos = end + 1 + + if s[end] == '&' { + for endEntityPos < len(s) { + if s[endEntityPos] == ';' { + if entities[string(s[end:endEntityPos+1])] { + isEntity = true + break + } + } + if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' { + break + } + endEntityPos++ + } + } + + return isEntity, endEntityPos +} + +func escLink(w io.Writer, text []byte) { + unesc := html.UnescapeString(string(text)) + escapeHTML(w, []byte(unesc)) +} diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go new file mode 100644 index 0000000..cb4f26e --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/html.go @@ -0,0 +1,952 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strings" +) + +// HTMLFlags control optional behavior of HTML renderer. +type HTMLFlags int + +// HTML renderer configuration options. 
+const ( + HTMLFlagsNone HTMLFlags = 0 + SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks + SkipImages // Skip embedded images + SkipLinks // Skip all links + Safelink // Only link to trusted protocols + NofollowLinks // Only link with rel="nofollow" + NoreferrerLinks // Only link with rel="noreferrer" + NoopenerLinks // Only link with rel="noopener" + HrefTargetBlank // Add a blank target + CompletePage // Generate a complete HTML page + UseXHTML // Generate XHTML output instead of HTML + FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source + Smartypants // Enable smart punctuation substitutions + SmartypantsFractions // Enable smart fractions (with Smartypants) + SmartypantsDashes // Enable smart dashes (with Smartypants) + SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants) + SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering + SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants) + TOC // Generate a table of contents +) + +var ( + htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag) +) + +const ( + htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" + + processingInstruction + "|" + declaration + "|" + cdata + ")" + closeTag = "]" + openTag = "<" + tagName + attribute + "*" + "\\s*/?>" + attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)" + attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")" + attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")" + attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*" + cdata = "" + declaration = "]*>" + doubleQuotedValue = "\"[^\"]*\"" + htmlComment = "|" + processingInstruction = "[<][?].*?[?][>]" + singleQuotedValue = "'[^']*'" + tagName = "[A-Za-z][A-Za-z0-9-]*" + unquotedValue = "[^\"'=<>`\\x00-\\x20]+" +) + +// HTMLRendererParameters is a collection of supplementary parameters tweaking +// the behavior 
of various parts of HTML renderer. +type HTMLRendererParameters struct { + // Prepend this text to each relative URL. + AbsolutePrefix string + // Add this text to each footnote anchor, to ensure uniqueness. + FootnoteAnchorPrefix string + // Show this text inside the tag for a footnote return link, if the + // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string + // [return] is used. + FootnoteReturnLinkContents string + // If set, add this text to the front of each Heading ID, to ensure + // uniqueness. + HeadingIDPrefix string + // If set, add this text to the back of each Heading ID, to ensure uniqueness. + HeadingIDSuffix string + // Increase heading levels: if the offset is 1,

becomes

etc. + // Negative offset is also valid. + // Resulting levels are clipped between 1 and 6. + HeadingLevelOffset int + + Title string // Document title (used if CompletePage is set) + CSS string // Optional CSS file URL (used if CompletePage is set) + Icon string // Optional icon file URL (used if CompletePage is set) + + Flags HTMLFlags // Flags allow customizing this renderer's behavior +} + +// HTMLRenderer is a type that implements the Renderer interface for HTML output. +// +// Do not create this directly, instead use the NewHTMLRenderer function. +type HTMLRenderer struct { + HTMLRendererParameters + + closeTag string // how to end singleton tags: either " />" or ">" + + // Track heading IDs to prevent ID collision in a single generation. + headingIDs map[string]int + + lastOutputLen int + disableTags int + + sr *SPRenderer +} + +const ( + xhtmlClose = " />" + htmlClose = ">" +) + +// NewHTMLRenderer creates and configures an HTMLRenderer object, which +// satisfies the Renderer interface. +func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { + // configure the rendering engine + closeTag := htmlClose + if params.Flags&UseXHTML != 0 { + closeTag = xhtmlClose + } + + if params.FootnoteReturnLinkContents == "" { + // U+FE0E is VARIATION SELECTOR-15. + // It suppresses automatic emoji presentation of the preceding + // U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS. 
+ params.FootnoteReturnLinkContents = "↩\ufe0e" + } + + return &HTMLRenderer{ + HTMLRendererParameters: params, + + closeTag: closeTag, + headingIDs: make(map[string]int), + + sr: NewSmartypantsRenderer(params.Flags), + } +} + +func isHTMLTag(tag []byte, tagname string) bool { + found, _ := findHTMLTagPos(tag, tagname) + return found +} + +// Look for a character, but ignore it when it's in any kind of quotes, it +// might be JavaScript +func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int { + inSingleQuote := false + inDoubleQuote := false + inGraveQuote := false + i := start + for i < len(html) { + switch { + case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote: + return i + case html[i] == '\'': + inSingleQuote = !inSingleQuote + case html[i] == '"': + inDoubleQuote = !inDoubleQuote + case html[i] == '`': + inGraveQuote = !inGraveQuote + } + i++ + } + return start +} + +func findHTMLTagPos(tag []byte, tagname string) (bool, int) { + i := 0 + if i < len(tag) && tag[0] != '<' { + return false, -1 + } + i++ + i = skipSpace(tag, i) + + if i < len(tag) && tag[i] == '/' { + i++ + } + + i = skipSpace(tag, i) + j := 0 + for ; i < len(tag); i, j = i+1, j+1 { + if j >= len(tagname) { + break + } + + if strings.ToLower(string(tag[i]))[0] != tagname[j] { + return false, -1 + } + } + + if i == len(tag) { + return false, -1 + } + + rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>') + if rightAngle >= i { + return true, rightAngle + } + + return false, -1 +} + +func skipSpace(tag []byte, i int) int { + for i < len(tag) && isspace(tag[i]) { + i++ + } + return i +} + +func isRelativeLink(link []byte) (yes bool) { + // a tag begin with '#' + if link[0] == '#' { + return true + } + + // link begin with '/' but not '//', the second maybe a protocol relative link + if len(link) >= 2 && link[0] == '/' && link[1] != '/' { + return true + } + + // only the root '/' + if len(link) == 1 && link[0] == '/' { + return true + } + + // current directory 
: begin with "./" + if bytes.HasPrefix(link, []byte("./")) { + return true + } + + // parent directory : begin with "../" + if bytes.HasPrefix(link, []byte("../")) { + return true + } + + return false +} + +func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string { + for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] { + tmp := fmt.Sprintf("%s-%d", id, count+1) + + if _, tmpFound := r.headingIDs[tmp]; !tmpFound { + r.headingIDs[id] = count + 1 + id = tmp + } else { + id = id + "-1" + } + } + + if _, found := r.headingIDs[id]; !found { + r.headingIDs[id] = 0 + } + + return id +} + +func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte { + if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' { + newDest := r.AbsolutePrefix + if link[0] != '/' { + newDest += "/" + } + newDest += string(link) + return []byte(newDest) + } + return link +} + +func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string { + if isRelativeLink(link) { + return attrs + } + val := []string{} + if flags&NofollowLinks != 0 { + val = append(val, "nofollow") + } + if flags&NoreferrerLinks != 0 { + val = append(val, "noreferrer") + } + if flags&NoopenerLinks != 0 { + val = append(val, "noopener") + } + if flags&HrefTargetBlank != 0 { + attrs = append(attrs, "target=\"_blank\"") + } + if len(val) == 0 { + return attrs + } + attr := fmt.Sprintf("rel=%q", strings.Join(val, " ")) + return append(attrs, attr) +} + +func isMailto(link []byte) bool { + return bytes.HasPrefix(link, []byte("mailto:")) +} + +func needSkipLink(flags HTMLFlags, dest []byte) bool { + if flags&SkipLinks != 0 { + return true + } + return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest) +} + +func isSmartypantable(node *Node) bool { + pt := node.Parent.Type + return pt != Link && pt != CodeBlock && pt != Code +} + +func appendLanguageAttr(attrs []string, info []byte) []string { + if len(info) == 0 { + return attrs + } + endOfLang := bytes.IndexAny(info, 
"\t ") + if endOfLang < 0 { + endOfLang = len(info) + } + return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang])) +} + +func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) { + w.Write(name) + if len(attrs) > 0 { + w.Write(spaceBytes) + w.Write([]byte(strings.Join(attrs, " "))) + } + w.Write(gtBytes) + r.lastOutputLen = 1 +} + +func footnoteRef(prefix string, node *Node) []byte { + urlFrag := prefix + string(slugify(node.Destination)) + anchor := fmt.Sprintf(`%d`, urlFrag, node.NoteID) + return []byte(fmt.Sprintf(`%s`, urlFrag, anchor)) +} + +func footnoteItem(prefix string, slug []byte) []byte { + return []byte(fmt.Sprintf(`
  • `, prefix, slug)) +} + +func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte { + const format = ` %s` + return []byte(fmt.Sprintf(format, prefix, slug, returnLink)) +} + +func itemOpenCR(node *Node) bool { + if node.Prev == nil { + return false + } + ld := node.Parent.ListData + return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0 +} + +func skipParagraphTags(node *Node) bool { + grandparent := node.Parent.Parent + if grandparent == nil || grandparent.Type != List { + return false + } + tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0 + return grandparent.Type == List && tightOrTerm +} + +func cellAlignment(align CellAlignFlags) string { + switch align { + case TableAlignmentLeft: + return "left" + case TableAlignmentRight: + return "right" + case TableAlignmentCenter: + return "center" + default: + return "" + } +} + +func (r *HTMLRenderer) out(w io.Writer, text []byte) { + if r.disableTags > 0 { + w.Write(htmlTagRe.ReplaceAll(text, []byte{})) + } else { + w.Write(text) + } + r.lastOutputLen = len(text) +} + +func (r *HTMLRenderer) cr(w io.Writer) { + if r.lastOutputLen > 0 { + r.out(w, nlBytes) + } +} + +var ( + nlBytes = []byte{'\n'} + gtBytes = []byte{'>'} + spaceBytes = []byte{' '} +) + +var ( + brTag = []byte("
    ") + brXHTMLTag = []byte("
    ") + emTag = []byte("") + emCloseTag = []byte("") + strongTag = []byte("") + strongCloseTag = []byte("") + delTag = []byte("") + delCloseTag = []byte("") + ttTag = []byte("") + ttCloseTag = []byte("") + aTag = []byte("") + preTag = []byte("
    ")
    +	preCloseTag        = []byte("
    ") + codeTag = []byte("") + codeCloseTag = []byte("") + pTag = []byte("

    ") + pCloseTag = []byte("

    ") + blockquoteTag = []byte("
    ") + blockquoteCloseTag = []byte("
    ") + hrTag = []byte("
    ") + hrXHTMLTag = []byte("
    ") + ulTag = []byte("
      ") + ulCloseTag = []byte("
    ") + olTag = []byte("
      ") + olCloseTag = []byte("
    ") + dlTag = []byte("
    ") + dlCloseTag = []byte("
    ") + liTag = []byte("
  • ") + liCloseTag = []byte("
  • ") + ddTag = []byte("
    ") + ddCloseTag = []byte("
    ") + dtTag = []byte("
    ") + dtCloseTag = []byte("
    ") + tableTag = []byte("") + tableCloseTag = []byte("
    ") + tdTag = []byte("") + thTag = []byte("") + theadTag = []byte("") + theadCloseTag = []byte("") + tbodyTag = []byte("") + tbodyCloseTag = []byte("") + trTag = []byte("") + trCloseTag = []byte("") + h1Tag = []byte("") + h2Tag = []byte("") + h3Tag = []byte("") + h4Tag = []byte("") + h5Tag = []byte("") + h6Tag = []byte("") + + footnotesDivBytes = []byte("\n
    \n\n") + footnotesCloseDivBytes = []byte("\n
    \n") +) + +func headingTagsFromLevel(level int) ([]byte, []byte) { + if level <= 1 { + return h1Tag, h1CloseTag + } + switch level { + case 2: + return h2Tag, h2CloseTag + case 3: + return h3Tag, h3CloseTag + case 4: + return h4Tag, h4CloseTag + case 5: + return h5Tag, h5CloseTag + } + return h6Tag, h6CloseTag +} + +func (r *HTMLRenderer) outHRTag(w io.Writer) { + if r.Flags&UseXHTML == 0 { + r.out(w, hrTag) + } else { + r.out(w, hrXHTMLTag) + } +} + +// RenderNode is a default renderer of a single node of a syntax tree. For +// block nodes it will be called twice: first time with entering=true, second +// time with entering=false, so that it could know when it's working on an open +// tag and when on close. It writes the result to w. +// +// The return value is a way to tell the calling walker to adjust its walk +// pattern: e.g. it can terminate the traversal by returning Terminate. Or it +// can ask the walker to skip a subtree of this node by returning SkipChildren. +// The typical behavior is to return GoToNext, which asks for the usual +// traversal to the next node. 
+func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { + attrs := []string{} + switch node.Type { + case Text: + if r.Flags&Smartypants != 0 { + var tmp bytes.Buffer + escapeHTML(&tmp, node.Literal) + r.sr.Process(w, tmp.Bytes()) + } else { + if node.Parent.Type == Link { + escLink(w, node.Literal) + } else { + escapeHTML(w, node.Literal) + } + } + case Softbreak: + r.cr(w) + // TODO: make it configurable via out(renderer.softbreak) + case Hardbreak: + if r.Flags&UseXHTML == 0 { + r.out(w, brTag) + } else { + r.out(w, brXHTMLTag) + } + r.cr(w) + case Emph: + if entering { + r.out(w, emTag) + } else { + r.out(w, emCloseTag) + } + case Strong: + if entering { + r.out(w, strongTag) + } else { + r.out(w, strongCloseTag) + } + case Del: + if entering { + r.out(w, delTag) + } else { + r.out(w, delCloseTag) + } + case HTMLSpan: + if r.Flags&SkipHTML != 0 { + break + } + r.out(w, node.Literal) + case Link: + // mark it but don't link it if it is not a safe link: no smartypants + dest := node.LinkData.Destination + if needSkipLink(r.Flags, dest) { + if entering { + r.out(w, ttTag) + } else { + r.out(w, ttCloseTag) + } + } else { + if entering { + dest = r.addAbsPrefix(dest) + var hrefBuf bytes.Buffer + hrefBuf.WriteString("href=\"") + escLink(&hrefBuf, dest) + hrefBuf.WriteByte('"') + attrs = append(attrs, hrefBuf.String()) + if node.NoteID != 0 { + r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) + break + } + attrs = appendLinkAttrs(attrs, r.Flags, dest) + if len(node.LinkData.Title) > 0 { + var titleBuff bytes.Buffer + titleBuff.WriteString("title=\"") + escapeHTML(&titleBuff, node.LinkData.Title) + titleBuff.WriteByte('"') + attrs = append(attrs, titleBuff.String()) + } + r.tag(w, aTag, attrs) + } else { + if node.NoteID != 0 { + break + } + r.out(w, aCloseTag) + } + } + case Image: + if r.Flags&SkipImages != 0 { + return SkipChildren + } + if entering { + dest := node.LinkData.Destination + dest = r.addAbsPrefix(dest) + if 
r.disableTags == 0 { + //if options.safe && potentiallyUnsafe(dest) { + //out(w, ``)
+				//} else {
+				r.out(w, []byte(`<img src=`)) + } + } + case Code: + r.out(w, codeTag) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + case Document: + break + case Paragraph: + if skipParagraphTags(node) { + break + } + if entering { + // TODO: untangle this clusterfuck about when the newlines need + // to be added and when not. + if node.Prev != nil { + switch node.Prev.Type { + case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: + r.cr(w) + } + } + if node.Parent.Type == BlockQuote && node.Prev == nil { + r.cr(w) + } + r.out(w, pTag) + } else { + r.out(w, pCloseTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case BlockQuote: + if entering { + r.cr(w) + r.out(w, blockquoteTag) + } else { + r.out(w, blockquoteCloseTag) + r.cr(w) + } + case HTMLBlock: + if r.Flags&SkipHTML != 0 { + break + } + r.cr(w) + r.out(w, node.Literal) + r.cr(w) + case Heading: + headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level + openTag, closeTag := headingTagsFromLevel(headingLevel) + if entering { + if node.IsTitleblock { + attrs = append(attrs, `class="title"`) + } + if node.HeadingID != "" { + id := r.ensureUniqueHeadingID(node.HeadingID) + if r.HeadingIDPrefix != "" { + id = r.HeadingIDPrefix + id + } + if r.HeadingIDSuffix != "" { + id = id + r.HeadingIDSuffix + } + attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) + } + r.cr(w) + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case HorizontalRule: + r.cr(w) + r.outHRTag(w) + r.cr(w) + case List: + openTag := ulTag + closeTag := ulCloseTag + if node.ListFlags&ListTypeOrdered != 0 { + openTag = olTag + closeTag = olCloseTag + } + if node.ListFlags&ListTypeDefinition != 0 { + openTag = dlTag + closeTag = dlCloseTag + } + if entering { + if node.IsFootnotesList { + r.out(w, footnotesDivBytes) + r.outHRTag(w) + r.cr(w) + } + r.cr(w) + if node.Parent.Type == Item && 
node.Parent.Parent.Tight { + r.cr(w) + } + r.tag(w, openTag[:len(openTag)-1], attrs) + r.cr(w) + } else { + r.out(w, closeTag) + //cr(w) + //if node.parent.Type != Item { + // cr(w) + //} + if node.Parent.Type == Item && node.Next != nil { + r.cr(w) + } + if node.Parent.Type == Document || node.Parent.Type == BlockQuote { + r.cr(w) + } + if node.IsFootnotesList { + r.out(w, footnotesCloseDivBytes) + } + } + case Item: + openTag := liTag + closeTag := liCloseTag + if node.ListFlags&ListTypeDefinition != 0 { + openTag = ddTag + closeTag = ddCloseTag + } + if node.ListFlags&ListTypeTerm != 0 { + openTag = dtTag + closeTag = dtCloseTag + } + if entering { + if itemOpenCR(node) { + r.cr(w) + } + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) + break + } + r.out(w, openTag) + } else { + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + if r.Flags&FootnoteReturnLinks != 0 { + r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) + } + } + r.out(w, closeTag) + r.cr(w) + } + case CodeBlock: + attrs = appendLanguageAttr(attrs, node.Info) + r.cr(w) + r.out(w, preTag) + r.tag(w, codeTag[:len(codeTag)-1], attrs) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + r.out(w, preCloseTag) + if node.Parent.Type != Item { + r.cr(w) + } + case Table: + if entering { + r.cr(w) + r.out(w, tableTag) + } else { + r.out(w, tableCloseTag) + r.cr(w) + } + case TableCell: + openTag := tdTag + closeTag := tdCloseTag + if node.IsHeader { + openTag = thTag + closeTag = thCloseTag + } + if entering { + align := cellAlignment(node.Align) + if align != "" { + attrs = append(attrs, fmt.Sprintf(`align="%s"`, align)) + } + if node.Prev == nil { + r.cr(w) + } + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + r.cr(w) + } + case TableHead: + if entering { + r.cr(w) + r.out(w, theadTag) + } else { + r.out(w, theadCloseTag) + r.cr(w) + } + case 
TableBody: + if entering { + r.cr(w) + r.out(w, tbodyTag) + // XXX: this is to adhere to a rather silly test. Should fix test. + if node.FirstChild == nil { + r.cr(w) + } + } else { + r.out(w, tbodyCloseTag) + r.cr(w) + } + case TableRow: + if entering { + r.cr(w) + r.out(w, trTag) + } else { + r.out(w, trCloseTag) + r.cr(w) + } + default: + panic("Unknown node type " + node.Type.String()) + } + return GoToNext +} + +// RenderHeader writes HTML document preamble and TOC if requested. +func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) { + r.writeDocumentHeader(w) + if r.Flags&TOC != 0 { + r.writeTOC(w, ast) + } +} + +// RenderFooter writes HTML document footer. +func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) { + if r.Flags&CompletePage == 0 { + return + } + io.WriteString(w, "\n\n\n") +} + +func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) { + if r.Flags&CompletePage == 0 { + return + } + ending := "" + if r.Flags&UseXHTML != 0 { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + ending = " /" + } else { + io.WriteString(w, "\n") + io.WriteString(w, "\n") + } + io.WriteString(w, "\n") + io.WriteString(w, " ") + if r.Flags&Smartypants != 0 { + r.sr.Process(w, []byte(r.Title)) + } else { + escapeHTML(w, []byte(r.Title)) + } + io.WriteString(w, "\n") + io.WriteString(w, " \n") + io.WriteString(w, " \n") + if r.CSS != "" { + io.WriteString(w, " \n") + } + if r.Icon != "" { + io.WriteString(w, " \n") + } + io.WriteString(w, "\n") + io.WriteString(w, "\n\n") +} + +func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) { + buf := bytes.Buffer{} + + inHeading := false + tocLevel := 0 + headingCount := 0 + + ast.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Heading && !node.HeadingData.IsTitleblock { + inHeading = entering + if entering { + node.HeadingID = fmt.Sprintf("toc_%d", headingCount) + if node.Level == tocLevel { + buf.WriteString("\n\n
  • ") + } else if node.Level < tocLevel { + for node.Level < tocLevel { + tocLevel-- + buf.WriteString("
  • \n") + } + buf.WriteString("\n\n
  • ") + } else { + for node.Level > tocLevel { + tocLevel++ + buf.WriteString("\n") + } + + if buf.Len() > 0 { + io.WriteString(w, "\n") + } + r.lastOutputLen = buf.Len() +} diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go new file mode 100644 index 0000000..d45bd94 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/inline.go @@ -0,0 +1,1228 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse inline elements. +// + +package blackfriday + +import ( + "bytes" + "regexp" + "strconv" +) + +var ( + urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` + anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) + + // https://www.w3.org/TR/html5/syntax.html#character-references + // highest unicode code point in 17 planes (2^20): 1,114,112d = + // 7 dec digits or 6 hex digits + // named entity references can be 2-31 characters with stuff like < + // at one end and ∳ at the other. There + // are also sometimes numbers at the end, although this isn't inherent + // in the specification; there are never numbers anywhere else in + // current character references, though; see ¾ and ▒, etc. 
+ // https://www.w3.org/TR/html5/syntax.html#named-character-references + // + // entity := "&" (named group | number ref) ";" + // named group := [a-zA-Z]{2,31}[0-9]{0,2} + // number ref := "#" (dec ref | hex ref) + // dec ref := [0-9]{1,7} + // hex ref := ("x" | "X") [0-9a-fA-F]{1,6} + htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`) +) + +// Functions to parse text within a block +// Each function returns the number of chars taken care of +// data is the complete block being rendered +// offset is the number of valid chars before the current cursor + +func (p *Markdown) inline(currBlock *Node, data []byte) { + // handlers might call us recursively: enforce a maximum depth + if p.nesting >= p.maxNesting || len(data) == 0 { + return + } + p.nesting++ + beg, end := 0, 0 + for end < len(data) { + handler := p.inlineCallback[data[end]] + if handler != nil { + if consumed, node := handler(p, data, end); consumed == 0 { + // No action from the callback. + end++ + } else { + // Copy inactive chars into the output. + currBlock.AppendChild(text(data[beg:end])) + if node != nil { + currBlock.AppendChild(node) + } + // Skip past whatever the callback used. 
+ beg = end + consumed + end = beg + } + } else { + end++ + } + } + if beg < len(data) { + if data[end-1] == '\n' { + end-- + } + currBlock.AppendChild(text(data[beg:end])) + } + p.nesting-- +} + +// single and double emphasis parsing +func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + c := data[0] + + if len(data) > 2 && data[1] != c { + // whitespace cannot follow an opening emphasis; + // strikethrough only takes two characters '~~' + if c == '~' || isspace(data[1]) { + return 0, nil + } + ret, node := helperEmphasis(p, data[1:], c) + if ret == 0 { + return 0, nil + } + + return ret + 1, node + } + + if len(data) > 3 && data[1] == c && data[2] != c { + if isspace(data[2]) { + return 0, nil + } + ret, node := helperDoubleEmphasis(p, data[2:], c) + if ret == 0 { + return 0, nil + } + + return ret + 2, node + } + + if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { + if c == '~' || isspace(data[3]) { + return 0, nil + } + ret, node := helperTripleEmphasis(p, data, 3, c) + if ret == 0 { + return 0, nil + } + + return ret + 3, node + } + + return 0, nil +} + +func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + nb := 0 + + // count the number of backticks in the delimiter + for nb < len(data) && data[nb] == '`' { + nb++ + } + + // find the next delimiter + i, end := 0, 0 + for end = nb; end < len(data) && i < nb; end++ { + if data[end] == '`' { + i++ + } else { + i = 0 + } + } + + // no matching delimiter? + if i < nb && end >= len(data) { + return 0, nil + } + + // trim outside whitespace + fBegin := nb + for fBegin < end && data[fBegin] == ' ' { + fBegin++ + } + + fEnd := end - nb + for fEnd > fBegin && data[fEnd-1] == ' ' { + fEnd-- + } + + // render the code span + if fBegin != fEnd { + code := NewNode(Code) + code.Literal = data[fBegin:fEnd] + return end, code + } + + return end, nil +} + +// newline preceded by two spaces becomes
    +func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + origOffset := offset + for offset < len(data) && data[offset] == ' ' { + offset++ + } + + if offset < len(data) && data[offset] == '\n' { + if offset-origOffset >= 2 { + return offset - origOffset + 1, NewNode(Hardbreak) + } + return offset - origOffset, nil + } + return 0, nil +} + +// newline without two spaces works when HardLineBreak is enabled +func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + if p.extensions&HardLineBreak != 0 { + return 1, NewNode(Hardbreak) + } + return 0, nil +} + +type linkType int + +const ( + linkNormal linkType = iota + linkImg + linkDeferredFootnote + linkInlineFootnote +) + +func isReferenceStyleLink(data []byte, pos int, t linkType) bool { + if t == linkDeferredFootnote { + return false + } + return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' +} + +func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +// '[': parse a link or an image or a footnote +func link(p *Markdown, data []byte, offset int) (int, *Node) { + // no links allowed inside regular links, footnote, and deferred footnotes + if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { + return 0, nil + } + + var t linkType + switch { + // special case: ![^text] == deferred footnote (that follows something with + // an exclamation point) + case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': + t = linkDeferredFootnote + // ![alt] == image + case offset >= 0 && data[offset] == '!': + t = linkImg + offset++ + // ^[text] == inline footnote + // [^refId] == deferred footnote + 
case p.extensions&Footnotes != 0: + if offset >= 0 && data[offset] == '^' { + t = linkInlineFootnote + offset++ + } else if len(data)-1 > offset && data[offset+1] == '^' { + t = linkDeferredFootnote + } + // [text] == regular link + default: + t = linkNormal + } + + data = data[offset:] + + var ( + i = 1 + noteID int + title, link, altContent []byte + textHasNl = false + ) + + if t == linkDeferredFootnote { + i++ + } + + // look for the matching closing bracket + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + textHasNl = true + + case isBackslashEscaped(data, i): + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + txtE := i + i++ + var footnoteNode *Node + + // skip any amount of whitespace or newline + // (this is much more lax than original markdown syntax) + for i < len(data) && isspace(data[i]) { + i++ + } + + // inline style link + switch { + case i < len(data) && data[i] == '(': + // skip initial whitespace + i++ + + for i < len(data) && isspace(data[i]) { + i++ + } + + linkB := i + + // look for link end: ' " ) + findlinkend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')' || data[i] == '\'' || data[i] == '"': + break findlinkend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + linkE := i + + // look for title end if present + titleB, titleE := 0, 0 + if data[i] == '\'' || data[i] == '"' { + i++ + titleB = i + + findtitleend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')': + break findtitleend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + + // skip whitespace after title + titleE = i - 1 + for titleE > titleB && isspace(data[titleE]) { + titleE-- + } + + // check for closing quote presence + if data[titleE] != '\'' && data[titleE] != 
'"' { + titleB, titleE = 0, 0 + linkE = i + } + } + + // remove whitespace at the end of the link + for linkE > linkB && isspace(data[linkE-1]) { + linkE-- + } + + // remove optional angle brackets around the link + if data[linkB] == '<' { + linkB++ + } + if data[linkE-1] == '>' { + linkE-- + } + + // build escaped link and title + if linkE > linkB { + link = data[linkB:linkE] + } + + if titleE > titleB { + title = data[titleB:titleE] + } + + i++ + + // reference style link + case isReferenceStyleLink(data, i, t): + var id []byte + altContentConsidered := false + + // look for the id + i++ + linkB := i + for i < len(data) && data[i] != ']' { + i++ + } + if i >= len(data) { + return 0, nil + } + linkE := i + + // find the reference + if linkB == linkE { + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + id = data[1:txtE] + altContentConsidered = true + } + } else { + id = data[linkB:linkE] + } + + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + // keep link and title from reference + link = lr.link + title = lr.title + if altContentConsidered { + altContent = lr.text + } + i++ + + // shortcut reference style link or reference or inline footnote + default: + var id []byte + + // craft the id + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + if t == linkDeferredFootnote { + id = data[2:txtE] // get rid of the ^ + } else { + id = data[1:txtE] + } + } + + footnoteNode = NewNode(Item) + if t == linkInlineFootnote { + // create a new reference + noteID = len(p.notes) + 1 + + var fragment []byte + if len(id) > 0 { + if len(id) < 16 { + fragment = make([]byte, len(id)) + } else { + fragment = 
make([]byte, 16) + } + copy(fragment, slugify(id)) + } else { + fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) + } + + ref := &reference{ + noteID: noteID, + hasBlock: false, + link: fragment, + title: id, + footnote: footnoteNode, + } + + p.notes = append(p.notes, ref) + + link = ref.link + title = ref.title + } else { + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + if t == linkDeferredFootnote { + lr.noteID = len(p.notes) + 1 + lr.footnote = footnoteNode + p.notes = append(p.notes, lr) + } + + // keep link and title from reference + link = lr.link + // if inline footnote, title == footnote contents + title = lr.title + noteID = lr.noteID + } + + // rewind the whitespace + i = txtE + 1 + } + + var uLink []byte + if t == linkNormal || t == linkImg { + if len(link) > 0 { + var uLinkBuf bytes.Buffer + unescapeText(&uLinkBuf, link) + uLink = uLinkBuf.Bytes() + } + + // links need something to click on and somewhere to go + if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { + return 0, nil + } + } + + // call the relevant rendering function + var linkNode *Node + switch t { + case linkNormal: + linkNode = NewNode(Link) + linkNode.Destination = normalizeURI(uLink) + linkNode.Title = title + if len(altContent) > 0 { + linkNode.AppendChild(text(altContent)) + } else { + // links cannot contain other links, so turn off link parsing + // temporarily and recurse + insideLink := p.insideLink + p.insideLink = true + p.inline(linkNode, data[1:txtE]) + p.insideLink = insideLink + } + + case linkImg: + linkNode = NewNode(Image) + linkNode.Destination = uLink + linkNode.Title = title + linkNode.AppendChild(text(data[1:txtE])) + i++ + + case linkInlineFootnote, linkDeferredFootnote: + linkNode = NewNode(Link) + linkNode.Destination = link + linkNode.Title = title + linkNode.NoteID = noteID + linkNode.Footnote = footnoteNode + if t == linkInlineFootnote { + i++ + } + + default: + return 0, nil 
+ } + + return i, linkNode +} + +func (p *Markdown) inlineHTMLComment(data []byte) int { + if len(data) < 5 { + return 0 + } + if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { + return 0 + } + i := 5 + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return i + 1 +} + +func stripMailto(link []byte) []byte { + if bytes.HasPrefix(link, []byte("mailto://")) { + return link[9:] + } else if bytes.HasPrefix(link, []byte("mailto:")) { + return link[7:] + } else { + return link + } +} + +// autolinkType specifies a kind of autolink that gets detected. +type autolinkType int + +// These are the possible flag values for the autolink renderer. +const ( + notAutolink autolinkType = iota + normalAutolink + emailAutolink +) + +// '<' when tags or autolinks are allowed +func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + altype, end := tagLength(data) + if size := p.inlineHTMLComment(data); size > 0 { + end = size + } + if end > 2 { + if altype != notAutolink { + var uLink bytes.Buffer + unescapeText(&uLink, data[1:end+1-2]) + if uLink.Len() > 0 { + link := uLink.Bytes() + node := NewNode(Link) + node.Destination = link + if altype == emailAutolink { + node.Destination = append([]byte("mailto:"), link...) 
+ } + node.AppendChild(text(stripMailto(link))) + return end, node + } + } else { + htmlTag := NewNode(HTMLSpan) + htmlTag.Literal = data[:end] + return end, htmlTag + } + } + + return end, nil +} + +// '\\' backslash escape +var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~") + +func escape(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + if len(data) > 1 { + if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' { + return 2, NewNode(Hardbreak) + } + if bytes.IndexByte(escapeChars, data[1]) < 0 { + return 0, nil + } + + return 2, text(data[1:2]) + } + + return 2, nil +} + +func unescapeText(ob *bytes.Buffer, src []byte) { + i := 0 + for i < len(src) { + org := i + for i < len(src) && src[i] != '\\' { + i++ + } + + if i > org { + ob.Write(src[org:i]) + } + + if i+1 >= len(src) { + break + } + + ob.WriteByte(src[i+1]) + i += 2 + } +} + +// '&' escaped when it doesn't belong to an entity +// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+; +func entity(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + end := 1 + + if end < len(data) && data[end] == '#' { + end++ + } + + for end < len(data) && isalnum(data[end]) { + end++ + } + + if end < len(data) && data[end] == ';' { + end++ // real entity + } else { + return 0, nil // lone '&' + } + + ent := data[:end] + // undo & escaping or it will be converted to &amp; by another + // escaper in the renderer + if bytes.Equal(ent, []byte("&")) { + ent = []byte{'&'} + } + + return end, text(ent) +} + +func linkEndsWithEntity(data []byte, linkEnd int) bool { + entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1) + return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd +} + +// hasPrefixCaseInsensitive is a custom implementation of +// strings.HasPrefix(strings.ToLower(s), prefix) +// we rolled our own because ToLower pulls in a huge machinery of lowercasing +// anything from Unicode and that's very slow. 
Since this func will only be +// used on ASCII protocol prefixes, we can take shortcuts. +func hasPrefixCaseInsensitive(s, prefix []byte) bool { + if len(s) < len(prefix) { + return false + } + delta := byte('a' - 'A') + for i, b := range prefix { + if b != s[i] && b != s[i]+delta { + return false + } + } + return true +} + +var protocolPrefixes = [][]byte{ + []byte("http://"), + []byte("https://"), + []byte("ftp://"), + []byte("file://"), + []byte("mailto:"), +} + +const shortestPrefix = 6 // len("ftp://"), the shortest of the above + +func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // quick check to rule out most false hits + if p.insideLink || len(data) < offset+shortestPrefix { + return 0, nil + } + for _, prefix := range protocolPrefixes { + endOfHead := offset + 8 // 8 is the len() of the longest prefix + if endOfHead > len(data) { + endOfHead = len(data) + } + if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { + return autoLink(p, data, offset) + } + } + return 0, nil +} + +func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // Now a more expensive check to see if we're not inside an anchor element + anchorStart := offset + offsetFromAnchor := 0 + for anchorStart > 0 && data[anchorStart] != '<' { + anchorStart-- + offsetFromAnchor++ + } + + anchorStr := anchorRe.Find(data[anchorStart:]) + if anchorStr != nil { + anchorClose := NewNode(HTMLSpan) + anchorClose.Literal = anchorStr[offsetFromAnchor:] + return len(anchorStr) - offsetFromAnchor, anchorClose + } + + // scan backward for a word boundary + rewind := 0 + for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { + rewind++ + } + if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters + return 0, nil + } + + origData := data + data = data[offset-rewind:] + + if !isSafeLink(data) { + return 0, nil + } + + linkEnd := 0 + for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { + linkEnd++ + } + + // Skip 
punctuation at the end of the link + if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { + linkEnd-- + } + + // But don't skip semicolon if it's a part of escaped entity: + if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { + linkEnd-- + } + + // See if the link finishes with a punctuation sign that can be closed. + var copen byte + switch data[linkEnd-1] { + case '"': + copen = '"' + case '\'': + copen = '\'' + case ')': + copen = '(' + case ']': + copen = '[' + case '}': + copen = '{' + default: + copen = 0 + } + + if copen != 0 { + bufEnd := offset - rewind + linkEnd - 2 + + openDelim := 1 + + /* Try to close the final punctuation sign in this same line; + * if we managed to close it outside of the URL, that means that it's + * not part of the URL. If it closes inside the URL, that means it + * is part of the URL. + * + * Examples: + * + * foo http://www.pokemon.com/Pikachu_(Electric) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo (http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric)) + * + * (foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => foo http://www.pokemon.com/Pikachu_(Electric) + */ + + for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { + if origData[bufEnd] == data[linkEnd-1] { + openDelim++ + } + + if origData[bufEnd] == copen { + openDelim-- + } + + bufEnd-- + } + + if openDelim == 0 { + linkEnd-- + } + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[:linkEnd]) + + if uLink.Len() > 0 { + node := NewNode(Link) + node.Destination = uLink.Bytes() + node.AppendChild(text(uLink.Bytes())) + return linkEnd, node + } + + return linkEnd, nil +} + +func isEndOfLink(char byte) bool { + return isspace(char) || char == '<' +} + +var validUris = [][]byte{[]byte("http://"), []byte("https://"), 
[]byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + for _, path := range validPaths { + if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { + if len(link) == len(path) { + return true + } else if isalnum(link[len(path)]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { + return true + } + } + + return false +} + +// return the length of the given tag, or 0 is it's not valid +func tagLength(data []byte) (autolink autolinkType, end int) { + var i, j int + + // a valid tag can't be shorter than 3 chars + if len(data) < 3 { + return notAutolink, 0 + } + + // begins with a '<' optionally followed by '/', followed by letter or number + if data[0] != '<' { + return notAutolink, 0 + } + if data[1] == '/' { + i = 2 + } else { + i = 1 + } + + if !isalnum(data[i]) { + return notAutolink, 0 + } + + // scheme test + autolink = notAutolink + + // try to find the beginning of an URI + for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { + i++ + } + + if i > 1 && i < len(data) && data[i] == '@' { + if j = isMailtoAutoLink(data[i:]); j != 0 { + return emailAutolink, i + j + } + } + + if i > 2 && i < len(data) && data[i] == ':' { + autolink = normalAutolink + i++ + } + + // complete autolink test: no whitespace or ' or " + switch { + case i >= len(data): + autolink = notAutolink + case autolink != notAutolink: + j = i + + for i < len(data) { + if data[i] == '\\' { + i += 2 + } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { + break + } else { + i++ + } + + } + + if i >= len(data) { + return autolink, 0 + } + if i > j && data[i] == '>' { + return autolink, i + 1 + } + + // one of the forbidden chars has been found + autolink = notAutolink + } + i += bytes.IndexByte(data[i:], '>') + if i < 0 { + return autolink, 0 + } + return autolink, i + 1 +} + +// look for the address part of a mail autolink and '>' +// this is less strict than the original markdown e-mail address matching +func isMailtoAutoLink(data []byte) int { + nb := 0 + + // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' + for i := 0; i < len(data); i++ { + if isalnum(data[i]) { + continue + } + + switch data[i] { + case '@': + nb++ + + case '-', '.', '_': + break + + case '>': + if nb == 1 { + return i + 1 + } + return 0 + default: + return 0 + } + } + + return 0 +} + +// look for the next emph char, skipping other constructs +func helperFindEmphChar(data []byte, c byte) int { + i := 0 + + for i < len(data) { + for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { + i++ + } + if i >= len(data) { + return 0 + } + // do not count escaped chars + if i != 0 && data[i-1] == '\\' { + i++ + continue + } + if data[i] == c { + return i + } + + if data[i] == '`' { + // skip a code span + tmpI := 0 + i++ + for i < len(data) && data[i] != '`' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ 
+ } else if data[i] == '[' { + // skip a link + tmpI := 0 + i++ + for i < len(data) && data[i] != ']' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\n') { + i++ + } + if i >= len(data) { + return tmpI + } + if data[i] != '[' && data[i] != '(' { // not a link + if tmpI > 0 { + return tmpI + } + continue + } + cc := data[i] + i++ + for i < len(data) && data[i] != cc { + if tmpI == 0 && data[i] == c { + return i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } + } + return 0 +} + +func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + // skip one symbol if coming from emph3 + if len(data) > 1 && data[0] == c && data[1] == c { + i = 1 + } + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + if i >= len(data) { + return 0, nil + } + + if i+1 < len(data) && data[i+1] == c { + i++ + continue + } + + if data[i] == c && !isspace(data[i-1]) { + + if p.extensions&NoIntraEmphasis != 0 { + if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { + continue + } + } + + emph := NewNode(Emph) + p.inline(emph, data[:i]) + return i + 1, emph + } + } + + return 0, nil +} + +func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) { + nodeType := Strong + if c == '~' { + nodeType = Del + } + node := NewNode(nodeType) + p.inline(node, data[:i]) + return i + 2, node + } + i++ + } + return 0, nil +} + +func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { + i := 0 + origData := data + data = data[offset:] + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + 
// skip whitespace preceded symbols + if data[i] != c || isspace(data[i-1]) { + continue + } + + switch { + case i+2 < len(data) && data[i+1] == c && data[i+2] == c: + // triple symbol found + strong := NewNode(Strong) + em := NewNode(Emph) + strong.AppendChild(em) + p.inline(em, data[:i]) + return i + 3, strong + case (i+1 < len(data) && data[i+1] == c): + // double symbol found, hand over to emph1 + length, node := helperEmphasis(p, origData[offset-2:], c) + if length == 0 { + return 0, nil + } + return length - 2, node + default: + // single symbol found, hand over to emph2 + length, node := helperDoubleEmphasis(p, origData[offset-1:], c) + if length == 0 { + return 0, nil + } + return length - 1, node + } + } + return 0, nil +} + +func text(s []byte) *Node { + node := NewNode(Text) + node.Literal = s + return node +} + +func normalizeURI(s []byte) []byte { + return s // TODO: implement +} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go new file mode 100644 index 0000000..58d2e45 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/markdown.go @@ -0,0 +1,950 @@ +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "strings" + "unicode/utf8" +) + +// +// Markdown parsing and processing +// + +// Version string of the package. Appears in the rendered document when +// CompletePage flag is on. +const Version = "2.0" + +// Extensions is a bitwise or'ed collection of enabled Blackfriday's +// extensions. +type Extensions int + +// These are the supported markdown parsing extensions. +// OR these values together to select multiple extensions. 
+const ( + NoExtensions Extensions = 0 + NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words + Tables // Render tables + FencedCode // Render fenced code blocks + Autolink // Detect embedded URLs that are not explicitly marked + Strikethrough // Strikethrough text using ~~test~~ + LaxHTMLBlocks // Loosen up HTML block parsing rules + SpaceHeadings // Be strict about prefix heading rules + HardLineBreak // Translate newlines into line breaks + TabSizeEight // Expand tabs to eight spaces instead of four + Footnotes // Pandoc-style footnotes + NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block + HeadingIDs // specify heading IDs with {#id} + Titleblock // Titleblock ala pandoc + AutoHeadingIDs // Create the heading ID from the text + BackslashLineBreak // Translate trailing backslashes into line breaks + DefinitionLists // Render definition lists + + CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | + SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes + + CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | + Autolink | Strikethrough | SpaceHeadings | HeadingIDs | + BackslashLineBreak | DefinitionLists +) + +// ListType contains bitwise or'ed flags for list and list item objects. +type ListType int + +// These are the possible flag values for the ListItem renderer. +// Multiple flag values may be ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + ListTypeOrdered ListType = 1 << iota + ListTypeDefinition + ListTypeTerm + + ListItemContainsBlock + ListItemBeginningOfList // TODO: figure out if this is of any use now + ListItemEndOfList +) + +// CellAlignFlags holds a type of alignment in a table cell. +type CellAlignFlags int + +// These are the possible flag values for the table cell renderer. +// Only a single one of these values will be used; they are not ORed together. 
+// These are mostly of interest if you are writing a new output format. +const ( + TableAlignmentLeft CellAlignFlags = 1 << iota + TableAlignmentRight + TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) +) + +// The size of a tab stop. +const ( + TabSizeDefault = 4 + TabSizeDouble = 8 +) + +// blockTags is a set of tags that are recognized as HTML block tags. +// Any of these can be included in markdown text without special escaping. +var blockTags = map[string]struct{}{ + "blockquote": {}, + "del": {}, + "div": {}, + "dl": {}, + "fieldset": {}, + "form": {}, + "h1": {}, + "h2": {}, + "h3": {}, + "h4": {}, + "h5": {}, + "h6": {}, + "iframe": {}, + "ins": {}, + "math": {}, + "noscript": {}, + "ol": {}, + "pre": {}, + "p": {}, + "script": {}, + "style": {}, + "table": {}, + "ul": {}, + + // HTML5 + "address": {}, + "article": {}, + "aside": {}, + "canvas": {}, + "figcaption": {}, + "figure": {}, + "footer": {}, + "header": {}, + "hgroup": {}, + "main": {}, + "nav": {}, + "output": {}, + "progress": {}, + "section": {}, + "video": {}, +} + +// Renderer is the rendering interface. This is mostly of interest if you are +// implementing a new rendering format. +// +// Only an HTML implementation is provided in this repository, see the README +// for external implementations. +type Renderer interface { + // RenderNode is the main rendering method. It will be called once for + // every leaf node and twice for every non-leaf node (first with + // entering=true, then with entering=false). The method should write its + // rendition of the node to the supplied writer w. + RenderNode(w io.Writer, node *Node, entering bool) WalkStatus + + // RenderHeader is a method that allows the renderer to produce some + // content preceding the main body of the output document. The header is + // understood in the broad sense here. 
For example, the default HTML + // renderer will write not only the HTML document preamble, but also the + // table of contents if it was requested. + // + // The method will be passed an entire document tree, in case a particular + // implementation needs to inspect it to produce output. + // + // The output should be written to the supplied writer w. If your + // implementation has no header to write, supply an empty implementation. + RenderHeader(w io.Writer, ast *Node) + + // RenderFooter is a symmetric counterpart of RenderHeader. + RenderFooter(w io.Writer, ast *Node) +} + +// Callback functions for inline parsing. One such function is defined +// for each character that triggers a response when parsing inline data. +type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) + +// Markdown is a type that holds extensions and the runtime state used by +// Parse, and the renderer. You can not use it directly, construct it with New. +type Markdown struct { + renderer Renderer + referenceOverride ReferenceOverrideFunc + refs map[string]*reference + inlineCallback [256]inlineParser + extensions Extensions + nesting int + maxNesting int + insideLink bool + + // Footnotes need to be ordered as well as available to quickly check for + // presence. If a ref is also a footnote, it's stored both in refs and here + // in notes. Slice is nil if footnotes not enabled. 
+ notes []*reference + + doc *Node + tip *Node // = doc + oldTip *Node + lastMatchedContainer *Node // = doc + allClosed bool +} + +func (p *Markdown) getRef(refid string) (ref *reference, found bool) { + if p.referenceOverride != nil { + r, overridden := p.referenceOverride(refid) + if overridden { + if r == nil { + return nil, false + } + return &reference{ + link: []byte(r.Link), + title: []byte(r.Title), + noteID: 0, + hasBlock: false, + text: []byte(r.Text)}, true + } + } + // refs are case insensitive + ref, found = p.refs[strings.ToLower(refid)] + return ref, found +} + +func (p *Markdown) finalize(block *Node) { + above := block.Parent + block.open = false + p.tip = above +} + +func (p *Markdown) addChild(node NodeType, offset uint32) *Node { + return p.addExistingChild(NewNode(node), offset) +} + +func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { + for !p.tip.canContain(node.Type) { + p.finalize(p.tip) + } + p.tip.AppendChild(node) + p.tip = node + return node +} + +func (p *Markdown) closeUnmatchedBlocks() { + if !p.allClosed { + for p.oldTip != p.lastMatchedContainer { + parent := p.oldTip.Parent + p.finalize(p.oldTip) + p.oldTip = parent + } + p.allClosed = true + } +} + +// +// +// Public interface +// +// + +// Reference represents the details of a link. +// See the documentation in Options for more details on use-case. +type Reference struct { + // Link is usually the URL the reference points to. + Link string + // Title is the alternate text describing the link in more detail. + Title string + // Text is the optional text to override the ref with if the syntax used was + // [refid][] + Text string +} + +// ReferenceOverrideFunc is expected to be called with a reference string and +// return either a valid Reference type that the reference string maps to or +// nil. If overridden is false, the default reference logic will be executed. +// See the documentation in Options for more details on use-case. 
+type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) + +// New constructs a Markdown processor. You can use the same With* functions as +// for Run() to customize parser's behavior and the renderer. +func New(opts ...Option) *Markdown { + var p Markdown + for _, opt := range opts { + opt(&p) + } + p.refs = make(map[string]*reference) + p.maxNesting = 16 + p.insideLink = false + docNode := NewNode(Document) + p.doc = docNode + p.tip = docNode + p.oldTip = docNode + p.lastMatchedContainer = docNode + p.allClosed = true + // register inline parsers + p.inlineCallback[' '] = maybeLineBreak + p.inlineCallback['*'] = emphasis + p.inlineCallback['_'] = emphasis + if p.extensions&Strikethrough != 0 { + p.inlineCallback['~'] = emphasis + } + p.inlineCallback['`'] = codeSpan + p.inlineCallback['\n'] = lineBreak + p.inlineCallback['['] = link + p.inlineCallback['<'] = leftAngle + p.inlineCallback['\\'] = escape + p.inlineCallback['&'] = entity + p.inlineCallback['!'] = maybeImage + p.inlineCallback['^'] = maybeInlineFootnote + if p.extensions&Autolink != 0 { + p.inlineCallback['h'] = maybeAutoLink + p.inlineCallback['m'] = maybeAutoLink + p.inlineCallback['f'] = maybeAutoLink + p.inlineCallback['H'] = maybeAutoLink + p.inlineCallback['M'] = maybeAutoLink + p.inlineCallback['F'] = maybeAutoLink + } + if p.extensions&Footnotes != 0 { + p.notes = make([]*reference, 0) + } + return &p +} + +// Option customizes the Markdown processor's default behavior. +type Option func(*Markdown) + +// WithRenderer allows you to override the default renderer. +func WithRenderer(r Renderer) Option { + return func(p *Markdown) { + p.renderer = r + } +} + +// WithExtensions allows you to pick some of the many extensions provided by +// Blackfriday. You can bitwise OR them. +func WithExtensions(e Extensions) Option { + return func(p *Markdown) { + p.extensions = e + } +} + +// WithNoExtensions turns off all extensions and custom behavior. 
+func WithNoExtensions() Option { + return func(p *Markdown) { + p.extensions = NoExtensions + p.renderer = NewHTMLRenderer(HTMLRendererParameters{ + Flags: HTMLFlagsNone, + }) + } +} + +// WithRefOverride sets an optional function callback that is called every +// time a reference is resolved. +// +// In Markdown, the link reference syntax can be made to resolve a link to +// a reference instead of an inline URL, in one of the following ways: +// +// * [link text][refid] +// * [refid][] +// +// Usually, the refid is defined at the bottom of the Markdown document. If +// this override function is provided, the refid is passed to the override +// function first, before consulting the defined refids at the bottom. If +// the override function indicates an override did not occur, the refids at +// the bottom will be used to fill in the link details. +func WithRefOverride(o ReferenceOverrideFunc) Option { + return func(p *Markdown) { + p.referenceOverride = o + } +} + +// Run is the main entry point to Blackfriday. It parses and renders a +// block of markdown-encoded text. +// +// The simplest invocation of Run takes one argument, input: +// output := Run(input) +// This will parse the input with CommonExtensions enabled and render it with +// the default HTMLRenderer (with CommonHTMLFlags). +// +// Variadic arguments opts can customize the default behavior. Since Markdown +// type does not contain exported fields, you can not use it directly. Instead, +// use the With* functions. For example, this will call the most basic +// functionality, with no extensions: +// output := Run(input, WithNoExtensions()) +// +// You can use any number of With* arguments, even contradicting ones. 
They +// will be applied in order of appearance and the latter will override the +// former: +// output := Run(input, WithNoExtensions(), WithExtensions(exts), +// WithRenderer(yourRenderer)) +func Run(input []byte, opts ...Option) []byte { + r := NewHTMLRenderer(HTMLRendererParameters{ + Flags: CommonHTMLFlags, + }) + optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} + optList = append(optList, opts...) + parser := New(optList...) + ast := parser.Parse(input) + var buf bytes.Buffer + parser.renderer.RenderHeader(&buf, ast) + ast.Walk(func(node *Node, entering bool) WalkStatus { + return parser.renderer.RenderNode(&buf, node, entering) + }) + parser.renderer.RenderFooter(&buf, ast) + return buf.Bytes() +} + +// Parse is an entry point to the parsing part of Blackfriday. It takes an +// input markdown document and produces a syntax tree for its contents. This +// tree can then be rendered with a default or custom renderer, or +// analyzed/transformed by the caller to whatever non-standard needs they have. +// The return value is the root node of the syntax tree. +func (p *Markdown) Parse(input []byte) *Node { + p.block(input) + // Walk the tree and finish up some of unfinished blocks + for p.tip != nil { + p.finalize(p.tip) + } + // Walk the tree again and process inline markdown in each block + p.doc.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) + p.parseRefsToAST() + return p.doc +} + +func (p *Markdown) parseRefsToAST() { + if p.extensions&Footnotes == 0 || len(p.notes) == 0 { + return + } + p.tip = p.doc + block := p.addBlock(List, nil) + block.IsFootnotesList = true + block.ListFlags = ListTypeOrdered + flags := ListItemBeginningOfList + // Note: this loop is intentionally explicit, not range-form. 
This is + // because the body of the loop will append nested footnotes to p.notes and + // we need to process those late additions. Range form would only walk over + // the fixed initial set. + for i := 0; i < len(p.notes); i++ { + ref := p.notes[i] + p.addExistingChild(ref.footnote, 0) + block := ref.footnote + block.ListFlags = flags | ListTypeOrdered + block.RefLink = ref.link + if ref.hasBlock { + flags |= ListItemContainsBlock + p.block(ref.title) + } else { + p.inline(block, ref.title) + } + flags &^= ListItemBeginningOfList | ListItemContainsBlock + } + above := block.Parent + finalizeList(block) + p.tip = above + block.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) +} + +// +// Link references +// +// This section implements support for references that (usually) appear +// as footnotes in a document, and can be referenced anywhere in the document. +// The basic format is: +// +// [1]: http://www.google.com/ "Google" +// [2]: http://www.github.com/ "Github" +// +// Anywhere in the document, the reference can be linked by referring to its +// label, i.e., 1 and 2 in this example, as in: +// +// This library is hosted on [Github][2], a git hosting site. +// +// Actual footnotes as specified in Pandoc and supported by some other Markdown +// libraries such as php-markdown are also taken care of. They look like this: +// +// This sentence needs a bit of further explanation.[^note] +// +// [^note]: This is the explanation. +// +// Footnotes should be placed at the end of the document in an ordered list. +// Finally, there are inline footnotes such as: +// +// Inline footnotes^[Also supported.] provide a quick inline explanation, +// but are rendered at the bottom of the document. +// + +// reference holds all information necessary for a reference-style links or +// footnotes. 
+// +// Consider this markdown with reference-style links: +// +// [link][ref] +// +// [ref]: /url/ "tooltip title" +// +// It will be ultimately converted to this HTML: +// +//

    link

    +// +// And a reference structure will be populated as follows: +// +// p.refs["ref"] = &reference{ +// link: "/url/", +// title: "tooltip title", +// } +// +// Alternatively, reference can contain information about a footnote. Consider +// this markdown: +// +// Text needing a footnote.[^a] +// +// [^a]: This is the note +// +// A reference structure will be populated as follows: +// +// p.refs["a"] = &reference{ +// link: "a", +// title: "This is the note", +// noteID: , +// } +// +// TODO: As you can see, it begs for splitting into two dedicated structures +// for refs and for footnotes. +type reference struct { + link []byte + title []byte + noteID int // 0 if not a footnote ref + hasBlock bool + footnote *Node // a link to the Item node within a list of footnotes + + text []byte // only gets populated by refOverride feature with Reference.Text +} + +func (r *reference) String() string { + return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", + r.link, r.title, r.text, r.noteID, r.hasBlock) +} + +// Check whether or not data starts with a reference link. +// If so, it is parsed and stored in the list of references +// (in the render struct). +// Returns the number of bytes to skip to move past it, +// or zero if the first line is not a reference. +func isReference(p *Markdown, data []byte, tabSize int) int { + // up to 3 optional leading spaces + if len(data) < 4 { + return 0 + } + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + + noteID := 0 + + // id part: anything but a newline between brackets + if data[i] != '[' { + return 0 + } + i++ + if p.extensions&Footnotes != 0 { + if i < len(data) && data[i] == '^' { + // we can set it to anything here because the proper noteIds will + // be assigned later during the second pass. 
It just has to be != 0 + noteID = 1 + i++ + } + } + idOffset := i + for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { + i++ + } + if i >= len(data) || data[i] != ']' { + return 0 + } + idEnd := i + // footnotes can have empty ID, like this: [^], but a reference can not be + // empty like this: []. Break early if it's not a footnote and there's no ID + if noteID == 0 && idOffset == idEnd { + return 0 + } + // spacer: colon (space | tab)* newline? (space | tab)* + i++ + if i >= len(data) || data[i] != ':' { + return 0 + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && (data[i] == '\n' || data[i] == '\r') { + i++ + if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { + i++ + } + } + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i >= len(data) { + return 0 + } + + var ( + linkOffset, linkEnd int + titleOffset, titleEnd int + lineEnd int + raw []byte + hasBlock bool + ) + + if p.extensions&Footnotes != 0 && noteID != 0 { + linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) + lineEnd = linkEnd + } else { + linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) + } + if lineEnd == 0 { + return 0 + } + + // a valid ref has been found + + ref := &reference{ + noteID: noteID, + hasBlock: hasBlock, + } + + if noteID > 0 { + // reusing the link field for the id since footnotes don't have links + ref.link = data[idOffset:idEnd] + // if footnote, it's not really a title, it's the contained text + ref.title = raw + } else { + ref.link = data[linkOffset:linkEnd] + ref.title = data[titleOffset:titleEnd] + } + + // id matches are case-insensitive + id := string(bytes.ToLower(data[idOffset:idEnd])) + + p.refs[id] = ref + + return lineEnd +} + +func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { + // link: whitespace-free sequence, optionally between angle brackets + if 
data[i] == '<' { + i++ + } + linkOffset = i + for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { + i++ + } + linkEnd = i + if data[linkOffset] == '<' && data[linkEnd-1] == '>' { + linkOffset++ + linkEnd-- + } + + // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { + return + } + + // compute end-of-line + if i >= len(data) || data[i] == '\r' || data[i] == '\n' { + lineEnd = i + } + if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { + lineEnd++ + } + + // optional (space|tab)* spacer after a newline + if lineEnd > 0 { + i = lineEnd + 1 + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + } + + // optional title: any non-newline sequence enclosed in '"() alone on its line + if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { + i++ + titleOffset = i + + // look for EOL + for i < len(data) && data[i] != '\n' && data[i] != '\r' { + i++ + } + if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { + titleEnd = i + 1 + } else { + titleEnd = i + } + + // step back + i-- + for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { + i-- + } + if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { + lineEnd = titleEnd + titleEnd = i + } + } + + return +} + +// The first bit of this logic is the same as Parser.listItem, but the rest +// is much simpler. This function simply finds the entire block and shifts it +// over by one tab if it is indeed a block (just returns the line if it's not). +// blockEnd is the end of the section in the input buffer, and contents is the +// extracted text that was shifted over one tab. It will need to be rendered at +// the end of the document. 
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { + if i == 0 || len(data) == 0 { + return + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + blockStart = i + + // find the end of the line + blockEnd = i + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[blockEnd:i]) + blockEnd = i + + // process the following lines + containsBlankLine := false + +gatherLines: + for blockEnd < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[blockEnd:i]) > 0 { + containsBlankLine = true + blockEnd = i + continue + } + + n := 0 + if n = isIndented(data[blockEnd:i], indentSize); n == 0 { + // this is the end of the block. + // we don't want to include this last line in the index. + break gatherLines + } + + // if there were blank lines before this one, insert a new one now + if containsBlankLine { + raw.WriteByte('\n') + containsBlankLine = false + } + + // get rid of that first tab, write to buffer + raw.Write(data[blockEnd+n : i]) + hasBlock = true + + blockEnd = i + } + + if data[blockEnd-1] != '\n' { + raw.WriteByte('\n') + } + + contents = raw.Bytes() + + return +} + +// +// +// Miscellaneous helper functions +// +// + +// Test if a character is a punctuation symbol. +// Taken from a private function in regexp in the stdlib. +func ispunct(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// Test if a character is a whitespace character. 
+func isspace(c byte) bool { + return ishorizontalspace(c) || isverticalspace(c) +} + +// Test if a character is a horizontal whitespace character. +func ishorizontalspace(c byte) bool { + return c == ' ' || c == '\t' +} + +// Test if a character is a vertical character. +func isverticalspace(c byte) bool { + return c == '\n' || c == '\r' || c == '\f' || c == '\v' +} + +// Test if a character is letter. +func isletter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// Test if a character is a letter or a digit. +// TODO: check when this is looking for ASCII alnum and when it should use unicode +func isalnum(c byte) bool { + return (c >= '0' && c <= '9') || isletter(c) +} + +// Replace tab characters with spaces, aligning to the next TAB_SIZE column. +// always ends output with a newline +func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { + // first, check for common cases: no tabs, or only tabs at beginning of line + i, prefix := 0, 0 + slowcase := false + for i = 0; i < len(line); i++ { + if line[i] == '\t' { + if prefix == i { + prefix++ + } else { + slowcase = true + break + } + } + } + + // no need to decode runes if all tabs are at the beginning of the line + if !slowcase { + for i = 0; i < prefix*tabSize; i++ { + out.WriteByte(' ') + } + out.Write(line[prefix:]) + return + } + + // the slow case: we need to count runes to figure out how + // many spaces to insert for each tab + column := 0 + i = 0 + for i < len(line) { + start := i + for i < len(line) && line[i] != '\t' { + _, size := utf8.DecodeRune(line[i:]) + i += size + column++ + } + + if i > start { + out.Write(line[start:i]) + } + + if i >= len(line) { + break + } + + for { + out.WriteByte(' ') + column++ + if column%tabSize == 0 { + break + } + } + + i++ + } +} + +// Find if a line counts as indented or not. +// Returns number of characters the indent is (0 = not indented). 
+func isIndented(data []byte, indentSize int) int { + if len(data) == 0 { + return 0 + } + if data[0] == '\t' { + return 1 + } + if len(data) < indentSize { + return 0 + } + for i := 0; i < indentSize; i++ { + if data[i] != ' ' { + return 0 + } + } + return indentSize +} + +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isalnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go new file mode 100644 index 0000000..04e6050 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/node.go @@ -0,0 +1,360 @@ +package blackfriday + +import ( + "bytes" + "fmt" +) + +// NodeType specifies a type of a single node of a syntax tree. Usually one +// node (and its type) corresponds to a single markdown feature, e.g. emphasis +// or code block. +type NodeType int + +// Constants for identifying different types of nodes. See NodeType. 
+const ( + Document NodeType = iota + BlockQuote + List + Item + Paragraph + Heading + HorizontalRule + Emph + Strong + Del + Link + Image + Text + HTMLBlock + CodeBlock + Softbreak + Hardbreak + Code + HTMLSpan + Table + TableCell + TableHead + TableBody + TableRow +) + +var nodeTypeNames = []string{ + Document: "Document", + BlockQuote: "BlockQuote", + List: "List", + Item: "Item", + Paragraph: "Paragraph", + Heading: "Heading", + HorizontalRule: "HorizontalRule", + Emph: "Emph", + Strong: "Strong", + Del: "Del", + Link: "Link", + Image: "Image", + Text: "Text", + HTMLBlock: "HTMLBlock", + CodeBlock: "CodeBlock", + Softbreak: "Softbreak", + Hardbreak: "Hardbreak", + Code: "Code", + HTMLSpan: "HTMLSpan", + Table: "Table", + TableCell: "TableCell", + TableHead: "TableHead", + TableBody: "TableBody", + TableRow: "TableRow", +} + +func (t NodeType) String() string { + return nodeTypeNames[t] +} + +// ListData contains fields relevant to a List and Item node type. +type ListData struct { + ListFlags ListType + Tight bool // Skip

    s around list item data if true + BulletChar byte // '*', '+' or '-' in bullet lists + Delimiter byte // '.' or ')' after the number in ordered lists + RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering + IsFootnotesList bool // This is a list of footnotes +} + +// LinkData contains fields relevant to a Link node type. +type LinkData struct { + Destination []byte // Destination is what goes into a href + Title []byte // Title is the tooltip thing that goes in a title attribute + NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote + Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. +} + +// CodeBlockData contains fields relevant to a CodeBlock node type. +type CodeBlockData struct { + IsFenced bool // Specifies whether it's a fenced code block or an indented one + Info []byte // This holds the info string + FenceChar byte + FenceLength int + FenceOffset int +} + +// TableCellData contains fields relevant to a TableCell node type. +type TableCellData struct { + IsHeader bool // This tells if it's under the header row + Align CellAlignFlags // This holds the value for align attribute +} + +// HeadingData contains fields relevant to a Heading node type. +type HeadingData struct { + Level int // This holds the heading level number + HeadingID string // This might hold heading ID, if present + IsTitleblock bool // Specifies whether it's a title block +} + +// Node is a single element in the abstract syntax tree of the parsed document. +// It holds connections to the structurally neighboring nodes and, for certain +// types of nodes, additional information that might be needed when rendering. 
+type Node struct { + Type NodeType // Determines the type of the node + Parent *Node // Points to the parent + FirstChild *Node // Points to the first child, if any + LastChild *Node // Points to the last child, if any + Prev *Node // Previous sibling; nil if it's the first child + Next *Node // Next sibling; nil if it's the last child + + Literal []byte // Text contents of the leaf nodes + + HeadingData // Populated if Type is Heading + ListData // Populated if Type is List + CodeBlockData // Populated if Type is CodeBlock + LinkData // Populated if Type is Link + TableCellData // Populated if Type is TableCell + + content []byte // Markdown content of the block nodes + open bool // Specifies an open block node that has not been finished to process yet +} + +// NewNode allocates a node of a specified type. +func NewNode(typ NodeType) *Node { + return &Node{ + Type: typ, + open: true, + } +} + +func (n *Node) String() string { + ellipsis := "" + snippet := n.Literal + if len(snippet) > 16 { + snippet = snippet[:16] + ellipsis = "..." + } + return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) +} + +// Unlink removes node 'n' from the tree. +// It panics if the node is nil. +func (n *Node) Unlink() { + if n.Prev != nil { + n.Prev.Next = n.Next + } else if n.Parent != nil { + n.Parent.FirstChild = n.Next + } + if n.Next != nil { + n.Next.Prev = n.Prev + } else if n.Parent != nil { + n.Parent.LastChild = n.Prev + } + n.Parent = nil + n.Next = nil + n.Prev = nil +} + +// AppendChild adds a node 'child' as a child of 'n'. +// It panics if either node is nil. +func (n *Node) AppendChild(child *Node) { + child.Unlink() + child.Parent = n + if n.LastChild != nil { + n.LastChild.Next = child + child.Prev = n.LastChild + n.LastChild = child + } else { + n.FirstChild = child + n.LastChild = child + } +} + +// InsertBefore inserts 'sibling' immediately before 'n'. +// It panics if either node is nil. 
+func (n *Node) InsertBefore(sibling *Node) { + sibling.Unlink() + sibling.Prev = n.Prev + if sibling.Prev != nil { + sibling.Prev.Next = sibling + } + sibling.Next = n + n.Prev = sibling + sibling.Parent = n.Parent + if sibling.Prev == nil { + sibling.Parent.FirstChild = sibling + } +} + +// IsContainer returns true if 'n' can contain children. +func (n *Node) IsContainer() bool { + switch n.Type { + case Document: + fallthrough + case BlockQuote: + fallthrough + case List: + fallthrough + case Item: + fallthrough + case Paragraph: + fallthrough + case Heading: + fallthrough + case Emph: + fallthrough + case Strong: + fallthrough + case Del: + fallthrough + case Link: + fallthrough + case Image: + fallthrough + case Table: + fallthrough + case TableHead: + fallthrough + case TableBody: + fallthrough + case TableRow: + fallthrough + case TableCell: + return true + default: + return false + } +} + +// IsLeaf returns true if 'n' is a leaf node. +func (n *Node) IsLeaf() bool { + return !n.IsContainer() +} + +func (n *Node) canContain(t NodeType) bool { + if n.Type == List { + return t == Item + } + if n.Type == Document || n.Type == BlockQuote || n.Type == Item { + return t != Item + } + if n.Type == Table { + return t == TableHead || t == TableBody + } + if n.Type == TableHead || n.Type == TableBody { + return t == TableRow + } + if n.Type == TableRow { + return t == TableCell + } + return false +} + +// WalkStatus allows NodeVisitor to have some control over the tree traversal. +// It is returned from NodeVisitor and different values allow Node.Walk to +// decide which node to go to next. +type WalkStatus int + +const ( + // GoToNext is the default traversal of every node. + GoToNext WalkStatus = iota + // SkipChildren tells walker to skip all children of current node. + SkipChildren + // Terminate tells walker to terminate the traversal. + Terminate +) + +// NodeVisitor is a callback to be called when traversing the syntax tree. 
+// Called twice for every node: once with entering=true when the branch is +// first visited, then with entering=false after all the children are done. +type NodeVisitor func(node *Node, entering bool) WalkStatus + +// Walk is a convenience method that instantiates a walker and starts a +// traversal of subtree rooted at n. +func (n *Node) Walk(visitor NodeVisitor) { + w := newNodeWalker(n) + for w.current != nil { + status := visitor(w.current, w.entering) + switch status { + case GoToNext: + w.next() + case SkipChildren: + w.entering = false + w.next() + case Terminate: + return + } + } +} + +type nodeWalker struct { + current *Node + root *Node + entering bool +} + +func newNodeWalker(root *Node) *nodeWalker { + return &nodeWalker{ + current: root, + root: root, + entering: true, + } +} + +func (nw *nodeWalker) next() { + if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { + nw.current = nil + return + } + if nw.entering && nw.current.IsContainer() { + if nw.current.FirstChild != nil { + nw.current = nw.current.FirstChild + nw.entering = true + } else { + nw.entering = false + } + } else if nw.current.Next == nil { + nw.current = nw.current.Parent + nw.entering = false + } else { + nw.current = nw.current.Next + nw.entering = true + } +} + +func dump(ast *Node) { + fmt.Println(dumpString(ast)) +} + +func dumpR(ast *Node, depth int) string { + if ast == nil { + return "" + } + indent := bytes.Repeat([]byte("\t"), depth) + content := ast.Literal + if content == nil { + content = ast.content + } + result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) + for n := ast.FirstChild; n != nil; n = n.Next { + result += dumpR(n, depth+1) + } + return result +} + +func dumpString(ast *Node) string { + return dumpR(ast, 0) +} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go new file mode 100644 index 0000000..3a220e9 --- /dev/null +++ 
b/vendor/github.com/russross/blackfriday/v2/smartypants.go @@ -0,0 +1,457 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// SmartyPants rendering +// +// + +package blackfriday + +import ( + "bytes" + "io" +) + +// SPRenderer is a struct containing state of a Smartypants renderer. +type SPRenderer struct { + inSingleQuote bool + inDoubleQuote bool + callbacks [256]smartCallback +} + +func wordBoundary(c byte) bool { + return c == 0 || isspace(c) || ispunct(c) +} + +func tolower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c - 'A' + 'a' + } + return c +} + +func isdigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { + // edge of the buffer is likely to be a tag that we don't get to see, + // so we treat it like text sometimes + + // enumerate all sixteen possibilities for (previousChar, nextChar) + // each can be one of {0, space, punct, other} + switch { + case previousChar == 0 && nextChar == 0: + // context is not any help here, so toggle + *isOpen = !*isOpen + case isspace(previousChar) && nextChar == 0: + // [ "] might be [ "foo...] + *isOpen = true + case ispunct(previousChar) && nextChar == 0: + // [!"] hmm... could be [Run!"] or [("...] + *isOpen = false + case /* isnormal(previousChar) && */ nextChar == 0: + // [a"] is probably a close + *isOpen = false + case previousChar == 0 && isspace(nextChar): + // [" ] might be [...foo" ] + *isOpen = false + case isspace(previousChar) && isspace(nextChar): + // [ " ] context is not any help here, so toggle + *isOpen = !*isOpen + case ispunct(previousChar) && isspace(nextChar): + // [!" 
] is probably a close + *isOpen = false + case /* isnormal(previousChar) && */ isspace(nextChar): + // [a" ] this is one of the easy cases + *isOpen = false + case previousChar == 0 && ispunct(nextChar): + // ["!] hmm... could be ["$1.95] or ["!...] + *isOpen = false + case isspace(previousChar) && ispunct(nextChar): + // [ "!] looks more like [ "$1.95] + *isOpen = true + case ispunct(previousChar) && ispunct(nextChar): + // [!"!] context is not any help here, so toggle + *isOpen = !*isOpen + case /* isnormal(previousChar) && */ ispunct(nextChar): + // [a"!] is probably a close + *isOpen = false + case previousChar == 0 /* && isnormal(nextChar) */ : + // ["a] is probably an open + *isOpen = true + case isspace(previousChar) /* && isnormal(nextChar) */ : + // [ "a] this is one of the easy cases + *isOpen = true + case ispunct(previousChar) /* && isnormal(nextChar) */ : + // [!"a] is probably an open + *isOpen = true + default: + // [a'b] maybe a contraction? + *isOpen = false + } + + // Note that with the limited lookahead, this non-breaking + // space will also be appended to single double quotes. 
+ if addNBSP && !*isOpen { + out.WriteString(" ") + } + + out.WriteByte('&') + if *isOpen { + out.WriteByte('l') + } else { + out.WriteByte('r') + } + out.WriteByte(quote) + out.WriteString("quo;") + + if addNBSP && *isOpen { + out.WriteString(" ") + } + + return true +} + +func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + t1 := tolower(text[1]) + + if t1 == '\'' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) { + out.WriteString("’") + return 0 + } + + if len(text) >= 3 { + t2 := tolower(text[2]) + + if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) && + (len(text) < 4 || wordBoundary(text[3])) { + out.WriteString("’") + return 0 + } + } + } + + nextChar := byte(0) + if len(text) > 1 { + nextChar = text[1] + } + if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) { + return 0 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 { + t1 := tolower(text[1]) + t2 := tolower(text[2]) + + if t1 == 'c' && t2 == ')' { + out.WriteString("©") + return 2 + } + + if t1 == 'r' && t2 == ')' { + out.WriteString("®") + return 2 + } + + if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' { + out.WriteString("™") + return 3 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 { + if text[1] == '-' { + out.WriteString("—") + return 1 + } + + if wordBoundary(previousChar) && wordBoundary(text[1]) { + out.WriteString("–") + return 0 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) 
smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '-' && text[2] == '-' { + out.WriteString("—") + return 2 + } + if len(text) >= 2 && text[1] == '-' { + out.WriteString("–") + return 1 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int { + if bytes.HasPrefix(text, []byte(""")) { + nextChar := byte(0) + if len(text) >= 7 { + nextChar = text[6] + } + if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) { + return 5 + } + } + + if bytes.HasPrefix(text, []byte("�")) { + return 3 + } + + out.WriteByte('&') + return 0 +} + +func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int { + var quote byte = 'd' + if angledQuotes { + quote = 'a' + } + + return func(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartAmpVariant(out, previousChar, text, quote, addNBSP) + } +} + +func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 3 && text[1] == '.' && text[2] == '.' { + out.WriteString("…") + return 2 + } + + if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' 
{ + out.WriteString("…") + return 4 + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int { + if len(text) >= 2 && text[1] == '`' { + nextChar := byte(0) + if len(text) >= 3 { + nextChar = text[2] + } + if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) { + return 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b + // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8) + // and avoid changing dates like 1/23/2005 into fractions. + numEnd := 0 + for len(text) > numEnd && isdigit(text[numEnd]) { + numEnd++ + } + if numEnd == 0 { + out.WriteByte(text[0]) + return 0 + } + denStart := numEnd + 1 + if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 { + denStart = numEnd + 3 + } else if len(text) < numEnd+2 || text[numEnd] != '/' { + out.WriteByte(text[0]) + return 0 + } + denEnd := denStart + for len(text) > denEnd && isdigit(text[denEnd]) { + denEnd++ + } + if denEnd == denStart { + out.WriteByte(text[0]) + return 0 + } + if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' { + out.WriteString("") + out.Write(text[:numEnd]) + out.WriteString("") + out.Write(text[denStart:denEnd]) + out.WriteString("") + return denEnd - 1 + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int { + if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 { + if text[0] == '1' && text[1] == '/' && text[2] == '2' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' { + out.WriteString("½") + return 2 + } + } + + if text[0] == 
'1' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') { + out.WriteString("¼") + return 2 + } + } + + if text[0] == '3' && text[1] == '/' && text[2] == '4' { + if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') { + out.WriteString("¾") + return 2 + } + } + } + + out.WriteByte(text[0]) + return 0 +} + +func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int { + nextChar := byte(0) + if len(text) > 1 { + nextChar = text[1] + } + if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) { + out.WriteString(""") + } + + return 0 +} + +func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'd') +} + +func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int { + return r.smartDoubleQuoteVariant(out, previousChar, text, 'a') +} + +func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int { + i := 0 + + for i < len(text) && text[i] != '>' { + i++ + } + + out.Write(text[:i+1]) + return i +} + +type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int + +// NewSmartypantsRenderer constructs a Smartypants renderer object. 
+func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer { + var ( + r SPRenderer + + smartAmpAngled = r.smartAmp(true, false) + smartAmpAngledNBSP = r.smartAmp(true, true) + smartAmpRegular = r.smartAmp(false, false) + smartAmpRegularNBSP = r.smartAmp(false, true) + + addNBSP = flags&SmartypantsQuotesNBSP != 0 + ) + + if flags&SmartypantsAngledQuotes == 0 { + r.callbacks['"'] = r.smartDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpRegular + } else { + r.callbacks['&'] = smartAmpRegularNBSP + } + } else { + r.callbacks['"'] = r.smartAngledDoubleQuote + if !addNBSP { + r.callbacks['&'] = smartAmpAngled + } else { + r.callbacks['&'] = smartAmpAngledNBSP + } + } + r.callbacks['\''] = r.smartSingleQuote + r.callbacks['('] = r.smartParens + if flags&SmartypantsDashes != 0 { + if flags&SmartypantsLatexDashes == 0 { + r.callbacks['-'] = r.smartDash + } else { + r.callbacks['-'] = r.smartDashLatex + } + } + r.callbacks['.'] = r.smartPeriod + if flags&SmartypantsFractions == 0 { + r.callbacks['1'] = r.smartNumber + r.callbacks['3'] = r.smartNumber + } else { + for ch := '1'; ch <= '9'; ch++ { + r.callbacks[ch] = r.smartNumberGeneric + } + } + r.callbacks['<'] = r.smartLeftAngle + r.callbacks['`'] = r.smartBacktick + return &r +} + +// Process is the entry point of the Smartypants renderer. 
+func (r *SPRenderer) Process(w io.Writer, text []byte) { + mark := 0 + for i := 0; i < len(text); i++ { + if action := r.callbacks[text[i]]; action != nil { + if i > mark { + w.Write(text[mark:i]) + } + previousChar := byte(0) + if i > 0 { + previousChar = text[i-1] + } + var tmp bytes.Buffer + i += action(&tmp, previousChar, text[i:]) + w.Write(tmp.Bytes()) + mark = i + 1 + } + } + if mark < len(text) { + w.Write(text[mark:]) + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index dbd971a..0a6fdca 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -58,6 +58,9 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util +# github.com/russross/blackfriday/v2 v2.1.0 +## explicit +github.com/russross/blackfriday/v2 # go.etcd.io/bbolt v1.3.8 ## explicit; go 1.17 go.etcd.io/bbolt diff --git a/webaccount/account.js b/webaccount/account.js index d5eeda5..427003f 100644 --- a/webaccount/account.js +++ b/webaccount/account.js @@ -783,7 +783,7 @@ const crumbs = (...l) => [ dom.br() ]; const errmsg = (err) => '' + (err.message || '(no error message)'); -const footer = dom.div(style({ marginTop: '6ex', opacity: 0.75 }), link('https://github.com/mjl-/mox', 'mox'), ' ', moxversion); +const footer = dom.div(style({ marginTop: '6ex', opacity: 0.75 }), link('https://www.xmox.nl', 'mox'), ' ', moxversion); const domainName = (d) => { return d.Unicode || d.ASCII; }; diff --git a/webaccount/account.ts b/webaccount/account.ts index 8790661..3711c9c 100644 --- a/webaccount/account.ts +++ b/webaccount/account.ts @@ -132,7 +132,7 @@ const errmsg = (err: unknown) => ''+((err as any).message || '(no error message) const footer = dom.div( style({marginTop: '6ex', opacity: 0.75}), - link('https://github.com/mjl-/mox', 'mox'), + link('https://www.xmox.nl', 'mox'), ' ', moxversion, ) diff --git a/webadmin/admin.js b/webadmin/admin.js index bd66920..6938c6c 100644 --- a/webadmin/admin.js 
+++ b/webadmin/admin.js @@ -1456,7 +1456,7 @@ const crumbs = (...l) => [ dom.br() ]; const errmsg = (err) => '' + (err.message || '(no error message)'); -const footer = dom.div(style({ marginTop: '6ex', opacity: 0.75 }), link('https://github.com/mjl-/mox', 'mox'), ' ', moxversion); +const footer = dom.div(style({ marginTop: '6ex', opacity: 0.75 }), link('https://www.xmox.nl', 'mox'), ' ', moxversion); const age = (date, future, nowSecs) => { if (!nowSecs) { nowSecs = new Date().getTime() / 1000; diff --git a/webadmin/admin.ts b/webadmin/admin.ts index b68e474..d57b406 100644 --- a/webadmin/admin.ts +++ b/webadmin/admin.ts @@ -118,7 +118,7 @@ const errmsg = (err: unknown) => ''+((err as any).message || '(no error message) const footer = dom.div( style({marginTop: '6ex', opacity: 0.75}), - link('https://github.com/mjl-/mox', 'mox'), + link('https://www.xmox.nl', 'mox'), ' ', moxversion, ) diff --git a/website/features/index.md b/website/features/index.md new file mode 100644 index 0000000..2d9d9c5 --- /dev/null +++ b/website/features/index.md @@ -0,0 +1,504 @@ +# Features + +## Easy to operate + +The initial installation should be easy when using the quickstart. It performs +some DNS checks, generates config files, an initial admin account and an email +address account, and it prints all the DNS records (quite a few!) you need to +add for sending and receiving email. It also creates a systemd unit file to run +mox as a service on Linux, along with commands to enable the server. When run, +it fixes up file permissions. You normally only have to copy/paste text and run +the suggested commands. + +Upgrades are usually a matter of replacing the binary and restart mox. Mox +tries hard to not make incompatible changes. After an update you may want to +change a configuration file to enable new functionality or behaviour. + +The [configuration files](../config/) that come annotated with documentation +make it easy to discover and configure functionality. 
The web admin interface +guides you even more in making runtime configuration changes. The web admin +interface also writes to the runtime configuration file. So you get the power +of plain files for configuration (for readability, version control/diffs), and +the ease of a user interface for making changes. + +Mox is an all-in-one email server built in a single coherent code base. This +ensures that all functionality works well together. And that you don't have to +configure lots of individual components for a fully working system. + + +## SMTP + +SMTP is used to deliver and receive email messages on the internet. Email +clients also use it to ask an SMTP server to deliver messages (called +submission). + +Mox implements: + +- An SMTP server to accept deliveries of incoming messages, on port 25. +- An SMTP client and delivery queue for delivering messages to other mail + servers, connecting to other servers on port 25. +- A "submission" (SMTP) server, so authenticated clients can submit messages to + the queue, from which Mox will deliver, with retries. +- Commonly used SMTP extensions. + +## SPF/DKIM/DMARC + +SPF, DKIM and DMARC are mechanisms for "message authentication". SPF and DKIM +can be used to verify that a domain is indeed associated with an incoming +message. This allows mail servers to keep track of the reputation of a domain, +which is used during junk filtering. + +SPF is a mechanism whereby a domain specifies in a TXT DNS record which IPs are +allowed to use its domain in an address in the `MAIL FROM` command in an SMTP +transaction. If a sending IP is not listed, a receiving mail server may reject +the email as likely being junk. However, the decision to reject isn't made +solely based on the SPF record, keep reading. + +DKIM is a mechanism whereby a domain specifies public keys in DNS TXT records. 
+Legitimate messages originating from the domain will have one or more +`DKIM-Signature` message headers that reference a public key and contain a +signature. During delivery, the signature is verified. + +DMARC is a mechanism whereby a domain specifies a policy in a DNS TXT record +about what to do with messages that are not authenticated with "aligned" SPF and/or +DKIM. These policies include "reject", or "quarantine" (put in junk mailbox), +or "none" (don't treat differently). DMARC authenticates the address in the +"From" header in an email message, since that is what users will typically look +at and trust. For a message to pass the "aligned SPF" check, the SPF-domain +must match the domain in the message "From" header. For a message to pass the +"aligned DKIM" check, at least one verified DKIM domain must match the domain +in the message "From" header. A non-aligned verified domain is not used for +DMARC, but can still be useful in junk filtering. + +Mox sets up SPF, DKIM and DMARC for your domain, and adds `DKIM-Signature` +headers to outgoing messages. + +For incoming messages, mox will perform SPF, DKIM and DMARC checks. DMARC +policies of domains are honored by mox, though mox interprets policy +"quarantine" as "reject": Mox does not claim to accept messages, only to hide +them away in a junk mailbox. Mox uses reputation of SPF-, DKIM- and +DMARC(-like) verified domains in its reputation-based junk filtering. + +A domain's DMARC policy, as published in DNS records, can request reports about +DMARC policies as performed by other mail servers. This gives you, as domain +owner, insights into where both authenticated and non-authenticated messages +are being sent from. The policy specifies an email address whereto such reports +should be sent. Mox helps set up a policy to request such reports, +automatically processes such reports, and provides access through its admin web +interface. 
Mox also sends reports with the results of its DMARC evaluations to +domains that request them. + + +## DANE and MTA-STS + +DANE and MTA-STS are mechanisms for more secure email delivery using SMTP. + +Originally, SMTP delivered email messages over the internet in plain text. +Message delivery was vulnerable to eavesdropping/interception. + +The SMTP STARTTLS extension added opportunistic TLS: If a server announces +support, a (delivering) SMTP client can "upgrade" a connection to TLS. This +prevents passive attackers from eavesdropping. But an active attacker can +simply strip server support for STARTTLS, causing a message to be transferred +in plain text. With opportunistic TLS for SMTP, the TLS certificate of a server +is not verified: Certificates that are expired or for other host names are +accepted. + +Both old-fashioned plain text delivery and STARTTLS don't protect against +another active attack: Simply modifying DNS MX responses, causing email to be +delivered to another server entirely. That other server may implement STARTTLS, +and even have a certificate that can be verified. But the MX records need +protection as well. + +Both DANE and MTA-STS are (different) opt-in mechanisms to protect MX records, +and for verifying TLS certificates of SMTP servers. + +DANE protects MX records by requiring that they are DNSSEC-signed, causing +changes to DNS records to be detected. With DANE, TLS certificates of an MX +host are verified through (hashes of) either public keys or full certificates. +These are published in DNS and must also be protected with DNSSEC. If a +connection is intercepted by a different server, the TLS certificate validation +would not pass. + +MTA-STS uses PKIX (pool of trusted Certificate Authorities (CAs)) to protect +both MX records and to verify TLS during SMTP STARTTLS. MTA-STS serves +existence/version of a policy at DNS record `_mta-sts.<domain>`, and +the policy itself at the PKIX-verified `https://mta-sts.<domain>`, +specifying allowed MX host names. 
During delivery, MX targets not in the +MTA-STS policy are rejected. The MTA-STS, MX, and MX target IP address DNS +records are not required to be protected with DNSSEC, and often aren't. If an +attacker modifies the IP address of an MTA-STS-allowed MX target, the +PKIX-verification during SMTP STARTTLS will not pass. MTA-STS policies specify +how long they should be cached. Attackers can suppress existence of an MTA-STS +record during the first communication between mail servers, but not on +subsequent deliveries. + +For delivery of outgoing messages, mox will use both DANE and MTA-STS, if +configured for a recipient domain. MTA-STS policies are cached and periodically +refreshed. + +Domains hosted by mox are both DANE- and MTA-STS protected by default. However, +DANE only applies if recipient domains and their MX records are DNSSEC-signed. +Mox requests certificates with ACME from Let's Encrypt by default, so TLS +certificates used in SMTP STARTTLS can be PKIX-verified. Mox also serves +MTA-STS policies by default. + +Mox also implements the REQUIRETLS SMTP extension. It allows message delivery +to specify that MX DNS records and SMTP server TLS certificates must be +verified along the full delivery path (not just the next hop), and that +delivery must be aborted if that cannot be guaranteed. + +Mox also implements both incoming and outgoing TLS reporting, with both DANE +and MTA-STS details. TLS reports have aggregated counts of SMTP connections +(with failures, including about TLS, and success) and the DANE/MTA-STS policies +encountered. Domains can request delivery of TLS reports by specifying a report +destination address in a TLSRPT policy, specified in a DNS TXT record under a +domain. + + +## IMAP4 + +Email clients (also called Mail User Agents, MUAs) typically access messages +through IMAP4. IMAP4 gives access to all mailboxes (folders) in an account, and +all messages in those mailboxes. 
IMAP4 is a protocol with a long history, and +for which many extensions have been specified. IMAP4 can be used for +efficiently synchronizing an entire account for offline/local use, or used for +reading messages "online" (e.g. with third party webmail software). + +Mox implements up to IMAP4rev2, the latest revision of IMAP4 that includes lots +of functionality that used to be an extension. And mox implements commonly used +extensions on top of that, such as CONDSTORE and QRESYNC, with more extensions +to be implemented. + + +## Junk filtering + +Junk email/spam/UCE (unsolicited commercial email) is still a big problem on +the internet. One great feature of email, that is worth protecting, is that you +can send an email to another person without previous introduction. However, +spammers have the same opportunity. Various mechanisms have been developed over +time to reduce the amount of junk. + +### Reputation-based + +Most of these mechanisms have components that involve reputation. The +reputation can be based on the IP address of the sending server, or the email +address (or just its domain) of the sender, or the contents of the message. Mox +uses the junk/non-junk classifications of messages by the user to evaluate +incoming messages. + +Email clients have the ability to mark a message as junk, which typically sets +the junk-flag for the message and/or moves the message to the designated Junk +mailbox. An email client can also mark a message as non-junk, but this isn't +commonly done, so mox automatically marks messages moved to +certain mailboxes (like Archive, Trash) as non-junk. + +The message database, including junk/non-junk flags, is accessible by the SMTP +server. The database allows for efficiently looking up messages by (non)-junk +flags, verified SPF/DKIM/DMARC sender domain/address and originating IP +address. 
This allows mox to quickly analyze the reputation of an incoming +message, and make a decision to accept/reject a message if the sender +address/domain/IP has enough reputation signal. This means messages from people +you've communicated with before will reliably make it through the junk filter. +At least if they have set up SPF and/or DKIM, which allows associating their +messages with their domain. Only messages without reputation, "first-time +senders", are subject to further scrutiny. + +### First-time senders + +For first-time senders, there is no, or not enough, signal in the sending +address/domain/IP address to make a decision. Mox does bayesian analysis on the +contents of such messages: The reputation of the words in a message are used to +calculate the probability that a message is junk, which must not pass a +configurable threshold. The reputation of words is based on their occurrence +in historic junk/non-junk messages, as classified by the user. + +### Delivery feedback + +When an incoming message is rejected for being junk, mox returns a temporary +error. Mox never claims to accept a message only to drop it (some cloud mail +providers are known to do this!), or place it in a Junk mailbox, out of view of +the user. The effect is that a spammer will not learn whether there is an +actual temporary error, or their message is treated as junk. A legitimate +sender whose message is erroneously classified as junk will receive a DSN +message about the failed delivery attempts, making it clear a different means +of communication should be tried. + +### Rejects mailbox + +When mox rejects a message for being junk, it stores a copy of the message in +the special "Rejects" mailbox (automatically cleaned up). If you are expecting +an email, e.g. about signup to a new service, and it is rejected, you will find +the message in that mailbox. By moving the message to the Inbox, and marking it +as non-junk (e.g. 
by moving it to the Archive or Trash mailbox), future +messages by that sender will be accepted due to the now positive reputation. + +### Reputation is per account + +In mox, all reputation is per account, not shared among accounts. One account +may mark all messages from a sender as junk, causing them to be rejected, while +another account can accept messages from the same sender. + +### DNSBL + +Mox can be configured to use an IP-based DNS blocklist (DNSBL). These are +typically employed early in the SMTP session, to see if the remote IP is a +known spammer. If so, the delivery attempt is stopped early. Mox doesn't use +DNSBLs in its default installation. But if it is configured to use a DNSBL, it +is only invoked when the other reputation-based checks are not conclusive. For +these reasons: + +1. If a sender with positive reputation finds their IP listed in a DNSBL, the + email communication channels that have always worked will keep working (until + the user marks a few of their messages as junk). +2. As little reliance on centralized parties (which DNSBLs typically are) as + possible. +3. No leaking of IP addresses of mail servers a mox instance is communicating + with to the DNSBL operator. + +### Greylisting + +Greylisting is a commonly implemented mechanism whereby the first delivery +attempt from a first-time sender is rejected with a temporary error. The idea +is that spammers don't implement delivery queueing, and will never try again. +A legitimate mail server would try again, typically within 5-15 minutes, and +the second or third attempt will be accepted. Mox does not implement +greylisting in this manner: + +Mail servers typically send from multiple IP addresses. At least both an IPv4 +and IPv6 address, and often multiple of each to reduce impact of a negative +reputation for an IP address (e.g. being listed in a DNSBL). IP-based +reputation incentivizes mail servers to use a different IP address for delivery +retries after encountering a failure. 
Greylisting incentivizes mail servers to +use the same IP address for retries. These incentives conflict, and mox regards +IP-based reputation as more (long-term) valuable. Due to delivering from +different IP addresses, greylisting can cause very long delays, or cause +delivery failures altogether. + +Mox does employ mechanisms to slow down possible spammers: SMTP transactions of +first-time senders and for messages classified as junk are slowed down. This +reduces the rate at which junk mail would be received, and consumes resources +of the spammer. First-time senders are delayed for 15 seconds, making it +possible to wait for expected messages, such as for signups. + + +## Webmail + +Mox includes a webmail client, still in early stages. Despite its looks, and +missing features like composing messages in HTML, it is surprisingly usable, +featuring: + +- Text and HTML rendering of messages, with/without external resources + (tracking images). +- Threading, including muting threads +- Drag-and-drop for moving messages +- Layout: top/bottom vs left/right, adjustable widths/heights +- Keyboard shortcuts + +The webmail benefits from having access to the message database, allowing for +new functionality that wouldn't be easy to implement with SMTP/IMAP4. For +example, mox keeps track of REQUIRETLS support of MX hosts (mail servers) of +recipient domains. The webmail show this information when composing a message, +and can enable REQUIRETLS by default. + +See [webmail screenshots](../screenshots/#hdr-webmail). + + +## Internationalized email + +Originally, email addresses were ASCII-only. An email address consists of a +"localpart", an "@" and a domain name. Only ASCII was allowed in message +headers. 
With internationalized email, localparts can be in UTF-8, domains can +use internationalized domain names (IDN/IDNA: unicode names with both an UTF-8 +encoding, and an ASCII encoding for use in DNS with domains starting with +"xn--"), and message headers are allowed to contain UTF-8 as well. + +With internationalized email, users of scripts not representable in ASCII can +use their native scripts for their email addresses. + +Mox implements internationalized email. + + +## Automatic account configuration + +To configure an email account in an email client, you typically need to specify: + +1. Email address and full name. +2. Submission (SMTP) server address, port, TLS mode, username, password and + authentication mechanism. +3. IMAP4 server address, port, TLS mode, username, password and authentication + mechanism. + +This can be cumbersome to configure manually. Email clients can choose from +several autoconfiguration mechanisms to automatically find (some of) the right +settings, given an email address: + +SRV DNS records +: The domain of the email address is used for looking up DNS SRV records, which +point to the submission (SMTP) and IMAP servers, ports (with implied TLS +mode). Not specified: username, authentication mechanism. Only secure when used +with DNSSEC. Mox prints SRV records to add for a domain. + +Thunderbird-style autoconfig +: The domain of the email address is used for looking up an XML config file at +`https://autoconfig.`, protected with WebPKI. The configuration file +holds all settings. Mox serves autoconfig profiles on its webserver. + +Autodiscover-style autodiscovery +: The domain of the email address is used to look up a SRV record that points +to an PKIX-protected HTTPS webserver that serves an XML configuration file with +all settings. Only secure when the SRV lookup is DNSSEC-protected. Mox serves +autodiscover profiles on its webserver. 
+ +Apple device management profile +: A configuration file with all settings must be transferred to the device +manually. Mox lets users download these profiles in the account web interface, +and shows a QR code to easily download the profile. + +Even though email clients have many options to automatically find the correct +settings, many still prefer to guess incorrect legacy settings. + + +## ACME for automatic TLS + +A modern email server needs a PKIX TLS certificate for its own hostname, used +for SMTP with STARTTLS. Each domain with a "mail" CNAME for IMAP4 and SMTP +submission, with MTA-STS and with autoconfiguration needs three more +PKIX/WebPKI TLS certificates. Manually preventing your email infrastructure +from automatic periodic expiration is cumbersome, but [an +option](../config/#cfg-mox-conf-Listeners-x-TLS-KeyCerts). With ACME, TLS +certificates are retrieved and refreshed automatically. + +The quickstart sets mox up with ACME using Let's Encrypt. Other ACME providers +can be [defined](../config/#cfg-mox-conf-ACME-x) and +[configured](../config/#cfg-mox-conf-Listeners-x-TLS-ACME). Mox supports +[external account binding](../config/#cfg-mox-conf-ACME-x-ExternalAccountBinding) +(EAB) for ACME providers that require association with an existing non-ACME +account. Mox also suggests DNS CAA records, explicitly allowlisting Certificate +Authorities (CAs) allowed to sign certificates for a domain. Mox recommends CAA +records that only allow the account ID that mox has registered, preventing +potential MitM attempts. + +ACME is also used for TLS certificates for the webserver, see below. + +## Webserver + +Mox includes a configurable webserver. This may seem to add unnecessary +complexity and functionality to an email server, but contemporary email already +requires the complexity of an HTTP stack due to MTA-STS and automatic account +configuration. Not to mention webmail and an admin web interface. 
Luckily, mox +can build on the proven HTTP client and server stack of the Go standard +library. + +Mox mostly adds configuration options for: + +- Redirections of [entire domains](../config/#cfg-domains-conf-WebDomainRedirects) or + [paths](../config/#cfg-domains-conf-WebHandlers-dash-WebRedirect). +- [Serving static files](../config/#cfg-domains-conf-WebHandlers-dash-WebStatic) + from a directory, including optional directory listings. +- [Forwarding/Reverse proxying](../config/#cfg-domains-conf-WebHandlers-dash-WebForward), + including WebSocket connections. + +Incoming requests are handled by going through the list of configured handlers. +The first matching handler takes care of the request, matching on: + +- Host +- Path (regular expression) + +Handlers can specify additional behaviour: + +- Automatically redirect plain HTTP requests to HTTPS. +- Automatically compress the response if it seems compressible (based on + content-type). Compressed static files are kept in a fixed size cache. +- Strip the matched path before serving static file or forwarding the request. +- Add custom headers to the response. + +These settings can all be configured through the admin web interface. + +TLS certificates for configured domains are managed automatically if ACME is +configured. + +You may be tempted to install mox on a server that already runs a webserver. It +is possible to configure mox to work with an existing webserver, but it will +complicate the configuration significantly: The mox configuration has to be +modified for +[autoconfig](../config/#cfg-mox-conf-Listeners-x-AutoconfigHTTPS-NonTLS) and +[MTA-STS](../config/#cfg-mox-conf-Listeners-x-MTASTSHTTPS-NonTLS) and the +existing webserver needs to be configured to forward. You will likely manage +TLS certificates outside of mox and have to configure the paths to the [keys +and certificates](../config/#cfg-mox-conf-Listeners-x-TLS-KeyCerts), and +refresh them timely, restarting mox. 
Also see the `-existing-webserver` option +in the [quickstart command](../commands/#hdr-mox-quickstart). + + +## Localserve + +The [mox localserve](../commands/#hdr-mox-localserve) starts a local mox +instance with a lot of its functionality: SMTP/submission, IMAP4, Webmail, +account and admin web interface and the webserver. Localserve listens on the +standard ports + 1000, so no special privileges are needed. + +Localserve is useful for testing the email functionality of your application: +Localserve can accept all email (catchall), optionally return +temporary/permanent errors, and you can read messages in the webmail. +Localserve enables "pedantic mode", raising errors for non-standard protocol +behaviour. + + +## Admin web interface + +The admin web interface helps admins set up accounts, configure addresses, and +set up new domains (with instructions to create DNS records, and with a check +to see if they are correct). Changes made through the admin web interface +updates the [domains.conf config file](../config/#hdr-domains-conf). + +Received DMARC and TLS reports can be viewed, and cached MTA-STS policies +listed. + +DMARC evaluations for outgoing DMARC reports, and SMTP (TLS) connection results +for outgoing TLS reports can be viewed, and removed. Suppression lists for +addresses for outgoing reports can be managed as well. Some domains don't +accept reports at the addresses they configure, and send DSNs. The suppression +list helps reduce operational noise. + +See [Admin web interface screenshots](../screenshots/#hdr-admin-web-interface). + + +## Metrics and logging + +Mox provides [prometheus metrics](https://prometheus.io/docs/concepts/metric_types/) +for monitoring. A standard set of application metrics are exposed: Open file +descriptors, memory/cpu usage, etc. Mox also exposes metrics specific to its +internals. See the example +[prometheus rules](https://github.com/mjl-/mox/blob/main/prometheus.rules) in +the repository. 
+ +Mox has configurable log levels, per +[functional package](https://pkg.go.dev/github.com/mjl-/mox#section-directories). +Mox logs in structured [logfmt](https://brandur.org/logfmt) format, which is +easy to work with (parse, filter, derive metrics from). Mox also includes three +trace-level logs, for SMTP and IMAP4: trace, traceauth (logs sensitive +authentication data, like passwords), tracedata (logs (bulk) message content). + + +## Security + +Mox aims to be a secure mail server. Many email-security features have been +implemented. Mox comes with a automated test suite, which includes fuzzing. Mox +is written in Go, a modern safer programming language that prevents whole +classes of bugs, or limits their impact. + + +## Reusable components + +Most non-server Go packages mox consists of are written to be reusable Go +packages. + +There is no guarantee that there will be no breaking changes. With Go's +dependency versioning approach (minimal version selection), Go code will never +unexpectedly stop compiling. Incompatibilities will show when explicitly +updating a dependency. Making the required changes is typically fairly +straightforward. + +Incompatible changes compared to previous releases are tracked in the git +repository, see [apidiff/](https://github.com/mjl-/mox/tree/main/apidiff). diff --git a/website/index.md b/website/index.md new file mode 100644 index 0000000..7c92927 --- /dev/null +++ b/website/index.md @@ -0,0 +1,64 @@ +# Mox - modern, secure, all-in-one email server +## Stay in control of your email and keep email decentralized! + +Complete email solution +: For sending and receiving email. With support for IMAP4, SMTP, SPF, DKIM, +DMARC, MTA-STS, DANE and DNSSEC, reputation-based +and content-based junk filtering, Internationalization (IDNA), automatic TLS +with ACME and Let's Encrypt, account autoconfiguration, webmail. + +Quick & easy +: Use the quickstart command to set up mox for your domain(s) within 10 +minutes. 
You'll get a secure mail server with a modern protocol stack. Upgrades +are mostly a matter of downloading the new version and restarting. Maintenance +via web interface (easy) or config file (powerful). No dependencies. + +High quality and secure +: Mox has a modern Go code base with plenty of automated tests, automated +integration tests, is manually tested against popular mail server and client +software, and is fuzz-tested. The code is well-documented and cross-referenced +with the relevant standards (RFC's). + +Open Source +: Mox is an open source project, [source code](https://github.com/mjl-/mox) is +MIT-licensed. + +See [Features](features/) for the details, including roadmap. + +## Latest release + +The latest release is v0.0.9, released on 2024-01-09, see [release +notes](https://github.com/mjl-/mox/releases/tag/v0.0.9), [download +binaries](https://beta.gobuilds.org/github.com/mjl-/mox@v0.0.9/linux-amd64-latest/), +or see [all releases](https://github.com/mjl-/mox/releases). + + +## News + +- 2024-01-09, [v0.0.9](https://github.com/mjl-/mox/releases/tag/v0.0.9) released +- 2023-12-08, There will be a + [talk about mox](https://fosdem.org/2024/schedule/event/fosdem-2024-2261--servers-mox-a-modern-full-featured-mail-server/) + in the ["Modern Email" devroom](https://fosdem.org/2024/schedule/track/modern-email/) + at [FOSDEM 2024](https://fosdem.org/2024/) (Feb 3 & 4, Brussels). See you there! +- 2023-11-22, [v0.0.8](https://github.com/mjl-/mox/releases/tag/v0.0.8) released +- 2023-09-24, [v0.0.7](https://github.com/mjl-/mox/releases/tag/v0.0.7) released + + +## Background + +Work on mox started in 2021. Admins were migrating their emails to just a few +cloud/hosting providers. In part because running and maintaining email software +had become more complicated over time: additional email protocols required yet +another component in the software stack. Combining all these components into a +working email server had become too troublesome over time. 
These components +were also often written in C, a programming language where a small mistake +typically has large consequences. + +Mox is a modern email server that implements all modern email protocols in a +single easy to use and maintain application. + + +## Sponsors + +Mox development is sponsored from August 2023 to August 2024 through NLnet/EU's +NGI0 Entrust, see https://nlnet.nl/project/Mox/. diff --git a/website/install/index.md b/website/install/index.md new file mode 100644 index 0000000..8d61af7 --- /dev/null +++ b/website/install/index.md @@ -0,0 +1,99 @@ +# Install + +Mox aims to be easy to install. The commands and config files to set mox up for +a new domain, including running it as a service on Linux, are printed/created +through the quickstart. + +## Quickstart + +The easiest way to get started with serving email for your domain is to get a +(virtual) machine dedicated to serving email, name it `[host].[domain]` (e.g. +mail.example.com). Having a DNSSEC-verifying resolver installed, such as +unbound, is highly recommended. Run as root: + + # Create mox user and homedir (or pick another name or homedir): + useradd -m -d /home/mox mox + + cd /home/mox + ... compile or download mox to this directory, see below ... + + # Generate config files for your address/domain: + ./mox quickstart you@example.com + +The quickstart: + +- Creates configuration files mox.conf and domains.conf. +- Adds the domain and an account for the email address to domains.conf +- Generates an admin and account password. +- Prints the DNS records you need to add, for the machine and domain. +- Prints commands to start mox, and optionally install mox as a service. + +A machine that doesn't already run a webserver is highly recommended because +modern email requires HTTPS, and mox currently needs to run a webserver for +automatic TLS with ACME. You could combine mox with an existing webserver, but +it requires a lot more configuration. 
If you want to serve websites on the same +machine, consider using the webserver built into mox. It's pretty good! If you +want to run an existing webserver on port 443/80, see `mox help quickstart`. + +After starting, you can access the admin web interface on internal IPs. + + +## Download + +Download a mox binary from +https://beta.gobuilds.org/github.com/mjl-/mox@latest/linux-amd64-latest/. + +Symlink or rename it to "mox". + +The URL above always resolves to the latest release for linux/amd64 built with +the latest Go toolchain. See the links at the bottom of that page for binaries +for other platforms. + + +## Compiling + +You can easily (cross) compile mox yourself. You need a recent Go toolchain +installed. Run `go version`, it must be >= 1.20. Download the latest version +from https://go.dev/dl/ or see https://go.dev/doc/manage-install. + +To download the source code of the latest release, and compile it to binary "mox": + + GOBIN=$PWD CGO_ENABLED=0 go install github.com/mjl-/mox@latest + +Mox only compiles for and fully works on unix systems. Mox also compiles for +Windows, but "mox serve" does not yet work, though "mox localserve" (for a +local test instance) and most other subcommands do. Mox does not compile for +Plan 9. + + +## Docker + +Although not recommended, you can also run mox with docker image +`r.xmox.nl/mox`, with tags like `v0.0.1` and `v0.0.1-go1.20.1-alpine3.17.2`, see +https://r.xmox.nl/r/mox/. See +https://github.com/mjl-/mox/blob/main/docker-compose.yml to get started. + +New docker images aren't (automatically) generated for new Go runtime/compile +releases. + +It is important to run with docker host networking, so mox can use the public +IPs and has correct remote IP information for incoming connections (important +for junk filtering and rate-limiting). + + +## Configuration + +Mox tries to choose sane defaults. When you add a domain or account, you +shouldn't have to change any more configuration files in most cases. 
If you do +need to make changes, you can edit the configuration files: `config/mox.conf` +and/or `config/domains.conf`. You do have to separately add DNS records. + +See [Config reference](../config/) for configuration files annotated with +documentation. + +Mox comes with various subcommands, useful especially for testing. See [Command +reference](../commands/) for a list of commands, and their documentation. + +If you have a question, see the [FAQ](../faq/). If your question remains +unanswered, please ask it on the [issue +tracker](https://github.com/mjl-/mox/issues/new). diff --git a/website/protocols/summary.md b/website/protocols/summary.md new file mode 100644 index 0000000..aad4807 --- /dev/null +++ b/website/protocols/summary.md @@ -0,0 +1,43 @@ +# Protocols + +## Summary + +First a high-level description of protocols and implementation status. Each +topic links to the second table with more detailed implementation status per +RFC. + + + + + + + + + + + + + + + + + + + + + + + + +
    Topic Implemented Description
    Internet Message Format Yes The format of email messages
    SMTP Yes Delivering email
    SPF Yes Message authentication based on sending IP
    DKIM Yes Message authentication based on message header
    DMARC Yes Reject/accept policy for incoming messages that pass/fail DKIM and/or SPF message authentication
    ARC Roadmap Signed message authentication results from forwarding server
    DANE Yes Verification of TLS certificates through DNSSEC-protected DNS records
    MTA-STS Yes PKIX-based protection of TLS certificates and MX records
    TLS Reporting Yes Reporting about TLS interoperability issues
    ARF Roadmap Abuse reporting format
    IMAP Yes Email access protocol
    Sieve Roadmap Scripts to run on incoming messages
    JMAP Roadmap HTTP/JSON-based email access protocol
    CalDAV/iCal Roadmap Calendaring
    CardDAV/vCard Roadmap Contacts
    SASL Yes Authentication mechanisms
    Internationalization Yes Internationalization of domain names.
    TLS Yes TLS, for encrypted and authenticated communication.
    ACME Yes Automatically manage PKIX TLS certificates
    CAA Yes CAA DNS records specify which certificate authorities (CAs) are allowed to sign certificates for a domain.
    HTTP Yes HTTP for webservers. Required for automatic account configuration and MTA-STS. Also relevant for the built-in webserver.
    + +## RFCs + +The mox source code is quite heavily annotated with references to the RFCs. +This makes the implementation more maintainable, and makes it easier for new +developers to make changes. See [cross-referenced code and RFCs](../xr/dev/) to +navigate RFCs and source code side by side. + +Implementation status per RFC, grouped by topic. + +### Statuses diff --git a/website/screenshots/index.md b/website/screenshots/index.md new file mode 100644 index 0000000..ca7804b --- /dev/null +++ b/website/screenshots/index.md @@ -0,0 +1,43 @@ +# Screenshots + +Mox is an email server, so most of its functionality can't really be +visualized. But the webmail client, and account and the admin web interface can +be. See screenshots below. + +## Webmail + +### Mailbox +

    + +### Top/bottom split and selecting multiple messages +
    + +### Search +
    + +### Compose +
    + +### Attachments +
    + +### Help +
    + + +## Account web interface + +### Overview +
    + +### Address +
    + + +## Admin web interface + +### Overview +
    + +### Domain +
    diff --git a/website/website.go b/website/website.go new file mode 100644 index 0000000..b6fa9d6 --- /dev/null +++ b/website/website.go @@ -0,0 +1,552 @@ +//go:build website + +package main + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "html" + htmltemplate "html/template" + "io" + "log" + "os" + "strconv" + "strings" + + "golang.org/x/exp/slices" + + "github.com/russross/blackfriday/v2" +) + +func xcheck(err error, msg string) { + if err != nil { + log.Fatalf("%s: %s", msg, err) + } +} + +func main() { + var commithash = os.Getenv("commithash") + var commitdate = os.Getenv("commitdate") + + var pageRoot, pageProtocols bool + var title string + flag.BoolVar(&pageRoot, "root", false, "is top-level index page, instead of in a sub directory") + flag.BoolVar(&pageProtocols, "protocols", false, "is protocols page") + flag.StringVar(&title, "title", "", "html title of page, set to value of link name with a suffix") + flag.Parse() + args := flag.Args() + if len(args) != 1 { + flag.Usage() + os.Exit(2) + } + linkname := args[0] + + if title == "" && linkname != "" { + title = linkname + " - Mox" + } + + // Often the website markdown file. + input, err := io.ReadAll(os.Stdin) + xcheck(err, "read") + + // For rendering the main content of the page. + r := &renderer{ + linkname == "Config reference", + "", + *blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{HeadingIDPrefix: "hdr-"}), + } + opts := []blackfriday.Option{ + blackfriday.WithExtensions(blackfriday.CommonExtensions | blackfriday.AutoHeadingIDs), + blackfriday.WithRenderer(r), + } + + // Make table of contents of a page, based on h2-links, or "## ..." in markdown. + makeTOC := func() ([]byte, []byte) { + var title string + + // Get the h2's, split them over the columns. 
+ type link struct { + Title string + ID string + } + var links []link + + node := blackfriday.New(opts...).Parse(input) + if node == nil { + return nil, nil + } + for c := node.FirstChild; c != nil; c = c.Next { + if c.Type != blackfriday.Heading { + continue + } + if c.Level == 1 { + title = string(c.FirstChild.Literal) + } else if c.Level == 2 { + link := link{string(c.FirstChild.Literal), c.HeadingID} + links = append(links, link) + } else { + // log.Fatalf("heading, level %d", c.Level) + } + } + + // We split links over 2 columns if we have quite a few, to keep the page somewhat compact. + ncol := 1 + if len(links) > 6 { + ncol = 2 + } + + n := len(links) / ncol + rem := len(links) - ncol*n + counts := make([]int, ncol) + for i := 0; i < ncol; i++ { + counts[i] = n + if rem > i { + counts[i]++ + } + } + toc := `
    ` + toc += "\n" + o := 0 + for _, n := range counts { + toc += "
      \n" + for _, link := range links[o : o+n] { + toc += fmt.Sprintf(`
    • %s
    • `, html.EscapeString("hdr-"+link.ID), html.EscapeString(link.Title)) + toc += "\n" + } + toc += "
    \n" + o += n + } + toc += "
    \n" + var titlebuf []byte + if title != "" { + titlebuf = []byte(fmt.Sprintf(`

    %s

    `, html.EscapeString("hdr-"+blackfriday.SanitizedAnchorName(title)), html.EscapeString(title))) + } + return titlebuf, []byte(toc) + } + + var output []byte + if pageRoot { + // Split content into two parts for main page. First two lines are special, for + // header. + inputstr := string(input) + lines := strings.SplitN(inputstr, "\n", 3) + if len(lines) < 2 { + log.Fatalf("missing header") + } + inputstr = inputstr[len(lines[0])+1+len(lines[1])+1:] + lines[0] = strings.TrimPrefix(lines[0], "#") + lines[1] = strings.TrimPrefix(lines[1], "##") + sep := "## Background" + inleft, inright, found := strings.Cut(inputstr, sep) + if !found { + log.Fatalf("did not find separator %q", sep) + } + outleft := blackfriday.Run([]byte(inleft), opts...) + outright := blackfriday.Run([]byte(sep+inright), opts...) + output = []byte(fmt.Sprintf(` +
    +

    %s

    +

    %s

    +
    +
    %s
    %s
    `, html.EscapeString(lines[0]), html.EscapeString(lines[1]), outleft, outright)) + } else if pageProtocols { + // ../rfc/index.txt is the standard input. We'll read each topic and the RFCs. + topics := parseTopics(input) + + // First part of content is in markdown file. + summary, err := os.ReadFile("protocols/summary.md") + xcheck(err, "reading protocol summary") + + output = blackfriday.Run(summary, opts...) + + var out bytes.Buffer + _, err = out.Write(output) + xcheck(err, "write") + + err = protocolTemplate.Execute(&out, map[string]any{"Topics": topics}) + xcheck(err, "render protocol support") + + output = out.Bytes() + } else { + // Other pages. + xinput := input + if bytes.HasPrefix(xinput, []byte("# ")) { + xinput = bytes.SplitN(xinput, []byte("\n"), 2)[1] + } + output = blackfriday.Run(xinput, opts...) + title, toc := makeTOC() + output = append(toc, output...) + output = append(title, output...) + } + + // HTML preamble. + before = strings.Replace(before, "...", ""+html.EscapeString(title)+"", 1) + before = strings.Replace(before, ">"+linkname+"<", ` style="font-weight: bold">`+linkname+"<", 1) + if !pageRoot { + before = strings.ReplaceAll(before, `"./`, `"../`) + } + _, err = os.Stdout.Write([]byte(before)) + xcheck(err, "write") + + // Page content. + _, err = os.Stdout.Write(output) + xcheck(err, "write") + + // Bottom, HTML closing. + after = strings.Replace(after, "[commit]", fmt.Sprintf("%s, commit %s", commitdate, commithash), 1) + _, err = os.Stdout.Write([]byte(after)) + xcheck(err, "write") +} + +// Implementation status of standards/protocols. +type Status string + +const ( + Implemented Status = "Yes" + Partial Status = "Partial" + Roadmap Status = "Roadmap" + NotImplemented Status = "No" + Unknown Status = "?" +) + +// RFC and its implementation status. +type RFC struct { + Number int + Title string + Status Status + StatusClass string + Obsolete bool +} + +// Topic is a group of RFC's, typically by protocol, e.g. SMTP. 
+type Topic struct { + Title string + ID string + RFCs []RFC +} + +// parse topics and RFCs from ../rfc/index.txt. +// headings are topics, and hold the RFCs that follow them. +func parseTopics(input []byte) []Topic { + var l []Topic + var t *Topic + + b := bufio.NewReader(bytes.NewReader(input)) + for { + line, err := b.ReadString('\n') + if line != "" { + if strings.HasPrefix(line, "# ") { + // Skip topics without RFCs to show on the website. + if t != nil && len(t.RFCs) == 0 { + l = l[:len(l)-1] + } + title := strings.TrimPrefix(line, "# ") + id := blackfriday.SanitizedAnchorName(title) + l = append(l, Topic{Title: title, ID: id}) + t = &l[len(l)-1] // RFCs will be added to t. + continue + } + + // Tokens: RFC number, implementation status, is obsolete, title. + tokens := strings.Split(line, "\t") + if len(tokens) != 4 { + continue + } + + ignore := strings.HasPrefix(tokens[1], "-") + if ignore { + continue + } + status := Status(strings.TrimPrefix(tokens[1], "-")) + var statusClass string + switch status { + case Implemented: + statusClass = "implemented" + case Partial: + statusClass = "partial" + case Roadmap: + statusClass = "roadmap" + case NotImplemented: + statusClass = "notimplemented" + case Unknown: + statusClass = "unknown" + default: + log.Fatalf("unknown implementation status %q, line %q", status, line) + } + + number, err := strconv.ParseInt(tokens[0], 10, 32) + xcheck(err, "parsing rfc number") + flags := strings.Split(tokens[2], ",") + title := tokens[3] + + rfc := RFC{ + int(number), + title, + status, + statusClass, + slices.Contains(flags, "Obs"), + } + t.RFCs = append(t.RFCs, rfc) + } + if err == io.EOF { + break + } + xcheck(err, "read line") + } + // Skip topics without RFCs to show on the website. + if t != nil && len(t.RFCs) == 0 { + l = l[:len(l)-1] + } + return l +} + +// renderer is used for all HTML pages, for showing links to h2's on hover, and for +// specially rendering the config files with links for each config field. 
+type renderer struct { + codeBlockConfigFile bool // Whether to interpret codeblocks as config files. + h2 string // Current title, for config line IDs. + blackfriday.HTMLRenderer // Embedded for RenderFooter and RenderHeader. +} + +func (r *renderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + if node.Type == blackfriday.Heading && node.Level == 2 { + r.h2 = string(node.FirstChild.Literal) + + id := "hdr-" + blackfriday.SanitizedAnchorName(string(node.FirstChild.Literal)) + if entering { + _, err := fmt.Fprintf(w, `

    `, id) + xcheck(err, "write") + } else { + _, err := fmt.Fprintf(w, ` #

    `, id) + xcheck(err, "write") + } + return blackfriday.GoToNext + } + if r.codeBlockConfigFile && node.Type == blackfriday.CodeBlock { + if !entering { + log.Fatalf("not entering") + } + + _, err := fmt.Fprintln(w, `
    `) + xcheck(err, "write") + r.writeConfig(w, node.Literal) + _, err = fmt.Fprintln(w, "
    ") + xcheck(err, "write") + return blackfriday.GoToNext + } + return r.HTMLRenderer.RenderNode(w, node, entering) +} + +func (r *renderer) writeConfig(w io.Writer, data []byte) { + var fields []string + for _, line := range bytes.Split(data, []byte("\n")) { + var attrs, link string + + s := string(line) + text := strings.TrimLeft(s, "\t") + if strings.HasPrefix(text, "#") { + attrs = ` class="comment"` + } else if text != "" { + // Add id attribute and link to it, based on the nested config fields that lead here. + ntab := len(s) - len(text) + nfields := ntab + 1 + if len(fields) >= nfields { + fields = fields[:nfields] + } else if nfields > len(fields)+1 { + xcheck(errors.New("indent jumped"), "write codeblock") + } else { + fields = append(fields, "") + } + + var word string + if text == "-" { + word = "dash" + } else { + word = strings.Split(text, ":")[0] + } + fields[nfields-1] = word + + id := fmt.Sprintf("cfg-%s-%s", blackfriday.SanitizedAnchorName(r.h2), strings.Join(fields, "-")) + attrs = fmt.Sprintf(` id="%s"`, id) + link = fmt.Sprintf(` #`, id) + } + if s == "" { + line = []byte("\n") // Prevent empty, zero-height line. + } + _, err := fmt.Fprintf(w, "%s%s\n", attrs, html.EscapeString(string(line)), link) + xcheck(err, "write codeblock") + } +} + +var before = ` + + + + ... + + + + + + + + +` + +// Template for protocol page, minus the first section which is read from +// protocols/summary.md. +var protocolTemplate = htmltemplate.Must(htmltemplate.New("protocolsupport").Parse(` + + + + + + + + + + + + + + + + + + + + + +
    YesAll/most of the functionality of the RFC has been implemented.
    PartialSome of the functionality from the RFC has been implemented.
    RoadmapImplementing functionality from the RFC is on the roadmap.
    NoFunctionality from the RFC has not been implemented, is not currently on the roadmap, but may be in the future.
    ?Status undecided, unknown or not applicable.
    + + + + + + + +{{ range .Topics }} + + + + {{ range .RFCs }} + + + + + + {{ end }} +{{ end }} +
    RFC #StatusTitle
    {{ .Title }} #
    {{ .Number }}{{ .Status }}{{ if .Obsolete }}Obsolete: {{ end }}{{ .Title }}
    +`))