diff --git a/.github/STALE-BOT.md b/.github/STALE-BOT.md
index 5e8f5d929b531d3f960ce648fd0445e723e4cc70..383717bfc1d951131095cc2c60b11aabdf24af34 100644
--- a/.github/STALE-BOT.md
+++ b/.github/STALE-BOT.md
@@ -3,7 +3,7 @@
 - Thanks for your contribution!
 - To remove the stale label, just leave a new comment.
 - _How to find the right people to ping?_ → [`git blame`](https://git-scm.com/docs/git-blame) to the rescue! (or GitHub's history and blame buttons.)
-- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on the [#nixos IRC channel](https://webchat.freenode.net/#nixos).
+- You can always ask for help on [our Discourse Forum](https://discourse.nixos.org/) or on [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org).
 
 ## Suggestions for PRs
 
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index bde6106e086aeac7f145096205f93397030359af..17a79dc97d2d29d36b19d4a4df9a6689c307efa1 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -8,52 +8,62 @@ jobs:
       matrix:
         os: [ubuntu-latest, macos-latest]
     runs-on: ${{ matrix.os }}
-    env:
-      CACHIX_NAME: nix-ci
+
     steps:
     - uses: actions/checkout@v2.3.4
       with:
         fetch-depth: 0
-    - uses: cachix/install-nix-action@v12
-    - uses: cachix/cachix-action@v8
+    - uses: cachix/install-nix-action@v13
+    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
+    - uses: cachix/cachix-action@v10
       with:
         name: '${{ env.CACHIX_NAME }}'
         signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
+        authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
     #- run: nix flake check
     - run: nix-build -A checks.$(if [[ `uname` = Linux ]]; then echo x86_64-linux; else echo x86_64-darwin; fi)
+  check_cachix:
+    name: Cachix secret present for installer tests
+    runs-on: ubuntu-latest
+    outputs:
+      secret: ${{ steps.secret.outputs.secret }}
+    steps:
+      - name: Check for Cachix secret
+        id: secret
+        env:
+          _CACHIX_SECRETS: ${{ secrets.CACHIX_SIGNING_KEY }}${{ secrets.CACHIX_AUTH_TOKEN }}
+        run: echo "::set-output name=secret::${{ env._CACHIX_SECRETS != '' }}"
   installer:
-    if: github.event_name == 'push'
-    needs: tests
+    needs: [tests, check_cachix]
+    if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
     runs-on: ubuntu-latest
-    env:
-      CACHIX_NAME: nix-ci
     outputs:
       installerURL: ${{ steps.prepare-installer.outputs.installerURL }}
     steps:
     - uses: actions/checkout@v2.3.4
       with:
         fetch-depth: 0
-    - uses: cachix/install-nix-action@v12
-    - uses: cachix/cachix-action@v8
+    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
+    - uses: cachix/install-nix-action@v13
+    - uses: cachix/cachix-action@v10
       with:
         name: '${{ env.CACHIX_NAME }}'
         signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}'
+        authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}'
     - id: prepare-installer
       run: scripts/prepare-installer-for-github-actions
   installer_test:
-    if: github.event_name == 'push'
-    needs: installer
+    needs: [installer, check_cachix]
+    if: github.event_name == 'push' && needs.check_cachix.outputs.secret == 'true'
     strategy:
       matrix:
         os: [ubuntu-latest, macos-latest]
     runs-on: ${{ matrix.os }}
-    env:
-      CACHIX_NAME: nix-ci
     steps:
     - uses: actions/checkout@v2.3.4
-    - uses: cachix/install-nix-action@master
+    - run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
+    - uses: cachix/install-nix-action@v13
       with:
         install_url: '${{needs.installer.outputs.installerURL}}'
-        install_options: '--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve'
+        install_options: "--tarball-url-prefix https://${{ env.CACHIX_NAME }}.cachix.org/serve"
     - run: nix-instantiate -E 'builtins.currentTime' --eval
-    
\ No newline at end of file
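As a quick illustration of the `CACHIX_NAME` step added above: the run lower-cases the repository slug and replaces `/` with `-` before appending `-install-tests`, and the new `check_cachix` job only reports whether the Cachix secrets are non-empty so the installer jobs can be skipped when they are absent. A minimal sketch that can be run locally, with an assumed example value for the repository slug:

```console
$ GITHUB_REPOSITORY=NixOS/nix    # assumed example; set automatically by GitHub Actions
$ echo "$GITHUB_REPOSITORY-install-tests" | tr "[A-Z]/" "[a-z]-"
nixos-nix-install-tests
```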
diff --git a/.gitignore b/.gitignore
index 37aada307cbc5cb0cab7e9c96eecabbdcc2d9a5a..2e14561fec7fb75ecd69d6e6f5591c4e2852d60c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -82,6 +82,7 @@ perl/Makefile.config
 /tests/shell
 /tests/shell.drv
 /tests/config.nix
+/tests/ca/config.nix
 
 # /tests/lang/
 /tests/lang/*.out
diff --git a/Makefile b/Makefile
index 68ec3ab0c00202bc4009371a980f0e29cf543478..b7f0e79db2f65757e2d2c678ac12103628460dd1 100644
--- a/Makefile
+++ b/Makefile
@@ -12,6 +12,7 @@ makefiles = \
   src/resolve-system-dependencies/local.mk \
   scripts/local.mk \
   misc/bash/local.mk \
+  misc/zsh/local.mk \
   misc/systemd/local.mk \
   misc/launchd/local.mk \
   misc/upstart/local.mk \
diff --git a/Makefile.config.in b/Makefile.config.in
index 3c1f01d1e18b002d95957255cf9cba2ea251a56b..fd92365ebd64ffb1063d46892b7b5bd81823b490 100644
--- a/Makefile.config.in
+++ b/Makefile.config.in
@@ -15,7 +15,6 @@ LDFLAGS = @LDFLAGS@
 LIBARCHIVE_LIBS = @LIBARCHIVE_LIBS@
 LIBBROTLI_LIBS = @LIBBROTLI_LIBS@
 LIBCURL_LIBS = @LIBCURL_LIBS@
-LIBLZMA_LIBS = @LIBLZMA_LIBS@
 OPENSSL_LIBS = @OPENSSL_LIBS@
 LIBSECCOMP_LIBS = @LIBSECCOMP_LIBS@
 PACKAGE_NAME = @PACKAGE_NAME@
diff --git a/README.md b/README.md
index 4686010efd38336329fabeab2045e4226c7a5b66..80d6f128c680d0bb97924e79a06d5fc1dc286d13 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,8 @@ build nix from source with nix-build or how to get a development environment.
 - [Nix manual](https://nixos.org/nix/manual)
 - [Nix jobsets on hydra.nixos.org](https://hydra.nixos.org/project/nix)
 - [NixOS Discourse](https://discourse.nixos.org/)
-- [IRC - #nixos on freenode.net](irc://irc.freenode.net/#nixos)
+- [Matrix - #nix:nixos.org](https://matrix.to/#/#nix:nixos.org)
+- [IRC - #nixos on libera.chat](irc://irc.libera.chat/#nixos)
 
 ## License
 
diff --git a/config/config.guess b/config/config.guess
index 699b3a10b21c297cee53e243ba0407c36c8f7a38..1972fda8eb05d040c1390495644252fc92fa2d10 100755
--- a/config/config.guess
+++ b/config/config.guess
@@ -1,8 +1,8 @@
 #! /bin/sh
 # Attempt to guess a canonical system name.
-#   Copyright 1992-2020 Free Software Foundation, Inc.
+#   Copyright 1992-2021 Free Software Foundation, Inc.
 
-timestamp='2020-11-19'
+timestamp='2021-01-25'
 
 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -50,7 +50,7 @@ version="\
 GNU config.guess ($timestamp)
 
 Originally written by Per Bothner.
-Copyright 1992-2020 Free Software Foundation, Inc.
+Copyright 1992-2021 Free Software Foundation, Inc.
 
 This is free software; see the source for copying conditions.  There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -188,10 +188,9 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
 	#
 	# Note: NetBSD doesn't particularly care about the vendor
 	# portion of the name.  We always set it to "unknown".
-	sysctl="sysctl -n hw.machine_arch"
 	UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \
-	    "/sbin/$sysctl" 2>/dev/null || \
-	    "/usr/sbin/$sysctl" 2>/dev/null || \
+	    /sbin/sysctl -n hw.machine_arch 2>/dev/null || \
+	    /usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \
 	    echo unknown))
 	case "$UNAME_MACHINE_ARCH" in
 	    aarch64eb) machine=aarch64_be-unknown ;;
@@ -996,6 +995,9 @@ EOF
     k1om:Linux:*:*)
 	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
+    loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*)
+	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+	exit ;;
     m32r*:Linux:*:*)
 	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
@@ -1084,7 +1086,7 @@ EOF
     ppcle:Linux:*:*)
 	echo powerpcle-unknown-linux-"$LIBC"
 	exit ;;
-    riscv32:Linux:*:* | riscv64:Linux:*:*)
+    riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*)
 	echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
 	exit ;;
     s390:Linux:*:* | s390x:Linux:*:*)
@@ -1480,8 +1482,8 @@ EOF
     i*86:rdos:*:*)
 	echo "$UNAME_MACHINE"-pc-rdos
 	exit ;;
-    i*86:AROS:*:*)
-	echo "$UNAME_MACHINE"-pc-aros
+    *:AROS:*:*)
+	echo "$UNAME_MACHINE"-unknown-aros
 	exit ;;
     x86_64:VMkernel:*:*)
 	echo "$UNAME_MACHINE"-unknown-esx
diff --git a/config/config.sub b/config/config.sub
index 19c9553b1825cafb182115513bc628e0ee801bd0..63c1f1c8b5e2d881e106d8951a11c6c94ae6352b 100755
--- a/config/config.sub
+++ b/config/config.sub
@@ -1,8 +1,8 @@
 #! /bin/sh
 # Configuration validation subroutine script.
-#   Copyright 1992-2020 Free Software Foundation, Inc.
+#   Copyright 1992-2021 Free Software Foundation, Inc.
 
-timestamp='2020-12-02'
+timestamp='2021-01-08'
 
 # This file is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by
@@ -67,7 +67,7 @@ Report bugs and patches to <config-patches@gnu.org>."
 version="\
 GNU config.sub ($timestamp)
 
-Copyright 1992-2020 Free Software Foundation, Inc.
+Copyright 1992-2021 Free Software Foundation, Inc.
 
 This is free software; see the source for copying conditions.  There is NO
 warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@@ -1185,6 +1185,7 @@ case $cpu-$vendor in
 			| k1om \
 			| le32 | le64 \
 			| lm32 \
+			| loongarch32 | loongarch64 | loongarchx32 \
 			| m32c | m32r | m32rle \
 			| m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \
 			| m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \
@@ -1229,7 +1230,7 @@ case $cpu-$vendor in
 			| powerpc | powerpc64 | powerpc64le | powerpcle | powerpcspe \
 			| pru \
 			| pyramid \
-			| riscv | riscv32 | riscv64 \
+			| riscv | riscv32 | riscv32be | riscv64 | riscv64be \
 			| rl78 | romp | rs6000 | rx \
 			| s390 | s390x \
 			| score \
@@ -1682,11 +1683,14 @@ fi
 
 # Now, validate our (potentially fixed-up) OS.
 case $os in
-	# Sometimes we do "kernel-abi", so those need to count as OSes.
+	# Sometimes we do "kernel-libc", so those need to count as OSes.
 	musl* | newlib* | uclibc*)
 		;;
-	# Likewise for "kernel-libc"
-	eabi | eabihf | gnueabi | gnueabihf)
+	# Likewise for "kernel-abi"
+	eabi* | gnueabi*)
+		;;
+	# VxWorks passes extra cpu info in the 4th field.
+	simlinux | simwindows | spe)
 		;;
 	# Now accept the basic system types.
 	# The portable systems comes first.
@@ -1750,6 +1754,8 @@ case $kernel-$os in
 		;;
 	kfreebsd*-gnu* | kopensolaris*-gnu*)
 		;;
+	vxworks-simlinux | vxworks-simwindows | vxworks-spe)
+		;;
 	nto-qnx*)
 		;;
 	os2-emx)
diff --git a/configure.ac b/configure.ac
index 24a0b369850200e8edec3e6b8a7ceb4d45038828..6e563eec33a932dc1c96fa585764c19c3fd38117 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,4 +1,4 @@
-AC_INIT(nix, m4_esyscmd([bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX"]))
+AC_INIT([nix],[m4_esyscmd(bash -c "echo -n $(cat ./.version)$VERSION_SUFFIX")])
 AC_CONFIG_MACRO_DIRS([m4])
 AC_CONFIG_SRCDIR(README.md)
 AC_CONFIG_AUX_DIR(config)
@@ -9,8 +9,7 @@ AC_PROG_SED
 AC_CANONICAL_HOST
 AC_MSG_CHECKING([for the canonical Nix system name])
 
-AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
-  [Platform identifier (e.g., `i686-linux').]),
+AC_ARG_WITH(system, AS_HELP_STRING([--with-system=SYSTEM],[Platform identifier (e.g., `i686-linux').]),
   [system=$withval],
   [case "$host_cpu" in
      i*86)
@@ -66,7 +65,7 @@ AC_SYS_LARGEFILE
 AC_STRUCT_DIRENT_D_TYPE
 if test "$sys_name" = sunos; then
     # Solaris requires -lsocket -lnsl for network functions
-    LIBS="-lsocket -lnsl $LIBS"
+    LDFLAGS="-lsocket -lnsl $LDFLAGS"
 fi
 
 
@@ -127,8 +126,7 @@ NEED_PROG(jq, jq)
 AC_SUBST(coreutils, [$(dirname $(type -p cat))])
 
 
-AC_ARG_WITH(store-dir, AC_HELP_STRING([--with-store-dir=PATH],
-  [path of the Nix store (defaults to /nix/store)]),
+AC_ARG_WITH(store-dir, AS_HELP_STRING([--with-store-dir=PATH],[path of the Nix store (defaults to /nix/store)]),
   storedir=$withval, storedir='/nix/store')
 AC_SUBST(storedir)
 
@@ -152,13 +150,12 @@ int main() {
 }]])], GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=no, GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC=yes)
 AC_MSG_RESULT($GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC)
 if test "x$GCC_ATOMIC_BUILTINS_NEED_LIBATOMIC" = xyes; then
-    LDFLAGS="$LDFLAGS -latomic"
+    LDFLAGS="-latomic $LDFLAGS"
 fi
 
 PKG_PROG_PKG_CONFIG
 
-AC_ARG_ENABLE(shared, AC_HELP_STRING([--enable-shared],
-  [Build shared libraries for Nix [default=yes]]),
+AC_ARG_ENABLE(shared, AS_HELP_STRING([--enable-shared],[Build shared libraries for Nix [default=yes]]),
   shared=$enableval, shared=yes)
 if test "$shared" = yes; then
   AC_SUBST(BUILD_SHARED_LIBS, 1, [Whether to build shared libraries.])
@@ -172,11 +169,6 @@ fi
 PKG_CHECK_MODULES([OPENSSL], [libcrypto], [CXXFLAGS="$OPENSSL_CFLAGS $CXXFLAGS"])
 
 
-# Look for libbz2, a required dependency.
-AC_CHECK_LIB([bz2], [BZ2_bzWriteOpen], [true],
-  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2.  See https://sourceware.org/bzip2/.])])
-AC_CHECK_HEADERS([bzlib.h], [true],
-  [AC_MSG_ERROR([Nix requires libbz2, which is part of bzip2.  See https://sourceware.org/bzip2/.])])
 # Checks for libarchive
 PKG_CHECK_MODULES([LIBARCHIVE], [libarchive >= 3.1.2], [CXXFLAGS="$LIBARCHIVE_CFLAGS $CXXFLAGS"])
 # Workaround until https://github.com/libarchive/libarchive/issues/1446 is fixed
@@ -205,16 +197,6 @@ PKG_CHECK_MODULES([EDITLINE], [libeditline], [CXXFLAGS="$EDITLINE_CFLAGS $CXXFLA
 # Look for libsodium, an optional dependency.
 PKG_CHECK_MODULES([SODIUM], [libsodium], [CXXFLAGS="$SODIUM_CFLAGS $CXXFLAGS"])
 
-# Look for liblzma, a required dependency.
-PKG_CHECK_MODULES([LIBLZMA], [liblzma], [CXXFLAGS="$LIBLZMA_CFLAGS $CXXFLAGS"])
-AC_CHECK_LIB([lzma], [lzma_stream_encoder_mt],
-  [AC_DEFINE([HAVE_LZMA_MT], [1], [xz multithreaded compression support])])
-
-# Look for zlib, a required dependency.
-PKG_CHECK_MODULES([ZLIB], [zlib], [CXXFLAGS="$ZLIB_CFLAGS $CXXFLAGS"])
-AC_CHECK_HEADER([zlib.h],[:],[AC_MSG_ERROR([could not find the zlib.h header])])
-LDFLAGS="-lz $LDFLAGS"
-
 # Look for libbrotli{enc,dec}.
 PKG_CHECK_MODULES([LIBBROTLI], [libbrotlienc libbrotlidec], [CXXFLAGS="$LIBBROTLI_CFLAGS $CXXFLAGS"])
 
@@ -230,9 +212,8 @@ AC_SUBST(HAVE_LIBCPUID, [$have_libcpuid])
 # Look for libseccomp, required for Linux sandboxing.
 if test "$sys_name" = linux; then
   AC_ARG_ENABLE([seccomp-sandboxing],
-                AC_HELP_STRING([--disable-seccomp-sandboxing],
-                               [Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)]
-                              ))
+                AS_HELP_STRING([--disable-seccomp-sandboxing],[Don't build support for seccomp sandboxing (only recommended if your arch doesn't support libseccomp yet!)
+                              ]))
   if test "x$enable_seccomp_sandboxing" != "xno"; then
     PKG_CHECK_MODULES([LIBSECCOMP], [libseccomp],
                       [CXXFLAGS="$LIBSECCOMP_CFLAGS $CXXFLAGS"])
@@ -250,8 +231,8 @@ AC_SUBST(HAVE_SECCOMP, [$have_seccomp])
 # Look for aws-cpp-sdk-s3.
 AC_LANG_PUSH(C++)
 AC_CHECK_HEADERS([aws/s3/S3Client.h],
-  [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.])
-  enable_s3=1], [enable_s3=])
+  [AC_DEFINE([ENABLE_S3], [1], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=1],
+  [AC_DEFINE([ENABLE_S3], [0], [Whether to enable S3 support via aws-sdk-cpp.]) enable_s3=])
 AC_SUBST(ENABLE_S3, [$enable_s3])
 AC_LANG_POP(C++)
 
@@ -264,8 +245,7 @@ fi
 
 
 # Whether to use the Boehm garbage collector.
-AC_ARG_ENABLE(gc, AC_HELP_STRING([--enable-gc],
-  [enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
+AC_ARG_ENABLE(gc, AS_HELP_STRING([--enable-gc],[enable garbage collection in the Nix expression evaluator (requires Boehm GC) [default=yes]]),
   gc=$enableval, gc=yes)
 if test "$gc" = yes; then
   PKG_CHECK_MODULES([BDW_GC], [bdw-gc])
@@ -279,8 +259,7 @@ PKG_CHECK_MODULES([GTEST], [gtest_main])
 
 
 # documentation generation switch
-AC_ARG_ENABLE(doc-gen, AC_HELP_STRING([--disable-doc-gen],
-  [disable documentation generation]),
+AC_ARG_ENABLE(doc-gen, AS_HELP_STRING([--disable-doc-gen],[disable documentation generation]),
   doc_generate=$enableval, doc_generate=yes)
 AC_SUBST(doc_generate)
 
@@ -300,19 +279,7 @@ if test "$(uname)" = "Darwin"; then
 fi
 
 
-# Do we have GNU tar?
-AC_MSG_CHECKING([if you have a recent GNU tar])
-if $tar --version 2> /dev/null | grep -q GNU && tar cvf /dev/null --warning=no-timestamp ./config.log > /dev/null; then
-    AC_MSG_RESULT(yes)
-    tarFlags="--warning=no-timestamp"
-else
-    AC_MSG_RESULT(no)
-fi
-AC_SUBST(tarFlags)
-
-
-AC_ARG_WITH(sandbox-shell, AC_HELP_STRING([--with-sandbox-shell=PATH],
-  [path of a statically-linked shell to use as /bin/sh in sandboxes]),
+AC_ARG_WITH(sandbox-shell, AS_HELP_STRING([--with-sandbox-shell=PATH],[path of a statically-linked shell to use as /bin/sh in sandboxes]),
   sandbox_shell=$withval)
 AC_SUBST(sandbox_shell)
 
@@ -327,6 +294,6 @@ done
 
 rm -f Makefile.config
 
-AC_CONFIG_HEADER([config.h])
+AC_CONFIG_HEADERS([config.h])
 AC_CONFIG_FILES([])
 AC_OUTPUT
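For reference, a hedged example of a `configure` invocation exercising two of the options declared above (the store directory shown is simply the documented default):

```console
$ ./configure --with-store-dir=/nix/store --disable-seccomp-sandboxing
```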
diff --git a/doc/manual/local.mk b/doc/manual/local.mk
index 81a7755e84fd249269fd615a801531c3b2ffd579..271529b3804d9e11cbaaeed9254ce2a586747277 100644
--- a/doc/manual/local.mk
+++ b/doc/manual/local.mk
@@ -25,19 +25,19 @@ nix-eval = $(dummy-env) $(bindir)/nix eval --experimental-features nix-command -
 $(d)/%.1: $(d)/src/command-ref/%.md
 	@printf "Title: %s\n\n" "$$(basename $@ .1)" > $^.tmp
 	@cat $^ >> $^.tmp
-	$(trace-gen) lowdown -sT man $^.tmp -o $@
+	$(trace-gen) lowdown -sT man -M section=1 $^.tmp -o $@
 	@rm $^.tmp
 
 $(d)/%.8: $(d)/src/command-ref/%.md
 	@printf "Title: %s\n\n" "$$(basename $@ .8)" > $^.tmp
 	@cat $^ >> $^.tmp
-	$(trace-gen) lowdown -sT man $^.tmp -o $@
+	$(trace-gen) lowdown -sT man -M section=8 $^.tmp -o $@
 	@rm $^.tmp
 
 $(d)/nix.conf.5: $(d)/src/command-ref/conf-file.md
 	@printf "Title: %s\n\n" "$$(basename $@ .5)" > $^.tmp
 	@cat $^ >> $^.tmp
-	$(trace-gen) lowdown -sT man $^.tmp -o $@
+	$(trace-gen) lowdown -sT man -M section=5 $^.tmp -o $@
 	@rm $^.tmp
 
 $(d)/src/SUMMARY.md: $(d)/src/SUMMARY.md.in $(d)/src/command-ref/new-cli
@@ -80,7 +80,7 @@ install: $(d)/src/command-ref/new-cli
 	  if [[ $$name = SUMMARY ]]; then continue; fi; \
 	  printf "Title: %s\n\n" "$$name" > $$i.tmp; \
 	  cat $$i >> $$i.tmp; \
-	  lowdown -sT man $$i.tmp -o $(mandir)/man1/$$name.1; \
+	  lowdown -sT man -M section=1 $$i.tmp -o $(mandir)/man1/$$name.1; \
 	done
 
 $(docdir)/manual/index.html: $(MANUAL_SRCS) $(d)/book.toml $(d)/custom.css $(d)/src/SUMMARY.md $(d)/src/command-ref/new-cli $(d)/src/command-ref/conf-file.md $(d)/src/expressions/builtins.md
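The rules above now pass an explicit man section to lowdown via `-M section=N`. A stand-alone sketch of the same recipe, assuming a hypothetical input page `nix-env.md`:

```console
$ printf "Title: %s\n\n" "nix-env" > nix-env.md.tmp
$ cat nix-env.md >> nix-env.md.tmp
$ lowdown -sT man -M section=1 nix-env.md.tmp -o nix-env.1
$ rm nix-env.md.tmp
```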
diff --git a/doc/manual/src/advanced-topics/cores-vs-jobs.md b/doc/manual/src/advanced-topics/cores-vs-jobs.md
index 4a9058ca19eb7393703ee3efb1ee7cf8c2516d03..9e91ab9c7886162a7255c82d3b3abaa19bbadb45 100644
--- a/doc/manual/src/advanced-topics/cores-vs-jobs.md
+++ b/doc/manual/src/advanced-topics/cores-vs-jobs.md
@@ -4,13 +4,13 @@ Nix has two relevant settings with regards to how your CPU cores will
 be utilized: `cores` and `max-jobs`. This chapter will talk about what
 they are, how they interact, and their configuration trade-offs.
 
-  - `max-jobs`  
+  - `max-jobs`\
     Dictates how many separate derivations will be built at the same
     time. If you set this to zero, the local machine will do no
     builds.  Nix will still substitute from binary caches, and build
     remotely if remote builders are configured.
 
-  - `cores`  
+  - `cores`\
     Suggests how many cores each derivation should use. Similar to
     `make -j`.
 
diff --git a/doc/manual/src/command-ref/env-common.md b/doc/manual/src/command-ref/env-common.md
index c670d82b81bf22814bbaba3fae95716447950600..6e240346153fe7eb6c6bca1771b905ed3c6b23a6 100644
--- a/doc/manual/src/command-ref/env-common.md
+++ b/doc/manual/src/command-ref/env-common.md
@@ -2,45 +2,49 @@
 
 Most Nix commands interpret the following environment variables:
 
-  - `IN_NIX_SHELL`  
+  - `IN_NIX_SHELL`\
     Indicator that tells if the current environment was set up by
     `nix-shell`. Since Nix 2.0, the values are `"pure"` and `"impure"`.
 
-  - `NIX_PATH`  
+  - `NIX_PATH`\
     A colon-separated list of directories used to look up Nix
     expressions enclosed in angle brackets (i.e., `<path>`). For
     instance, the value
-    
+
         /home/eelco/Dev:/etc/nixos
-    
+
     will cause Nix to look for paths relative to `/home/eelco/Dev` and
     `/etc/nixos`, in this order. It is also possible to match paths
     against a prefix. For example, the value
-    
+
         nixpkgs=/home/eelco/Dev/nixpkgs-branch:/etc/nixos
-    
+
     will cause Nix to search for `<nixpkgs/path>` in
     `/home/eelco/Dev/nixpkgs-branch/path` and `/etc/nixos/nixpkgs/path`.
-    
+
     If a path in the Nix search path starts with `http://` or
     `https://`, it is interpreted as the URL of a tarball that will be
     downloaded and unpacked to a temporary location. The tarball must
     consist of a single top-level directory. For example, setting
     `NIX_PATH` to
-    
-        nixpkgs=https://github.com/NixOS/nixpkgs/archive/nixos-15.09.tar.gz
-    
-    tells Nix to download the latest revision in the Nixpkgs/NixOS 15.09
-    channel.
-    
-    A following shorthand can be used to refer to the official channels:
-    
-        nixpkgs=channel:nixos-15.09
-    
-    The search path can be extended using the `-I` option, which takes
-    precedence over `NIX_PATH`.
-
-  - `NIX_IGNORE_SYMLINK_STORE`  
+
+        nixpkgs=https://github.com/NixOS/nixpkgs/archive/master.tar.gz
+
+    tells Nix to download and use the current contents of the
+    `master` branch in the `nixpkgs` repository.
+
+    The URLs of the tarballs from the official nixos.org channels (see
+    [the manual for `nix-channel`](nix-channel.md)) can be abbreviated
+    as `channel:<channel-name>`.  For instance, the following two
+    values of `NIX_PATH` are equivalent:
+
+        nixpkgs=channel:nixos-21.05
+        nixpkgs=https://nixos.org/channels/nixos-21.05/nixexprs.tar.xz
+
+    The Nix search path can also be extended using the `-I` option to
+    many Nix commands, which takes precedence over `NIX_PATH`.
+
+  - `NIX_IGNORE_SYMLINK_STORE`\
     Normally, the Nix store directory (typically `/nix/store`) is not
     allowed to contain any symlink components. This is to prevent
     “impure” builds. Builders sometimes “canonicalise” paths by
@@ -50,7 +54,7 @@ Most Nix commands interpret the following environment variables:
     builds are deployed to machines where `/nix/store` resolves
     differently. If you are sure that you’re not going to do that, you
     can set `NIX_IGNORE_SYMLINK_STORE` to `1`.
-    
+
     Note that if you’re symlinking the Nix store so that you can put it
     on another file system than the root file system, on Linux you’re
     better off using `bind` mount points, e.g.,
@@ -59,44 +63,44 @@ Most Nix commands interpret the following environment variables:
     $ mkdir /nix
     $ mount -o bind /mnt/otherdisk/nix /nix
     ```
-    
+
     Consult the mount(8) manual page for details.
 
-  - `NIX_STORE_DIR`  
+  - `NIX_STORE_DIR`\
     Overrides the location of the Nix store (default `prefix/store`).
 
-  - `NIX_DATA_DIR`  
+  - `NIX_DATA_DIR`\
     Overrides the location of the Nix static data directory (default
     `prefix/share`).
 
-  - `NIX_LOG_DIR`  
+  - `NIX_LOG_DIR`\
     Overrides the location of the Nix log directory (default
     `prefix/var/log/nix`).
 
-  - `NIX_STATE_DIR`  
+  - `NIX_STATE_DIR`\
     Overrides the location of the Nix state directory (default
     `prefix/var/nix`).
 
-  - `NIX_CONF_DIR`  
+  - `NIX_CONF_DIR`\
     Overrides the location of the system Nix configuration directory
     (default `prefix/etc/nix`).
 
-  - `NIX_CONFIG`  
+  - `NIX_CONFIG`\
     Applies settings from Nix configuration from the environment.
     The content is treated as if it was read from a Nix configuration file.
     Settings are separated by the newline character.
 
-  - `NIX_USER_CONF_FILES`  
+  - `NIX_USER_CONF_FILES`\
     Overrides the location of the user Nix configuration files to load
     from (defaults to the XDG spec locations). The variable is treated
     as a list separated by the `:` token.
 
-  - `TMPDIR`  
+  - `TMPDIR`\
     Use the specified directory to store temporary files. In particular,
     this includes temporary build directories; these can take up
     substantial amounts of disk space. The default is `/tmp`.
 
-  - `NIX_REMOTE`  
+  - `NIX_REMOTE`\
     This variable should be set to `daemon` if you want to use the Nix
     daemon to execute Nix operations. This is necessary in [multi-user
     Nix installations](../installation/multi-user.md). If the Nix
@@ -104,16 +108,16 @@ Most Nix commands interpret the following environment variables:
     should be set to `unix://path/to/socket`. Otherwise, it should be
     left unset.
 
-  - `NIX_SHOW_STATS`  
+  - `NIX_SHOW_STATS`\
     If set to `1`, Nix will print some evaluation statistics, such as
     the number of values allocated.
 
-  - `NIX_COUNT_CALLS`  
+  - `NIX_COUNT_CALLS`\
     If set to `1`, Nix will print how often functions were called during
     Nix expression evaluation. This is useful for profiling your Nix
     expressions.
 
-  - `GC_INITIAL_HEAP_SIZE`  
+  - `GC_INITIAL_HEAP_SIZE`\
     If Nix has been configured to use the Boehm garbage collector, this
     variable sets the initial size of the heap in bytes. It defaults to
     384 MiB. Setting it to a low value reduces memory consumption, but
diff --git a/doc/manual/src/command-ref/nix-build.md b/doc/manual/src/command-ref/nix-build.md
index 4565bfbc271142e35181eed512f5dca66d197140..43de7a6e62d7827954ecc9552116a6f5aeaab6e8 100644
--- a/doc/manual/src/command-ref/nix-build.md
+++ b/doc/manual/src/command-ref/nix-build.md
@@ -47,16 +47,16 @@ All options not listed here are passed to `nix-store
 --realise`, except for `--arg` and `--attr` / `-A` which are passed to
 `nix-instantiate`.
 
-  - `--no-out-link`  
+  - `--no-out-link`\
     Do not create a symlink to the output path. Note that as a result
     the output does not become a root of the garbage collector, and so
     might be deleted by `nix-store
                     --gc`.
 
-  - `--dry-run`  
+  - `--dry-run`\
     Show what store paths would be built or downloaded.
 
-  - `--out-link` / `-o` *outlink*  
+  - `--out-link` / `-o` *outlink*\
     Change the name of the symlink to the output path created from
     `result` to *outlink*.
 
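An illustrative use of the options documented above, assuming a `default.nix` in the current directory:

```console
$ nix-build --dry-run             # show what would be built or downloaded
$ nix-build --out-link ./res      # build, naming the result symlink "res"
```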
diff --git a/doc/manual/src/command-ref/nix-channel.md b/doc/manual/src/command-ref/nix-channel.md
index 4ca12d2cc99e4cd9df4c36f6071eafe32e730b83..24353525f82504dfd2f88a4a9bf6b9704c3a0477 100644
--- a/doc/manual/src/command-ref/nix-channel.md
+++ b/doc/manual/src/command-ref/nix-channel.md
@@ -17,26 +17,26 @@ To see the list of official NixOS channels, visit
 
 This command has the following operations:
 
-  - `--add` *url* \[*name*\]  
+  - `--add` *url* \[*name*\]\
     Adds a channel named *name* with URL *url* to the list of subscribed
     channels. If *name* is omitted, it defaults to the last component of
     *url*, with the suffixes `-stable` or `-unstable` removed.
 
-  - `--remove` *name*  
+  - `--remove` *name*\
     Removes the channel named *name* from the list of subscribed
     channels.
 
-  - `--list`  
+  - `--list`\
     Prints the names and URLs of all subscribed channels on standard
     output.
 
-  - `--update` \[*names*…\]  
+  - `--update` \[*names*…\]\
     Downloads the Nix expressions of all subscribed channels (or only
     those included in *names* if specified) and makes them the default
     for `nix-env` operations (by symlinking them from the directory
     `~/.nix-defexpr`).
 
-  - `--rollback` \[*generation*\]  
+  - `--rollback` \[*generation*\]\
     Reverts the previous call to `nix-channel
                     --update`. Optionally, you can specify a specific channel generation
     number to restore.
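An example session using the operations listed above; the channel URL follows the official nixos.org channel naming mentioned elsewhere in this manual, and the `--list` output shown is approximate:

```console
$ nix-channel --add https://nixos.org/channels/nixos-21.05 nixpkgs
$ nix-channel --update
$ nix-channel --list
nixpkgs https://nixos.org/channels/nixos-21.05
```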
@@ -70,14 +70,14 @@ $ nix-instantiate --eval -E '(import <nixpkgs> {}).lib.version'
 
 # Files
 
-  - `/nix/var/nix/profiles/per-user/username/channels`  
+  - `/nix/var/nix/profiles/per-user/username/channels`\
     `nix-channel` uses a `nix-env` profile to keep track of previous
     versions of the subscribed channels. Every time you run `nix-channel
     --update`, a new channel generation (that is, a symlink to the
     channel Nix expressions in the Nix store) is created. This enables
     `nix-channel --rollback` to revert to previous versions.
 
-  - `~/.nix-defexpr/channels`  
+  - `~/.nix-defexpr/channels`\
     This is a symlink to
     `/nix/var/nix/profiles/per-user/username/channels`. It ensures that
     `nix-env` can find your channels. In a multi-user installation, you
@@ -89,7 +89,7 @@ $ nix-instantiate --eval -E '(import <nixpkgs> {}).lib.version'
 A channel URL should point to a directory containing the following
 files:
 
-  - `nixexprs.tar.xz`  
+  - `nixexprs.tar.xz`\
     A tarball containing Nix expressions and files referenced by them
     (such as build scripts and patches). At the top level, the tarball
     should contain a single directory. That directory must contain a
diff --git a/doc/manual/src/command-ref/nix-copy-closure.md b/doc/manual/src/command-ref/nix-copy-closure.md
index dcb844a721bfec303fa5d79a46ff01d7cf84a31d..7047d3012bb14c231de5b86e091e73a4a5874de8 100644
--- a/doc/manual/src/command-ref/nix-copy-closure.md
+++ b/doc/manual/src/command-ref/nix-copy-closure.md
@@ -35,21 +35,21 @@ and second to send the dump of those paths.  If this bothers you, use
 
 # Options
 
-  - `--to`  
+  - `--to`\
     Copy the closure of _paths_ from the local Nix store to the Nix
     store on _machine_. This is the default.
 
-  - `--from`  
+  - `--from`\
     Copy the closure of _paths_ from the Nix store on _machine_ to the
     local Nix store.
 
-  - `--gzip`  
+  - `--gzip`\
     Enable compression of the SSH connection.
 
-  - `--include-outputs`  
+  - `--include-outputs`\
     Also copy the outputs of store derivations included in the closure.
 
-  - `--use-substitutes` / `-s`  
+  - `--use-substitutes` / `-s`\
     Attempt to download missing paths on the target machine using Nix’s
     substitute mechanism.  Any paths that cannot be substituted on the
     target are still copied normally from the source.  This is useful,
@@ -58,12 +58,12 @@ and second to send the dump of those paths.  If this bothers you, use
     `nixos.org` (the default binary cache server) is
     fast.
 
-  - `-v`  
+  - `-v`\
     Show verbose output.
 
 # Environment variables
 
-  - `NIX_SSHOPTS`  
+  - `NIX_SSHOPTS`\
     Additional options to be passed to `ssh` on the command
     line.
 
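A hedged example combining the options and the `NIX_SSHOPTS` variable described above; `server` and the store path are placeholders:

```console
$ NIX_SSHOPTS="-p 2222" nix-copy-closure --to --use-substitutes server /nix/store/<hash>-example
```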
diff --git a/doc/manual/src/command-ref/nix-env.md b/doc/manual/src/command-ref/nix-env.md
index 1c23bb0ad4110498b89bffdeabdc51f2e96a3d9d..9138fa05a5a529f81c2ce9b7e0a00be61f0ba7b2 100644
--- a/doc/manual/src/command-ref/nix-env.md
+++ b/doc/manual/src/command-ref/nix-env.md
@@ -36,27 +36,27 @@ case-sensitive. The regular expression can optionally be followed by a
 dash and a version number; if omitted, any version of the package will
 match. Here are some examples:
 
-  - `firefox`  
+  - `firefox`\
     Matches the package name `firefox` and any version.
 
-  - `firefox-32.0`  
+  - `firefox-32.0`\
     Matches the package name `firefox` and version `32.0`.
 
-  - `gtk\\+`  
+  - `gtk\\+`\
     Matches the package name `gtk+`. The `+` character must be escaped
     using a backslash to prevent it from being interpreted as a
     quantifier, and the backslash must be escaped in turn with another
     backslash to ensure that the shell passes it on.
 
-  - `.\*`  
+  - `.\*`\
     Matches any package name. This is the default for most commands.
 
-  - `'.*zip.*'`  
+  - `'.*zip.*'`\
     Matches any package name containing the string `zip`. Note the dots:
     `'*zip*'` does not work, because in a regular expression, the
     character `*` is interpreted as a quantifier.
 
-  - `'.*(firefox|chromium).*'`  
+  - `'.*(firefox|chromium).*'`\
     Matches any package name containing the strings `firefox` or
     `chromium`.
 
@@ -66,7 +66,7 @@ This section lists the options that are common to all operations. These
 options are allowed for every subcommand, though they may not always
 have an effect.
 
-  - `--file` / `-f` *path*  
+  - `--file` / `-f` *path*\
     Specifies the Nix expression (designated below as the *active Nix
     expression*) used by the `--install`, `--upgrade`, and `--query
     --available` operations to obtain derivations. The default is
@@ -77,13 +77,13 @@ have an effect.
     unpacked to a temporary location. The tarball must include a single
     top-level directory containing at least a file named `default.nix`.
 
-  - `--profile` / `-p` *path*  
+  - `--profile` / `-p` *path*\
     Specifies the profile to be used by those operations that operate on
     a profile (designated below as the *active profile*). A profile is a
     sequence of user environments called *generations*, one of which is
     the *current generation*.
 
-  - `--dry-run`  
+  - `--dry-run`\
     For the `--install`, `--upgrade`, `--uninstall`,
     `--switch-generation`, `--delete-generations` and `--rollback`
     operations, this flag will cause `nix-env` to print what *would* be
@@ -93,7 +93,7 @@ have an effect.
     [substituted](../glossary.md) (i.e., downloaded) and which paths
     will be built from source (because no substitute is available).
 
-  - `--system-filter` *system*  
+  - `--system-filter` *system*\
     By default, operations such as `--query
                     --available` show derivations matching any platform. This option
     allows you to use derivations for the specified platform *system*.
@@ -102,7 +102,7 @@ have an effect.
 
 # Files
 
-  - `~/.nix-defexpr`  
+  - `~/.nix-defexpr`\
     The source for the default Nix expressions used by the
     `--install`, `--upgrade`, and `--query --available` operations to
     obtain derivations. The `--file` option may be used to override
@@ -140,7 +140,7 @@ have an effect.
     The command `nix-channel` places symlinks to the downloaded Nix
     expressions from each subscribed channel in this directory.
 
-  - `~/.nix-profile`  
+  - `~/.nix-profile`\
     A symbolic link to the user's current profile. By default, this
     symlink points to `prefix/var/nix/profiles/default`. The `PATH`
     environment variable should include `~/.nix-profile/bin` for the
@@ -217,13 +217,13 @@ a number of possible ways:
 
 ## Flags
 
-  - `--prebuilt-only` / `-b`  
+  - `--prebuilt-only` / `-b`\
     Use only derivations for which a substitute is registered, i.e.,
     there is a pre-built binary available that can be downloaded in lieu
     of building the derivation. Thus, no packages will be built from
     source.
 
-  - `--preserve-installed`; `-P`  
+  - `--preserve-installed`; `-P`\
     Do not remove derivations with a name matching one of the
     derivations being installed. Usually, trying to have two versions of
     the same package installed in the same generation of a profile will
@@ -231,7 +231,7 @@ a number of possible ways:
     clashes between the two versions. However, this is not the case for
     all packages.
 
-  - `--remove-all`; `-r`  
+  - `--remove-all`; `-r`\
     Remove all previously installed packages first. This is equivalent
     to running `nix-env -e '.*'` first, except that everything happens
     in a single transaction.
@@ -346,24 +346,24 @@ version is installed.
 
 ## Flags
 
-  - `--lt`  
+  - `--lt`\
     Only upgrade a derivation to newer versions. This is the default.
 
-  - `--leq`  
+  - `--leq`\
     In addition to upgrading to newer versions, also “upgrade” to
     derivations that have the same version. Versions are not a unique
     identification of a derivation, so there may be many derivations
     that have the same version. This flag may be useful to force
     “synchronisation” between the installed and available derivations.
 
-  - `--eq`  
+  - `--eq`\
     *Only* “upgrade” to derivations that have the same version. This may
     not seem very useful, but it actually is, e.g., when there is a new
     release of Nixpkgs and you want to replace installed applications
     with the same versions built against newer dependencies (to reduce
     the number of dependencies floating around on your system).
 
-  - `--always`  
+  - `--always`\
     In addition to upgrading to newer versions, also “upgrade” to
     derivations that have the same or a lower version. I.e., derivations
     may actually be downgraded depending on what is available in the
@@ -578,11 +578,11 @@ The derivations are sorted by their `name` attributes.
 The following flags specify the set of things on which the query
 operates.
 
-  - `--installed`  
+  - `--installed`\
     The query operates on the store paths that are installed in the
     current generation of the active profile. This is the default.
 
-  - `--available`; `-a`  
+  - `--available`; `-a`\
     The query operates on the derivations that are available in the
     active Nix expression.
 
@@ -593,24 +593,24 @@ selected derivations. Multiple flags may be specified, in which case the
 information is shown in the order given here. Note that the name of the
 derivation is shown unless `--no-name` is specified.
 
-  - `--xml`  
+  - `--xml`\
     Print the result in an XML representation suitable for automatic
     processing by other tools. The root element is called `items`, which
     contains an `item` element for each available or installed
     derivation. The fields discussed below are all stored in attributes
     of the `item` elements.
 
-  - `--json`  
+  - `--json`\
     Print the result in a JSON representation suitable for automatic
     processing by other tools.
 
-  - `--prebuilt-only` / `-b`  
+  - `--prebuilt-only` / `-b`\
     Show only derivations for which a substitute is registered, i.e.,
     there is a pre-built binary available that can be downloaded in lieu
     of building the derivation. Thus, this shows all packages that
     probably can be installed quickly.
 
-  - `--status`; `-s`  
+  - `--status`; `-s`\
     Print the *status* of the derivation. The status consists of three
     characters. The first is `I` or `-`, indicating whether the
     derivation is currently installed in the current generation of the
@@ -621,49 +621,49 @@ derivation is shown unless `--no-name` is specified.
     derivation to be built. The third is `S` or `-`, indicating whether
     a substitute is available for the derivation.
 
-  - `--attr-path`; `-P`  
+  - `--attr-path`; `-P`\
     Print the *attribute path* of the derivation, which can be used to
     unambiguously select it using the `--attr` option available in
     commands that install derivations like `nix-env --install`. This
     option only works together with `--available`
 
-  - `--no-name`  
+  - `--no-name`\
     Suppress printing of the `name` attribute of each derivation.
 
-  - `--compare-versions` / `-c`  
+  - `--compare-versions` / `-c`\
     Compare installed versions to available versions, or vice versa (if
     `--available` is given). This is useful for quickly seeing whether
     upgrades for installed packages are available in a Nix expression. A
     column is added with the following meaning:
 
-      - `<` *version*  
+      - `<` *version*\
         A newer version of the package is available or installed.
 
-      - `=` *version*  
+      - `=` *version*\
         At most the same version of the package is available or
         installed.
 
-      - `>` *version*  
+      - `>` *version*\
         Only older versions of the package are available or installed.
 
-      - `- ?`  
+      - `- ?`\
         No version of the package is available or installed.
 
-  - `--system`  
+  - `--system`\
     Print the `system` attribute of the derivation.
 
-  - `--drv-path`  
+  - `--drv-path`\
     Print the path of the store derivation.
 
-  - `--out-path`  
+  - `--out-path`\
     Print the output path of the derivation.
 
-  - `--description`  
+  - `--description`\
     Print a short (one-line) description of the derivation, if
     available. The description is taken from the `meta.description`
     attribute of the derivation.
 
-  - `--meta`  
+  - `--meta`\
     Print all of the meta-attributes of the derivation. This option is
     only available with `--xml` or `--json`.
 
@@ -874,7 +874,7 @@ error: no generation older than the current (91) exists
 
 # Environment variables
 
-  - `NIX_PROFILE`  
+  - `NIX_PROFILE`\
     Location of the Nix profile. Defaults to the target of the symlink
     `~/.nix-profile`, if it exists, or `/nix/var/nix/profiles/default`
     otherwise.
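Two illustrative queries built from the selectors and flags documented above; no output is shown because it depends on the installed profile:

```console
$ nix-env --query --available --attr-path '.*firefox.*'
$ nix-env --query --installed --compare-versions
```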
diff --git a/doc/manual/src/command-ref/nix-hash.md b/doc/manual/src/command-ref/nix-hash.md
index de0459b9e067b7b28fa1dda377795a06cf02aeea..45f67f1c5e3afc119e860c33ef212e1de13a2b42 100644
--- a/doc/manual/src/command-ref/nix-hash.md
+++ b/doc/manual/src/command-ref/nix-hash.md
@@ -29,29 +29,29 @@ md5sum`.
 
 # Options
 
-  - `--flat`  
+  - `--flat`\
     Print the cryptographic hash of the contents of each regular file
     *path*. That is, do not compute the hash over the dump of *path*.
     The result is identical to that produced by the GNU commands
     `md5sum` and `sha1sum`.
 
-  - `--base32`  
+  - `--base32`\
     Print the hash in a base-32 representation rather than hexadecimal.
     This base-32 representation is more compact and can be used in Nix
     expressions (such as in calls to `fetchurl`).
 
-  - `--truncate`  
+  - `--truncate`\
     Truncate hashes longer than 160 bits (such as SHA-256) to 160 bits.
 
-  - `--type` *hashAlgo*  
+  - `--type` *hashAlgo*\
     Use the specified cryptographic hash algorithm, which can be one of
     `md5`, `sha1`, `sha256`, and `sha512`.
 
-  - `--to-base16`  
+  - `--to-base16`\
     Don’t hash anything, but convert the base-32 hash representation
     *hash* to hexadecimal.
 
-  - `--to-base32`  
+  - `--to-base32`\
     Don’t hash anything, but convert the hexadecimal hash representation
     *hash* to base-32.
 
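A short example of the options above; the paths are placeholders:

```console
$ nix-hash --type sha256 ./example-directory                 # hash over the dump of the path (default)
$ nix-hash --type sha256 --flat --base32 ./example.tar.gz    # plain file hash, printed in base-32
```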
diff --git a/doc/manual/src/command-ref/nix-instantiate.md b/doc/manual/src/command-ref/nix-instantiate.md
index c369397b64f7b915bed5e597b9934f8ce48b7cdd..2e198daed703538ccba40bedd166f0bffd3cb978 100644
--- a/doc/manual/src/command-ref/nix-instantiate.md
+++ b/doc/manual/src/command-ref/nix-instantiate.md
@@ -29,26 +29,26 @@ standard input.
 
 # Options
 
-  - `--add-root` *path*  
+  - `--add-root` *path*\
     See the [corresponding option](nix-store.md) in `nix-store`.
 
-  - `--parse`  
+  - `--parse`\
     Just parse the input files, and print their abstract syntax trees on
     standard output in ATerm format.
 
-  - `--eval`  
+  - `--eval`\
     Just parse and evaluate the input files, and print the resulting
     values on standard output. No instantiation of store derivations
     takes place.
 
-  - `--find-file`  
+  - `--find-file`\
     Look up the given files in Nix’s search path (as specified by the
     `NIX_PATH` environment variable). If found, print the corresponding
     absolute paths on standard output. For instance, if `NIX_PATH` is
     `nixpkgs=/home/alice/nixpkgs`, then `nix-instantiate --find-file
     nixpkgs/default.nix` will print `/home/alice/nixpkgs/default.nix`.
 
-  - `--strict`  
+  - `--strict`\
     When used with `--eval`, recursively evaluate list elements and
     attributes. Normally, such sub-expressions are left unevaluated
     (since the Nix expression language is lazy).
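To make the `--find-file` behaviour above concrete, here is the documented example spelled out as a command; the printed path is simply whatever `NIX_PATH` points at:

```console
$ NIX_PATH=nixpkgs=/home/alice/nixpkgs nix-instantiate --find-file nixpkgs/default.nix
/home/alice/nixpkgs/default.nix
```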
@@ -58,17 +58,17 @@ standard input.
     > This option can cause non-termination, because lazy data
     > structures can be infinitely large.
 
-  - `--json`  
+  - `--json`\
     When used with `--eval`, print the resulting value as an JSON
     representation of the abstract syntax tree rather than as an ATerm.
 
-  - `--xml`  
+  - `--xml`\
     When used with `--eval`, print the resulting value as an XML
     representation of the abstract syntax tree rather than as an ATerm.
     The schema is the same as that used by the [`toXML`
     built-in](../expressions/builtins.md).
 
-  - `--read-write-mode`  
+  - `--read-write-mode`\
     When used with `--eval`, perform evaluation in read/write mode so
     nix language features that require it will still work (at the cost
     of needing to do instantiation of every evaluated derivation). If
diff --git a/doc/manual/src/command-ref/nix-prefetch-url.md b/doc/manual/src/command-ref/nix-prefetch-url.md
index 59ab89b2965980a7bac50c513dffd79903d3b1e3..3bcd209e275e06e36a7f9ccdbd1be2ca0df8d2a1 100644
--- a/doc/manual/src/command-ref/nix-prefetch-url.md
+++ b/doc/manual/src/command-ref/nix-prefetch-url.md
@@ -37,22 +37,22 @@ Nix store is also printed.
 
 # Options
 
-  - `--type` *hashAlgo*  
+  - `--type` *hashAlgo*\
     Use the specified cryptographic hash algorithm, which can be one of
     `md5`, `sha1`, `sha256`, and `sha512`.
 
-  - `--print-path`  
+  - `--print-path`\
     Print the store path of the downloaded file on standard output.
 
-  - `--unpack`  
+  - `--unpack`\
     Unpack the archive (which must be a tarball or zip file) and add the
     result to the Nix store. The resulting hash can be used with
     functions such as Nixpkgs’s `fetchzip` or `fetchFromGitHub`.
 
-  - `--executable`  
+  - `--executable`\
     Set the executable bit on the downloaded file.
 
-  - `--name` *name*  
+  - `--name` *name*\
     Override the name of the file in the Nix store. By default, this is
     `hash-basename`, where *basename* is the last component of *url*.
     Overriding the name is necessary when *basename* contains characters
diff --git a/doc/manual/src/command-ref/nix-shell.md b/doc/manual/src/command-ref/nix-shell.md
index 54812a49fdf994e5378302b136d3479e08d5d6d9..72f6730f18acdd6265741384c047301bc6726e39 100644
--- a/doc/manual/src/command-ref/nix-shell.md
+++ b/doc/manual/src/command-ref/nix-shell.md
@@ -54,7 +54,7 @@ All options not listed here are passed to `nix-store
 --realise`, except for `--arg` and `--attr` / `-A` which are passed to
 `nix-instantiate`.
 
-  - `--command` *cmd*  
+  - `--command` *cmd*\
     In the environment of the derivation, run the shell command *cmd*.
     This command is executed in an interactive shell. (Use `--run` to
     use a non-interactive shell instead.) However, a call to `exit` is
@@ -64,36 +64,34 @@ All options not listed here are passed to `nix-store
     drop you into the interactive shell. This can be useful for doing
     any additional initialisation.
 
-  - `--run` *cmd*  
+  - `--run` *cmd*\
     Like `--command`, but executes the command in a non-interactive
     shell. This means (among other things) that if you hit Ctrl-C while
     the command is running, the shell exits.
 
-  - `--exclude` *regexp*  
+  - `--exclude` *regexp*\
     Do not build any dependencies whose store path matches the regular
     expression *regexp*. This option may be specified multiple times.
 
-  - `--pure`  
+  - `--pure`\
     If this flag is specified, the environment is almost entirely
     cleared before the interactive shell is started, so you get an
     environment that more closely corresponds to the “real” Nix build. A
     few variables, in particular `HOME`, `USER` and `DISPLAY`, are
-    retained. Note that (depending on your Bash
-    installation) `/etc/bashrc` is still sourced, so any variables set
-    there will affect the interactive shell.
+    retained.
 
-  - `--packages` / `-p` *packages*…  
+  - `--packages` / `-p` *packages*…\
     Set up an environment in which the specified packages are present.
     The command line arguments are interpreted as attribute names inside
     the Nix Packages collection. Thus, `nix-shell -p libjpeg openjdk`
     will start a shell in which the packages denoted by the attribute
     names `libjpeg` and `openjdk` are present.
 
-  - `-i` *interpreter*  
+  - `-i` *interpreter*\
     The chained script interpreter to be invoked by `nix-shell`. Only
     applicable in `#!`-scripts (described below).
 
-  - `--keep` *name*  
+  - `--keep` *name*\
     When a `--pure` shell is started, keep the listed environment
     variables.
 
@@ -101,7 +99,7 @@ The following common options are supported:
 
 # Environment variables
 
-  - `NIX_BUILD_SHELL`  
+  - `NIX_BUILD_SHELL`\
     Shell used to start the interactive environment. Defaults to the
     `bash` found in `PATH`.
 
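An illustrative invocation combining the flags above, reusing the package names from the `--packages` description (`java -version` is just an assumed command provided by `openjdk`):

```console
$ nix-shell --pure -p libjpeg openjdk --run 'java -version'
```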
diff --git a/doc/manual/src/command-ref/nix-store.md b/doc/manual/src/command-ref/nix-store.md
index 361c20cc96da6c3cd4d87336ebcb2219fdbc249f..7a131dc0240f5ff83ce3a3374c8694e229539981 100644
--- a/doc/manual/src/command-ref/nix-store.md
+++ b/doc/manual/src/command-ref/nix-store.md
@@ -22,7 +22,7 @@ This section lists the options that are common to all operations. These
 options are allowed for every subcommand, though they may not always
 have an effect.
 
-  - `--add-root` *path*  
+  - `--add-root` *path*\
     Causes the result of a realisation (`--realise` and
     `--force-realise`) to be registered as a root of the garbage
     collector. *path* will be created as a symlink to the resulting
@@ -79,22 +79,22 @@ paths. Realisation is a somewhat overloaded term:
     system). If the path is already valid, we are done immediately.
     Otherwise, the path and any missing paths in its closure may be
     produced through substitutes. If there are no (successful)
-    subsitutes, realisation fails.
+    substitutes, realisation fails.
 
 The output path of each derivation is printed on standard output. (For
 non-derivation arguments, the argument itself is printed.)
 
 The following flags are available:
 
-  - `--dry-run`  
+  - `--dry-run`\
     Print on standard error a description of what packages would be
     built or downloaded, without actually performing the operation.
 
-  - `--ignore-unknown`  
+  - `--ignore-unknown`\
     If a non-derivation path does not have a substitute, then silently
     ignore it.
 
-  - `--check`  
+  - `--check`\
     This option allows you to check whether a derivation is
     deterministic. It rebuilds the specified derivation and checks
     whether the result is bitwise-identical with the existing outputs,
@@ -110,20 +110,20 @@ The following flags are available:
 
 Special exit codes:
 
-  - `100`  
+  - `100`\
     Generic build failure, the builder process returned with a non-zero
     exit code.
 
-  - `101`  
+  - `101`\
     Build timeout, the build was aborted because it did not complete
     within the specified `timeout`.
 
-  - `102`  
+  - `102`\
     Hash mismatch, the build output was rejected because it does not
     match the [`outputHash` attribute of the
     derivation](../expressions/advanced-attributes.md).
 
-  - `104`  
+  - `104`\
     Not deterministic, the build succeeded in check mode but the
     resulting output is not binary reproducible.
 
@@ -170,7 +170,7 @@ access to a restricted ssh user.
 
 The following flags are available:
 
-  - `--write`  
+  - `--write`\
     Allow the connected client to request the realization of
     derivations. In effect, this can be used to make the host act as a
     remote builder.
@@ -200,18 +200,18 @@ reachable via file system references from a set of “roots”, are deleted.
 
 The following suboperations may be specified:
 
-  - `--print-roots`  
+  - `--print-roots`\
     This operation prints on standard output the set of roots used by
     the garbage collector.
 
-  - `--print-live`  
+  - `--print-live`\
     This operation prints on standard output the set of “live” store
     paths, which are all the store paths reachable from the roots. Live
     paths should never be deleted, since that would break consistency —
     it would become possible that applications are installed that
     reference things that are no longer present in the store.
 
-  - `--print-dead`  
+  - `--print-dead`\
     This operation prints out on standard output the set of “dead” store
     paths, which is just the opposite of the set of live paths: any path
     in the store that is not live (with respect to the roots) is dead.
@@ -219,7 +219,7 @@ The following suboperations may be specified:
 By default, all unreachable paths are deleted. The following options
 control what gets deleted and in what order:
 
-  - `--max-freed` *bytes*  
+  - `--max-freed` *bytes*\
     Keep deleting paths until at least *bytes* bytes have been deleted,
     then stop. The argument *bytes* can be followed by the
     multiplicative suffix `K`, `M`, `G` or `T`, denoting KiB, MiB, GiB
@@ -300,22 +300,22 @@ symlink.
 
 ## Common query options
 
-  - `--use-output`; `-u`  
+  - `--use-output`; `-u`\
     For each argument to the query that is a store derivation, apply the
     query to the output path of the derivation instead.
 
-  - `--force-realise`; `-f`  
+  - `--force-realise`; `-f`\
     Realise each argument to the query first (see [`nix-store
     --realise`](#operation---realise)).
 
 ## Queries
 
-  - `--outputs`  
+  - `--outputs`\
     Prints out the [output paths](../glossary.md) of the store
     derivations *paths*. These are the paths that will be produced when
     the derivation is built.
 
-  - `--requisites`; `-R`  
+  - `--requisites`; `-R`\
     Prints out the [closure](../glossary.md) of the store path *paths*.
 
     This query has one option:
@@ -332,31 +332,31 @@ symlink.
     dependencies) is obtained by distributing the closure of a store
     derivation and specifying the option `--include-outputs`.
 
-  - `--references`  
+  - `--references`\
     Prints the set of [references](../glossary.md) of the store paths
     *paths*, that is, their immediate dependencies. (For *all*
     dependencies, use `--requisites`.)
 
-  - `--referrers`  
+  - `--referrers`\
     Prints the set of *referrers* of the store paths *paths*, that is,
     the store paths currently existing in the Nix store that refer to
     one of *paths*. Note that contrary to the references, the set of
     referrers is not constant; it can change as store paths are added or
     removed.
 
-  - `--referrers-closure`  
+  - `--referrers-closure`\
     Prints the closure of the set of store paths *paths* under the
     referrers relation; that is, all store paths that directly or
     indirectly refer to one of *paths*. These are all the paths currently
     in the Nix store that are dependent on *paths*.
 
-  - `--deriver`; `-d`  
+  - `--deriver`; `-d`\
     Prints the [deriver](../glossary.md) of the store paths *paths*. If
     the path has no deriver (e.g., if it is a source file), or if the
     deriver is not known (e.g., in the case of a binary-only
     deployment), the string `unknown-deriver` is printed.
 
-  - `--graph`  
+  - `--graph`\
     Prints the references graph of the store paths *paths* in the format
     of the `dot` tool of AT\&T's [Graphviz
     package](http://www.graphviz.org/). This can be used to visualise
@@ -364,39 +364,39 @@ symlink.
     this to a store derivation. To obtain a runtime dependency graph,
     apply it to an output path.
 
-  - `--tree`  
+  - `--tree`\
     Prints the references graph of the store paths *paths* as a nested
     ASCII tree. References are ordered by descending closure size; this
     tends to flatten the tree, making it more readable. The query only
     recurses into a store path when it is first encountered; this
     prevents a blowup of the tree representation of the graph.
 
-  - `--graphml`  
+  - `--graphml`\
     Prints the references graph of the store paths *paths* in the
     [GraphML](http://graphml.graphdrawing.org/) file format. This can be
     used to visualise dependency graphs. To obtain a build-time
     dependency graph, apply this to a store derivation. To obtain a
     runtime dependency graph, apply it to an output path.
 
-  - `--binding` *name*; `-b` *name*  
+  - `--binding` *name*; `-b` *name*\
     Prints the value of the attribute *name* (i.e., environment
     variable) of the store derivations *paths*. It is an error for a
     derivation to not have the specified attribute.
 
-  - `--hash`  
+  - `--hash`\
     Prints the SHA-256 hash of the contents of the store paths *paths*
     (that is, the hash of the output of `nix-store --dump` on the given
     paths). Since the hash is stored in the Nix database, this is a fast
     operation.
 
-  - `--size`  
+  - `--size`\
     Prints the size in bytes of the contents of the store paths *paths*
     — to be precise, the size of the output of `nix-store --dump` on
     the given paths. Note that the actual disk space required by the
     store paths may be higher, especially on filesystems with large
     cluster sizes.
 
-  - `--roots`  
+  - `--roots`\
     Prints the garbage collector roots that point, directly or
     indirectly, at the store paths *paths*.
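+
+    For example, a sketch (assuming `nix` itself resolves to a path in
+    the store) that lists the roots keeping it alive:
+
+    ```console
+    $ nix-store -q --roots $(which nix)
+    ```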
 
@@ -513,7 +513,7 @@ public url or broke since the download expression was written.
 
 This operation has the following options:
 
-  - `--recursive`  
+  - `--recursive`\
     Use recursive instead of flat hashing mode, used when adding
     directories to the store.
 
@@ -540,14 +540,14 @@ being modified by non-Nix tools, or of bugs in Nix itself.
 
 This operation has the following options:
 
-  - `--check-contents`  
+  - `--check-contents`\
     Checks that the contents of every valid store path have not been
     altered by computing a SHA-256 hash of the contents and comparing it
     with the hash stored in the Nix database at build time. Paths that
     have been modified are printed out. For large stores,
     `--check-contents` is obviously quite slow.
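+
+    For example:
+
+    ```console
+    $ nix-store --verify --check-contents
+    ```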
 
-  - `--repair`  
+  - `--repair`\
     If any valid path is missing from the store, or (if
     `--check-contents` is given) the contents of a valid path have been
     modified, then try to repair the path by redownloading it. See
diff --git a/doc/manual/src/command-ref/opt-common.md b/doc/manual/src/command-ref/opt-common.md
index bc8eb6796ec729e68f4fa4c39544c04688a278f3..47862bc09bd29d0af80858bb0a662292409b7b10 100644
--- a/doc/manual/src/command-ref/opt-common.md
+++ b/doc/manual/src/command-ref/opt-common.md
@@ -2,56 +2,56 @@
 
 Most Nix commands accept the following command-line options:
 
-  - `--help`  
+  - `--help`\
     Prints out a summary of the command syntax and exits.
 
-  - `--version`  
+  - `--version`\
     Prints out the Nix version number on standard output and exits.
 
-  - `--verbose` / `-v`  
+  - `--verbose` / `-v`\
     Increases the level of verbosity of diagnostic messages printed on
     standard error. For each Nix operation, the information printed on
     standard output is well-defined; any diagnostic information is
     printed on standard error, never on standard output.
-    
+
     This option may be specified repeatedly. Currently, the following
     verbosity levels exist:
-    
-      - 0  
+
+      - 0\
         “Errors only”: only print messages explaining why the Nix
         invocation failed.
-    
-      - 1  
+
+      - 1\
         “Informational”: print *useful* messages about what Nix is
         doing. This is the default.
-    
-      - 2  
+
+      - 2\
         “Talkative”: print more informational messages.
-    
-      - 3  
+
+      - 3\
         “Chatty”: print even more informational messages.
-    
-      - 4  
+
+      - 4\
         “Debug”: print debug information.
-    
-      - 5  
+
+      - 5\
         “Vomit”: print vast amounts of debug information.
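+
+    For example, passing `-vvv` raises the verbosity from the default
+    level 1 (“Informational”) to level 4 (“Debug”); the `hello`
+    attribute here is only illustrative:
+
+    ```console
+    $ nix-build '<nixpkgs>' -A hello -vvv
+    ```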
 
-  - `--quiet`  
+  - `--quiet`\
     Decreases the level of verbosity of diagnostic messages printed on
     standard error. This is the inverse option to `-v` / `--verbose`.
-    
+
     This option may be specified repeatedly. See the previous verbosity
     levels list.
 
-  - `--log-format` *format*  
+  - `--log-format` *format*\
     This option can be used to change the format of the log output, with
     *format* being one of:
-    
-      - raw  
+
+      - raw\
         This is the raw format, as output by `nix-build`.
-    
-      - internal-json  
+
+      - internal-json\
         Outputs the logs in a structured manner.
 
         > **Warning**
@@ -60,30 +60,30 @@ Most Nix commands accept the following command-line options:
         > the error-messages (namely of the `msg`-field) can change
         > between releases.
 
-      - bar  
+      - bar\
         Only display a progress bar during the builds.
-    
-      - bar-with-logs  
+
+      - bar-with-logs\
         Display the raw logs, with the progress bar at the bottom.
 
-  - `--no-build-output` / `-Q`  
+  - `--no-build-output` / `-Q`\
     By default, output written by builders to standard output and
     standard error is echoed to the Nix command's standard error. This
     option suppresses this behaviour. Note that the builder's standard
     output and error are always written to a log file in
     `prefix/nix/var/log/nix`.
 
-  - `--max-jobs` / `-j` *number*  
+  - `--max-jobs` / `-j` *number*\
     Sets the maximum number of build jobs that Nix will perform in
     parallel to the specified number. Specify `auto` to use the number
     of CPUs in the system. The default is specified by the `max-jobs`
     configuration setting, which itself defaults to `1`. A higher
     value is useful on SMP systems or to exploit I/O latency.
-    
+
     Setting it to `0` disallows building on the local machine, which is
     useful when you want builds to happen only on remote builders.
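+
+    For example, to build using as many parallel jobs as there are CPUs
+    (the `hello` attribute is only illustrative):
+
+    ```console
+    $ nix-build '<nixpkgs>' -A hello --max-jobs auto
+    ```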
 
-  - `--cores`  
+  - `--cores`\
     Sets the value of the `NIX_BUILD_CORES` environment variable in
     the invocation of builders. Builders can use this variable at
     their discretion to control the maximum amount of parallelism. For
@@ -94,18 +94,18 @@ Most Nix commands accept the following command-line options:
     means that the builder should use all available CPU cores in the
     system.
 
-  - `--max-silent-time`  
+  - `--max-silent-time`\
     Sets the maximum number of seconds that a builder can go without
     producing any data on standard output or standard error. The
     default is specified by the `max-silent-time` configuration
     setting. `0` means no time-out.
 
-  - `--timeout`  
+  - `--timeout`\
     Sets the maximum number of seconds that a builder can run. The
     default is specified by the `timeout` configuration setting. `0`
     means no timeout.
 
-  - `--keep-going` / `-k`  
+  - `--keep-going` / `-k`\
     Keep going in case of failed builds, to the greatest extent
     possible. That is, if building an input of some derivation fails,
     Nix will still build the other inputs, but not the derivation
@@ -113,17 +113,17 @@ Most Nix commands accept the following command-line options:
     for builds of substitutes), possibly killing builds in progress (in
     case of parallel or distributed builds).
 
-  - `--keep-failed` / `-K`  
+  - `--keep-failed` / `-K`\
     Specifies that in case of a build failure, the temporary directory
     (usually in `/tmp`) in which the build takes place should not be
     deleted. The path of the build directory is printed as an
     informational message.
 
-  - `--fallback`  
+  - `--fallback`\
     Whenever Nix attempts to build a derivation for which substitutes
     are known for each output path, but realising the output paths
     through the substitutes fails, fall back on building the derivation.
-    
+
     The most common scenario in which this is useful is when we have
     registered substitutes in order to perform binary distribution from,
     say, a network repository. If the repository is down, the
@@ -134,12 +134,12 @@ Most Nix commands accept the following command-line options:
     failure in obtaining the substitutes to lead to a full build from
     source (with the related consumption of resources).
 
-  - `--readonly-mode`  
+  - `--readonly-mode`\
     When this option is used, no attempt is made to open the Nix
     database. Most Nix operations do need database access, so those
     operations will fail.
 
-  - `--arg` *name* *value*  
+  - `--arg` *name* *value*\
     This option is accepted by `nix-env`, `nix-instantiate`,
     `nix-shell` and `nix-build`. When evaluating Nix expressions, the
     expression evaluator will automatically try to call functions that
@@ -151,7 +151,7 @@ Most Nix commands accept the following command-line options:
     override a default value). That is, if the evaluator encounters a
     function with an argument named *name*, it will call it with value
     *value*.
-    
+
     For instance, the top-level `default.nix` in Nixpkgs is actually a
     function:
 
@@ -161,7 +161,7 @@ Most Nix commands accept the following command-line options:
       ...
     }: ...
     ```
-    
+
     So if you call this Nix expression (e.g., when you do `nix-env -i
     pkgname`), the function will be called automatically using the
     value [`builtins.currentSystem`](../expressions/builtins.md) for
@@ -170,13 +170,13 @@ Most Nix commands accept the following command-line options:
     since the argument is a Nix string literal, you have to escape the
     quotes.)
 
-  - `--argstr` *name* *value*  
+  - `--argstr` *name* *value*\
     This option is like `--arg`, only the value is not a Nix
     expression but a string. So instead of `--arg system
     \"i686-linux\"` (the outer quotes are to keep the shell happy) you
     can say `--argstr system i686-linux`.
 
-  - `--attr` / `-A` *attrPath*  
+  - `--attr` / `-A` *attrPath*\
     Select an attribute from the top-level Nix expression being
     evaluated. (`nix-env`, `nix-instantiate`, `nix-build` and
     `nix-shell` only.) The *attribute path* *attrPath* is a sequence
@@ -185,34 +185,34 @@ Most Nix commands accept the following command-line options:
     would cause the expression `e.xorg.xorgserver` to be used. See
     [`nix-env --install`](nix-env.md#operation---install) for some
     concrete examples.
-    
+
     In addition to attribute names, you can also specify array indices.
     For instance, the attribute path `foo.3.bar` selects the `bar`
     attribute of the fourth element of the array in the `foo` attribute
     of the top-level expression.
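+
+    For example, a sketch that selects the third element of a list by
+    its index in an inline expression:
+
+    ```console
+    $ nix-instantiate --eval -E '{ foo = [ 10 20 30 ]; }' -A foo.2
+    ```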
 
-  - `--expr` / `-E`  
+  - `--expr` / `-E`\
     Interpret the command line arguments as a list of Nix expressions to
     be parsed and evaluated, rather than as a list of file names of Nix
     expressions. (`nix-instantiate`, `nix-build` and `nix-shell` only.)
-    
+
     For `nix-shell`, this option is commonly used to give you a shell in
     which you can build the packages returned by the expression. If you
     want to get a shell which contains the *built* packages ready for
     use, give your expression to the `nix-shell -p` convenience flag
     instead.
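+
+    For example, a sketch (assuming a Nixpkgs channel is on the search
+    path) that builds GNU Hello from an inline expression:
+
+    ```console
+    $ nix-build -E 'with import <nixpkgs> { }; hello'
+    ```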
 
-  - `-I` *path*  
+  - `-I` *path*\
     Add a path to the Nix expression search path. This option may be
     given multiple times. See the `NIX_PATH` environment variable for
     information on the semantics of the Nix search path. Paths added
     through `-I` take precedence over `NIX_PATH`.
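+
+    For example, a sketch that points `<nixpkgs>` at a local checkout
+    (the path is a placeholder):
+
+    ```console
+    $ nix-build -I nixpkgs=/path/to/nixpkgs '<nixpkgs>' -A hello
+    ```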
 
-  - `--option` *name* *value*  
+  - `--option` *name* *value*\
     Set the Nix configuration option *name* to *value*. This overrides
     settings in the Nix configuration file (see `nix.conf(5)`).
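+
+    For example, a sketch that disables the build sandbox for a single
+    invocation:
+
+    ```console
+    $ nix-build '<nixpkgs>' -A hello --option sandbox false
+    ```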
 
-  - `--repair`  
+  - `--repair`\
     Fix corrupted or missing store paths by redownloading or rebuilding
     them. Note that this is slow because it requires computing a
     cryptographic hash of the contents of every path in the closure of
diff --git a/doc/manual/src/expressions/advanced-attributes.md b/doc/manual/src/expressions/advanced-attributes.md
index 31ebadda1907aacbb91e16724d7e837e792f6115..5b208df6713ed1adcc0cc247719b5bf2dd085f97 100644
--- a/doc/manual/src/expressions/advanced-attributes.md
+++ b/doc/manual/src/expressions/advanced-attributes.md
@@ -2,14 +2,14 @@
 
 Derivations can declare some infrequently used optional attributes.
 
-  - `allowedReferences`  
+  - `allowedReferences`\
     The optional attribute `allowedReferences` specifies a list of legal
     references (dependencies) of the output of the builder. For example,
-    
+
     ```nix
     allowedReferences = [];
     ```
-    
+
     enforces that the output of a derivation cannot have any runtime
     dependencies on its inputs. To allow an output to have a runtime
     dependency on itself, use `"out"` as a list item. This is used in
@@ -17,45 +17,45 @@ Derivations can declare some infrequently used optional attributes.
     booting Linux don’t have accidental dependencies on other paths in
     the Nix store.
 
-  - `allowedRequisites`  
+  - `allowedRequisites`\
     This attribute is similar to `allowedReferences`, but it specifies
     the legal requisites of the whole closure, so all the dependencies
     recursively. For example,
-    
+
     ```nix
     allowedRequisites = [ foobar ];
     ```
-    
+
     enforces that the output of a derivation cannot have any other
     runtime dependency than `foobar`, and in addition it enforces that
     `foobar` itself doesn't introduce any other dependencies.
 
-  - `disallowedReferences`  
+  - `disallowedReferences`\
     The optional attribute `disallowedReferences` specifies a list of
     illegal references (dependencies) of the output of the builder. For
     example,
-    
+
     ```nix
     disallowedReferences = [ foo ];
     ```
-    
+
     enforces that the output of a derivation cannot have a direct
     runtime dependency on the derivation `foo`.
 
-  - `disallowedRequisites`  
+  - `disallowedRequisites`\
     This attribute is similar to `disallowedReferences`, but it
     specifies illegal requisites for the whole closure, so all the
     dependencies recursively. For example,
-    
+
     ```nix
     disallowedRequisites = [ foobar ];
     ```
-    
+
     enforces that the output of a derivation cannot have any runtime
     dependency on `foobar` or any other derivation depending recursively
     on `foobar`.
 
-  - `exportReferencesGraph`  
+  - `exportReferencesGraph`\
     This attribute allows builders access to the references graph of
     their inputs. The attribute is a list of inputs in the Nix store
     whose references graph the builder needs to know. The value of
@@ -65,17 +65,17 @@ Derivations can declare some infrequently used optional attributes.
     files have the format used by `nix-store --register-validity`
     (with the deriver fields left empty). For example, when the
     following derivation is built:
-    
+
     ```nix
     derivation {
       ...
       exportReferencesGraph = [ "libfoo-graph" libfoo ];
     };
     ```
-    
+
     the references graph of `libfoo` is placed in the file
     `libfoo-graph` in the temporary build directory.
-    
+
     `exportReferencesGraph` is useful for builders that want to do
     something with the closure of a store path. Examples include the
     builders in NixOS that generate the initial ramdisk for booting
@@ -84,66 +84,66 @@ Derivations can declare some infrequently used optional attributes.
     with a Nix store containing the closure of a bootable NixOS
     configuration).
 
-  - `impureEnvVars`  
+  - `impureEnvVars`\
     This attribute allows you to specify a list of environment variables
     that should be passed from the environment of the calling user to
     the builder. Usually, the environment is cleared completely when the
     builder is executed, but with this attribute you can allow specific
     environment variables to be passed unmodified. For example,
     `fetchurl` in Nixpkgs has the line
-    
+
     ```nix
     impureEnvVars = [ "http_proxy" "https_proxy" ... ];
     ```
-    
+
     to make it use the proxy server configuration specified by the user
     in the environment variables `http_proxy` and friends.
-    
+
     This attribute is only allowed in *fixed-output derivations* (see
     below), where impurities such as these are okay since (the hash
     of) the output is known in advance. It is ignored for all other
     derivations.
-    
+
     > **Warning**
-    > 
+    >
     > The `impureEnvVars` implementation takes environment variables from
     > the current builder process. When the daemon is building, its
     > environment variables are used. Without the daemon, the environment
     > variables come from the environment of the `nix-build` invocation.
 
-  - `outputHash`; `outputHashAlgo`; `outputHashMode`  
+  - `outputHash`; `outputHashAlgo`; `outputHashMode`\
     These attributes declare that the derivation is a so-called
     *fixed-output derivation*, which means that a cryptographic hash of
     the output is already known in advance. When the build of a
     fixed-output derivation finishes, Nix computes the cryptographic
     hash of the output and compares it to the hash declared with these
     attributes. If there is a mismatch, the build fails.
-    
+
     The rationale for fixed-output derivations is derivations such as
     those produced by the `fetchurl` function. This function downloads a
     file from a given URL. To ensure that the downloaded file has not
     been modified, the caller must also specify a cryptographic hash of
     the file. For example,
-    
+
     ```nix
     fetchurl {
       url = "http://ftp.gnu.org/pub/gnu/hello/hello-2.1.1.tar.gz";
       sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
     }
     ```
-    
+
     It sometimes happens that the URL of the file changes, e.g., because
     servers are reorganised or no longer available. We then must update
     the call to `fetchurl`, e.g.,
-    
+
     ```nix
     fetchurl {
       url = "ftp://ftp.nluug.nl/pub/gnu/hello/hello-2.1.1.tar.gz";
       sha256 = "1md7jsfd8pa45z73bz1kszpp01yw6x5ljkjk2hx7wl800any6465";
     }
     ```
-    
+
     If a `fetchurl` derivation was treated like a normal derivation, the
     output paths of the derivation and *all derivations depending on it*
     would change. For instance, if we were to change the URL of the
@@ -151,16 +151,16 @@ Derivations can declare some infrequently used optional attributes.
     other packages depend) massive rebuilds would be needed. This is
     unfortunate for a change which we know cannot have a real effect as
     it propagates upwards through the dependency graph.
-    
+
     For fixed-output derivations, on the other hand, the name of the
     output path only depends on the `outputHash*` and `name` attributes,
     while all other attributes are ignored for the purpose of computing
     the output path. (The `name` attribute is included because it is
     part of the path.)
-    
+
     As an example, here is the (simplified) Nix expression for
     `fetchurl`:
-    
+
     ```nix
     { stdenv, curl }: # The curl program is used for downloading.
 
@@ -180,43 +180,51 @@ Derivations can declare some infrequently used optional attributes.
       inherit url;
     }
     ```
-    
+
     The `outputHashAlgo` attribute specifies the hash algorithm used to
     compute the hash. It can currently be `"sha1"`, `"sha256"` or
     `"sha512"`.
-    
+
     The `outputHashMode` attribute determines how the hash is computed.
     It must be one of the following two values:
-    
-      - `"flat"`  
+
+      - `"flat"`\
         The output must be a non-executable regular file. If it isn’t,
         the build fails. The hash is simply computed over the contents
         of that file (so it’s equal to what Unix commands like
         `sha256sum` or `sha1sum` produce).
-        
+
         This is the default.
-    
-      - `"recursive"`  
+
+      - `"recursive"`\
         The hash is computed over the NAR archive dump of the output
         (i.e., the result of [`nix-store
         --dump`](../command-ref/nix-store.md#operation---dump)). In
         this case, the output can be anything, including a directory
         tree.
-    
+
     The `outputHash` attribute, finally, must be a string containing
     the hash in either hexadecimal or base-32 notation. (See the
     [`nix-hash` command](../command-ref/nix-hash.md) for information
     about converting to and from base-32 notation.)
+
+  - `__contentAddressed`\
+    If this **experimental** attribute is set to true, then the derivation
+    outputs will be stored in a content-addressed location rather than the
+    traditional input-addressed one.
+    This only has an effect if the `ca-derivations` experimental feature is
+    enabled.
+
+    Setting this attribute also requires setting `outputHashMode` and
+    `outputHashAlgo` like for *fixed-output derivations* (see above).
 
-  - `passAsFile`  
+  - `passAsFile`\
     A list of names of attributes that should be passed via files rather
     than environment variables. For example, if you have
-    
+
     ```nix
     passAsFile = ["big"];
     big = "a very long string";
     ```
-    
+
     then when the builder runs, the environment variable `bigPath`
     will contain the absolute path to a temporary file containing `a
     very long string`. That is, for any attribute *x* listed in
@@ -226,7 +234,7 @@ Derivations can declare some infrequently used optional attributes.
     builder, since most operating systems impose a limit on the size
     of the environment (typically, a few hundred kilobytes).
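+
+    A minimal sketch of how a builder script might read such an
+    attribute back (continuing the `passAsFile = ["big"]` example
+    above):
+
+    ```bash
+    # "big" is listed in passAsFile, so Nix sets $bigPath to the path of a
+    # file holding its value instead of exporting $big directly.
+    big="$(cat "$bigPath")"
+    ```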
 
-  - `preferLocalBuild`  
+  - `preferLocalBuild`\
     If this attribute is set to `true` and [distributed building is
     enabled](../advanced-topics/distributed-builds.md), then, if
     possible, the derivation will be built locally instead of forwarded
@@ -234,14 +242,14 @@ Derivations can declare some infrequently used optional attributes.
     where the cost of doing a download or remote build would exceed
     the cost of building locally.
 
-  - `allowSubstitutes`  
+  - `allowSubstitutes`\
     If this attribute is set to `false`, then Nix will always build this
     derivation; it will not try to substitute its outputs. This is
     useful for very trivial derivations (such as `writeText` in Nixpkgs)
     that are cheaper to build than to substitute from a binary cache.
-    
+
     > **Note**
-    > 
+    >
     > You need to have a builder configured which satisfies the
     > derivation’s `system` attribute, since the derivation cannot be
     > substituted. Thus it is usually a good idea to align `system` with
diff --git a/doc/manual/src/expressions/builtin-constants.md b/doc/manual/src/expressions/builtin-constants.md
index 3345a715b385df9c7756d5ec78ccdad64aaddd12..1404289e50afe4229ad0317e0e14efa88a27cc36 100644
--- a/doc/manual/src/expressions/builtin-constants.md
+++ b/doc/manual/src/expressions/builtin-constants.md
@@ -2,7 +2,7 @@
 
 Here are the constants built into the Nix expression evaluator:
 
-  - `builtins`  
+  - `builtins`\
     The set `builtins` contains all the built-in functions and values.
     You can use `builtins` to test for the availability of features in
     the Nix installation, e.g.,
@@ -14,7 +14,7 @@ Here are the constants built into the Nix expression evaluator:
     This allows a Nix expression to fall back gracefully on older Nix
     installations that don’t have the desired built-in function.
 
-  - `builtins.currentSystem`  
+  - `builtins.currentSystem`\
     The built-in value `currentSystem` evaluates to the Nix platform
     identifier for the Nix installation on which the expression is being
     evaluated, such as `"i686-linux"` or `"x86_64-darwin"`.
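+
+    For example, you can inspect it from the command line:
+
+    ```console
+    $ nix-instantiate --eval -E 'builtins.currentSystem'
+    ```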
diff --git a/doc/manual/src/expressions/builtins-prefix.md b/doc/manual/src/expressions/builtins-prefix.md
index 0f7c3d32f6297f94f835ae97df9debc56fb8ba7c..c16b2805fd250a7299d21fa030218c01a77dfcf0 100644
--- a/doc/manual/src/expressions/builtins-prefix.md
+++ b/doc/manual/src/expressions/builtins-prefix.md
@@ -9,7 +9,7 @@ scope. Instead, you can access them through the `builtins` built-in
 value, which is a set that contains all built-in functions and values.
 For instance, `derivation` is also available as `builtins.derivation`.
 
-  - `derivation` *attrs*; `builtins.derivation` *attrs*  
+  - `derivation` *attrs*; `builtins.derivation` *attrs*\
 
     `derivation` is described in [its own section](derivations.md).
 
diff --git a/doc/manual/src/glossary.md b/doc/manual/src/glossary.md
index 56af9cd854277d10cdd221bac7d747a2967d0890..bb350d9dea06ce8fc77e5f9f2978e0cb79613447 100644
--- a/doc/manual/src/glossary.md
+++ b/doc/manual/src/glossary.md
@@ -1,48 +1,48 @@
 # Glossary
 
-  - derivation  
+  - derivation\
     A description of a build action. The result of a derivation is a
     store object. Derivations are typically specified in Nix expressions
     using the [`derivation` primitive](expressions/derivations.md). These are
     translated into low-level *store derivations* (implicitly by
     `nix-env` and `nix-build`, or explicitly by `nix-instantiate`).
 
-  - store  
+  - store\
     The location in the file system where store objects live. Typically
     `/nix/store`.
 
-  - store path  
+  - store path\
     The location in the file system of a store object, i.e., an
     immediate child of the Nix store directory.
 
-  - store object  
+  - store object\
     A file that is an immediate child of the Nix store directory. These
     can be regular files, but also entire directory trees. Store objects
     can be sources (objects copied from outside of the store),
     derivation outputs (objects produced by running a build action), or
     derivations (files describing a build action).
 
-  - substitute  
+  - substitute\
     A substitute is a command invocation stored in the Nix database that
     describes how to build a store object, bypassing the normal build
     mechanism (i.e., derivations). Typically, the substitute builds the
     store object by downloading a pre-built version of the store object
     from some server.
 
-  - purity  
+  - purity\
     The assumption that equal Nix derivations when run always produce
     the same output. This cannot be guaranteed in general (e.g., a
     builder can rely on external inputs such as the network or the
     system time) but the Nix model assumes it.
 
-  - Nix expression  
+  - Nix expression\
     A high-level description of software packages and compositions
     thereof. Deploying software using Nix entails writing Nix
     expressions for your packages. Nix expressions are translated to
     derivations that are stored in the Nix store. These derivations can
     then be built.
 
-  - reference  
+  - reference\
     A store path `P` is said to have a reference to a store path `Q` if
     the store object at `P` contains the path `Q` somewhere. The
     *references* of a store path are the set of store paths to which it
@@ -52,11 +52,11 @@
     output paths), whereas an output path only references other output
     paths.
 
-  - reachable  
+  - reachable\
     A store path `Q` is reachable from another store path `P` if `Q`
     is in the *closure* of the *references* relation.
 
-  - closure  
+  - closure\
     The closure of a store path is the set of store paths that are
     directly or indirectly “reachable” from that store path; that is,
     it’s the closure of the path under the *references* relation. For
@@ -71,29 +71,29 @@
     to path `Q`, then `Q` is in the closure of `P`. Further, if `Q`
     references `R` then `R` is also in the closure of `P`.
 
-  - output path  
+  - output path\
     A store path produced by a derivation.
 
-  - deriver  
+  - deriver\
     The deriver of an *output path* is the store
     derivation that built it.
 
-  - validity  
+  - validity\
     A store path is considered *valid* if it exists in the file system,
     is listed in the Nix database as being valid, and if all paths in
     its closure are also valid.
 
-  - user environment  
+  - user environment\
     An automatically generated store object that consists of a set of
     symlinks to “active” applications, i.e., other store paths. These
     are generated automatically by
     [`nix-env`](command-ref/nix-env.md). See *profiles*.
 
-  - profile  
+  - profile\
     A symlink to the current *user environment* of a user, e.g.,
     `/nix/var/nix/profiles/default`.
 
-  - NAR  
+  - NAR\
     A *N*ix *AR*chive. This is a serialisation of a path in the Nix
     store. It can contain regular files, directories and symbolic
     links.  NARs are generated and unpacked using `nix-store --dump`
diff --git a/doc/manual/src/installation/installing-binary.md b/doc/manual/src/installation/installing-binary.md
index ae7fd458bdfb3ccd37b09592b48f6d70ce292f25..96fa34635bee9f44e6181558da5be8acb3bf6578 100644
--- a/doc/manual/src/installation/installing-binary.md
+++ b/doc/manual/src/installation/installing-binary.md
@@ -1,18 +1,26 @@
 # Installing a Binary Distribution
 
-If you are using Linux or macOS versions up to 10.14 (Mojave), the
-easiest way to install Nix is to run the following command:
+The easiest way to install Nix is to run the following command:
 
 ```console
 $ sh <(curl -L https://nixos.org/nix/install)
 ```
 
-If you're using macOS 10.15 (Catalina) or newer, consult [the macOS
-installation instructions](#macos-installation) before installing.
+This will run the installer interactively (causing it to explain what
+it is doing more explicitly), and perform the default "type" of install
+for your platform:
+- single-user on Linux
+- multi-user on macOS
 
-As of Nix 2.1.0, the Nix installer will always default to creating a
-single-user installation, however opting in to the multi-user
-installation is highly recommended.
+  > **Notes on the read-only filesystem root in macOS 10.15 Catalina and newer**
+  >
+  > - It took some time to support this cleanly. You may see posts,
+  >   examples, and tutorials using obsolete workarounds.
+  > - Supporting it cleanly made macOS installs too complex to qualify
+  >   as single-user, so this type is no longer supported on macOS.
+
+We recommend the multi-user install if it supports your platform and
+you can authenticate with `sudo`.
 
 # Single User Installation
 
@@ -50,9 +58,9 @@ $ rm -rf /nix
 The multi-user Nix installation creates system users, and a system
 service for the Nix daemon.
 
-  - Linux running systemd, with SELinux disabled
-
-  - macOS
+**Supported Systems**
+- Linux running systemd, with SELinux disabled
+- macOS
 
 You can instruct the installer to perform a multi-user installation on
 your system:
@@ -96,165 +104,28 @@ sudo rm /Library/LaunchDaemons/org.nixos.nix-daemon.plist
 There may also be references to Nix in `/etc/profile`, `/etc/bashrc`,
 and `/etc/zshrc` which you may remove.
 
-# macOS Installation
-
-Starting with macOS 10.15 (Catalina), the root filesystem is read-only.
-This means `/nix` can no longer live on your system volume, and that
-you'll need a workaround to install Nix.
-
-The recommended approach, which creates an unencrypted APFS volume for
-your Nix store and a "synthetic" empty directory to mount it over at
-`/nix`, is least likely to impair Nix or your system.
-
-> **Note**
-> 
-> With all separate-volume approaches, it's possible something on your
-> system (particularly daemons/services and restored apps) may need
-> access to your Nix store before the volume is mounted. Adding
-> additional encryption makes this more likely.
-
-If you're using a recent Mac with a [T2
-chip](https://www.apple.com/euro/mac/shared/docs/Apple_T2_Security_Chip_Overview.pdf),
-your drive will still be encrypted at rest (in which case "unencrypted"
-is a bit of a misnomer). To use this approach, just install Nix with:
-
-```console
-$ sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume
-```
-
-If you don't like the sound of this, you'll want to weigh the other
-approaches and tradeoffs detailed in this section.
-
-> **Note**
-> 
-> All of the known workarounds have drawbacks, but we hope better
-> solutions will be available in the future. Some that we have our eye
-> on are:
-> 
-> 1.  A true firmlink would enable the Nix store to live on the primary
->     data volume without the build problems caused by the symlink
->     approach. End users cannot currently create true firmlinks.
-> 
-> 2.  If the Nix store volume shared FileVault encryption with the
->     primary data volume (probably by using the same volume group and
->     role), FileVault encryption could be easily supported by the
->     installer without requiring manual setup by each user.
-
-## Change the Nix store path prefix
-
-Changing the default prefix for the Nix store is a simple approach which
-enables you to leave it on your root volume, where it can take full
-advantage of FileVault encryption if enabled. Unfortunately, this
-approach also opts your device out of some benefits that are enabled by
-using the same prefix across systems:
-
-  - Your system won't be able to take advantage of the binary cache
-    (unless someone is able to stand up and support duplicate caching
-    infrastructure), which means you'll spend more time waiting for
-    builds.
-
-  - It's harder to build and deploy packages to Linux systems.
-
-It would also possible (and often requested) to just apply this change
-ecosystem-wide, but it's an intrusive process that has side effects we
-want to avoid for now.
-
-## Use a separate encrypted volume
-
-If you like, you can also add encryption to the recommended approach
-taken by the installer. You can do this by pre-creating an encrypted
-volume before you run the installer--or you can run the installer and
-encrypt the volume it creates later.
-
-In either case, adding encryption to a second volume isn't quite as
-simple as enabling FileVault for your boot volume. Before you dive in,
-there are a few things to weigh:
-
-1.  The additional volume won't be encrypted with your existing
-    FileVault key, so you'll need another mechanism to decrypt the
-    volume.
-
-2.  You can store the password in Keychain to automatically decrypt the
-    volume on boot--but it'll have to wait on Keychain and may not mount
-    before your GUI apps restore. If any of your launchd agents or apps
-    depend on Nix-installed software (for example, if you use a
-    Nix-installed login shell), the restore may fail or break.
-    
-    On a case-by-case basis, you may be able to work around this problem
-    by using `wait4path` to block execution until your executable is
-    available.
-    
-    It's also possible to decrypt and mount the volume earlier with a
-    login hook--but this mechanism appears to be deprecated and its
-    future is unclear.
-
-3.  You can hard-code the password in the clear, so that your store
-    volume can be decrypted before Keychain is available.
-
-If you are comfortable navigating these tradeoffs, you can encrypt the
-volume with something along the lines of:
-
-```console
-$ diskutil apfs enableFileVault /nix -user disk
-```
-
-## Symlink the Nix store to a custom location
-
-Another simple approach is using `/etc/synthetic.conf` to symlink the
-Nix store to the data volume. This option also enables your store to
-share any configured FileVault encryption. Unfortunately, builds that
-resolve the symlink may leak the canonical path or even fail.
-
-Because of these downsides, we can't recommend this approach.
-
-## Notes on the recommended approach
-
-This section goes into a little more detail on the recommended approach.
-You don't need to understand it to run the installer, but it can serve
-as a helpful reference if you run into trouble.
-
-1.  In order to compose user-writable locations into the new read-only
-    system root, Apple introduced a new concept called `firmlinks`,
-    which it describes as a "bi-directional wormhole" between two
-    filesystems. You can see the current firmlinks in
-    `/usr/share/firmlinks`. Unfortunately, firmlinks aren't (currently?)
-    user-configurable.
-    
-    For special cases like NFS mount points or package manager roots,
-    [synthetic.conf(5)](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man5/synthetic.conf.5.html)
-    supports limited user-controlled file-creation (of symlinks, and
-    synthetic empty directories) at `/`. To create a synthetic empty
-    directory for mounting at `/nix`, add the following line to
-    `/etc/synthetic.conf` (create it if necessary):
-    
-        nix
-
-2.  This configuration is applied at boot time, but you can use
-    `apfs.util` to trigger creation (not deletion) of new entries
-    without a reboot:
-    
-    ```console
-    $ /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -B
-    ```
-
-3.  Create the new APFS volume with diskutil:
-    
-    ```console
-    $ sudo diskutil apfs addVolume diskX APFS 'Nix Store' -mountpoint /nix
-    ```
-
-4.  Using `vifs`, add the new mount to `/etc/fstab`. If it doesn't
-    already have other entries, it should look something like:
-    
-        #
-        # Warning - this file should only be modified with vifs(8)
-        #
-        # Failure to do so is unsupported and may be destructive.
-        #
-        LABEL=Nix\040Store /nix apfs rw,nobrowse
-    
-    The nobrowse setting will keep Spotlight from indexing this volume,
-    and keep it from showing up on your desktop.
+# macOS Installation <a name="sect-macos-installation-change-store-prefix"></a><a name="sect-macos-installation-encrypted-volume"></a><a name="sect-macos-installation-symlink"></a><a name="sect-macos-installation-recommended-notes"></a>
+<!-- Note: anchors above to catch permalinks to old explanations -->
+
+We believe we have ironed out how to cleanly support the read-only root
+on modern macOS. New installs will do this automatically, and you can
+also re-run a new installer to convert your existing setup.
+
+This section previously detailed the situation, options, and trade-offs,
+but it now only outlines what the installer does. You don't need to know
+this to run the installer, but it may help if you run into trouble:
+
+- create a new APFS volume for your Nix store
+- update `/etc/synthetic.conf` to direct macOS to create a "synthetic"
+  empty root directory to mount your volume
+- specify mount options for the volume in `/etc/fstab`
+- if you have FileVault enabled
+    - generate an encryption password
+    - put it in your system Keychain
+    - use it to encrypt the volume
+- create a system LaunchDaemon to mount this volume early enough in the
+  boot process to avoid problems loading or restoring any programs that
+  need access to your Nix store
 
 # Installing a pinned Nix version from a URL
 
diff --git a/doc/manual/src/package-management/s3-substituter.md b/doc/manual/src/package-management/s3-substituter.md
index a4f4d561feea0c8f39b335d9feff64185ce81ace..30f2b2e117516806d6a697e0d8037c0dcbf907af 100644
--- a/doc/manual/src/package-management/s3-substituter.md
+++ b/doc/manual/src/package-management/s3-substituter.md
@@ -7,17 +7,17 @@ cache mechanism that Nix usually uses to fetch prebuilt binaries from
 
 The following options can be specified as URL parameters to the S3 URL:
 
-  - `profile`  
+  - `profile`\
     The name of the AWS configuration profile to use. By default Nix
     will use the `default` profile.
 
-  - `region`  
+  - `region`\
     The region of the S3 bucket. `us-east-1` by default.
     
     If your bucket is not in `us-east-1`, you should always explicitly
     specify the region parameter.
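+
+    For example, a sketch (the bucket name is a placeholder) that
+    passes the region as a URL parameter when copying a package to an
+    S3 cache:
+
+    ```console
+    $ nix copy --to 's3://example-nix-cache?region=eu-west-1' nixpkgs.hello
+    ```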
 
-  - `endpoint`  
+  - `endpoint`\
     The URL to your S3-compatible service, for when not using Amazon S3.
     Do not specify this value if you're using Amazon S3.
     
@@ -26,7 +26,7 @@ The following options can be specified as URL parameters to the S3 URL:
     > This endpoint must support HTTPS and will use path-based
     > addressing instead of virtual host based addressing.
 
-  - `scheme`  
+  - `scheme`\
     The scheme used for S3 requests, `https` (default) or `http`. This
     option allows you to disable HTTPS for binary caches which don't
     support it.
diff --git a/flake.lock b/flake.lock
index 9867e694b7f49d5e62a127418032befc7fa3ec2d..8aad229573343bb838e52ed11d2365b76585da2e 100644
--- a/flake.lock
+++ b/flake.lock
@@ -1,22 +1,40 @@
 {
   "nodes": {
+    "lowdown-src": {
+      "flake": false,
+      "locked": {
+        "lastModified": 1617481909,
+        "narHash": "sha256-SqnfOFuLuVRRNeVJr1yeEPJue/qWoCp5N6o5Kr///p4=",
+        "owner": "kristapsdz",
+        "repo": "lowdown",
+        "rev": "148f9b2f586c41b7e36e73009db43ea68c7a1a4d",
+        "type": "github"
+      },
+      "original": {
+        "owner": "kristapsdz",
+        "ref": "VERSION_0_8_4",
+        "repo": "lowdown",
+        "type": "github"
+      }
+    },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1614309161,
-        "narHash": "sha256-93kRxDPyEW9QIpxU71kCaV1r+hgOgP6/aVgC7vvO8IU=",
+        "lastModified": 1622593737,
+        "narHash": "sha256-9loxFJg85AbzJrSkU4pE/divZ1+zOxDy2FSjlrufCB8=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "0e499fde7af3c28d63e9b13636716b86c3162b93",
+        "rev": "bb8a5e54845012ed1375ffd5f317d2fdf434b20e",
         "type": "github"
       },
       "original": {
         "id": "nixpkgs",
-        "ref": "nixos-20.09-small",
+        "ref": "nixos-21.05-small",
         "type": "indirect"
       }
     },
     "root": {
       "inputs": {
+        "lowdown-src": "lowdown-src",
         "nixpkgs": "nixpkgs"
       }
     }
diff --git a/flake.nix b/flake.nix
index 1fb5911c865703bc2a7279736ba06176983669a3..829cd280475d03e7c3f30630108c266aebb5de87 100644
--- a/flake.nix
+++ b/flake.nix
@@ -1,10 +1,10 @@
 {
   description = "The purely functional package manager";
 
-  inputs.nixpkgs.url = "nixpkgs/nixos-20.09-small";
-  #inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; };
+  inputs.nixpkgs.url = "nixpkgs/nixos-21.05-small";
+  inputs.lowdown-src = { url = "github:kristapsdz/lowdown/VERSION_0_8_4"; flake = false; };
 
-  outputs = { self, nixpkgs }:
+  outputs = { self, nixpkgs, lowdown-src }:
 
     let
 
@@ -18,7 +18,7 @@
 
       linux64BitSystems = [ "x86_64-linux" "aarch64-linux" ];
       linuxSystems = linux64BitSystems ++ [ "i686-linux" ];
-      systems = linuxSystems ++ [ "x86_64-darwin" ];
+      systems = linuxSystems ++ [ "x86_64-darwin" "aarch64-darwin" ];
 
       crossSystems = [ "armv6l-linux" "armv7l-linux" ];
 
@@ -80,11 +80,12 @@
             buildPackages.git
             buildPackages.mercurial
             buildPackages.jq
-          ] ++ lib.optional stdenv.hostPlatform.isLinux buildPackages.utillinuxMinimal;
+          ]
+          ++ lib.optionals stdenv.hostPlatform.isLinux [(buildPackages.util-linuxMinimal or buildPackages.utillinuxMinimal)];
 
         buildDeps =
           [ curl
-            bzip2 xz brotli zlib editline
+            bzip2 xz brotli editline
             openssl sqlite
             libarchive
             boost
@@ -92,7 +93,7 @@
             lowdown
             gmock
           ]
-          ++ lib.optional stdenv.isLinux libseccomp
+          ++ lib.optionals stdenv.isLinux [libseccomp]
           ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
           ++ lib.optional stdenv.hostPlatform.isx86_64 libcpuid;
 
@@ -146,12 +147,46 @@
             echo "file installer $out/install" >> $out/nix-support/hydra-build-products
           '';
 
+      testNixVersions = pkgs: client: daemon: with commonDeps pkgs; pkgs.stdenv.mkDerivation {
+        NIX_DAEMON_PACKAGE = daemon;
+        NIX_CLIENT_PACKAGE = client;
+        # Must keep this name short as OSX has a rather strict limit on the
+        # socket path length, and this name appears in the path of the
+        # nix-daemon socket used in the tests
+        name = "nix-tests";
+        inherit version;
+
+        src = self;
+
+        VERSION_SUFFIX = versionSuffix;
+
+        nativeBuildInputs = nativeBuildDeps;
+        buildInputs = buildDeps ++ awsDeps;
+        propagatedBuildInputs = propagatedDeps;
+
+        enableParallelBuilding = true;
+
+        dontBuild = true;
+        doInstallCheck = true;
+
+        installPhase = ''
+          mkdir -p $out
+        '';
+        installCheckPhase = "make installcheck";
+
+      };
+
     in {
 
       # A Nixpkgs overlay that overrides the 'nix' and
       # 'nix.perl-bindings' packages.
       overlay = final: prev: {
 
+        # An older version of Nix to test against when using the daemon.
+        # Currently using `nixUnstable` as the stable one doesn't respect
+        # `NIX_DAEMON_SOCKET_PATH` which is needed for the tests.
+        nixStable = prev.nix;
+
         nix = with final; with commonDeps pkgs; stdenv.mkDerivation {
           name = "nix-${version}";
           inherit version;
@@ -201,6 +236,8 @@
 
           separateDebugInfo = true;
 
+          strictDeps = true;
+
           passthru.perl-bindings = with final; stdenv.mkDerivation {
             name = "nix-perl-${version}";
 
@@ -221,7 +258,8 @@
                 boost
                 nlohmann_json
               ]
-              ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium;
+              ++ lib.optional (stdenv.isLinux || stdenv.isDarwin) libsodium
+              ++ lib.optional stdenv.isDarwin darwin.apple_sdk.frameworks.Security;
 
             configureFlags = ''
               --with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}
@@ -236,21 +274,23 @@
         };
 
         lowdown = with final; stdenv.mkDerivation rec {
-          name = "lowdown-0.8.0";
+          name = "lowdown-0.8.4";
 
+          /*
           src = fetchurl {
             url = "https://kristaps.bsd.lv/lowdown/snapshots/${name}.tar.gz";
             hash = "sha512-U9WeGoInT9vrawwa57t6u9dEdRge4/P+0wLxmQyOL9nhzOEUU2FRz2Be9H0dCjYE7p2v3vCXIYk40M+jjULATw==";
           };
+          */
 
-          #src = lowdown-src;
+          src = lowdown-src;
 
           outputs = [ "out" "bin" "dev" ];
 
           nativeBuildInputs = [ buildPackages.which ];
 
-          configurePhase =
-            ''
+          configurePhase = ''
+              ${if (stdenv.isDarwin && stdenv.isAarch64) then "echo \"HAVE_SANDBOX_INIT=false\" > configure.local" else ""}
               ./configure \
                 PREFIX=${placeholder "dev"} \
                 BINDIR=${placeholder "bin"}/bin
@@ -353,7 +393,7 @@
         # to https://nixos.org/nix/install. It downloads the binary
         # tarball for the user's system and calls the second half of the
         # installation script.
-        installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ];
+        installerScript = installScriptFor [ "x86_64-linux" "i686-linux" "aarch64-linux" "x86_64-darwin" "aarch64-darwin" ];
         installerScriptForGHA = installScriptFor [ "x86_64-linux" "x86_64-darwin" ];
 
         # Line coverage analysis.
@@ -439,6 +479,15 @@
       checks = forAllSystems (system: {
         binaryTarball = self.hydraJobs.binaryTarball.${system};
         perlBindings = self.hydraJobs.perlBindings.${system};
+        installTests =
+          let pkgs = nixpkgsFor.${system}; in
+          pkgs.runCommand "install-tests" {
+            againstSelf = testNixVersions pkgs pkgs.nix pkgs.pkgs.nix;
+            againstCurrentUnstable = testNixVersions pkgs pkgs.nix pkgs.nixUnstable;
+            # Disabled because the latest stable version doesn't handle
+            # `NIX_DAEMON_SOCKET_PATH` which is required for the tests to work
+            # againstLatestStable = testNixVersions pkgs pkgs.nix pkgs.nixStable;
+          } "touch $out";
       });
 
       packages = forAllSystems (system: {
@@ -479,6 +528,8 @@
           installCheckFlags = "sysconfdir=$(out)/etc";
 
           stripAllList = ["bin"];
+
+          strictDeps = true;
         };
       } // builtins.listToAttrs (map (crossSystem: {
         name = "nix-${crossSystem}";
diff --git a/maintainers/upload-release.pl b/maintainers/upload-release.pl
index 6f3882a12d0bc075e7a5ebe4c5a11659b271bce6..c2933300fcc0415295555b4a2fcb047da85b4572 100755
--- a/maintainers/upload-release.pl
+++ b/maintainers/upload-release.pl
@@ -133,20 +133,8 @@ for my $fn (glob "$tmpDir/*") {
 
 exit if $version =~ /pre/;
 
-# Update Nixpkgs in a very hacky way.
+# Update nix-fallback-paths.nix.
 system("cd $nixpkgsDir && git pull") == 0 or die;
-my $oldName = `nix-instantiate --eval $nixpkgsDir -A nix.name`; chomp $oldName;
-my $oldHash = `nix-instantiate --eval $nixpkgsDir -A nix.src.outputHash`; chomp $oldHash;
-print STDERR "old stable version in Nixpkgs = $oldName / $oldHash\n";
-
-my $fn = "$nixpkgsDir/pkgs/tools/package-management/nix/default.nix";
-my $oldFile = read_file($fn);
-$oldFile =~ s/$oldName/"$releaseName"/g;
-$oldFile =~ s/$oldHash/"$tarballHash"/g;
-write_file($fn, $oldFile);
-
-$oldName =~ s/nix-//g;
-$oldName =~ s/"//g;
 
 sub getStorePath {
     my ($jobName) = @_;
@@ -167,7 +155,7 @@ write_file("$nixpkgsDir/nixos/modules/installer/tools/nix-fallback-paths.nix",
            "  x86_64-darwin = \"" . getStorePath("build.x86_64-darwin") . "\";\n" .
            "}\n");
 
-system("cd $nixpkgsDir && git commit -a -m 'nix: $oldName -> $version'") == 0 or die;
+system("cd $nixpkgsDir && git commit -a -m 'nix-fallback-paths.nix: Update to $version'") == 0 or die;
 
 # Update the "latest" symlink.
 $channelsBucket->add_key(
diff --git a/misc/launchd/org.nixos.nix-daemon.plist.in b/misc/launchd/org.nixos.nix-daemon.plist.in
index c334639e2ec8bd0bbf456f238b12dff3146c106d..f1b43984040653c98685af2ad2af74ff9b57cb19 100644
--- a/misc/launchd/org.nixos.nix-daemon.plist.in
+++ b/misc/launchd/org.nixos.nix-daemon.plist.in
@@ -19,7 +19,7 @@
     <array>
       <string>/bin/sh</string>
       <string>-c</string>
-      <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon &amp;&amp; /nix/var/nix/profiles/default/bin/nix-daemon</string>
+      <string>/bin/wait4path /nix/var/nix/profiles/default/bin/nix-daemon &amp;&amp; exec /nix/var/nix/profiles/default/bin/nix-daemon</string>
     </array>
     <key>StandardErrorPath</key>
     <string>/var/log/nix-daemon.log</string>
diff --git a/misc/zsh/completion.zsh b/misc/zsh/completion.zsh
index d4df6447e41d043aca13f334641cadeb2b7cc9bd..a902e37dc8990d90afd16cfb677d7cfc84c04927 100644
--- a/misc/zsh/completion.zsh
+++ b/misc/zsh/completion.zsh
@@ -1,3 +1,5 @@
+#compdef nix
+
 function _nix() {
   local ifs_bk="$IFS"
   local input=("${(Q)words[@]}")
@@ -18,4 +20,4 @@ function _nix() {
   _describe 'nix' suggestions
 }
 
-compdef _nix nix
+_nix "$@"
diff --git a/misc/zsh/local.mk b/misc/zsh/local.mk
new file mode 100644
index 0000000000000000000000000000000000000000..418fb1377b49bade3ba9934f0c29a01e3592dd36
--- /dev/null
+++ b/misc/zsh/local.mk
@@ -0,0 +1 @@
+$(eval $(call install-file-as, $(d)/completion.zsh, $(datarootdir)/zsh/site-functions/_nix, 0644))
diff --git a/nix-rust/local.mk b/nix-rust/local.mk
index 50db4783c892fc886a000d486e2a80572dae89c0..9650cdf9321a829a012a676b2aa5cd6f88d57a35 100644
--- a/nix-rust/local.mk
+++ b/nix-rust/local.mk
@@ -8,8 +8,13 @@ endif
 
 libnixrust_PATH := $(d)/target/$(RUST_DIR)/libnixrust.$(SO_EXT)
 libnixrust_INSTALL_PATH := $(libdir)/libnixrust.$(SO_EXT)
-libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust -ldl
-libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust -ldl
+libnixrust_LDFLAGS_USE := -L$(d)/target/$(RUST_DIR) -lnixrust
+libnixrust_LDFLAGS_USE_INSTALLED := -L$(libdir) -lnixrust
+
+ifeq ($(OS), Linux)
+libnixrust_LDFLAGS_USE += -ldl
+libnixrust_LDFLAGS_USE_INSTALLED += -ldl
+endif
 
 ifeq ($(OS), Darwin)
 libnixrust_BUILD_FLAGS = NIX_LDFLAGS="-undefined dynamic_lookup"
diff --git a/scripts/bigsur-nixbld-user-migration.sh b/scripts/bigsur-nixbld-user-migration.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f1619fd56d38fe1f8bafa53b9d286413e6abdc9f
--- /dev/null
+++ b/scripts/bigsur-nixbld-user-migration.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+
+((NEW_NIX_FIRST_BUILD_UID=301))
+
+id_available(){
+	dscl . list /Users UniqueID | grep -E '\b'$1'\b' >/dev/null
+}
+
+change_nixbld_names_and_ids(){
+	local name uid next_id
+	((next_id=NEW_NIX_FIRST_BUILD_UID))
+	echo "Attempting to migrate nixbld users."
+	echo "Each user should change from nixbld# to _nixbld#"
+	echo "and their IDs relocated to $next_id+"
+	while read -r name uid; do
+		echo "   Checking $name (uid: $uid)"
+		# iterate for a clean ID
+		while id_available "$next_id"; do
+			((next_id++))
+			if ((next_id >= 400)); then
+				echo "We've hit UID 400 without placing all of your users :("
+				echo "You should use the commands in this script as a starting"
+				echo "point to review your UID-space and manually move the"
+				echo "remaining users (or delete them, if you don't need them)."
+				exit 1
+			fi
+		done
+
+		if [[ $name == _* ]]; then
+			echo "      It looks like $name has already been renamed--skipping."
+		else
+			# first 3 are cleanup, it's OK if they aren't here
+			sudo dscl . delete /Users/$name dsAttrTypeNative:_writers_passwd &>/dev/null || true
+			sudo dscl . change /Users/$name NFSHomeDirectory "/private/var/empty 1" "/var/empty" &>/dev/null || true
+			# remove existing user from group
+			sudo dseditgroup -o edit -t user -d $name nixbld || true
+			sudo dscl . change /Users/$name UniqueID $uid $next_id
+			sudo dscl . change /Users/$name RecordName $name _$name
+			# add renamed user to group
+			sudo dseditgroup -o edit -t user -a _$name nixbld
+			echo "      $name migrated to _$name (uid: $next_id)"
+		fi
+	done < <(dscl . list /Users UniqueID | grep nixbld | sort -n -k2)
+}
+
+change_nixbld_names_and_ids
diff --git a/scripts/create-darwin-volume.sh b/scripts/create-darwin-volume.sh
index 32fa577a83c94de4eb83982fd382e63655a0ac97..8aff0319982889d0c5cf652413624c3d01e18f3c 100755
--- a/scripts/create-darwin-volume.sh
+++ b/scripts/create-darwin-volume.sh
@@ -1,33 +1,262 @@
-#!/bin/sh
-set -e
+#!/usr/bin/env bash
+set -eu
+set -o pipefail
 
-root_disk() {
-    diskutil info -plist /
-}
+# I'm a little agnostic on the choices, but supporting a wide
+# slate of uses for now, including:
+# - import-only: `. create-darwin-volume.sh no-main[ ...]`
+# - legacy: `./create-darwin-volume.sh` or `. create-darwin-volume.sh`
+#   (both will run main())
+# - external alt-routine: `./create-darwin-volume.sh no-main func[ ...]`
+if [ "${1-}" = "no-main" ]; then
+    shift
+    readonly _CREATE_VOLUME_NO_MAIN=1
+else
+    readonly _CREATE_VOLUME_NO_MAIN=0
+    # declare some things we expect to inherit from install-multi-user
+    # I don't love this (because it's a bit of a kludge).
+    #
+    # CAUTION: (Dec 19 2020)
+    # This is a stopgap. It doesn't cover the full slate of
+    # identifiers we inherit--just those necessary to:
+    # - avoid breaking direct invocations of this script (here/now)
+    # - avoid hard-to-reverse structural changes before the call to rm
+    #   single-user support is verified
+    #
+    # In the near-mid term, I (personally) think we should:
+    # - decide to deprecate the direct call and add a notice
+    # - fold all of this into install-darwin-multi-user.sh
+    # - intentionally remove the old direct-invocation form (kill the
+    #   routine, replace this script w/ deprecation notice and a note
+    #   on the remove-after date)
+    #
+    readonly NIX_ROOT="${NIX_ROOT:-/nix}"
+
+    _sudo() {
+        shift # throw away the 'explanation'
+        /usr/bin/sudo "$@"
+    }
+    failure() {
+        if [ "$*" = "" ]; then
+            cat
+        else
+            echo "$@"
+        fi
+        exit 1
+    }
+    task() {
+        echo "$@"
+    }
+fi
 
-# i.e., "disk1"
+# usually "disk1"
 root_disk_identifier() {
-    diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" -
+    # For performance (~10ms vs 280ms) I'm parsing 'diskX' from stat output
+    # (~diskXsY)--but I'm retaining the more-semantic approach since
+    # it documents intent better.
+    # /usr/sbin/diskutil info -plist / | xmllint --xpath "/plist/dict/key[text()='ParentWholeDisk']/following-sibling::string[1]/text()" -
+    #
+    local special_device
+    special_device="$(/usr/bin/stat -f "%Sd" /)"
+    echo "${special_device%s[0-9]*}"
+}
+
+# make it easy to play w/ 'Case-sensitive APFS'
+readonly NIX_VOLUME_FS="${NIX_VOLUME_FS:-APFS}"
+readonly NIX_VOLUME_LABEL="${NIX_VOLUME_LABEL:-Nix Store}"
+# Strongly assuming we'll make a volume on the device / is on
+# But you can override NIX_VOLUME_USE_DISK to create it on some other device
+readonly NIX_VOLUME_USE_DISK="${NIX_VOLUME_USE_DISK:-$(root_disk_identifier)}"
+NIX_VOLUME_USE_SPECIAL="${NIX_VOLUME_USE_SPECIAL:-}"
+NIX_VOLUME_USE_UUID="${NIX_VOLUME_USE_UUID:-}"
+readonly NIX_VOLUME_MOUNTD_DEST="${NIX_VOLUME_MOUNTD_DEST:-/Library/LaunchDaemons/org.nixos.darwin-store.plist}"
+
+if /usr/bin/fdesetup isactive >/dev/null; then
+    test_filevault_in_use() { return 0; }
+    # no readonly; we may modify if user refuses from cure_volume
+    NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-1}"
+else
+    test_filevault_in_use() { return 1; }
+    NIX_VOLUME_DO_ENCRYPT="${NIX_VOLUME_DO_ENCRYPT:-0}"
+fi
+
+should_encrypt_volume() {
+    test_filevault_in_use && (( NIX_VOLUME_DO_ENCRYPT == 1 ))
 }
 
-find_nix_volume() {
-    diskutil apfs list -plist "$1" | xmllint --xpath "(/plist/dict/array/dict/key[text()='Volumes']/following-sibling::array/dict/key[text()='Name']/following-sibling::string[starts-with(translate(text(),'N','n'),'nix')]/text())[1]" - 2>/dev/null || true
+substep() {
+    printf "   %s\n" "" "- $1" "" "${@:2}"
+}
+
+
+volumes_labeled() {
+    local label="$1"
+    xsltproc --novalid --stringparam label "$label" - <(/usr/sbin/ioreg -ra -c "AppleAPFSVolume") <<'EOF'
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+  <xsl:output method="text"/>
+  <xsl:template match="/">
+    <xsl:apply-templates select="/plist/array/dict/key[text()='IORegistryEntryName']/following-sibling::*[1][text()=$label]/.."/>
+  </xsl:template>
+  <xsl:template match="dict">
+    <xsl:apply-templates match="string" select="key[text()='BSD Name']/following-sibling::*[1]"/>
+    <xsl:text>=</xsl:text>
+    <xsl:apply-templates match="string" select="key[text()='UUID']/following-sibling::*[1]"/>
+    <xsl:text>&#xA;</xsl:text>
+  </xsl:template>
+</xsl:stylesheet>
+EOF
+    # I cut label out of the extracted values, but here it is for reference:
+    # <xsl:apply-templates match="string" select="key[text()='IORegistryEntryName']/following-sibling::*[1]"/>
+    # <xsl:text>=</xsl:text>
+}
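+# Example output of volumes_labeled (values are hypothetical), one line per
+# matching volume, in "special=UUID" form:
+#   disk1s7=ABCDEF01-2345-6789-ABCD-EF0123456789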
+
+right_disk() {
+    local volume_special="$1" # (i.e., disk1s7)
+    [[ "$volume_special" == "$NIX_VOLUME_USE_DISK"s* ]]
+}
+
+right_volume() {
+    local volume_special="$1" # (i.e., disk1s7)
+    # if set, it must match; otherwise ensure it's on the right disk
+    if [ -z "$NIX_VOLUME_USE_SPECIAL" ]; then
+        if right_disk "$volume_special"; then
+            NIX_VOLUME_USE_SPECIAL="$volume_special" # latch on
+            return 0
+        else
+            return 1
+        fi
+    else
+        [ "$volume_special" = "$NIX_VOLUME_USE_SPECIAL" ]
+    fi
+}
+
+right_uuid() {
+    local volume_uuid="$1"
+    # if set, it must match; otherwise allow
+    if [ -z "$NIX_VOLUME_USE_UUID" ]; then
+        NIX_VOLUME_USE_UUID="$volume_uuid" # latch on
+        return 0
+    else
+        [ "$volume_uuid" = "$NIX_VOLUME_USE_UUID" ]
+    fi
+}
+
+cure_volumes() {
+    local found volume special uuid
+    # loop just in case they have more than one volume
+    # (nothing stops you from doing this)
+    for volume in $(volumes_labeled "$NIX_VOLUME_LABEL"); do
+        # CAUTION: this could (maybe) be a more normal read
+        # loop like:
+        #   while IFS== read -r special uuid; do
+        #       # ...
+        #   done <<<"$(volumes_labeled "$NIX_VOLUME_LABEL")"
+        #
+        # I did it with for to skirt a problem with the obvious
+        # pattern replacing stdin and causing user prompts
+        # inside (which also use read and access stdin) to skip
+        #
+        # If there's an existing encrypted volume we can't find
+        # in keychain, the user never gets prompted to delete
+        # the volume, and the install fails.
+        #
+        # If you change this, a human needs to test a very
+        # specific scenario: you already have an encrypted
+        # Nix Store volume, and have deleted its credential
+        # from keychain. Ensure the script asks you if it can
+        # delete the volume, and then prompts for your sudo
+        # password to confirm.
+        #
+        # shellcheck disable=SC1097
+        IFS== read -r special uuid <<< "$volume"
+        # take the first one that's on the right disk
+        if [ -z "${found:-}" ]; then
+            if right_volume "$special" && right_uuid "$uuid"; then
+                cure_volume "$special" "$uuid"
+                found="${special} (${uuid})"
+            else
+                warning <<EOF
+Ignoring ${special} (${uuid}) because I am looking for:
+disk=${NIX_VOLUME_USE_DISK} special=${NIX_VOLUME_USE_SPECIAL:-${NIX_VOLUME_USE_DISK}sX} uuid=${NIX_VOLUME_USE_UUID:-any}
+EOF
+                # TODO: give chance to delete if ! headless?
+            fi
+        else
+            warning <<EOF
+Ignoring ${special} (${uuid}), already found target: $found
+EOF
+            # TODO: reminder? It might be nice to have one idiom
+            # that can both warn now and remind again at the end.
+            # TODO: if ! headless, chance to delete?
+        fi
+    done
+    if [ -z "${found:-}" ]; then
+        readonly NIX_VOLUME_USE_SPECIAL NIX_VOLUME_USE_UUID
+    fi
+}
+
+volume_encrypted() {
+    local volume_special="$1" # (i.e., disk1s7)
+    # Without -plist, the first line of output is one of:
+    #   No cryptographic users for <special>
+    #   Cryptographic user for <special> (1 found)
+    #   Cryptographic users for <special> (2 found)
+    # With -plist, we can just check for any APFSCryptoUserUUID key.
+    /usr/sbin/diskutil apfs listCryptoUsers -plist "$volume_special" | /usr/bin/grep -q APFSCryptoUserUUID
 }
 
 test_fstab() {
-    grep -q "/nix apfs rw" /etc/fstab 2>/dev/null
+    /usr/bin/grep -q "$NIX_ROOT apfs rw" /etc/fstab 2>/dev/null
+}
+
+test_nix_root_is_symlink() {
+    [ -L "$NIX_ROOT" ]
+}
+
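+# /etc/synthetic.conf lines take (roughly) one of two forms:
+#   nix                 -> create an empty mount-point directory at /nix
+#   nix<TAB>some/target -> create a symlink at /nix (not supported by this installer)
+# The three checks below distinguish these forms.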
+test_synthetic_conf_either(){
+    /usr/bin/grep -qE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf 2>/dev/null
+}
+
+test_synthetic_conf_mountable() {
+    /usr/bin/grep -q "^${NIX_ROOT:1}$" /etc/synthetic.conf 2>/dev/null
+}
+
+test_synthetic_conf_symlinked() {
+    /usr/bin/grep -qE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf 2>/dev/null
+}
+
+test_nix_volume_mountd_installed() {
+    test -e "$NIX_VOLUME_MOUNTD_DEST"
 }
 
-test_nix_symlink() {
-    [ -L "/nix" ] || grep -q "^nix." /etc/synthetic.conf 2>/dev/null
+# check whether the keychain has an entry for the current volume's password
+test_keychain_by_uuid() {
+    local volume_uuid="$1"
+    # Note: doesn't need sudo just to check; doesn't output pw
+    security find-generic-password -s "$volume_uuid" &>/dev/null
 }
 
-test_synthetic_conf() {
-    grep -q "^nix$" /etc/synthetic.conf 2>/dev/null
+get_volume_pass() {
+    local volume_uuid="$1"
+    _sudo \
+        "to confirm keychain has a password that unlocks this volume" \
+        security find-generic-password -s "$volume_uuid" -w
+}
+
+verify_volume_pass() {
+    local volume_special="$1" # (i.e., disk1s7)
+    local volume_uuid="$2"
+    /usr/sbin/diskutil apfs unlockVolume "$volume_special" -verify -stdinpassphrase -user "$volume_uuid"
+}
+
+volume_pass_works() {
+    local volume_special="$1" # (i.e., disk1s7)
+    local volume_uuid="$2"
+    get_volume_pass "$volume_uuid" | verify_volume_pass "$volume_special" "$volume_uuid"
 }
 
 # Create the paths defined in synthetic.conf, saving us a reboot.
-create_synthetic_objects(){
+create_synthetic_objects() {
     # Big Sur takes away the -B flag we were using and replaces it
     # with a -t flag that appears to do the same thing (but they
     # don't behave exactly the same way in terms of return values).
@@ -41,129 +270,570 @@ create_synthetic_objects(){
 }
 
 test_nix() {
-    test -d "/nix"
-}
-
-test_t2_chip_present(){
-    # Use xartutil to see if system has a t2 chip.
-    #
-    # This isn't well-documented on its own; until it is,
-    # let's keep track of knowledge/assumptions.
-    #
-    # Warnings:
-    # - Don't search "xart" if porn will cause you trouble :)
-    # - Other xartutil flags do dangerous things. Don't run them
-    #   naively. If you must, search "xartutil" first.
-    #
-    # Assumptions:
-    # - the "xART session seeds recovery utility"
-    #   appears to interact with xartstorageremoted
-    # - `sudo xartutil --list` lists xART sessions
-    #   and their seeds and exits 0 if successful. If
-    #   not, it exits 1 and prints an error such as:
-    #   xartutil: ERROR: No supported link to the SEP present
-    # - xART sessions/seeds are present when a T2 chip is
-    #   (and not, otherwise)
-    # - the presence of a T2 chip means a newly-created
-    #   volume on the primary drive will be
-    #   encrypted at rest
-    # - all together: `sudo xartutil --list`
-    #   should exit 0 if a new Nix Store volume will
-    #   be encrypted at rest, and exit 1 if not.
-    sudo xartutil --list >/dev/null 2>/dev/null
-}
-
-test_filevault_in_use() {
-    fdesetup isactive >/dev/null
-}
-
-# use after error msg for conditions we don't understand
-suggest_report_error(){
-    # ex "error: something sad happened :(" >&2
-    echo "       please report this @ https://github.com/nixos/nix/issues" >&2
-}
-
-main() {
-    (
-        echo ""
-        echo "     ------------------------------------------------------------------ "
-        echo "    | This installer will create a volume for the nix store and        |"
-        echo "    | configure it to mount at /nix.  Follow these steps to uninstall. |"
-        echo "     ------------------------------------------------------------------ "
-        echo ""
-        echo "  1. Remove the entry from fstab using 'sudo vifs'"
-        echo "  2. Destroy the data volume using 'diskutil apfs deleteVolume'"
-        echo "  3. Remove the 'nix' line from /etc/synthetic.conf or the file"
-        echo ""
-    ) >&2
-
-    if test_nix_symlink; then
-        echo "error: /nix is a symlink, please remove it and make sure it's not in synthetic.conf (in which case a reboot is required)" >&2
-        echo "  /nix -> $(readlink "/nix")" >&2
-        exit 2
-    fi
-
-    if ! test_synthetic_conf; then
-        echo "Configuring /etc/synthetic.conf..." >&2
-        echo nix | sudo tee -a /etc/synthetic.conf
-        if ! test_synthetic_conf; then
-            echo "error: failed to configure synthetic.conf;" >&2
-            suggest_report_error
-            exit 1
+    test -d "$NIX_ROOT"
+}
+
+test_voldaemon() {
+    test -f "$NIX_VOLUME_MOUNTD_DEST"
+}
+
+generate_mount_command() {
+    local cmd_type="$1" # encrypted|unencrypted
+    local volume_uuid mountpoint cmd=()
+    printf -v volume_uuid "%q" "$2"
+    printf -v mountpoint "%q" "$NIX_ROOT"
+
+    case "$cmd_type" in
+        encrypted)
+            cmd=(/bin/sh -c "/usr/bin/security find-generic-password -s '$volume_uuid' -w | /usr/sbin/diskutil apfs unlockVolume '$volume_uuid' -mountpoint '$mountpoint' -stdinpassphrase");;
+        unencrypted)
+            cmd=(/usr/sbin/diskutil mount -mountPoint "$mountpoint" "$volume_uuid");;
+        *)
+            failure "Invalid first arg $cmd_type to generate_mount_command";;
+    esac
+
+    printf "    <string>%s</string>\n" "${cmd[@]}"
+}
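+# For example, generate_mount_command unencrypted <uuid> (hypothetical UUID,
+# default $NIX_ROOT) emits one <string> element per argument:
+#     <string>/usr/sbin/diskutil</string>
+#     <string>mount</string>
+#     <string>-mountPoint</string>
+#     <string>/nix</string>
+#     <string>ABCDEF01-2345-6789-ABCD-EF0123456789</string>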
+
+generate_mount_daemon() {
+    local cmd_type="$1" # encrypted|unencrypted
+    local volume_uuid="$2"
+    cat <<EOF
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+  <key>RunAtLoad</key>
+  <true/>
+  <key>Label</key>
+  <string>org.nixos.darwin-store</string>
+  <key>ProgramArguments</key>
+  <array>
+$(generate_mount_command "$cmd_type" "$volume_uuid")
+  </array>
+</dict>
+</plist>
+EOF
+}
+
+_eat_bootout_err() {
+    /usr/bin/grep -v "Boot-out failed: 36: Operation now in progress"
+}
+
+# TODO: remove with --uninstall?
+uninstall_launch_daemon_directions() {
+    local daemon_label="$1" # i.e., org.nixos.blah-blah
+    local daemon_plist="$2" # abspath
+    substep "Uninstall LaunchDaemon $daemon_label" \
+      "  sudo launchctl bootout system/$daemon_label" \
+      "  sudo rm $daemon_plist"
+}
+
+uninstall_launch_daemon_prompt() {
+    local daemon_label="$1" # i.e., org.nixos.blah-blah
+    local daemon_plist="$2" # abspath
+    local reason_for_daemon="$3"
+    cat <<EOF
+
+The installer adds a LaunchDaemon to $reason_for_daemon: $daemon_label
+EOF
+    if ui_confirm "Can I remove it?"; then
+        _sudo "to terminate the daemon" \
+            launchctl bootout "system/$daemon_label" 2> >(_eat_bootout_err >&2) || true
+            # this can "fail" with a message like:
+            # Boot-out failed: 36: Operation now in progress
+        _sudo "to remove the daemon definition" rm "$daemon_plist"
+    fi
+}
+
+nix_volume_mountd_uninstall_directions() {
+    uninstall_launch_daemon_directions "org.nixos.darwin-store" \
+        "$NIX_VOLUME_MOUNTD_DEST"
+}
+
+nix_volume_mountd_uninstall_prompt() {
+    uninstall_launch_daemon_prompt "org.nixos.darwin-store" \
+        "$NIX_VOLUME_MOUNTD_DEST" \
+        "mount your Nix volume"
+}
+
+# TODO: move nix_daemon to install-darwin-multi-user if/when uninstall_launch_daemon_prompt moves up to install-multi-user
+nix_daemon_uninstall_prompt() {
+    uninstall_launch_daemon_prompt "org.nixos.nix-daemon" \
+        "$NIX_DAEMON_DEST" \
+        "run the nix-daemon"
+}
+
+# TODO: remove with --uninstall?
+nix_daemon_uninstall_directions() {
+    uninstall_launch_daemon_directions "org.nixos.nix-daemon" \
+        "$NIX_DAEMON_DEST"
+}
+
+
+# TODO: remove with --uninstall?
+synthetic_conf_uninstall_directions() {
+    # :1 to strip leading slash
+    substep "Remove ${NIX_ROOT:1} from /etc/synthetic.conf" \
+        "  If nix is the only entry: sudo rm /etc/synthetic.conf" \
+        "  Otherwise: sudo /usr/bin/sed -i '' -e '/^${NIX_ROOT:1}$/d' /etc/synthetic.conf"
+}
+
+synthetic_conf_uninstall_prompt() {
+    cat <<EOF
+
+During install, I add '${NIX_ROOT:1}' to /etc/synthetic.conf, which instructs
+macOS to create an empty root directory for mounting the Nix volume.
+EOF
+    # make the edit to a copy
+    /usr/bin/grep -vE "^${NIX_ROOT:1}($|\t.{3,}$)" /etc/synthetic.conf > "$SCRATCH/synthetic.conf.edit"
+
+    if test_synthetic_conf_symlinked; then
+        warning <<EOF
+
+/etc/synthetic.conf already contains a line instructing your system
+to make '${NIX_ROOT}' as a symlink:
+    $(/usr/bin/grep -nE "^${NIX_ROOT:1}\t.{3,}$" /etc/synthetic.conf)
+
+This may mean your system has/had a non-standard Nix install.
+
+The volume-creation process in this installer is *not* compatible
+with a symlinked store, so I'll have to remove this instruction to
+continue.
+
+If you want/need to keep this instruction, answer 'n' to abort.
+
+EOF
+    fi
+
+    # ask to rm if this left the file empty aside from comments, else edit
+    if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/synthetic.conf.edit") &>/dev/null; then
+        if confirm_rm "/etc/synthetic.conf"; then
+            if test_nix_root_is_symlink; then
+                failure >&2 <<EOF
+I removed /etc/synthetic.conf, but $NIX_ROOT is already a symlink
+(-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
+Once you've rebooted, run the installer again.
+EOF
+            fi
+            return 0
+        fi
+    else
+        if confirm_edit "$SCRATCH/synthetic.conf.edit" "/etc/synthetic.conf"; then
+            if test_nix_root_is_symlink; then
+                failure >&2 <<EOF
+I edited Nix out of /etc/synthetic.conf, but $NIX_ROOT is already a symlink
+(-> $(readlink "$NIX_ROOT")). The system should remove it when you reboot.
+Once you've rebooted, run the installer again.
+EOF
+            fi
+            return 0
         fi
     fi
+    # fallback instructions
+    echo "Manually remove nix from /etc/synthetic.conf"
+    return 1
+}
 
-    if ! test_nix; then
-        echo "Creating mountpoint for /nix..." >&2
-        create_synthetic_objects # the ones we defined in synthetic.conf
-        if ! test_nix; then
-            sudo mkdir -p /nix 2>/dev/null || true
+add_nix_vol_fstab_line() {
+    local uuid="$1"
+    # shellcheck disable=SC1003,SC2026
+    local escaped_mountpoint="${NIX_ROOT/ /'\\\'040}"
+    shift
+    EDITOR="/usr/bin/ex" _sudo "to add nix to fstab" "$@" <<EOF
+:a
+UUID=$uuid $escaped_mountpoint apfs rw,noauto,nobrowse,suid,owners
+.
+:x
+EOF
+    # TODO: preserving my notes on suid,owners above until resolved
+    # There *may* be some issue regarding volume ownership, see nix#3156
+    #
+    # It seems like the cheapest fix is adding "suid,owners" to fstab, but:
+    # - We don't have much info on this condition yet
+    # - I'm not certain if these cause other problems?
+    # - There's a "chown" component some people claim to need to fix this
+    #   that I don't understand yet
+    #   (Note however that I've had to add a chown step to handle
+    #   single->multi-user reinstalls, which may cover this)
+    #
+    # I'm not sure if it's safe to approach it this way?
+    #
+    # I think the most proper way to test for it is:
+    # diskutil info -plist "$NIX_VOLUME_LABEL" | xmllint --xpath "(/plist/dict/key[text()='GlobalPermissionsEnabled'])/following-sibling::*[1][name()='true']" -; echo $?
+    #
+    # There's also `sudo /usr/sbin/vsdbutil -c /path` (which is much faster, but is also
+    # deprecated and needs minor parsing).
+    #
+    # If no one finds a problem with doing so, I think the simplest approach
+    # is to just eagerly set this. I found a few imperative approaches:
+    # a standard one (diskutil enableOwnership, ~100ms), a cheap one
+    # (/usr/sbin/vsdbutil -a, ~40-50ms), and a very cheap one (append the
+    # internal format to /var/db/volinfo.database).
+    #
+    # But vsdbutil's deprecation notice suggests using fstab, so I want to
+    # give that a whirl first.
+    #
+    # TODO: when this is workable, poke infinisil about reproducing the issue
+    # and confirming this fix?
+}
+
+delete_nix_vol_fstab_line() {
+    # TODO: I'm scaffolding this to handle the new nix volumes
+    # but it might be nice to generalize a smidge further to
+    # go ahead and set up a pattern for curing "old" things
+    # we no longer do?
+    EDITOR="/usr/bin/patch" _sudo "to cut nix from fstab" "$@" < <(/usr/bin/diff /etc/fstab <(/usr/bin/grep -v "$NIX_ROOT apfs rw" /etc/fstab))
+    # leaving some parts out of the grep; people may have fiddled with this line a little
+}
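+# Usage (both forms appear below):
+#   delete_nix_vol_fstab_line /usr/sbin/vifs              # edit /etc/fstab itself
+#   delete_nix_vol_fstab_line patch "$SCRATCH/fstab.edit" # rehearse the edit on a scratch copy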
+
+# TODO: hope to remove with --uninstall
+fstab_uninstall_directions() {
+    substep "Remove ${NIX_ROOT} from /etc/fstab" \
+      "  If nix is the only entry: sudo rm /etc/fstab" \
+      "  Otherwise, run 'sudo /usr/sbin/vifs' to remove the nix line"
+}
+
+fstab_uninstall_prompt() {
+    cat <<EOF
+During install, I add '${NIX_ROOT}' to /etc/fstab so that macOS knows what
+mount options to use for the Nix volume.
+EOF
+    cp /etc/fstab "$SCRATCH/fstab.edit"
+    # technically doesn't need the _sudo path, but throwing away the
+    # output is probably better than mostly-duplicating the code...
+    delete_nix_vol_fstab_line patch "$SCRATCH/fstab.edit" &>/dev/null
+
+    # if the test edit, minus comment lines, would leave the file empty
+    if /usr/bin/diff -q <(:) <(/usr/bin/grep -v "^#" "$SCRATCH/fstab.edit") &>/dev/null; then
+        # this edit would leave it empty; propose deleting it
+        if confirm_rm "/etc/fstab"; then
+            return 0
+        else
+            echo "Remove nix from /etc/fstab (or remove the file)"
         fi
-        if ! test_nix; then
-            echo "error: failed to bootstrap /nix; if a reboot doesn't help," >&2
-            suggest_report_error
-            exit 1
+    else
+        echo "I might be able to help you make this edit. Here's the diff:"
+        if ! _diff "/etc/fstab" "$SCRATCH/fstab.edit" && ui_confirm "Does the change above look right?"; then
+            delete_nix_vol_fstab_line /usr/sbin/vifs
+        else
+            echo "Remove nix from /etc/fstab (or remove the file)"
         fi
     fi
+}
+
+remove_volume() {
+    local volume_special="$1" # (i.e., disk1s7)
+    _sudo "to unmount the Nix volume" \
+        /usr/sbin/diskutil unmount force "$volume_special" || true # might not be mounted
+    _sudo "to delete the Nix volume" \
+        /usr/sbin/diskutil apfs deleteVolume "$volume_special"
+}
 
-    disk="$(root_disk_identifier)"
-    volume=$(find_nix_volume "$disk")
-    if [ -z "$volume" ]; then
-        echo "Creating a Nix Store volume..." >&2
-
-        if test_filevault_in_use; then
-            # TODO: Not sure if it's in-scope now, but `diskutil apfs list`
-            # shows both filevault and encrypted at rest status, and it
-            # may be the more semantic way to test for this? It'll show
-            # `FileVault:                 No (Encrypted at rest)`
-            # `FileVault:                 No`
-            # `FileVault:                 Yes (Unlocked)`
-            # and so on.
-            if test_t2_chip_present; then
-                echo "warning: boot volume is FileVault-encrypted, but the Nix store volume" >&2
-                echo "         is only encrypted at rest." >&2
-                echo "         See https://nixos.org/nix/manual/#sect-macos-installation" >&2
+# aspiration: robust enough to both fix problems
+# *and* update older darwin volumes
+cure_volume() {
+    local volume_special="$1" # (i.e., disk1s7)
+    local volume_uuid="$2"
+    header "Found existing Nix volume"
+    row "  special" "$volume_special"
+    row "     uuid" "$volume_uuid"
+
+    if volume_encrypted "$volume_special"; then
+        row "encrypted" "yes"
+        if volume_pass_works "$volume_special" "$volume_uuid"; then
+            NIX_VOLUME_DO_ENCRYPT=0
+            ok "Found a working decryption password in keychain :)"
+            echo ""
+        else
+            # - this is a volume we made, and
+            #   - the user encrypted it on their own
+            #   - something deleted the credential
+            # - this is an old or BYO volume and the pw
+            #   just isn't somewhere we can find it.
+            #
+            # We're going to explain why we're freaking out
+            # and prompt them to either delete the volume
+            # (requiring a sudo auth), or abort to fix
+            warning <<EOF
+
+This volume is encrypted, but I don't see a password to decrypt it.
+The quick fix is to let me delete this volume and make you a new one.
+If that's okay, enter your (sudo) password to continue. If not, you
+can ensure the decryption password is in your system keychain with a
+"Where" (service) field set to this volume's UUID:
+  $volume_uuid
+EOF
+            if password_confirm "delete this volume"; then
+                remove_volume "$volume_special"
             else
-                echo "error: refusing to create Nix store volume because the boot volume is" >&2
-                echo "       FileVault encrypted, but encryption-at-rest is not available." >&2
-                echo "       Manually create a volume for the store and re-run this script." >&2
-                echo "       See https://nixos.org/nix/manual/#sect-macos-installation" >&2
-                exit 1
+                # TODO: this is a good design case for a warn-and
+                # remind idiom...
+                failure <<EOF
+Your Nix volume is encrypted, but I couldn't find its password. Either:
+- Delete or rename the volume out of the way
+- Ensure its decryption password is in the system keychain with a
+  "Where" (service) field set to this volume's UUID:
+    $volume_uuid
+EOF
+            fi
+        fi
+    elif test_filevault_in_use; then
+        row "encrypted" "no"
+        warning <<EOF
+FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted.
+EOF
+        # if we're interactive, give them a chance to
+        # encrypt the volume. If not, /shrug
+        if ! headless && (( NIX_VOLUME_DO_ENCRYPT == 1 )); then
+            if ui_confirm "Should I encrypt it and add the decryption key to your keychain?"; then
+                encrypt_volume "$volume_uuid" "$NIX_VOLUME_LABEL"
+                NIX_VOLUME_DO_ENCRYPT=0
+            else
+                NIX_VOLUME_DO_ENCRYPT=0
+                reminder "FileVault is on, but your $NIX_VOLUME_LABEL volume isn't encrypted."
             fi
         fi
-
-        sudo diskutil apfs addVolume "$disk" APFS 'Nix Store' -mountpoint /nix
-        volume="Nix Store"
     else
-        echo "Using existing '$volume' volume" >&2
+        row "encrypted" "no"
     fi
+}
 
+remove_volume_artifacts() {
+    if test_synthetic_conf_either; then
+        # NIX_ROOT is in synthetic.conf
+        if synthetic_conf_uninstall_prompt; then
+            # TODO: moot until we tackle uninstall, but when we're
+            # actually uninstalling, we should issue:
+            # reminder "macOS will clean up the empty mount-point directory at $NIX_ROOT on reboot."
+            :
+        fi
+    fi
+    if test_fstab; then
+        fstab_uninstall_prompt
+    fi
+
+    if test_nix_volume_mountd_installed; then
+        nix_volume_mountd_uninstall_prompt
+    fi
+}
+
+setup_synthetic_conf() {
+    if test_nix_root_is_symlink; then
+        if ! test_synthetic_conf_symlinked; then
+            failure >&2 <<EOF
+error: $NIX_ROOT is a symlink (-> $(readlink "$NIX_ROOT")).
+Please remove it. If nix is in /etc/synthetic.conf, remove it and reboot.
+EOF
+        fi
+    fi
+    if ! test_synthetic_conf_mountable; then
+        task "Configuring /etc/synthetic.conf to make a mount-point at $NIX_ROOT" >&2
+        # technically /etc/synthetic.d/nix is supported in Big Sur+
+        # but handling both takes even more code...
+        _sudo "to add Nix to /etc/synthetic.conf" \
+            /usr/bin/ex /etc/synthetic.conf <<EOF
+:a
+${NIX_ROOT:1}
+.
+:x
+EOF
+        if ! test_synthetic_conf_mountable; then
+            failure "error: failed to configure synthetic.conf" >&2
+        fi
+        create_synthetic_objects
+        if ! test_nix; then
+            failure >&2 <<EOF
+error: failed to bootstrap $NIX_ROOT
+If you enabled FileVault after booting, this is likely a known issue
+with macOS that you'll have to reboot to fix. If you didn't enable FV,
+though, please open an issue describing how the system that you see
+this error on was set up.
+EOF
+        fi
+    fi
+}
+
+setup_fstab() {
+    local volume_uuid="$1"
+    # fstab used to be responsible for mounting the volume. Now the last
+    # step adds a LaunchDaemon responsible for mounting. This is technically
+    # redundant for mounting, but diskutil appears to pick up mount options
+    # from fstab (and diskutil's support for specifying them directly is not
+    # consistent across versions/subcommands).
     if ! test_fstab; then
-        echo "Configuring /etc/fstab..." >&2
-        label=$(echo "$volume" | sed 's/ /\\040/g')
-        # shellcheck disable=SC2209
-        printf "\$a\nLABEL=%s /nix apfs rw,nobrowse\n.\nwq\n" "$label" | EDITOR=ed sudo vifs
+        task "Configuring /etc/fstab to specify volume mount options" >&2
+        add_nix_vol_fstab_line "$volume_uuid" /usr/sbin/vifs
+    fi
+}
+
+encrypt_volume() {
+    local volume_uuid="$1"
+    local volume_label="$2"
+    local password
+    # Note: mount/unmount are late additions to support the right order
+    # of operations for creating the volume and then baking its uuid into
+    # other artifacts; not as well-trod w.r.t. potential errors, race
+    # conditions, etc.
+
+    /usr/sbin/diskutil mount "$volume_label"
+
+    password="$(/usr/bin/xxd -l 32 -p -c 256 /dev/random)"
+    _sudo "to add your Nix volume's password to Keychain" \
+        /usr/bin/security -i <<EOF
+add-generic-password -a "$volume_label" -s "$volume_uuid" -l "$volume_label encryption password" -D "Encrypted volume password" -j "Added automatically by the Nix installer for use by $NIX_VOLUME_MOUNTD_DEST" -w "$password" -T /System/Library/CoreServices/APFSUserAgent -T /System/Library/CoreServices/CSUserAgent -T /usr/bin/security "/Library/Keychains/System.keychain"
+EOF
+    builtin printf "%s" "$password" | _sudo "to encrypt your Nix volume" \
+        /usr/sbin/diskutil apfs encryptVolume "$volume_label" -user disk -stdinpassphrase
+
+    /usr/sbin/diskutil unmount force "$volume_label"
+}
+
+create_volume() {
+    # Notes:
+    # 1) using `-nomount` instead of `-mountpoint "$NIX_ROOT"` to get
+    # its UUID and set mount opts in fstab before first mount
+    #
+    # 2) the system keychain is in some sense less secure than the user
+    # keychain... (it's possible to read the password for decrypting the
+    # keychain) but
+    # the user keychain appears to be available too late. As far as I
+    # can tell, the file with this password (/var/db/SystemKey) is
+    # inside the FileVault envelope. If that isn't true, it may make
+    # sense to store the password inside the envelope?
+    #
+    # 3) At some point it would be ideal to have a small binary to serve
+    # as the daemon itself, and for it to replace /usr/bin/security here.
+    #
+    # 4) *UserAgent exemptions should let the system seamlessly supply the
+    # password if noauto is removed from fstab entry. This is intentional;
+    # the user will hopefully look for help if the volume stops mounting,
+    # rather than failing over into subtle race-condition problems.
+    #
+    # 5) If we ever get users griping about not having space to do
+    # anything useful with Nix, it is possible to specify
+    # `-reserve 10g` or something, which will fail if that much space
+    # isn't available
+    #
+    # 6) getting the special device with awk may be fragile, but we do it to:
+    #    - save time over running slow diskutil commands
+    #    - skirt the risk of grabbing the wrong volume if multiple match
+    /usr/sbin/diskutil apfs addVolume "$NIX_VOLUME_USE_DISK" "$NIX_VOLUME_FS" "$NIX_VOLUME_LABEL" -nomount | /usr/bin/awk '/Created new APFS Volume/ {print $5}'
+}
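+# e.g. addVolume prints a line like "Created new APFS Volume disk1s7"
+# (hypothetical device), and awk's $5 picks out "disk1s7".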
+
+volume_uuid_from_special() {
+    local volume_special="$1" # (i.e., disk1s7)
+    # For reasons I won't pretend to fathom, this returns 253 when it works
+    /System/Library/Filesystems/apfs.fs/Contents/Resources/apfs.util -k "$volume_special" || true
+}
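+# e.g. (hypothetical): volume_uuid_from_special disk1s7 prints something like
+# "ABCDEF01-2345-6789-ABCD-EF0123456789" and exits 253 (hence the || true).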
+
+# The mount sometimes shows up immediately, and AFAIK always within
+# about 1s. diskutil info on an unmounted path fails in around
+# 50-100ms and a match takes about 250-300ms, so I suspect this
+# wait usually costs ~250-750ms.
+await_volume() {
+    # caution: this could, in theory, get stuck
+    until /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null; do
+        :
+    done
+}
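+# If the unbounded loop above ever bites, a bounded variant (just a sketch,
+# not used here) could look like:
+#   for _ in $(seq 1 600); do
+#       /usr/sbin/diskutil info "$NIX_ROOT" &>/dev/null && return 0
+#       sleep 0.1
+#   done
+#   failure "Timed out waiting for $NIX_ROOT to mount"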
+
+setup_volume() {
+    local use_special use_uuid profile_packages
+    task "Creating a Nix volume" >&2
+    # Note: it's tempting to wrap the create_volume call in a grep to pull out
+    # the new volume's special device, but the _sudo wrapper makes that awkward;
+    # since addVolume appears to work without sudo, we just call it directly.
+
+    use_special="${NIX_VOLUME_USE_SPECIAL:-$(create_volume)}"
+
+    use_uuid=${NIX_VOLUME_USE_UUID:-$(volume_uuid_from_special "$use_special")}
+
+    setup_fstab "$use_uuid"
+
+    if should_encrypt_volume; then
+        encrypt_volume "$use_uuid" "$NIX_VOLUME_LABEL"
+        setup_volume_daemon "encrypted" "$use_uuid"
+    # TODO: might be able to save ~60ms by caching or setting
+    # this somewhere rather than re-checking here.
+    elif volume_encrypted "$use_special"; then
+        setup_volume_daemon "encrypted" "$use_uuid"
+    else
+        setup_volume_daemon "unencrypted" "$use_uuid"
+    fi
+
+    await_volume
+
+    # TODO: below is a vague kludge for now; I just don't know
+    # what if any safe action there is to take here. Also, the
+    # reminder isn't very helpful.
+    # I'm less sure where this belongs, but it also needs the volume mounted, pre-install
+    if type -p nix-env; then
+        profile_packages="$(nix-env --query --installed)"
+        # TODO: can probably do below faster w/ read
+        # intentionally unquoted string to eat whitespace in wc output
+        # shellcheck disable=SC2046,SC2059
+        if ! [ $(printf "$profile_packages" | /usr/bin/wc -l) = "0" ]; then
+            reminder <<EOF
+Nix now supports only multi-user installs on Darwin/macOS, and your user's
+Nix profile has some packages in it. These packages may obscure those in the
+default profile, including the Nix this installer will add. You should
+review these packages:
+$profile_packages
+EOF
+        fi
+    fi
+
+}
+
+setup_volume_daemon() {
+    local cmd_type="$1" # encrypted|unencrypted
+    local volume_uuid="$2"
+    if ! test_voldaemon; then
+        task "Configuring LaunchDaemon to mount '$NIX_VOLUME_LABEL'" >&2
+        _sudo "to install the Nix volume mounter" /usr/bin/ex "$NIX_VOLUME_MOUNTD_DEST" <<EOF
+:a
+$(generate_mount_daemon "$cmd_type" "$volume_uuid")
+.
+:x
+EOF
+
+        # TODO: should probably alert the user if this is disabled?
+        _sudo "to launch the Nix volume mounter" \
+            launchctl bootstrap system "$NIX_VOLUME_MOUNTD_DEST" || true
+        # TODO: confirm whether kickstart is necessary?
+        # I feel a little superstitious, but it can guard
+        # against multiple problems (doesn't start, old
+        # version still running for some reason...)
+        _sudo "to launch the Nix volume mounter" \
+            launchctl kickstart -k system/org.nixos.darwin-store
     fi
 }
 
-main "$@"
+setup_darwin_volume() {
+    setup_synthetic_conf
+    setup_volume
+}
+
+if [ "$_CREATE_VOLUME_NO_MAIN" = 1 ]; then
+    if [ -n "$*" ]; then
+        "$@" # expose functions in case we want multiple routines?
+    fi
+else
+    # no reason to pay for bash to process this
+    main() {
+        {
+            echo ""
+            echo "     ------------------------------------------------------------------ "
+            echo "    | This installer will create a volume for the nix store and        |"
+            echo "    | configure it to mount at $NIX_ROOT.  Follow these steps to uninstall. |"
+            echo "     ------------------------------------------------------------------ "
+            echo ""
+            echo "  1. Remove the entry from fstab using 'sudo /usr/sbin/vifs'"
+            echo "  2. Run 'sudo launchctl bootout system/org.nixos.darwin-store'"
+            echo "  3. Remove $NIX_VOLUME_MOUNTD_DEST"
+            echo "  4. Destroy the data volume using '/usr/sbin/diskutil apfs deleteVolume'"
+            echo "  5. Remove the 'nix' line from /etc/synthetic.conf (or the file)"
+            echo ""
+        } >&2
+
+        setup_darwin_volume
+    }
+
+    main "$@"
+fi
diff --git a/scripts/install-darwin-multi-user.sh b/scripts/install-darwin-multi-user.sh
index a27be2a43d5a31064cbc30b72be2bdf8a4dc7557..f8d6c5e8fc41969c270bcd66ad7dc3cb819b9caf 100644
--- a/scripts/install-darwin-multi-user.sh
+++ b/scripts/install-darwin-multi-user.sh
@@ -3,57 +3,99 @@
 set -eu
 set -o pipefail
 
-readonly PLIST_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
+readonly NIX_DAEMON_DEST=/Library/LaunchDaemons/org.nixos.nix-daemon.plist
+# create by default; set 0 to DIY, use a symlink, etc.
+readonly NIX_VOLUME_CREATE=${NIX_VOLUME_CREATE:-1} # now default
+NIX_FIRST_BUILD_UID="301"
+NIX_BUILD_USER_NAME_TEMPLATE="_nixbld%d"
+
+# caution: may update times on / if run as root rather than a normal user
+read_only_root() {
+    # this touch command ~should~ always produce an error
+    # as of this change I confirmed /usr/bin/touch emits:
+    # "touch: /: Read-only file system" Catalina+ and Big Sur
+    # "touch: /: Permission denied" Mojave
+    # (we match the suffix rather than the "touch:" prefix so this stays
+    # compatible with coreutils touch, whose prefix differs, in case using
+    # an explicit path ever causes problems)
+    [[ "$(/usr/bin/touch / 2>&1)" = *"Read-only file system" ]]
+
+    # Avoiding the slow semantic way to get this information (~330ms vs ~8ms)
+    # unless using touch causes problems. Just in case, that approach is:
+    # diskutil info -plist / | <find the Writable or WritableVolume keys>, i.e.
+    # diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -
+}
+
+if read_only_root && [ "$NIX_VOLUME_CREATE" = 1 ]; then
+    should_create_volume() { return 0; }
+else
+    should_create_volume() { return 1; }
+fi
+
+# shellcheck source=./create-darwin-volume.sh
+. "$EXTRACTED_NIX_PATH/create-darwin-volume.sh" "no-main"
 
 dsclattr() {
     /usr/bin/dscl . -read "$1" \
-        | awk "/$2/ { print \$2 }"
+        | /usr/bin/awk "/$2/ { print \$2 }"
 }
 
-poly_validate_assumptions() {
-    if [ "$(uname -s)" != "Darwin" ]; then
-        failure "This script is for use with macOS!"
+test_nix_daemon_installed() {
+    test -e "$NIX_DAEMON_DEST"
+}
+
+poly_cure_artifacts() {
+    if should_create_volume; then
+        task "Fixing any leftover Nix volume state"
+        cat <<EOF
+Before I try to install, I'll check for any existing Nix volume config
+and ask for your permission to remove it (so that the installer can
+start fresh). I'll also ask for permission to fix any issues I spot.
+EOF
+        cure_volumes
+        remove_volume_artifacts
     fi
 }
 
 poly_service_installed_check() {
-    [ -e "$PLIST_DEST" ]
+    if should_create_volume; then
+        test_nix_daemon_installed || test_nix_volume_mountd_installed
+    else
+        test_nix_daemon_installed
+    fi
 }
 
 poly_service_uninstall_directions() {
-        cat <<EOF
-$1. Delete $PLIST_DEST
-
-  sudo launchctl unload $PLIST_DEST
-  sudo rm $PLIST_DEST
-
-EOF
+    echo "$1. Remove macOS-specific components:"
+    if should_create_volume && test_nix_volume_mountd_installed; then
+        darwin_volume_uninstall_directions
+    fi
+    if test_nix_daemon_installed; then
+        nix_daemon_uninstall_directions
+    fi
 }
 
 poly_service_setup_note() {
-    cat <<EOF
- - load and start a LaunchDaemon (at $PLIST_DEST) for nix-daemon
-
-EOF
+    if should_create_volume; then
+        echo " - create a Nix volume and a LaunchDaemon to mount it"
+    fi
+    echo " - create a LaunchDaemon (at $NIX_DAEMON_DEST) for nix-daemon"
+    echo ""
 }
 
-poly_extra_try_me_commands(){
-  :
-}
-poly_extra_setup_instructions(){
-  :
+poly_extra_try_me_commands() {
+    :
 }
 
 poly_configure_nix_daemon_service() {
+    task "Setting up the nix-daemon LaunchDaemon"
     _sudo "to set up the nix-daemon as a LaunchDaemon" \
-          cp -f "/nix/var/nix/profiles/default$PLIST_DEST" "$PLIST_DEST"
+          /bin/cp -f "/nix/var/nix/profiles/default$NIX_DAEMON_DEST" "$NIX_DAEMON_DEST"
 
     _sudo "to load the LaunchDaemon plist for nix-daemon" \
           launchctl load /Library/LaunchDaemons/org.nixos.nix-daemon.plist
 
     _sudo "to start the nix-daemon" \
-          launchctl start org.nixos.nix-daemon
-
+          launchctl kickstart -k system/org.nixos.nix-daemon
 }
 
 poly_group_exists() {
@@ -94,6 +136,8 @@ poly_user_home_get() {
 }
 
 poly_user_home_set() {
+    # This can trigger a permission prompt now:
+    # "Terminal" would like to administer your computer. Administration can include modifying passwords, networking, and system settings.
     _sudo "in order to give $1 a safe home directory" \
           /usr/bin/dscl . -create "/Users/$1" "NFSHomeDirectory" "$2"
 }
@@ -119,7 +163,7 @@ poly_user_shell_set() {
 poly_user_in_group_check() {
     username=$1
     group=$2
-    dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1
+    /usr/sbin/dseditgroup -o checkmember -m "$username" "$group" > /dev/null 2>&1
 }
 
 poly_user_in_group_set() {
@@ -149,3 +193,17 @@ poly_create_build_user() {
           /usr/bin/dscl . create "/Users/$username" \
           UniqueID "${uid}"
 }
+
+poly_prepare_to_install() {
+    if should_create_volume; then
+        header "Preparing a Nix volume"
+        # intentional indent below to match task indent
+        cat <<EOF
+    Nix traditionally stores its data in the root directory $NIX_ROOT, but
+    macOS now (starting in 10.15 Catalina) has a read-only root directory.
+    To support Nix, I will create a volume and configure macOS to mount it
+    at $NIX_ROOT.
+EOF
+        setup_darwin_volume
+    fi
+}
diff --git a/scripts/install-multi-user.sh b/scripts/install-multi-user.sh
index 5e8b4ac188b72f2f87353f1b5fa60202e2e5d1ea..e1046c19c01461cac77f3e0cd12ea829d6905d40 100644
--- a/scripts/install-multi-user.sh
+++ b/scripts/install-multi-user.sh
@@ -25,13 +25,15 @@ readonly RED='\033[31m'
 readonly NIX_USER_COUNT=${NIX_USER_COUNT:-32}
 readonly NIX_BUILD_GROUP_ID="30000"
 readonly NIX_BUILD_GROUP_NAME="nixbld"
-readonly NIX_FIRST_BUILD_UID="30001"
+# darwin installer needs to override these
+NIX_FIRST_BUILD_UID="30001"
+NIX_BUILD_USER_NAME_TEMPLATE="nixbld%d"
 # Please don't change this. We don't support it, because the
 # default shell profile that comes with Nix doesn't support it.
 readonly NIX_ROOT="/nix"
 readonly NIX_EXTRA_CONF=${NIX_EXTRA_CONF:-}
 
-readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv")
+readonly PROFILE_TARGETS=("/etc/bashrc" "/etc/profile.d/nix.sh" "/etc/zshenv" "/etc/bash.bashrc" "/etc/zsh/zshenv")
 readonly PROFILE_BACKUP_SUFFIX=".backup-before-nix"
 readonly PROFILE_NIX_FILE="$NIX_ROOT/var/nix/profiles/default/etc/profile.d/nix-daemon.sh"
 
@@ -41,7 +43,7 @@ readonly NIX_INSTALLED_CACERT="@cacert@"
 #readonly NIX_INSTALLED_CACERT="/nix/store/7dxhzymvy330i28ii676fl1pqwcahv2f-nss-cacert-3.49.2"
 readonly EXTRACTED_NIX_PATH="$(dirname "$0")"
 
-readonly ROOT_HOME=$(echo ~root)
+readonly ROOT_HOME=~root
 
 if [ -t 0 ]; then
     readonly IS_HEADLESS='no'
@@ -57,14 +59,19 @@ headless() {
     fi
 }
 
-contactme() {
-    echo "We'd love to help if you need it."
+contact_us() {
+    echo "You can open an issue at https://github.com/nixos/nix/issues"
     echo ""
-    echo "If you can, open an issue at https://github.com/nixos/nix/issues"
+    echo "Or feel free to contact the team:"
+    echo " - Matrix: #nix:nixos.org"
+    echo " - IRC: in #nixos on irc.libera.chat"
+    echo " - twitter: @nixos_org"
+    echo " - forum: https://discourse.nixos.org"
+}
+get_help() {
+    echo "We'd love to help if you need it."
     echo ""
-    echo "Or feel free to contact the team,"
-    echo " - on IRC #nixos on irc.freenode.net"
-    echo " - on twitter @nixos_org"
+    contact_us
 }
 
 uninstall_directions() {
@@ -100,11 +107,10 @@ $step. Delete the files Nix added to your system:
 and that is it.
 
 EOF
-
 }
 
 nix_user_for_core() {
-    printf "nixbld%d" "$1"
+    printf "$NIX_BUILD_USER_NAME_TEMPLATE" "$1"
 }
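+# e.g. nix_user_for_core 4 -> "nixbld4" with the default template above,
+# or "_nixbld4" with the darwin installer's "_nixbld%d" override.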
 
 nix_uid_for_core() {
@@ -168,7 +174,7 @@ failure() {
     header "oh no!"
     _textout "$RED" "$@"
     echo ""
-    _textout "$RED" "$(contactme)"
+    _textout "$RED" "$(get_help)"
     trap finish_cleanup EXIT
     exit 1
 }
@@ -199,6 +205,95 @@ ui_confirm() {
     return 1
 }
 
+printf -v _UNCHANGED_GRP_FMT "%b" $'\033[2m%='"$ESC" # "dim"
+# bold+invert+red and bold+invert+green just for the +/- below
+# red/green foreground for rest of the line
+printf -v _OLD_LINE_FMT "%b" $'\033[1;7;31m-'"$ESC ${RED}%L${ESC}"
+printf -v _NEW_LINE_FMT "%b" $'\033[1;7;32m+'"$ESC ${GREEN}%L${ESC}"
+
+_diff() {
+    # simple colorized diff compatible with diff versions that predate `--color`
+    diff --unchanged-group-format="$_UNCHANGED_GRP_FMT" --old-line-format="$_OLD_LINE_FMT" --new-line-format="$_NEW_LINE_FMT" --unchanged-line-format="  %L" "$@"
+}
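+# e.g. _diff /etc/fstab "$SCRATCH/fstab.edit" prints removed lines in red,
+# added lines in green, and unchanged context dimmed.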
+
+confirm_rm() {
+    local path="$1"
+    if ui_confirm "Can I remove $path?"; then
+        _sudo "to remove $path" rm "$path"
+    fi
+}
+
+confirm_edit() {
+    local path="$1"
+    local edit_path="$2"
+    cat <<EOF
+
+Nix isn't the only thing in $path,
+but I think I know how to edit it out.
+Here's the diff:
+EOF
+
+    # could technically test the diff, but caller should do it
+    _diff "$path" "$edit_path"
+    if ui_confirm "Does the change above look right?"; then
+        _sudo "remove nix from $path" cp "$edit_path" "$path"
+    fi
+}
+
+_SERIOUS_BUSINESS="${RED}%s:${ESC} "
+password_confirm() {
+    local do_something_consequential="$1"
+    if ui_confirm "Can I $do_something_consequential?"; then
+        # shellcheck disable=SC2059
+        sudo -kv --prompt="$(printf "${_SERIOUS_BUSINESS}" "Enter your password to $do_something_consequential")"
+    else
+        return 1
+    fi
+}
+
+# Support accumulating reminders over the course of a run and showing
+# them at the end. An example where this helps: the installer changes
+# something, but it won't work without a reboot. If you tell the user
+# when you do it, they may miss it in the stream. The value of the
+# setting isn't enough to decide whether to message because you only
+# need to message if you *changed* it.
+
+# reminders stored in array delimited by empty entry; if ! headless,
+# user is asked to confirm after each delimiter.
+_reminders=()
+((_remind_num=1))
+
+remind() {
+    # (( arithmetic expression ))
+    if (( _remind_num > 1 )); then
+        header "Reminders"
+        for line in "${_reminders[@]}"; do
+            echo "$line"
+            if ! headless && [ "${#line}" = 0 ]; then
+                if read -r -p "Press enter/return to acknowledge."; then
+                    printf $'\033[A\33[2K\r'
+                fi
+            fi
+        done
+    fi
+}
+
+reminder() {
+    printf -v label "${BLUE}[ %d ]${ESC}" "$_remind_num"
+    _reminders+=("$label")
+    if [[ "$*" = "" ]]; then
+        while read -r line; do
+            _reminders+=("$line")
+        done
+    else
+        # this expands each arg to an array entry (and each entry will
+        # ultimately be a separate line in the output)
+        _reminders+=("$@")
+    fi
+    _reminders+=("")
+    ((_remind_num++))
+}
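+# Usage (both forms appear later in this script):
+#   reminder "Single-line reminder."
+#   reminder <<EOF
+#   Multi-line reminder; the body is read from stdin when no args are given.
+#   EOF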
+
 __sudo() {
     local expl="$1"
     local cmd="$2"
@@ -219,18 +314,18 @@ _sudo() {
     local expl="$1"
     shift
     if ! headless; then
-        __sudo "$expl" "$*"
+        __sudo "$expl" "$*" >&2
     fi
     sudo "$@"
 }
 
 
-readonly SCRATCH=$(mktemp -d -t tmp.XXXXXXXXXX)
-function finish_cleanup {
+readonly SCRATCH=$(mktemp -d "${TMPDIR:-/tmp/}tmp.XXXXXXXXXX")
+finish_cleanup() {
     rm -rf "$SCRATCH"
 }
 
-function finish_fail {
+finish_fail() {
     finish_cleanup
 
     failure <<EOF
@@ -242,45 +337,46 @@ EOF
 }
 trap finish_fail EXIT
 
-channel_update_failed=0
-function finish_success {
-    finish_cleanup
-
+finish_success() {
     ok "Alright! We're done!"
-    if [ "x$channel_update_failed" = x1 ]; then
-        echo ""
-        echo "But fetching the nixpkgs channel failed. (Are you offline?)"
-        echo "To try again later, run \"sudo -i nix-channel --update nixpkgs\"."
-    fi
 
     cat <<EOF
-
-Before Nix will work in your existing shells, you'll need to close
-them and open them again. Other than that, you should be ready to go.
-
 Try it! Open a new terminal, and type:
 $(poly_extra_try_me_commands)
   $ nix-shell -p nix-info --run "nix-info -m"
-$(poly_extra_setup_instructions)
-Thank you for using this installer. If you have any feedback, don't
-hesitate:
 
-$(contactme)
-EOF
+Thank you for using this installer. If you have any feedback or need
+help, don't hesitate:
 
+$(contact_us)
+EOF
+    remind
+    finish_cleanup
 }
 
+finish_uninstall_success() {
+    ok "Alright! Nix should be removed!"
 
-validate_starting_assumptions() {
-    poly_validate_assumptions
+    cat <<EOF
+If you spot anything this uninstaller missed or have feedback,
+don't hesitate:
 
-    if [ $EUID -eq 0 ]; then
-        failure <<EOF
-Please do not run this script with root privileges. We will call sudo
-when we need to.
+$(contact_us)
 EOF
-    fi
+    remind
+    finish_cleanup
+}
 
+remove_nix_artifacts() {
+    failure "Not implemented yet"
+}
+
+cure_artifacts() {
+    poly_cure_artifacts
+    # remove_nix_artifacts (LATER)
+}
+
+validate_starting_assumptions() {
     if type nix-env 2> /dev/null >&2; then
         warning <<EOF
 Nix already appears to be installed. This installer may run into issues.
@@ -442,18 +538,46 @@ create_build_users() {
 
 create_directories() {
     # FIXME: remove all of this because it duplicates LocalStore::LocalStore().
-
+    task "Setting up the basic directory structure"
+    if [ -d "$NIX_ROOT" ]; then
+        # if /nix already exists, take ownership
+        #
+        # Caution: notes below are macOS-y
+        # This is a bit of a goldilocks zone for taking ownership
+        # if there are already files on the volume; the volume is
+        # now mounted, but we haven't added a bunch of new files
+
+        # this is probably a bit slow; I've been seeing 3.3-4s even
+        # when installing promptly over a fresh single-user install
+        # (mentioning it in case anyone's aware of a shortcut).
+        # `|| true`: .Trashes errors w/o full disk perm
+
+        # rumor per #4488 that macOS 11.2 may not have
+        # sbin on path, and that's where chown is, but
+        # since this bit is cross-platform:
+        # - first try with `command -vp` to try and find
+        #   chown in the usual places
+        # - fall back on `command -v` which would find
+        #   any chown on path
+        # if we don't find one, the command is already
+        # hiding behind || true, and the general state
+        # should be one the user can repair once they
+        # figure out where chown is...
+        local get_chr_own="$(command -vp chown)"
+        if [[ -z "$get_chr_own" ]]; then
+            get_chr_own="$(command -v chown)"
+        fi
+        _sudo "to take root ownership of existing Nix store files" \
+              "$get_chr_own" -R "root:$NIX_BUILD_GROUP_NAME" "$NIX_ROOT" || true
+    fi
     _sudo "to make the basic directory structure of Nix (part 1)" \
-          mkdir -pv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user
+          install -dv -m 0755 /nix /nix/var /nix/var/log /nix/var/log/nix /nix/var/log/nix/drvs /nix/var/nix{,/db,/gcroots,/profiles,/temproots,/userpool} /nix/var/nix/{gcroots,profiles}/per-user
 
     _sudo "to make the basic directory structure of Nix (part 2)" \
-          mkdir -pv -m 1775 /nix/store
-
-    _sudo "to make the basic directory structure of Nix (part 3)" \
-          chgrp "$NIX_BUILD_GROUP_NAME" /nix/store
+          install -dv -g "$NIX_BUILD_GROUP_NAME" -m 1775 /nix/store
 
     _sudo "to place the default nix daemon configuration (part 1)" \
-          mkdir -pv -m 0555 /etc/nix
+          install -dv -m 0555 /etc/nix
 }
 
 place_channel_configuration() {
@@ -473,7 +597,7 @@ This installation tool will set up your computer with the Nix package
 manager. This will happen in a few stages:
 
 1. Make sure your computer doesn't already have Nix. If it does, I
-   will show you instructions on how to clean up your old one.
+   will show you instructions on how to clean up your old install.
 
 2. Show you what we are going to install and where. Then we will ask
    if you are ready to continue.
@@ -572,6 +696,7 @@ EOF
 }
 
 install_from_extracted_nix() {
+    task "Installing Nix"
     (
         cd "$EXTRACTED_NIX_PATH"
 
@@ -587,9 +712,8 @@ $NIX_INSTALLED_NIX.
 EOF
         fi
 
-        cat ./.reginfo \
-            | _sudo "to load data for the first time in to the Nix Database" \
-                   "$NIX_INSTALLED_NIX/bin/nix-store" --load-db
+        _sudo "to load data for the first time in to the Nix Database" \
+              "$NIX_INSTALLED_NIX/bin/nix-store" --load-db < ./.reginfo
 
         echo "      Just finished getting the nix database ready."
     )
@@ -608,6 +732,7 @@ EOF
 }
 
 configure_shell_profile() {
+    task "Setting up shell profiles: ${PROFILE_TARGETS[*]}"
     for profile_target in "${PROFILE_TARGETS[@]}"; do
         if [ -e "$profile_target" ]; then
             _sudo "to back up your current $profile_target to $profile_target$PROFILE_BACKUP_SUFFIX" \
@@ -627,14 +752,27 @@ configure_shell_profile() {
                         tee -a "$profile_target"
         fi
     done
+    # TODO: should we suggest '. $PROFILE_NIX_FILE'? It would get them on
+    # their way less disruptively, but a counter-argument is that they won't
+    # immediately notice if something didn't get set up right?
+    reminder "Nix won't work in active shell sessions until you restart them."
+}
+
+cert_in_store() {
+    # in a subshell
+    # - change into the cert-file dir
+    # - get the physical pwd
+    # and test if this path is in the Nix store
+    [[ "$(cd -- "$(dirname "$NIX_SSL_CERT_FILE")" && exec pwd -P)" == "$NIX_ROOT/store/"* ]]
 }
 
 setup_default_profile() {
-    _sudo "to installing a bootstrapping Nix in to the default Profile" \
+    task "Setting up the default profile"
+    _sudo "to install a bootstrapping Nix in to the default profile" \
           HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_NIX"
 
-    if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ]; then
-        _sudo "to installing a bootstrapping SSL certificate just for Nix in to the default Profile" \
+    if [ -z "${NIX_SSL_CERT_FILE:-}" ] || ! [ -f "${NIX_SSL_CERT_FILE:-}" ] || cert_in_store; then
+        _sudo "to install a bootstrapping SSL certificate just for Nix in to the default profile" \
               HOME="$ROOT_HOME" "$NIX_INSTALLED_NIX/bin/nix-env" -i "$NIX_INSTALLED_CACERT"
         export NIX_SSL_CERT_FILE=/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt
     fi
@@ -643,9 +781,13 @@ setup_default_profile() {
         # Have to explicitly pass NIX_SSL_CERT_FILE as part of the sudo call,
         # otherwise it will be lost in environments where sudo doesn't pass
         # all the environment variables by default.
-        _sudo "to update the default channel in the default profile" \
-            HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs \
-            || channel_update_failed=1
+        if ! _sudo "to update the default channel in the default profile" \
+            HOME="$ROOT_HOME" NIX_SSL_CERT_FILE="$NIX_SSL_CERT_FILE" "$NIX_INSTALLED_NIX/bin/nix-channel" --update nixpkgs; then
+            reminder <<EOF
+I had trouble fetching the nixpkgs channel (are you offline?)
+To try again later, run: sudo -i nix-channel --update nixpkgs
+EOF
+        fi
     fi
 }
 
@@ -660,6 +802,17 @@ EOF
 }
 
 main() {
+    # TODO: I've moved this out of validate_starting_assumptions so we
+    # can fail faster in this case. Sourcing install-darwin... now runs
+    # `touch /` to detect Read-only root, but it could update times on
+    # pre-Catalina macOS if run as root user.
+    if [ $EUID -eq 0 ]; then
+        failure <<EOF
+Please do not run this script with root privileges. We will call sudo
+when we need to.
+EOF
+    fi
+
     if [ "$(uname -s)" = "Darwin" ]; then
         # shellcheck source=./install-darwin-multi-user.sh
         . "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh"
@@ -673,17 +826,24 @@ main() {
     welcome_to_nix
     chat_about_sudo
 
+    cure_artifacts
+    # TODO: there's a tension between cure and validate. I moved the
+    # sudo/root check out of validate to the head of this func.
+    # Cure is *intended* to subsume the validate-and-abort approach,
+    # so it may eventually obsolete it.
     validate_starting_assumptions
 
     setup_report
 
     if ! ui_confirm "Ready to continue?"; then
         ok "Alright, no changes have been made :)"
-        contactme
+        get_help
         trap finish_cleanup EXIT
         exit 1
     fi
 
+    poly_prepare_to_install
+
     create_build_group
     create_build_users
     create_directories
@@ -693,6 +853,7 @@ main() {
     configure_shell_profile
 
     set +eu
+    # shellcheck disable=SC1091
     . /etc/profile
     set -eu
 
@@ -704,5 +865,20 @@ main() {
     trap finish_success EXIT
 }
 
+# set an empty initial arg for bare invocations in case we need to
+# disambiguate someone directly invoking this later.
+if [ "${#@}" = 0 ]; then
+    set ""
+fi
 
-main
+# ACTION for override
+case "${1-}" in
+    # uninstall)
+    #     shift
+    #     uninstall "$@";;
+    # install == same as the no-arg condition for now (but, explicit)
+    ""|install)
+        main;;
+    *) # holding space for future options (like uninstall + install?)
+        failure "install-multi-user: invalid argument";;
+esac
diff --git a/scripts/install-nix-from-closure.sh b/scripts/install-nix-from-closure.sh
index 0ee7ce5afe46d90d1c531c096384944ef246b926..734f0c800fbae8192fb8846623d2d9d43da5f2f0 100644
--- a/scripts/install-nix-from-closure.sh
+++ b/scripts/install-nix-from-closure.sh
@@ -26,18 +26,9 @@ fi
 
 # macOS support for 10.12.6 or higher
 if [ "$(uname -s)" = "Darwin" ]; then
-    IFS='.' read macos_major macos_minor macos_patch << EOF
+    IFS='.' read -r macos_major macos_minor macos_patch << EOF
 $(sw_vers -productVersion)
 EOF
-    # TODO: this is a temporary speed-bump to keep people from naively installing Nix
-    # on macOS Big Sur (11.0+, 10.16+) until nixpkgs updates are ready for them.
-    # *Ideally* this is gone before next Nix release. If you're intentionally working on
-    # Nix + Big Sur, just comment out this block and be on your way :)
-    if [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 15 ]; }; then
-        echo "$0: nixpkgs isn't quite ready to support macOS $(sw_vers -productVersion) yet"
-        exit 1
-    fi
-
     if [ "$macos_major" -lt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -lt 12 ]; } || { [ "$macos_minor" -eq 12 ] && [ "$macos_patch" -lt 6 ]; }; then
         # patch may not be present; command substitution for simplicity
         echo "$0: macOS $(sw_vers -productVersion) is not supported, upgrade to 10.12.6 or higher"
@@ -46,21 +37,40 @@ EOF
 fi
 
 # Determine if we could use the multi-user installer or not
-if [ "$(uname -s)" = "Darwin" ]; then
-    echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
-elif [ "$(uname -s)" = "Linux" ]; then
+if [ "$(uname -s)" = "Linux" ]; then
     echo "Note: a multi-user installation is possible. See https://nixos.org/nix/manual/#sect-multi-user-installation" >&2
 fi
 
-INSTALL_MODE=no-daemon
-CREATE_DARWIN_VOLUME=0
+case "$(uname -s)" in
+    "Darwin")
+        INSTALL_MODE=daemon;;
+    *)
+        INSTALL_MODE=no-daemon;;
+esac
+
+# space-separated string
+ACTIONS=
+
 # handle the command line flags
 while [ $# -gt 0 ]; do
     case $1 in
         --daemon)
-            INSTALL_MODE=daemon;;
+            INSTALL_MODE=daemon
+            ACTIONS="${ACTIONS}install "
+            ;;
         --no-daemon)
-            INSTALL_MODE=no-daemon;;
+            if [ "$(uname -s)" = "Darwin" ]; then
+                printf '\e[1;31mError: --no-daemon installs are no longer supported on Darwin/macOS!\e[0m\n' >&2
+                exit 1
+            fi
+            INSTALL_MODE=no-daemon
+            # intentional tail space
+            ACTIONS="${ACTIONS}install "
+            ;;
+        # --uninstall)
+        #     # intentional tail space
+        #     ACTIONS="${ACTIONS}uninstall "
+        #     ;;
         --no-channel-add)
             export NIX_INSTALLER_NO_CHANNEL_ADD=1;;
         --daemon-user-count)
@@ -69,13 +79,18 @@ while [ $# -gt 0 ]; do
         --no-modify-profile)
             NIX_INSTALLER_NO_MODIFY_PROFILE=1;;
         --darwin-use-unencrypted-nix-store-volume)
-            CREATE_DARWIN_VOLUME=1;;
+            {
+                echo "Warning: the flag --darwin-use-unencrypted-nix-store-volume"
+                echo "         is no longer needed and will be removed in the future."
+                echo ""
+            } >&2;;
         --nix-extra-conf-file)
-            export NIX_EXTRA_CONF="$(cat $2)"
+            # shellcheck disable=SC2155
+            export NIX_EXTRA_CONF="$(cat "$2")"
             shift;;
         *)
-            (
-                echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--darwin-use-unencrypted-nix-store-volume] [--nix-extra-conf-file FILE]"
+            {
+                echo "Nix Installer [--daemon|--no-daemon] [--daemon-user-count INT] [--no-channel-add] [--no-modify-profile] [--nix-extra-conf-file FILE]"
 
                 echo "Choose installation method."
                 echo ""
@@ -101,45 +116,16 @@ while [ $# -gt 0 ]; do
                 if [ -n "${INVOKED_FROM_INSTALL_IN:-}" ]; then
                     echo " --tarball-url-prefix URL: Base URL to download the Nix tarball from."
                 fi
-            ) >&2
-
-            # darwin and Catalina+
-            if [ "$(uname -s)" = "Darwin" ] && { [ "$macos_major" -gt 10 ] || { [ "$macos_major" -eq 10 ] && [ "$macos_minor" -gt 14 ]; }; }; then
-                (
-                    echo " --darwin-use-unencrypted-nix-store-volume: Create an APFS volume for the Nix"
-                    echo "              store and mount it at /nix. This is the recommended way to create"
-                    echo "              /nix with a read-only / on macOS >=10.15."
-                    echo "              See: https://nixos.org/nix/manual/#sect-macos-installation"
-                    echo ""
-                ) >&2
-            fi
+            } >&2
+
             exit;;
     esac
     shift
 done
 
-if [ "$(uname -s)" = "Darwin" ]; then
-    if [ "$CREATE_DARWIN_VOLUME" = 1 ]; then
-        printf '\e[1;31mCreating volume and mountpoint /nix.\e[0m\n'
-        "$self/create-darwin-volume.sh"
-    fi
-
-    writable="$(diskutil info -plist / | xmllint --xpath "name(/plist/dict/key[text()='Writable']/following-sibling::*[1])" -)"
-    if ! [ -e $dest ] && [ "$writable" = "false" ]; then
-        (
-            echo ""
-            echo "Installing on macOS >=10.15 requires relocating the store to an apfs volume."
-            echo "Use sh <(curl -L https://nixos.org/nix/install) --darwin-use-unencrypted-nix-store-volume or run the preparation steps manually."
-            echo "See https://nixos.org/nix/manual/#sect-macos-installation"
-            echo ""
-        ) >&2
-        exit 1
-    fi
-fi
-
 if [ "$INSTALL_MODE" = "daemon" ]; then
     printf '\e[1;31mSwitching to the Multi-user Installer\e[0m\n'
-    exec "$self/install-multi-user"
+    exec "$self/install-multi-user" $ACTIONS # let ACTIONS split
     exit 0
 fi
 
@@ -194,6 +180,7 @@ if ! "$nix/bin/nix-store" --load-db < "$self/.reginfo"; then
     exit 1
 fi
 
+# shellcheck source=./nix-profile.sh.in
 . "$nix/etc/profile.d/nix.sh"
 
 if ! "$nix/bin/nix-env" -i "$nix"; then
diff --git a/scripts/install-systemd-multi-user.sh b/scripts/install-systemd-multi-user.sh
index fda5ef6007cff6cdf68631e1e1a603ff54cb3f5c..81c61b2a0149df6218b77a2762e25d0cd357d3cc 100755
--- a/scripts/install-systemd-multi-user.sh
+++ b/scripts/install-systemd-multi-user.sh
@@ -41,10 +41,8 @@ handle_network_proxy() {
     fi
 }
 
-poly_validate_assumptions() {
-    if [ "$(uname -s)" != "Linux" ]; then
-        failure "This script is for use with Linux!"
-    fi
+poly_cure_artifacts() {
+    :
 }
 
 poly_service_installed_check() {
@@ -72,7 +70,7 @@ poly_service_setup_note() {
 EOF
 }
 
-poly_extra_try_me_commands(){
+poly_extra_try_me_commands() {
     if [ -e /run/systemd/system ]; then
         :
     else
@@ -81,19 +79,10 @@ poly_extra_try_me_commands(){
 EOF
     fi
 }
-poly_extra_setup_instructions(){
-    if [ -e /run/systemd/system ]; then
-        :
-    else
-        cat <<EOF
-Additionally, you may want to add nix-daemon to your init-system.
-
-EOF
-    fi
-}
 
 poly_configure_nix_daemon_service() {
     if [ -e /run/systemd/system ]; then
+        task "Setting up the nix-daemon systemd service"
         _sudo "to set up the nix-daemon service" \
               systemctl link "/nix/var/nix/profiles/default$SERVICE_SRC"
 
@@ -110,6 +99,8 @@ poly_configure_nix_daemon_service() {
 
         _sudo "to start the nix-daemon.service" \
               systemctl restart nix-daemon.service
+    else
+        reminder "I don't support your init system yet; you may want to add nix-daemon manually."
     fi
 }
 
@@ -207,3 +198,7 @@ poly_create_build_user() {
           --password "!" \
           "$username"
 }
+
+poly_prepare_to_install() {
+    :
+}
diff --git a/scripts/install.in b/scripts/install.in
index 7d25f7bd738b1f71f046f12362eedd0a810f477c..39016d1613025f556cb2ea73fb5c8bb90c03683f 100755
--- a/scripts/install.in
+++ b/scripts/install.in
@@ -46,15 +46,9 @@ case "$(uname -s).$(uname -m)" in
         system=x86_64-darwin
         ;;
     Darwin.arm64|Darwin.aarch64)
-        # check for Rosetta 2 support
-        if ! [ -f /Library/Apple/System/Library/LaunchDaemons/com.apple.oahd.plist ]; then
-          oops "Rosetta 2 is not installed on this ARM64 macOS machine. Run softwareupdate --install-rosetta then restart installation"
-        fi
-
-        hash=@binaryTarball_x86_64-darwin@
-        path=@tarballPath_x86_64-darwin@
-        # eventually maybe: aarch64-darwin
-        system=x86_64-darwin
+        hash=@binaryTarball_aarch64-darwin@
+        path=@tarballPath_aarch64-darwin@
+        system=aarch64-darwin
         ;;
     *) oops "sorry, there is no binary distribution of Nix for your platform";;
 esac
diff --git a/src/build-remote/build-remote.cc b/src/build-remote/build-remote.cc
index 57f2cd32d4de4c5f553f73b9cba9e5b01903b935..0904f2ce949fa82e57ecf416063cc1d916f1630d 100644
--- a/src/build-remote/build-remote.cc
+++ b/src/build-remote/build-remote.cc
@@ -277,7 +277,16 @@ connected:
 
         auto drv = store->readDerivation(*drvPath);
         auto outputHashes = staticOutputHashes(*store, drv);
-        drv.inputSrcs = store->parseStorePathSet(inputs);
+
+        // Hijack the input paths of the derivation to include all the paths
+        // that come from the `inputDrvs` set.
+        // We don’t do that for derivations whose `inputDrvs` is empty, because:
+        // 1. it’s not needed, and
+        // 2. changing the `inputSrcs` set changes the associated output ids,
+        //    which break CA derivations.
+        if (!drv.inputDrvs.empty())
+            drv.inputSrcs = store->parseStorePathSet(inputs);
 
         auto result = sshStore->buildDerivation(*drvPath, drv);
 
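A note on the guard above: the `inputs` path set is only spliced into `inputSrcs` when the derivation actually has `inputDrvs`, because rewriting `inputSrcs` of an input-free derivation would change its output ids and break CA derivations. A minimal, self-contained sketch of that rule, using a hypothetical MiniDerivation stand-in rather than the real Nix types:

    // Illustrative sketch only; MiniDerivation is a stand-in, not the Nix class.
    #include <cassert>
    #include <set>
    #include <string>

    struct MiniDerivation {
        std::set<std::string> inputSrcs; // plain store paths
        std::set<std::string> inputDrvs; // derivations this one depends on
    };

    // Splice the supplied input paths into inputSrcs, but only when the
    // derivation has inputDrvs; otherwise leave it alone so its output ids
    // (and any CA realisations keyed on them) stay stable.
    void maybeHijackInputSrcs(MiniDerivation & drv, const std::set<std::string> & inputs)
    {
        if (!drv.inputDrvs.empty())
            drv.inputSrcs = inputs;
    }

    int main()
    {
        MiniDerivation leaf; // no inputDrvs
        maybeHijackInputSrcs(leaf, {"/nix/store/aaa-dep"});
        assert(leaf.inputSrcs.empty()); // left untouched

        MiniDerivation composite;
        composite.inputDrvs = {"/nix/store/bbb-dep.drv"};
        maybeHijackInputSrcs(composite, {"/nix/store/aaa-dep"});
        assert(composite.inputSrcs.size() == 1); // spliced in
    }
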
diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc
index d29954f679bf14cf258cc6ce7796d248ab721761..569c4b9e4c25fab9ec7c37fca179262c6e2b5913 100644
--- a/src/libcmd/command.cc
+++ b/src/libcmd/command.cc
@@ -54,7 +54,7 @@ void StoreCommand::run()
     run(getStore());
 }
 
-RealisedPathsCommand::RealisedPathsCommand(bool recursive)
+BuiltPathsCommand::BuiltPathsCommand(bool recursive)
     : recursive(recursive)
 {
     if (recursive)
@@ -81,39 +81,45 @@ RealisedPathsCommand::RealisedPathsCommand(bool recursive)
     });
 }
 
-void RealisedPathsCommand::run(ref<Store> store)
+void BuiltPathsCommand::run(ref<Store> store)
 {
-    std::vector<RealisedPath> paths;
+    BuiltPaths paths;
     if (all) {
         if (installables.size())
             throw UsageError("'--all' does not expect arguments");
         // XXX: Only uses opaque paths, ignores all the realisations
         for (auto & p : store->queryAllValidPaths())
-            paths.push_back(p);
+            paths.push_back(BuiltPath::Opaque{p});
     } else {
-        auto pathSet = toRealisedPaths(store, realiseMode, operateOn, installables);
+        paths = toBuiltPaths(store, realiseMode, operateOn, installables);
         if (recursive) {
-            auto roots = std::move(pathSet);
-            pathSet = {};
-            RealisedPath::closure(*store, roots, pathSet);
+            // XXX: This only computes the store path closure, ignoring
+            // intermediate realisations
+            StorePathSet pathsRoots, pathsClosure;
+            for (auto & root: paths) {
+                auto rootFromThis = root.outPaths();
+                pathsRoots.insert(rootFromThis.begin(), rootFromThis.end());
+            }
+            store->computeFSClosure(pathsRoots, pathsClosure);
+            for (auto & path : pathsClosure)
+                paths.push_back(BuiltPath::Opaque{path});
         }
-        for (auto & path : pathSet)
-            paths.push_back(path);
     }
 
     run(store, std::move(paths));
 }
 
 StorePathsCommand::StorePathsCommand(bool recursive)
-    : RealisedPathsCommand(recursive)
+    : BuiltPathsCommand(recursive)
 {
 }
 
-void StorePathsCommand::run(ref<Store> store, std::vector<RealisedPath> paths)
+void StorePathsCommand::run(ref<Store> store, BuiltPaths paths)
 {
     StorePaths storePaths;
-    for (auto & p : paths)
-        storePaths.push_back(p.path());
+    for (auto& builtPath : paths)
+        for (auto& p : builtPath.outPaths())
+            storePaths.push_back(p);
 
     run(store, std::move(storePaths));
 }
@@ -162,7 +168,7 @@ void MixProfile::updateProfile(const StorePath & storePath)
             profile2, storePath));
 }
 
-void MixProfile::updateProfile(const Buildables & buildables)
+void MixProfile::updateProfile(const BuiltPaths & buildables)
 {
     if (!profile) return;
 
@@ -170,18 +176,15 @@ void MixProfile::updateProfile(const Buildables & buildables)
 
     for (auto & buildable : buildables) {
         std::visit(overloaded {
-            [&](BuildableOpaque bo) {
+            [&](BuiltPath::Opaque bo) {
                 result.push_back(bo.path);
             },
-            [&](BuildableFromDrv bfd) {
+            [&](BuiltPath::Built bfd) {
                 for (auto & output : bfd.outputs) {
-                    /* Output path should be known because we just tried to
-                       build it. */
-                    assert(output.second);
-                    result.push_back(*output.second);
+                    result.push_back(output.second);
                 }
             },
-        }, buildable);
+        }, buildable.raw());
     }
 
     if (result.size() != 1)
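
For readers new to the BuiltPath type used above: it is a variant of an opaque store path and a built derivation with concrete output paths, and callers dispatch on it with std::visit plus the `overloaded` helper. A rough, self-contained sketch of that dispatch pattern, with simplified stand-in types rather than the real Nix definitions:

    // Sketch only: MiniBuiltPath approximates the Opaque/Built split; the real
    // BuiltPath, overloaded helper and store-path types live in the Nix headers.
    #include <iostream>
    #include <map>
    #include <string>
    #include <variant>
    #include <vector>

    struct Opaque { std::string path; };
    struct Built  { std::string drvPath; std::map<std::string, std::string> outputs; };
    using MiniBuiltPath = std::variant<Opaque, Built>;

    // The 'overloaded' trick: build one visitor out of several lambdas.
    template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
    template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

    // Collect every output path, whichever alternative each element holds.
    std::vector<std::string> outPaths(const std::vector<MiniBuiltPath> & paths)
    {
        std::vector<std::string> res;
        for (auto & p : paths)
            std::visit(overloaded {
                [&](const Opaque & o) { res.push_back(o.path); },
                [&](const Built & b) {
                    for (auto & out : b.outputs) res.push_back(out.second);
                },
            }, p);
        return res;
    }

    int main()
    {
        std::vector<MiniBuiltPath> paths = {
            Opaque{"/nix/store/xxx-source"},
            Built{"/nix/store/yyy-hello.drv", {{"out", "/nix/store/zzz-hello"}}},
        };
        for (auto & path : outPaths(paths)) std::cout << path << "\n";
    }
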
diff --git a/src/libcmd/command.hh b/src/libcmd/command.hh
index e66c697ebf475dfa8ff52f61067a547d61ca768f..35b3a384b5b63ddbb7115ddf7e35237795061bbe 100644
--- a/src/libcmd/command.hh
+++ b/src/libcmd/command.hh
@@ -143,7 +143,7 @@ private:
 };
 
 /* A command that operates on zero or more store paths. */
-struct RealisedPathsCommand : public InstallablesCommand
+struct BuiltPathsCommand : public InstallablesCommand
 {
 private:
 
@@ -156,26 +156,26 @@ protected:
 
 public:
 
-    RealisedPathsCommand(bool recursive = false);
+    BuiltPathsCommand(bool recursive = false);
 
     using StoreCommand::run;
 
-    virtual void run(ref<Store> store, std::vector<RealisedPath> paths) = 0;
+    virtual void run(ref<Store> store, BuiltPaths paths) = 0;
 
     void run(ref<Store> store) override;
 
     bool useDefaultInstallables() override { return !all; }
 };
 
-struct StorePathsCommand : public RealisedPathsCommand
+struct StorePathsCommand : public BuiltPathsCommand
 {
     StorePathsCommand(bool recursive = false);
 
-    using RealisedPathsCommand::run;
+    using BuiltPathsCommand::run;
 
     virtual void run(ref<Store> store, std::vector<StorePath> storePaths) = 0;
 
-    void run(ref<Store> store, std::vector<RealisedPath> paths) override;
+    void run(ref<Store> store, BuiltPaths paths) override;
 };
 
 /* A command that operates on exactly one store path. */
@@ -216,7 +216,7 @@ static RegisterCommand registerCommand2(std::vector<std::string> && name)
     return RegisterCommand(std::move(name), [](){ return make_ref<T>(); });
 }
 
-Buildables build(ref<Store> store, Realise mode,
+BuiltPaths build(ref<Store> store, Realise mode,
     std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode = bmNormal);
 
 std::set<StorePath> toStorePaths(ref<Store> store,
@@ -231,7 +231,7 @@ std::set<StorePath> toDerivations(ref<Store> store,
     std::vector<std::shared_ptr<Installable>> installables,
     bool useDeriver = false);
 
-std::set<RealisedPath> toRealisedPaths(
+BuiltPaths toBuiltPaths(
     ref<Store> store,
     Realise mode,
     OperateOn operateOn,
@@ -252,7 +252,7 @@ struct MixProfile : virtual StoreCommand
 
     /* If 'profile' is set, make it point at the store path produced
        by 'buildables'. */
-    void updateProfile(const Buildables & buildables);
+    void updateProfile(const BuiltPaths & buildables);
 };
 
 struct MixDefaultProfile : MixProfile
diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc
index 7102f5a1a1619489efcff08258214c9a43fe2b78..fe52912cf95ad30badd11724daeb292e272b333d 100644
--- a/src/libcmd/installables.cc
+++ b/src/libcmd/installables.cc
@@ -20,31 +20,6 @@
 
 namespace nix {
 
-nlohmann::json BuildableOpaque::toJSON(ref<Store> store) const {
-    nlohmann::json res;
-    res["path"] = store->printStorePath(path);
-    return res;
-}
-
-nlohmann::json BuildableFromDrv::toJSON(ref<Store> store) const {
-    nlohmann::json res;
-    res["drvPath"] = store->printStorePath(drvPath);
-    for (const auto& [output, path] : outputs) {
-        res["outputs"][output] = path ? store->printStorePath(*path) : "";
-    }
-    return res;
-}
-
-nlohmann::json buildablesToJSON(const Buildables & buildables, ref<Store> store) {
-    auto res = nlohmann::json::array();
-    for (const Buildable & buildable : buildables) {
-        std::visit([&res, store](const auto & buildable) {
-            res.push_back(buildable.toJSON(store));
-        }, buildable);
-    }
-    return res;
-}
-
 void completeFlakeInputPath(
     ref<EvalState> evalState,
     const FlakeRef & flakeRef,
@@ -111,10 +86,11 @@ MixFlakeOptions::MixFlakeOptions()
 
     addFlag({
         .longName = "override-input",
-        .description = "Override a specific flake input (e.g. `dwarffs/nixpkgs`).",
+        .description = "Override a specific flake input (e.g. `dwarffs/nixpkgs`). This implies `--no-write-lock-file`.",
         .category = category,
         .labels = {"input-path", "flake-url"},
         .handler = {[&](std::string inputPath, std::string flakeRef) {
+            lockFlags.writeLockFile = false;
             lockFlags.inputOverrides.insert_or_assign(
                 flake::parseInputPath(inputPath),
                 parseFlakeRef(flakeRef, absPath(".")));
@@ -309,9 +285,9 @@ void completeFlakeRef(ref<Store> store, std::string_view prefix)
     }
 }
 
-Buildable Installable::toBuildable()
+DerivedPath Installable::toDerivedPath()
 {
-    auto buildables = toBuildables();
+    auto buildables = toDerivedPaths();
     if (buildables.size() != 1)
         throw Error("installable '%s' evaluates to %d derivations, where only one is expected", what(), buildables.size());
     return std::move(buildables[0]);
@@ -345,22 +321,19 @@ struct InstallableStorePath : Installable
 
     std::string what() override { return store->printStorePath(storePath); }
 
-    Buildables toBuildables() override
+    DerivedPaths toDerivedPaths() override
     {
         if (storePath.isDerivation()) {
-            std::map<std::string, std::optional<StorePath>> outputs;
             auto drv = store->readDerivation(storePath);
-            for (auto & [name, output] : drv.outputsAndOptPaths(*store))
-                outputs.emplace(name, output.second);
             return {
-                BuildableFromDrv {
+                DerivedPath::Built {
                     .drvPath = storePath,
-                    .outputs = std::move(outputs)
+                    .outputs = drv.outputNames(),
                 }
             };
         } else {
             return {
-                BuildableOpaque {
+                DerivedPath::Opaque {
                     .path = storePath,
                 }
             };
@@ -373,22 +346,22 @@ struct InstallableStorePath : Installable
     }
 };
 
-Buildables InstallableValue::toBuildables()
+DerivedPaths InstallableValue::toDerivedPaths()
 {
-    Buildables res;
+    DerivedPaths res;
 
-    std::map<StorePath, std::map<std::string, std::optional<StorePath>>> drvsToOutputs;
+    std::map<StorePath, std::set<std::string>> drvsToOutputs;
 
     // Group by derivation, helps with .all in particular
     for (auto & drv : toDerivations()) {
         auto outputName = drv.outputName;
         if (outputName == "")
             throw Error("derivation '%s' lacks an 'outputName' attribute", state->store->printStorePath(drv.drvPath));
-        drvsToOutputs[drv.drvPath].insert_or_assign(outputName, drv.outPath);
+        drvsToOutputs[drv.drvPath].insert(outputName);
     }
 
     for (auto & i : drvsToOutputs)
-        res.push_back(BuildableFromDrv { i.first, i.second });
+        res.push_back(DerivedPath::Built { i.first, i.second });
 
     return res;
 }
@@ -527,7 +500,11 @@ std::tuple<std::string, FlakeRef, InstallableValue::DerivationInfo> InstallableF
     auto root = cache->getRoot();
 
     for (auto & attrPath : getActualAttrPaths()) {
-        auto attr = root->findAlongAttrPath(parseAttrPath(*state, attrPath));
+        auto attr = root->findAlongAttrPath(
+            parseAttrPath(*state, attrPath),
+            true
+        );
+
         if (!attr) continue;
 
         if (!attr->isDerivation())
@@ -695,31 +672,67 @@ std::shared_ptr<Installable> SourceExprCommand::parseInstallable(
     return installables.front();
 }
 
-Buildables build(ref<Store> store, Realise mode,
+BuiltPaths getBuiltPaths(ref<Store> store, DerivedPaths hopefullyBuiltPaths)
+{
+    BuiltPaths res;
+    for (auto& b : hopefullyBuiltPaths)
+        std::visit(
+            overloaded{
+                [&](DerivedPath::Opaque bo) {
+                    res.push_back(BuiltPath::Opaque{bo.path});
+                },
+                [&](DerivedPath::Built bfd) {
+                    OutputPathMap outputs;
+                    auto drv = store->readDerivation(bfd.drvPath);
+                    auto outputHashes = staticOutputHashes(*store, drv);
+                    auto drvOutputs = drv.outputsAndOptPaths(*store);
+                    for (auto& output : bfd.outputs) {
+                        if (!outputHashes.count(output))
+                            throw Error(
+                                "the derivation '%s' doesn't have an output "
+                                "named '%s'",
+                                store->printStorePath(bfd.drvPath), output);
+                        if (settings.isExperimentalFeatureEnabled(
+                                "ca-derivations")) {
+                            auto outputId =
+                                DrvOutput{outputHashes.at(output), output};
+                            auto realisation =
+                                store->queryRealisation(outputId);
+                            if (!realisation)
+                                throw Error(
+                                    "cannot operate on an output of unbuilt "
+                                    "content-addressed derivation '%s'",
+                                    outputId.to_string());
+                            outputs.insert_or_assign(
+                                output, realisation->outPath);
+                        } else {
+                            // If ca-derivations isn't enabled, assume that
+                            // the output path is statically known.
+                            assert(drvOutputs.count(output));
+                            assert(drvOutputs.at(output).second);
+                            outputs.insert_or_assign(
+                                output, *drvOutputs.at(output).second);
+                        }
+                    }
+                    res.push_back(BuiltPath::Built{bfd.drvPath, outputs});
+                },
+            },
+            b.raw());
+
+    return res;
+}
+
+BuiltPaths build(ref<Store> store, Realise mode,
     std::vector<std::shared_ptr<Installable>> installables, BuildMode bMode)
 {
     if (mode == Realise::Nothing)
         settings.readOnlyMode = true;
 
-    Buildables buildables;
-
-    std::vector<StorePathWithOutputs> pathsToBuild;
+    std::vector<DerivedPath> pathsToBuild;
 
     for (auto & i : installables) {
-        for (auto & b : i->toBuildables()) {
-            std::visit(overloaded {
-                [&](BuildableOpaque bo) {
-                    pathsToBuild.push_back({bo.path});
-                },
-                [&](BuildableFromDrv bfd) {
-                    StringSet outputNames;
-                    for (auto & output : bfd.outputs)
-                        outputNames.insert(output.first);
-                    pathsToBuild.push_back({bfd.drvPath, outputNames});
-                },
-            }, b);
-            buildables.push_back(std::move(b));
-        }
+        auto b = i->toDerivedPaths();
+        pathsToBuild.insert(pathsToBuild.end(), b.begin(), b.end());
     }
 
     if (mode == Realise::Nothing)
@@ -727,59 +740,26 @@ Buildables build(ref<Store> store, Realise mode,
     else if (mode == Realise::Outputs)
         store->buildPaths(pathsToBuild, bMode);
 
-    return buildables;
+    return getBuiltPaths(store, pathsToBuild);
 }
 
-std::set<RealisedPath> toRealisedPaths(
+BuiltPaths toBuiltPaths(
     ref<Store> store,
     Realise mode,
     OperateOn operateOn,
     std::vector<std::shared_ptr<Installable>> installables)
 {
-    std::set<RealisedPath> res;
     if (operateOn == OperateOn::Output) {
-        for (auto & b : build(store, mode, installables))
-            std::visit(overloaded {
-                [&](BuildableOpaque bo) {
-                    res.insert(bo.path);
-                },
-                [&](BuildableFromDrv bfd) {
-                    auto drv = store->readDerivation(bfd.drvPath);
-                    auto outputHashes = staticOutputHashes(*store, drv);
-                    for (auto & output : bfd.outputs) {
-                        if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
-                            if (!outputHashes.count(output.first))
-                                throw Error(
-                                    "the derivation '%s' doesn't have an output named '%s'",
-                                    store->printStorePath(bfd.drvPath),
-                                    output.first);
-                            auto outputId = DrvOutput{outputHashes.at(output.first), output.first};
-                            auto realisation = store->queryRealisation(outputId);
-                            if (!realisation)
-                                throw Error("cannot operate on an output of unbuilt content-addresed derivation '%s'", outputId.to_string());
-                            res.insert(RealisedPath{*realisation});
-                        }
-                        else {
-                            // If ca-derivations isn't enabled, behave as if
-                            // all the paths are opaque to keep the default
-                            // behavior
-                            assert(output.second);
-                            res.insert(*output.second);
-                        }
-                    }
-                },
-            }, b);
+        return build(store, mode, installables);
     } else {
         if (mode == Realise::Nothing)
             settings.readOnlyMode = true;
 
-        for (auto & i : installables)
-            for (auto & b : i->toBuildables())
-                if (auto bfd = std::get_if<BuildableFromDrv>(&b))
-                    res.insert(bfd->drvPath);
+        BuiltPaths res;
+        for (auto & drvPath : toDerivations(store, installables, true))
+            res.push_back(BuiltPath::Opaque{drvPath});
+        return res;
     }
-
-    return res;
 }
 
 StorePathSet toStorePaths(ref<Store> store,
@@ -787,8 +767,10 @@ StorePathSet toStorePaths(ref<Store> store,
     std::vector<std::shared_ptr<Installable>> installables)
 {
     StorePathSet outPaths;
-    for (auto & path : toRealisedPaths(store, mode, operateOn, installables))
-            outPaths.insert(path.path());
+    for (auto & path : toBuiltPaths(store, mode, operateOn, installables)) {
+        auto thisOutPaths = path.outPaths();
+        outPaths.insert(thisOutPaths.begin(), thisOutPaths.end());
+    }
     return outPaths;
 }
 
@@ -810,9 +792,9 @@ StorePathSet toDerivations(ref<Store> store,
     StorePathSet drvPaths;
 
     for (auto & i : installables)
-        for (auto & b : i->toBuildables())
+        for (auto & b : i->toDerivedPaths())
             std::visit(overloaded {
-                [&](BuildableOpaque bo) {
+                [&](DerivedPath::Opaque bo) {
                     if (!useDeriver)
                         throw Error("argument '%s' did not evaluate to a derivation", i->what());
                     auto derivers = store->queryValidDerivers(bo.path);
@@ -821,10 +803,10 @@ StorePathSet toDerivations(ref<Store> store,
                     // FIXME: use all derivers?
                     drvPaths.insert(*derivers.begin());
                 },
-                [&](BuildableFromDrv bfd) {
+                [&](DerivedPath::Built bfd) {
                     drvPaths.insert(bfd.drvPath);
                 },
-            }, b);
+            }, b.raw());
 
     return drvPaths;
 }
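
For context on getBuiltPaths above: a DerivedPath describes what should be built (a derivation plus requested output names), while a BuiltPath records what was actually produced (concrete output paths). With the ca-derivations feature enabled, the concrete path comes from the registered realisation; otherwise it is assumed to be statically known from the derivation. A condensed sketch of that decision, using hypothetical placeholder types and a toy lookup instead of the real store API:

    // Sketch only; MiniStore and its maps are stand-ins for the store queries.
    #include <map>
    #include <optional>
    #include <stdexcept>
    #include <string>

    struct MiniStore {
        bool caDerivationsEnabled = false;
        // "drvPath!output" -> realised output path (CA derivations only)
        std::map<std::string, std::string> realisations;
        // "drvPath!output" -> statically known output path (non-CA derivations)
        std::map<std::string, std::string> staticOutPaths;

        std::optional<std::string> queryRealisation(const std::string & id) const {
            auto i = realisations.find(id);
            if (i == realisations.end()) return std::nullopt;
            return i->second;
        }
    };

    // Resolve one requested output of a derivation to a concrete store path.
    std::string resolveOutput(const MiniStore & store,
                              const std::string & drvPath,
                              const std::string & output)
    {
        if (store.caDerivationsEnabled) {
            auto realised = store.queryRealisation(drvPath + "!" + output);
            if (!realised)
                throw std::runtime_error(
                    "cannot operate on an output of an unbuilt content-addressed derivation");
            return *realised;
        }
        // Without ca-derivations the output path is statically known.
        return store.staticOutPaths.at(drvPath + "!" + output);
    }
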
diff --git a/src/libcmd/installables.hh b/src/libcmd/installables.hh
index b714f097b0bff051386aa0a0d66b40ce52e37050..298fd48f869f4e3f83abe76df38508cb1f120b1c 100644
--- a/src/libcmd/installables.hh
+++ b/src/libcmd/installables.hh
@@ -2,13 +2,13 @@
 
 #include "util.hh"
 #include "path.hh"
+#include "path-with-outputs.hh"
+#include "derived-path.hh"
 #include "eval.hh"
 #include "flake/flake.hh"
 
 #include <optional>
 
-#include <nlohmann/json_fwd.hpp>
-
 namespace nix {
 
 struct DrvInfo;
@@ -16,25 +16,6 @@ struct SourceExprCommand;
 
 namespace eval_cache { class EvalCache; class AttrCursor; }
 
-struct BuildableOpaque {
-    StorePath path;
-    nlohmann::json toJSON(ref<Store> store) const;
-};
-
-struct BuildableFromDrv {
-    StorePath drvPath;
-    std::map<std::string, std::optional<StorePath>> outputs;
-    nlohmann::json toJSON(ref<Store> store) const;
-};
-
-typedef std::variant<
-    BuildableOpaque,
-    BuildableFromDrv
-> Buildable;
-
-typedef std::vector<Buildable> Buildables;
-nlohmann::json buildablesToJSON(const Buildables & buildables, ref<Store> store);
-
 struct App
 {
     std::vector<StorePathWithOutputs> context;
@@ -42,17 +23,23 @@ struct App
     // FIXME: add args, sandbox settings, metadata, ...
 };
 
+struct UnresolvedApp
+{
+    App unresolved;
+    App resolve(ref<Store>);
+};
+
 struct Installable
 {
     virtual ~Installable() { }
 
     virtual std::string what() = 0;
 
-    virtual Buildables toBuildables() = 0;
+    virtual DerivedPaths toDerivedPaths() = 0;
 
-    Buildable toBuildable();
+    DerivedPath toDerivedPath();
 
-    App toApp(EvalState & state);
+    UnresolvedApp toApp(EvalState & state);
 
     virtual std::pair<Value *, Pos> toValue(EvalState & state)
     {
@@ -93,7 +80,7 @@ struct InstallableValue : Installable
 
     virtual std::vector<DerivationInfo> toDerivations() = 0;
 
-    Buildables toBuildables() override;
+    DerivedPaths toDerivedPaths() override;
 };
 
 struct InstallableFlake : InstallableValue
diff --git a/src/libexpr/attr-set.hh b/src/libexpr/attr-set.hh
index 6d68e5df32c4cc1814871791b3610b76d5472882..1da8d91dfb43c154db6e2394389bc84de9237dd5 100644
--- a/src/libexpr/attr-set.hh
+++ b/src/libexpr/attr-set.hh
@@ -35,6 +35,7 @@ class Bindings
 {
 public:
     typedef uint32_t size_t;
+    Pos *pos;
 
 private:
     size_t size_, capacity_;
diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc
index 98d91c90597eb8dbbddc6a2296906c63dc755867..d7e21783dfd0f1fc08ab67792597b01ce99b32d1 100644
--- a/src/libexpr/eval-cache.cc
+++ b/src/libexpr/eval-cache.cc
@@ -486,11 +486,11 @@ std::shared_ptr<AttrCursor> AttrCursor::getAttr(std::string_view name)
     return getAttr(root->state.symbols.create(name));
 }
 
-std::shared_ptr<AttrCursor> AttrCursor::findAlongAttrPath(const std::vector<Symbol> & attrPath)
+std::shared_ptr<AttrCursor> AttrCursor::findAlongAttrPath(const std::vector<Symbol> & attrPath, bool force)
 {
     auto res = shared_from_this();
     for (auto & attr : attrPath) {
-        res = res->maybeGetAttr(attr);
+        res = res->maybeGetAttr(attr, force);
         if (!res) return {};
     }
     return res;
diff --git a/src/libexpr/eval-cache.hh b/src/libexpr/eval-cache.hh
index e23e45c9425fd32a4a6cdecf1e6b89c37d398614..43b34ebcb31ff626c2741ba477749874afc73dec 100644
--- a/src/libexpr/eval-cache.hh
+++ b/src/libexpr/eval-cache.hh
@@ -102,7 +102,7 @@ public:
 
     std::shared_ptr<AttrCursor> getAttr(std::string_view name);
 
-    std::shared_ptr<AttrCursor> findAlongAttrPath(const std::vector<Symbol> & attrPath);
+    std::shared_ptr<AttrCursor> findAlongAttrPath(const std::vector<Symbol> & attrPath, bool force = false);
 
     std::string getString();
 
diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc
index 3afe2e47b9b06badd3f8c6da30aa9c012007bb98..ef9f8efca96c2593f32e1a3976883cc47f73e0f3 100644
--- a/src/libexpr/eval.cc
+++ b/src/libexpr/eval.cc
@@ -201,6 +201,15 @@ string showType(const Value & v)
     }
 }
 
+Pos Value::determinePos(const Pos &pos) const
+{
+    switch (internalType) {
+        case tAttrs: return *attrs->pos;
+        case tLambda: return lambda.fun->pos;
+        case tApp: return app.left->determinePos(pos);
+        default: return pos;
+    }
+}
 
 bool Value::isTrivial() const
 {
@@ -1060,6 +1069,8 @@ void ExprAttrs::eval(EvalState & state, Env & env, Value & v)
         v.attrs->push_back(Attr(nameSym, i.valueExpr->maybeThunk(state, *dynamicEnv), &i.pos));
         v.attrs->sort(); // FIXME: inefficient
     }
+
+    v.attrs->pos = &pos;
 }
 
 
@@ -2091,9 +2102,12 @@ Strings EvalSettings::getDefaultNixPath()
         }
     };
 
-    add(getHome() + "/.nix-defexpr/channels");
-    add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs");
-    add(settings.nixStateDir + "/profiles/per-user/root/channels");
+    if (!evalSettings.restrictEval && !evalSettings.pureEval) {
+        add(getHome() + "/.nix-defexpr/channels");
+        add(settings.nixStateDir + "/profiles/per-user/root/channels/nixpkgs", "nixpkgs");
+        add(settings.nixStateDir + "/profiles/per-user/root/channels");
+    }
+
     return res;
 }
 
diff --git a/src/libexpr/flake/config.cc b/src/libexpr/flake/config.cc
index 63566131e98b9beb93f69a2c96891212490db171..c8a5a319fc78c128775def3bd3353c300e93e421 100644
--- a/src/libexpr/flake/config.cc
+++ b/src/libexpr/flake/config.cc
@@ -22,7 +22,9 @@ static TrustedList readTrustedList()
 
 static void writeTrustedList(const TrustedList & trustedList)
 {
-    writeFile(trustedListPath(), nlohmann::json(trustedList).dump());
+    auto path = trustedListPath();
+    createDirs(dirOf(path));
+    writeFile(path, nlohmann::json(trustedList).dump());
 }
 
 void ConfigFile::apply()
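
The writeTrustedList change above is the familiar create-the-parent-directory-before-writing fix: on a fresh setup the directory containing the trusted-settings file may not exist yet. A generic sketch of the same idea with std::filesystem (the real code uses Nix's createDirs and writeFile helpers):

    #include <filesystem>
    #include <fstream>
    #include <string>

    // Write `contents` to `path`, creating any missing parent directories first.
    void writeFileCreatingParents(const std::filesystem::path & path,
                                  const std::string & contents)
    {
        std::filesystem::create_directories(path.parent_path());
        std::ofstream out(path, std::ios::trunc);
        out << contents;
    }
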
diff --git a/src/libexpr/flake/flake.hh b/src/libexpr/flake/flake.hh
index 65ed1ad0a7df1d5d27c140ce4f90f28581aa82fe..d17d5e183511f66a51fdf4021c439d019fb8b389 100644
--- a/src/libexpr/flake/flake.hh
+++ b/src/libexpr/flake/flake.hh
@@ -113,7 +113,7 @@ struct LockFlags
     /* Whether to commit changes to flake.lock. */
     bool commitLockFile = false;
 
-    /* Flake inputs to be overriden. */
+    /* Flake inputs to be overridden. */
     std::map<InputPath, FlakeRef> inputOverrides;
 
     /* Flake inputs to be updated. This means that any existing lock
diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc
index 1a3990ea1469729e50df0e55fb380948b35476d6..f774e649332f06644cc0e24edea08112f8d13fa3 100644
--- a/src/libexpr/get-drvs.cc
+++ b/src/libexpr/get-drvs.cc
@@ -2,6 +2,7 @@
 #include "util.hh"
 #include "eval-inline.hh"
 #include "store-api.hh"
+#include "path-with-outputs.hh"
 
 #include <cstring>
 #include <regex>
@@ -19,7 +20,7 @@ DrvInfo::DrvInfo(EvalState & state, const string & attrPath, Bindings * attrs)
 DrvInfo::DrvInfo(EvalState & state, ref<Store> store, const std::string & drvPathWithOutputs)
     : state(&state), attrs(nullptr), attrPath("")
 {
-    auto [drvPath, selectedOutputs] = store->parsePathWithOutputs(drvPathWithOutputs);
+    auto [drvPath, selectedOutputs] = parsePathWithOutputs(*store, drvPathWithOutputs);
 
     this->drvPath = store->printStorePath(drvPath);
 
diff --git a/src/libexpr/local.mk b/src/libexpr/local.mk
index 26c53d30137859a9c22239913cb0815ee868bf84..c40abfb78fbb3b805504d86f17ec540ed8d98291 100644
--- a/src/libexpr/local.mk
+++ b/src/libexpr/local.mk
@@ -16,7 +16,7 @@ libexpr_CXXFLAGS += -I src/libutil -I src/libstore -I src/libfetchers -I src/lib
 libexpr_LIBS = libutil libstore libfetchers
 
 libexpr_LDFLAGS = -lboost_context
-ifneq ($(OS), FreeBSD)
+ifeq ($(OS), Linux)
  libexpr_LDFLAGS += -ldl
 endif
 
diff --git a/src/libexpr/nixexpr.hh b/src/libexpr/nixexpr.hh
index 8df8055b32ad37c2d32b0c273a6d5387d80978f7..51a14cd59a92d5c26108a0928a89fca5bc7565c3 100644
--- a/src/libexpr/nixexpr.hh
+++ b/src/libexpr/nixexpr.hh
@@ -180,6 +180,7 @@ struct ExprOpHasAttr : Expr
 struct ExprAttrs : Expr
 {
     bool recursive;
+    Pos pos;
     struct AttrDef {
         bool inherited;
         Expr * e;
@@ -199,7 +200,8 @@ struct ExprAttrs : Expr
     };
     typedef std::vector<DynamicAttrDef> DynamicAttrDefs;
     DynamicAttrDefs dynamicAttrs;
-    ExprAttrs() : recursive(false) { };
+    ExprAttrs(const Pos &pos) : recursive(false), pos(pos) { };
+    ExprAttrs() : recursive(false), pos(noPos) { };
     COMMON_METHODS
 };
 
diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y
index 49d995bb9ca9c327899615388d51cf21ab5113ec..f948dde474300c968f3d68eababe2fa1bc778360 100644
--- a/src/libexpr/parser.y
+++ b/src/libexpr/parser.y
@@ -478,7 +478,7 @@ binds
           $$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data));
       }
     }
-  | { $$ = new ExprAttrs; }
+  | { $$ = new ExprAttrs(makeCurPos(@0, data)); }
   ;
 
 attrs
diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc
index 1d1afa7686939891668cbafecbd010607aa45e63..e8569b6542b45b7612f495788d136bef6d5c3ceb 100644
--- a/src/libexpr/primops.cc
+++ b/src/libexpr/primops.cc
@@ -21,6 +21,8 @@
 #include <regex>
 #include <dlfcn.h>
 
+#include <cmath>
+
 
 namespace nix {
 
@@ -35,7 +37,7 @@ InvalidPathError::InvalidPathError(const Path & path) :
 
 void EvalState::realiseContext(const PathSet & context)
 {
-    std::vector<StorePathWithOutputs> drvs;
+    std::vector<DerivedPath::Built> drvs;
 
     for (auto & i : context) {
         auto [ctxS, outputName] = decodeContext(i);
@@ -43,7 +45,7 @@ void EvalState::realiseContext(const PathSet & context)
         if (!store->isValidPath(ctx))
             throw InvalidPathError(store->printStorePath(ctx));
         if (!outputName.empty() && ctx.isDerivation()) {
-            drvs.push_back(StorePathWithOutputs{ctx, {outputName}});
+            drvs.push_back({ctx, {outputName}});
         }
     }
 
@@ -51,14 +53,16 @@ void EvalState::realiseContext(const PathSet & context)
 
     if (!evalSettings.enableImportFromDerivation)
         throw EvalError("attempted to realize '%1%' during evaluation but 'allow-import-from-derivation' is false",
-            store->printStorePath(drvs.begin()->path));
+            store->printStorePath(drvs.begin()->drvPath));
 
     /* For performance, prefetch all substitute info. */
     StorePathSet willBuild, willSubstitute, unknown;
     uint64_t downloadSize, narSize;
-    store->queryMissing(drvs, willBuild, willSubstitute, unknown, downloadSize, narSize);
+    std::vector<DerivedPath> buildReqs;
+    for (auto & d : drvs) buildReqs.emplace_back(DerivedPath { d });
+    store->queryMissing(buildReqs, willBuild, willSubstitute, unknown, downloadSize, narSize);
 
-    store->buildPaths(drvs);
+    store->buildPaths(buildReqs);
 
     /* Add the output of this derivations to the allowed
        paths. */
@@ -545,18 +549,56 @@ typedef list<Value *> ValueList;
 #endif
 
 
+static Bindings::iterator getAttr(
+    EvalState & state,
+    string funcName,
+    string attrName,
+    Bindings * attrSet,
+    const Pos & pos)
+{
+    Bindings::iterator value = attrSet->find(state.symbols.create(attrName));
+    if (value == attrSet->end()) {
+        hintformat errorMsg = hintfmt(
+            "attribute '%s' missing for call to '%s'",
+            attrName,
+            funcName
+        );
+
+        Pos aPos = *attrSet->pos;
+        if (aPos == noPos) {
+            throw TypeError({
+                .msg = errorMsg,
+                .errPos = pos,
+            });
+        } else {
+            auto e = TypeError({
+                .msg = errorMsg,
+                .errPos = aPos,
+            });
+
+            // Adding another trace for the function name to make it clear
+            // which call received wrong arguments.
+            e.addTrace(pos, hintfmt("while invoking '%s'", funcName));
+            throw e;
+        }
+    }
+
+    return value;
+}
+
 static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
     state.forceAttrs(*args[0], pos);
 
     /* Get the start set. */
-    Bindings::iterator startSet =
-        args[0]->attrs->find(state.symbols.create("startSet"));
-    if (startSet == args[0]->attrs->end())
-        throw EvalError({
-            .msg = hintfmt("attribute 'startSet' required"),
-            .errPos = pos
-        });
+    Bindings::iterator startSet = getAttr(
+        state,
+        "genericClosure",
+        "startSet",
+        args[0]->attrs,
+        pos
+    );
+
     state.forceList(*startSet->value, pos);
 
     ValueList workSet;
@@ -564,13 +606,14 @@ static void prim_genericClosure(EvalState & state, const Pos & pos, Value * * ar
         workSet.push_back(startSet->value->listElems()[n]);
 
     /* Get the operator. */
-    Bindings::iterator op =
-        args[0]->attrs->find(state.symbols.create("operator"));
-    if (op == args[0]->attrs->end())
-        throw EvalError({
-            .msg = hintfmt("attribute 'operator' required"),
-            .errPos = pos
-        });
+    Bindings::iterator op = getAttr(
+        state,
+        "genericClosure",
+        "operator",
+        args[0]->attrs,
+        pos
+    );
+
     state.forceValue(*op->value, pos);
 
     /* Construct the closure by applying the operator to element of
@@ -673,6 +716,44 @@ static RegisterPrimOp primop_addErrorContext(RegisterPrimOp::Info {
     .fun = prim_addErrorContext,
 });
 
+static void prim_ceil(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    auto value = state.forceFloat(*args[0], args[0]->determinePos(pos));
+    mkInt(v, ceil(value));
+}
+
+static RegisterPrimOp primop_ceil({
+    .name = "__ceil",
+    .args = {"double"},
+    .doc = R"(
+        Converts an IEEE-754 double-precision floating-point number (*double*) to
+        the next higher integer.
+
+        If the datatype is neither an integer nor a "float", an evaluation error will be
+        thrown.
+    )",
+    .fun = prim_ceil,
+});
+
+static void prim_floor(EvalState & state, const Pos & pos, Value * * args, Value & v)
+{
+    auto value = state.forceFloat(*args[0], args[0]->determinePos(pos));
+    mkInt(v, floor(value));
+}
+
+static RegisterPrimOp primop_floor({
+    .name = "__floor",
+    .args = {"double"},
+    .doc = R"(
+        Converts an IEEE-754 double-precision floating-point number (*double*) to
+        the next lower integer.
+
+        If the datatype is neither an integer nor a "float", an evaluation error will be
+        thrown.
+    )",
+    .fun = prim_floor,
+});
+
 /* Try evaluating the argument. Success => {success=true; value=something;},
  * else => {success=false; value=false;} */
 static void prim_tryEval(EvalState & state, const Pos & pos, Value * * args, Value & v)
@@ -814,12 +895,14 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
     state.forceAttrs(*args[0], pos);
 
     /* Figure out the name first (for stack backtraces). */
-    Bindings::iterator attr = args[0]->attrs->find(state.sName);
-    if (attr == args[0]->attrs->end())
-        throw EvalError({
-            .msg = hintfmt("required attribute 'name' missing"),
-            .errPos = pos
-        });
+    Bindings::iterator attr = getAttr(
+        state,
+        "derivationStrict",
+        state.sName,
+        args[0]->attrs,
+        pos
+    );
+
     string drvName;
     Pos & posDrvName(*attr->pos);
     try {
@@ -951,7 +1034,7 @@ static void prim_derivationStrict(EvalState & state, const Pos & pos, Value * *
                     }
 
                 } else {
-                    auto s = state.coerceToString(posDrvName, *i->value, context, true);
+                    auto s = state.coerceToString(*i->pos, *i->value, context, true);
                     drv.env.emplace(key, s);
                     if (i->name == state.sBuilder) drv.builder = s;
                     else if (i->name == state.sSystem) drv.platform = s;
@@ -1208,7 +1291,10 @@ static RegisterPrimOp primop_toPath({
 static void prim_storePath(EvalState & state, const Pos & pos, Value * * args, Value & v)
 {
     if (evalSettings.pureEval)
-        throw EvalError("builtins.storePath' is not allowed in pure evaluation mode");
+        throw EvalError({
+            .msg = hintfmt("'%s' is not allowed in pure evaluation mode", "builtins.storePath"),
+            .errPos = pos
+        });
 
     PathSet context;
     Path path = state.checkSourcePath(state.coerceToPath(pos, *args[0], context));
@@ -1367,12 +1453,13 @@ static void prim_findFile(EvalState & state, const Pos & pos, Value * * args, Va
         if (i != v2.attrs->end())
             prefix = state.forceStringNoCtx(*i->value, pos);
 
-        i = v2.attrs->find(state.symbols.create("path"));
-        if (i == v2.attrs->end())
-            throw EvalError({
-                .msg = hintfmt("attribute 'path' missing"),
-                .errPos = pos
-            });
+        i = getAttr(
+            state,
+            "findFile",
+            "path",
+            v2.attrs,
+            pos
+        );
 
         PathSet context;
         string path = state.coerceToString(pos, *i->value, context, false, false);
@@ -1918,26 +2005,26 @@ static RegisterPrimOp primop_path({
       An enrichment of the built-in path type, based on the attributes
       present in *args*. All are optional except `path`:
 
-        - path  
+        - path\
           The underlying path.
 
-        - name  
+        - name\
           The name of the path when added to the store. This can used to
           reference paths that have nix-illegal characters in their names,
           like `@`.
 
-        - filter  
+        - filter\
           A function of the type expected by `builtins.filterSource`,
           with the same semantics.
 
-        - recursive  
+        - recursive\
           When `false`, when `path` is added to the store it is with a
           flat hash, rather than a hash of the NAR serialization of the
           file. Thus, `path` must refer to a regular file, not a
           directory. This allows similar behavior to `fetchurl`. Defaults
           to `true`.
 
-        - sha256  
+        - sha256\
           When provided, this is the expected hash of the file at the
           path. Evaluation will fail if the hash is incorrect, and
           providing a hash allows `builtins.path` to be used even when the
@@ -2014,12 +2101,13 @@ void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v)
     string attr = state.forceStringNoCtx(*args[0], pos);
     state.forceAttrs(*args[1], pos);
     // !!! Should we create a symbol here or just do a lookup?
-    Bindings::iterator i = args[1]->attrs->find(state.symbols.create(attr));
-    if (i == args[1]->attrs->end())
-        throw EvalError({
-            .msg = hintfmt("attribute '%1%' missing", attr),
-            .errPos = pos
-        });
+    Bindings::iterator i = getAttr(
+        state,
+        "getAttr",
+        attr,
+        args[1]->attrs,
+        pos
+    );
     // !!! add to stack trace?
     if (state.countCalls && i->pos) state.attrSelects[*i->pos]++;
     state.forceValue(*i->value, pos);
@@ -2146,22 +2234,25 @@ static void prim_listToAttrs(EvalState & state, const Pos & pos, Value * * args,
         Value & v2(*args[0]->listElems()[i]);
         state.forceAttrs(v2, pos);
 
-        Bindings::iterator j = v2.attrs->find(state.sName);
-        if (j == v2.attrs->end())
-            throw TypeError({
-                .msg = hintfmt("'name' attribute missing in a call to 'listToAttrs'"),
-                .errPos = pos
-            });
-        string name = state.forceStringNoCtx(*j->value, pos);
+        Bindings::iterator j = getAttr(
+            state,
+            "listToAttrs",
+            state.sName,
+            v2.attrs,
+            pos
+        );
+
+        string name = state.forceStringNoCtx(*j->value, *j->pos);
 
         Symbol sym = state.symbols.create(name);
         if (seen.insert(sym).second) {
-            Bindings::iterator j2 = v2.attrs->find(state.symbols.create(state.sValue));
-            if (j2 == v2.attrs->end())
-                throw TypeError({
-                    .msg = hintfmt("'value' attribute missing in a call to 'listToAttrs'"),
-                    .errPos = pos
-                });
+            Bindings::iterator j2 = getAttr(
+                state,
+                "listToAttrs",
+                state.sValue,
+                v2.attrs,
+                pos
+            );
             v.attrs->push_back(Attr(sym, j2->value, j2->pos));
         }
     }
@@ -2802,7 +2893,12 @@ static void prim_concatMap(EvalState & state, const Pos & pos, Value * * args, V
     for (unsigned int n = 0; n < nrLists; ++n) {
         Value * vElem = args[1]->listElems()[n];
         state.callFunction(*args[0], *vElem, lists[n], pos);
-        state.forceList(lists[n], pos);
+        try {
+            state.forceList(lists[n], lists[n].determinePos(args[0]->determinePos(pos)));
+        } catch (TypeError &e) {
+            e.addTrace(pos, hintfmt("while invoking '%s'", "concatMap"));
+            throw e;
+        }
         len += lists[n].listSize();
     }
 
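The getAttr helper introduced in this file centralises a pattern repeated across several primops: look up a required attribute and, when it is missing, point the error at the attribute set's own source position if one was recorded (the new Bindings::pos), falling back to the call site and adding a trace that names the primop. A reduced sketch of the position-selection logic, with placeholder types rather than the real EvalState/Bindings:

    // Sketch only: MiniAttrs stands in for Bindings, Pos is simplified.
    #include <map>
    #include <optional>
    #include <stdexcept>
    #include <string>

    struct Pos {
        std::string file;
        int line = 0;
        bool known() const { return line != 0; }
    };

    struct MiniAttrs {
        std::optional<Pos> pos;                    // where the attrset was written
        std::map<std::string, std::string> attrs;
    };

    // Find a required attribute or throw, preferring the attrset's own position.
    const std::string & getAttr(const std::string & funcName,
                                const std::string & attrName,
                                const MiniAttrs & attrSet,
                                const Pos & callPos)
    {
        auto i = attrSet.attrs.find(attrName);
        if (i == attrSet.attrs.end()) {
            const Pos & where =
                (attrSet.pos && attrSet.pos->known()) ? *attrSet.pos : callPos;
            throw std::runtime_error(
                "attribute '" + attrName + "' missing for call to '" + funcName +
                "' at " + where.file + ":" + std::to_string(where.line));
        }
        return i->second;
    }
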
diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc
index 27d8ddf35fcf322877ee5f44b2799c16aa72efff..b8b99d4fa8dcd800341618d7dfcb6fa69091bc76 100644
--- a/src/libexpr/primops/fetchTree.cc
+++ b/src/libexpr/primops/fetchTree.cc
@@ -303,17 +303,17 @@ static RegisterPrimOp primop_fetchGit({
       of the repo at that URL is fetched. Otherwise, it can be an
       attribute with the following attributes (all except `url` optional):
 
-        - url  
+        - url\
           The URL of the repo.
 
-        - name  
+        - name\
           The name of the directory the repo should be exported to in the
           store. Defaults to the basename of the URL.
 
-        - rev  
+        - rev\
           The git revision to fetch. Defaults to the tip of `ref`.
 
-        - ref  
+        - ref\
           The git ref to look for the requested revision under. This is
           often a branch or tag name. Defaults to `HEAD`.
 
@@ -321,11 +321,11 @@ static RegisterPrimOp primop_fetchGit({
           of Nix 2.3.0 Nix will not prefix `refs/heads/` if `ref` starts
           with `refs/`.
 
-        - submodules  
+        - submodules\
           A Boolean parameter that specifies whether submodules should be
           checked out. Defaults to `false`.
 
-        - allRefs  
+        - allRefs\
           Whether to fetch all refs of the repository. With this argument being
           true, it's possible to load a `rev` from *any* `ref` (by default only
           `rev`s from the specified `ref` are supported).
diff --git a/src/libexpr/value.hh b/src/libexpr/value.hh
index b317c18980ceb1d92e98e6119f88507e4cbe78a1..a1f131f9e84819fe224438ab40498910d70af267 100644
--- a/src/libexpr/value.hh
+++ b/src/libexpr/value.hh
@@ -341,6 +341,8 @@ public:
         return internalType == tList1 ? 1 : internalType == tList2 ? 2 : bigList.size;
     }
 
+    Pos determinePos(const Pos &pos) const;
+
     /* Check whether forcing this value requires a trivial amount of
        computation. In particular, function applications are
        non-trivial. */
diff --git a/src/libfetchers/attrs.hh b/src/libfetchers/attrs.hh
index a2d53a7bf8da02d7031c87b541c3dc37571c17ac..e410376338233e4921be861b0ea0d4eadf0aa3b2 100644
--- a/src/libfetchers/attrs.hh
+++ b/src/libfetchers/attrs.hh
@@ -6,6 +6,8 @@
 
 #include <nlohmann/json_fwd.hpp>
 
+#include <optional>
+
 namespace nix::fetchers {
 
 typedef std::variant<std::string, uint64_t, Explicit<bool>> Attr;
diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh
index c6b219c0222858a6c260c1d2e92d7ef8895f70d6..a72cfafa4d44960f971c4354732c2e96dbfc1fac 100644
--- a/src/libfetchers/fetchers.hh
+++ b/src/libfetchers/fetchers.hh
@@ -145,13 +145,7 @@ DownloadFileResult downloadFile(
     bool immutable,
     const Headers & headers = {});
 
-struct DownloadTarballMeta
-{
-    time_t lastModified;
-    std::string effectiveUrl;
-};
-
-std::pair<Tree, DownloadTarballMeta> downloadTarball(
+std::pair<Tree, time_t> downloadTarball(
     ref<Store> store,
     const std::string & url,
     const std::string & name,
diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc
index 81c647f89d0a0cb3c8bd5002886fe7d33f50d3b6..d8e0dbe0a4d510bf3b1aa381fa933cd10e3c992f 100644
--- a/src/libfetchers/git.cc
+++ b/src/libfetchers/git.cc
@@ -6,6 +6,7 @@
 #include "url-parts.hh"
 
 #include <sys/time.h>
+#include <sys/wait.h>
 
 using namespace std::string_literals;
 
@@ -153,12 +154,14 @@ struct GitInputScheme : InputScheme
 
     std::pair<bool, std::string> getActualUrl(const Input & input) const
     {
-        // Don't clone file:// URIs (but otherwise treat them the
-        // same as remote URIs, i.e. don't use the working tree or
-        // HEAD).
+        // file:// URIs are normally not cloned (but otherwise treated the
+        // same as remote URIs, i.e. we don't use the working tree or
+        // HEAD). Exception: if _NIX_FORCE_HTTP is set, or the repo is a bare
+        // git repo, treat it as a remote URI to force a clone.
         static bool forceHttp = getEnv("_NIX_FORCE_HTTP") == "1"; // for testing
         auto url = parseURL(getStrAttr(input.attrs, "url"));
-        bool isLocal = url.scheme == "file" && !forceHttp;
+        bool isBareRepository = url.scheme == "file" && !pathExists(url.path + "/.git");
+        bool isLocal = url.scheme == "file" && !forceHttp && !isBareRepository;
         return {isLocal, isLocal ? url.path : url.base};
     }
 
@@ -363,7 +366,9 @@ struct GitInputScheme : InputScheme
                         ? "refs/*"
                         : ref->compare(0, 5, "refs/") == 0
                             ? *ref
-                            : "refs/heads/" + *ref;
+                            : ref == "HEAD"
+                                ? *ref
+                                : "refs/heads/" + *ref;
                     runProgram("git", true, { "-C", repoDir, "fetch", "--quiet", "--force", "--", actualUrl, fmt("%s:%s", fetchRef, fetchRef) });
                 } catch (Error & e) {
                     if (!pathExists(localRefFile)) throw;
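
The getActualUrl change above reduces to a small predicate: a file:// URL counts as a local checkout only when it is not a bare repository and _NIX_FORCE_HTTP is unset; otherwise it is fetched like any remote, which forces a clone. A self-contained sketch of that predicate, with std::filesystem standing in for Nix's pathExists and URL handling:

    #include <filesystem>
    #include <string>

    // Decide whether a file:// git URL should be used as a local working tree.
    // A bare repo has no ".git" subdirectory, so it must be cloned like a remote.
    bool isLocalGitCheckout(const std::string & scheme,
                            const std::string & path,
                            bool forceHttp /* _NIX_FORCE_HTTP=1, used for testing */)
    {
        bool isBareRepository =
            scheme == "file" && !std::filesystem::exists(path + "/.git");
        return scheme == "file" && !forceHttp && !isBareRepository;
    }
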
diff --git a/src/libfetchers/github.cc b/src/libfetchers/github.cc
index 3e5ad75a8a0629685e0afe333a5dc4dacd61bede..8352ef02d0c8a6a7aacc4ccb735cbbffc9686945 100644
--- a/src/libfetchers/github.cc
+++ b/src/libfetchers/github.cc
@@ -207,16 +207,16 @@ struct GitArchiveInputScheme : InputScheme
 
         auto url = getDownloadUrl(input);
 
-        auto [tree, meta] = downloadTarball(store, url.url, "source", true, url.headers);
+        auto [tree, lastModified] = downloadTarball(store, url.url, "source", true, url.headers);
 
-        input.attrs.insert_or_assign("lastModified", uint64_t(meta.lastModified));
+        input.attrs.insert_or_assign("lastModified", uint64_t(lastModified));
 
         getCache()->add(
             store,
             immutableAttrs,
             {
                 {"rev", rev->gitRev()},
-                {"lastModified", uint64_t(meta.lastModified)}
+                {"lastModified", uint64_t(lastModified)}
             },
             tree.storePath,
             true);
diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc
index 81b2227de18daa5845f8654ba0869d2bd5f02b96..74376adc0526cd1c2f2de022462691c870d9b858 100644
--- a/src/libfetchers/registry.cc
+++ b/src/libfetchers/registry.cc
@@ -114,7 +114,7 @@ static std::shared_ptr<Registry> getSystemRegistry()
 
 Path getUserRegistryPath()
 {
-    return getHome() + "/.config/nix/registry.json";
+    return getConfigDir() + "/nix/registry.json";
 }
 
 std::shared_ptr<Registry> getUserRegistry()
diff --git a/src/libfetchers/tarball.cc b/src/libfetchers/tarball.cc
index bd05bb2f1cf2cdc0b90a3a78c4aa70e756061984..257465bae5b6347ce85d6d55761cc7c3718f9668 100644
--- a/src/libfetchers/tarball.cc
+++ b/src/libfetchers/tarball.cc
@@ -109,7 +109,7 @@ DownloadFileResult downloadFile(
     };
 }
 
-std::pair<Tree, DownloadTarballMeta> downloadTarball(
+std::pair<Tree, time_t> downloadTarball(
     ref<Store> store,
     const std::string & url,
     const std::string & name,
@@ -127,10 +127,7 @@ std::pair<Tree, DownloadTarballMeta> downloadTarball(
     if (cached && !cached->expired)
         return {
             Tree(store->toRealPath(cached->storePath), std::move(cached->storePath)),
-            {
-                .lastModified = time_t(getIntAttr(cached->infoAttrs, "lastModified")),
-                .effectiveUrl = maybeGetStrAttr(cached->infoAttrs, "effectiveUrl").value_or(url),
-            },
+            getIntAttr(cached->infoAttrs, "lastModified")
         };
 
     auto res = downloadFile(store, url, name, immutable, headers);
@@ -155,7 +152,6 @@ std::pair<Tree, DownloadTarballMeta> downloadTarball(
 
     Attrs infoAttrs({
         {"lastModified", uint64_t(lastModified)},
-        {"effectiveUrl", res.effectiveUrl},
         {"etag", res.etag},
     });
 
@@ -168,10 +164,7 @@ std::pair<Tree, DownloadTarballMeta> downloadTarball(
 
     return {
         Tree(store->toRealPath(*unpackedStorePath), std::move(*unpackedStorePath)),
-        {
-            .lastModified = lastModified,
-            .effectiveUrl = res.effectiveUrl,
-        },
+        lastModified,
     };
 }
 
@@ -185,7 +178,8 @@ struct TarballInputScheme : InputScheme
             && !hasSuffix(url.path, ".tar")
             && !hasSuffix(url.path, ".tar.gz")
             && !hasSuffix(url.path, ".tar.xz")
-            && !hasSuffix(url.path, ".tar.bz2"))
+            && !hasSuffix(url.path, ".tar.bz2")
+            && !hasSuffix(url.path, ".tar.zst"))
             return {};
 
         Input input;
@@ -230,11 +224,9 @@ struct TarballInputScheme : InputScheme
         return true;
     }
 
-    std::pair<Tree, Input> fetch(ref<Store> store, const Input & _input) override
+    std::pair<Tree, Input> fetch(ref<Store> store, const Input & input) override
     {
-        Input input(_input);
-        auto [tree, meta] = downloadTarball(store, getStrAttr(input.attrs, "url"), "source", false);
-        input.attrs.insert_or_assign("url", meta.effectiveUrl);
+        auto tree = downloadTarball(store, getStrAttr(input.attrs, "url"), "source", false).first;
         return {std::move(tree), input};
     }
 };
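
With this hunk, downloadTarball returns std::pair<Tree, time_t> and no longer reports an effective URL. A minimal sketch of the new call convention, assuming store, url and input are in scope as in the github.cc hunk above:

    // Sketch only: the second element of the pair is now the lastModified
    // timestamp itself, not a DownloadTarballMeta struct.
    auto [tree, lastModified] = downloadTarball(store, url, "source", /* immutable */ true);
    input.attrs.insert_or_assign("lastModified", uint64_t(lastModified));

    // When only the tree is needed (as in TarballInputScheme::fetch above):
    auto tree2 = downloadTarball(store, url, "source", /* immutable */ false).first;
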
diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc
index 0e5432fca37e75f3aa710cd22845c165d5aad2e2..15354549a61904fa5658343bfc59b61dbf62dc11 100644
--- a/src/libmain/progress-bar.cc
+++ b/src/libmain/progress-bar.cc
@@ -122,6 +122,7 @@ public:
 
     void log(Verbosity lvl, const FormatOrString & fs) override
     {
+        if (lvl > verbosity) return;
         auto state(state_.lock());
         log(*state, lvl, fs.s);
     }
diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc
index 5baaff3e958d70246d9bb7ec04293ae2f5465035..86930c2e3030d7d5a714b31526c78ccae79b8419 100644
--- a/src/libmain/shared.cc
+++ b/src/libmain/shared.cc
@@ -36,7 +36,7 @@ void printGCWarning()
 }
 
 
-void printMissing(ref<Store> store, const std::vector<StorePathWithOutputs> & paths, Verbosity lvl)
+void printMissing(ref<Store> store, const std::vector<DerivedPath> & paths, Verbosity lvl)
 {
     uint64_t downloadSize, narSize;
     StorePathSet willBuild, willSubstitute, unknown;
@@ -310,7 +310,7 @@ void printVersion(const string & programName)
 
 void showManPage(const string & name)
 {
-    restoreSignals();
+    restoreProcessContext();
     setenv("MANPATH", settings.nixManDir.c_str(), 1);
     execlp("man", "man", name.c_str(), nullptr);
     throw SysError("command 'man %1%' failed", name.c_str());
@@ -373,7 +373,7 @@ RunPager::RunPager()
             throw SysError("dupping stdin");
         if (!getenv("LESS"))
             setenv("LESS", "FRSXMK", 1);
-        restoreSignals();
+        restoreProcessContext();
         if (pager)
             execl("/bin/sh", "sh", "-c", pager, nullptr);
         execlp("pager", "pager", nullptr);
diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh
index edc7b5efabe8a09ea69be5cf5bbe0c1e02940881..05277d90a68048963b70c7b387170ff4ccfacd59 100644
--- a/src/libmain/shared.hh
+++ b/src/libmain/shared.hh
@@ -4,6 +4,7 @@
 #include "args.hh"
 #include "common-args.hh"
 #include "path.hh"
+#include "derived-path.hh"
 
 #include <signal.h>
 
@@ -42,7 +43,7 @@ struct StorePathWithOutputs;
 
 void printMissing(
     ref<Store> store,
-    const std::vector<StorePathWithOutputs> & paths,
+    const std::vector<DerivedPath> & paths,
     Verbosity lvl = lvlInfo);
 
 void printMissing(ref<Store> store, const StorePathSet & willBuild,
diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc
index 4f5f8607d1e6b78f246cf329f397ca8781ccdfb8..df401e6f48a4481d2c17d7e9ae9bd8a2cf43588a 100644
--- a/src/libstore/binary-cache-store.cc
+++ b/src/libstore/binary-cache-store.cc
@@ -179,6 +179,9 @@ ref<const ValidPathInfo> BinaryCacheStore::addToStoreCommon(
     narInfo->url = "nar/" + narInfo->fileHash->to_string(Base32, false) + ".nar"
         + (compression == "xz" ? ".xz" :
            compression == "bzip2" ? ".bz2" :
+           compression == "zstd" ? ".zst" :
+           compression == "lzip" ? ".lzip" :
+           compression == "lz4" ? ".lz4" :
            compression == "br" ? ".br" :
            "");
 
@@ -447,18 +450,43 @@ StorePath BinaryCacheStore::addTextToStore(const string & name, const string & s
 
 std::optional<const Realisation> BinaryCacheStore::queryRealisation(const DrvOutput & id)
 {
+    if (diskCache) {
+        auto [cacheOutcome, maybeCachedRealisation] =
+            diskCache->lookupRealisation(getUri(), id);
+        switch (cacheOutcome) {
+            case NarInfoDiskCache::oValid:
+                debug("Returning a cached realisation for %s", id.to_string());
+                return *maybeCachedRealisation;
+            case NarInfoDiskCache::oInvalid:
+                debug("Returning a cached missing realisation for %s", id.to_string());
+                return {};
+            case NarInfoDiskCache::oUnknown:
+                break;
+        }
+    }
+
     auto outputInfoFilePath = realisationsPrefix + "/" + id.to_string() + ".doi";
     auto rawOutputInfo = getFile(outputInfoFilePath);
 
     if (rawOutputInfo) {
-        return {Realisation::fromJSON(
-            nlohmann::json::parse(*rawOutputInfo), outputInfoFilePath)};
+        auto realisation = Realisation::fromJSON(
+            nlohmann::json::parse(*rawOutputInfo), outputInfoFilePath);
+
+        if (diskCache)
+            diskCache->upsertRealisation(
+                getUri(), realisation);
+
+        return {realisation};
     } else {
+        if (diskCache)
+            diskCache->upsertAbsentRealisation(getUri(), id);
         return std::nullopt;
     }
 }
 
 void BinaryCacheStore::registerDrvOutput(const Realisation& info) {
+    if (diskCache)
+        diskCache->upsertRealisation(getUri(), info);
     auto filePath = realisationsPrefix + "/" + info.id.to_string() + ".doi";
     upsertFile(filePath, info.toJSON().dump(), "application/json");
 }
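
The queryRealisation change above follows a cache-aside pattern: consult the local disk cache first, fall back to the binary cache, then record both positive and negative results so repeated misses stay cheap. A standalone sketch of that pattern with toy types (not the Nix NarInfoDiskCache API):

    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>
    #include <utility>

    // Toy stand-in for a disk cache that can answer "present", "known absent"
    // or "never asked".
    enum class Outcome { Valid, Invalid, Unknown };

    struct DiskCache {
        std::map<std::string, std::optional<std::string>> entries;

        std::pair<Outcome, std::optional<std::string>> lookup(const std::string & key) const {
            auto i = entries.find(key);
            if (i == entries.end()) return {Outcome::Unknown, std::nullopt};
            return {i->second ? Outcome::Valid : Outcome::Invalid, i->second};
        }
        void upsert(const std::string & key, const std::string & value) { entries[key] = value; }
        void upsertAbsent(const std::string & key) { entries[key] = std::nullopt; }
    };

    // Same shape as queryRealisation() above: try the cache, fall back to the
    // (expensive) remote query, then record hits as well as misses.
    std::optional<std::string> cachedQuery(DiskCache & cache, const std::string & key)
    {
        auto [outcome, cached] = cache.lookup(key);
        if (outcome == Outcome::Valid) return cached;
        if (outcome == Outcome::Invalid) return std::nullopt;

        std::optional<std::string> remote;  // pretend the remote lookup found nothing
        if (remote) cache.upsert(key, *remote);
        else cache.upsertAbsent(key);
        return remote;
    }

    int main()
    {
        DiskCache cache;
        cachedQuery(cache, "drv!out");      // miss: recorded as absent
        auto [outcome, value] = cache.lookup("drv!out");
        std::cout << (outcome == Outcome::Invalid && !value ? "cached absent\n" : "?\n");
    }
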
diff --git a/src/libstore/binary-cache-store.hh b/src/libstore/binary-cache-store.hh
index c2163166c205fc70116ffc48adceba747d492e7a..657be2fcfa525968b2671449bf1f40559b181929 100644
--- a/src/libstore/binary-cache-store.hh
+++ b/src/libstore/binary-cache-store.hh
@@ -34,7 +34,7 @@ private:
 protected:
 
     // The prefix under which realisation infos will be stored
-    const std::string realisationsPrefix = "/realisations";
+    const std::string realisationsPrefix = "realisations";
 
     BinaryCacheStore(const Params & params);
 
diff --git a/src/libstore/build/derivation-goal.cc b/src/libstore/build/derivation-goal.cc
index c29237f5c4981fa50eb39d2672f5066fcf57b66c..8c9ef0101c69b64e8ca66701d0c0a4aa0c9912c9 100644
--- a/src/libstore/build/derivation-goal.cc
+++ b/src/libstore/build/derivation-goal.cc
@@ -20,6 +20,7 @@
 #include <sys/types.h>
 #include <sys/socket.h>
 #include <sys/un.h>
+#include <sys/wait.h>
 #include <netdb.h>
 #include <fcntl.h>
 #include <termios.h>
@@ -73,7 +74,7 @@ DerivationGoal::DerivationGoal(const StorePath & drvPath,
     state = &DerivationGoal::getDerivation;
     name = fmt(
         "building of '%s' from .drv file",
-        StorePathWithOutputs { drvPath, wantedOutputs }.to_string(worker.store));
+        DerivedPath::Built { drvPath, wantedOutputs }.to_string(worker.store));
     trace("created");
 
     mcExpectedBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.expectedBuilds);
@@ -94,7 +95,7 @@ DerivationGoal::DerivationGoal(const StorePath & drvPath, const BasicDerivation
     state = &DerivationGoal::haveDerivation;
     name = fmt(
         "building of '%s' from in-memory derivation",
-        StorePathWithOutputs { drvPath, drv.outputNames() }.to_string(worker.store));
+        DerivedPath::Built { drvPath, drv.outputNames() }.to_string(worker.store));
     trace("created");
 
     mcExpectedBuilds = std::make_unique<MaintainCount<uint64_t>>(worker.expectedBuilds);
@@ -170,7 +171,7 @@ void DerivationGoal::getDerivation()
         return;
     }
 
-    addWaitee(upcast_goal(worker.makeSubstitutionGoal(drvPath)));
+    addWaitee(upcast_goal(worker.makePathSubstitutionGoal(drvPath)));
 
     state = &DerivationGoal::loadDerivation;
 }
@@ -246,17 +247,22 @@ void DerivationGoal::haveDerivation()
        through substitutes.  If that doesn't work, we'll build
        them. */
     if (settings.useSubstitutes && parsedDrv->substitutesAllowed())
-        for (auto & [_, status] : initialOutputs) {
+        for (auto & [outputName, status] : initialOutputs) {
             if (!status.wanted) continue;
-            if (!status.known) {
-                warn("do not know how to query for unknown floating content-addressed derivation output yet");
-                /* Nothing to wait for; tail call */
-                return DerivationGoal::gaveUpOnSubstitution();
-            }
-            addWaitee(upcast_goal(worker.makeSubstitutionGoal(
-                status.known->path,
-                buildMode == bmRepair ? Repair : NoRepair,
-                getDerivationCA(*drv))));
+            if (!status.known)
+                addWaitee(
+                    upcast_goal(
+                        worker.makeDrvOutputSubstitutionGoal(
+                            DrvOutput{status.outputHash, outputName},
+                            buildMode == bmRepair ? Repair : NoRepair
+                        )
+                    )
+                );
+            else
+                addWaitee(upcast_goal(worker.makePathSubstitutionGoal(
+                    status.known->path,
+                    buildMode == bmRepair ? Repair : NoRepair,
+                    getDerivationCA(*drv))));
         }
 
     if (waitees.empty()) /* to prevent hang (no wake-up event) */
@@ -337,7 +343,7 @@ void DerivationGoal::gaveUpOnSubstitution()
         if (!settings.useSubstitutes)
             throw Error("dependency '%s' of '%s' does not exist, and substitution is disabled",
                 worker.store.printStorePath(i), worker.store.printStorePath(drvPath));
-        addWaitee(upcast_goal(worker.makeSubstitutionGoal(i)));
+        addWaitee(upcast_goal(worker.makePathSubstitutionGoal(i)));
     }
 
     if (waitees.empty()) /* to prevent hang (no wake-up event) */
@@ -388,7 +394,7 @@ void DerivationGoal::repairClosure()
             worker.store.printStorePath(i), worker.store.printStorePath(drvPath));
         auto drvPath2 = outputsToDrv.find(i);
         if (drvPath2 == outputsToDrv.end())
-            addWaitee(upcast_goal(worker.makeSubstitutionGoal(i, Repair)));
+            addWaitee(upcast_goal(worker.makePathSubstitutionGoal(i, Repair)));
         else
             addWaitee(worker.makeDerivationGoal(drvPath2->second, StringSet(), bmRepair));
     }
@@ -920,6 +926,9 @@ void DerivationGoal::resolvedFinished() {
         if (realisation) {
             auto newRealisation = *realisation;
             newRealisation.id = DrvOutput{initialOutputs.at(wantedOutput).outputHash, wantedOutput};
+            newRealisation.signatures.clear();
+            newRealisation.dependentRealisations = drvOutputReferences(worker.store, *drv, realisation->outPath);
+            signRealisation(newRealisation);
             worker.store.registerDrvOutput(newRealisation);
         } else {
             // If we don't have a realisation, then it must mean that something
@@ -1243,9 +1252,12 @@ OutputPathMap DerivationGoal::queryDerivationOutputMap()
 void DerivationGoal::checkPathValidity()
 {
     bool checkHash = buildMode == bmRepair;
+    auto wantedOutputsLeft = wantedOutputs;
     for (auto & i : queryPartialDerivationOutputMap()) {
         InitialOutput & info = initialOutputs.at(i.first);
         info.wanted = wantOutput(i.first, wantedOutputs);
+        if (info.wanted)
+            wantedOutputsLeft.erase(i.first);
         if (i.second) {
             auto outputPath = *i.second;
             info.known = {
@@ -1258,15 +1270,33 @@ void DerivationGoal::checkPathValidity()
             };
         }
         if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
-            if (auto real = worker.store.queryRealisation(
-                    DrvOutput{initialOutputs.at(i.first).outputHash, i.first})) {
+            auto drvOutput = DrvOutput{initialOutputs.at(i.first).outputHash, i.first};
+            if (auto real = worker.store.queryRealisation(drvOutput)) {
                 info.known = {
                     .path = real->outPath,
                     .status = PathStatus::Valid,
                 };
+            } else if (info.known && info.known->status == PathStatus::Valid) {
+                // We know the output because it's a static output of the
+                // derivation, and the output path is valid, but we don't have
+                // its realisation stored (probably because it has been built
+                // without the `ca-derivations` experimental flag)
+                worker.store.registerDrvOutput(
+                    Realisation{
+                        drvOutput,
+                        info.known->path,
+                    }
+                );
             }
         }
     }
+    // If we requested all the outputs (via the empty set), we are always fine.
+    // If we requested specific outputs, the loop above erases every one that
+    // the derivation actually has, so anything left over does not exist.
+    if (!wantedOutputsLeft.empty())
+        throw Error("derivation '%s' does not have wanted outputs %s",
+            worker.store.printStorePath(drvPath),
+            concatStringsSep(", ", quoteStrings(wantedOutputsLeft)));
 }
 
 
diff --git a/src/libstore/build/derivation-goal.hh b/src/libstore/build/derivation-goal.hh
index c85bcd84f753d4f2dc1dae8e68474491ebed25da..704b77caf92edd4cac55f2e85c91aace2abbb6d3 100644
--- a/src/libstore/build/derivation-goal.hh
+++ b/src/libstore/build/derivation-goal.hh
@@ -180,6 +180,9 @@ struct DerivationGoal : public Goal
     /* Open a log file and a pipe to it. */
     Path openLogFile();
 
+    /* Sign the newly built realisation if the store allows it */
+    virtual void signRealisation(Realisation&) {}
+
     /* Close the log file. */
     void closeLogFile();
 
diff --git a/src/libstore/build/drv-output-substitution-goal.cc b/src/libstore/build/drv-output-substitution-goal.cc
new file mode 100644
index 0000000000000000000000000000000000000000..be270d079ecba4d89241a40d2f35df809109e5ea
--- /dev/null
+++ b/src/libstore/build/drv-output-substitution-goal.cc
@@ -0,0 +1,122 @@
+#include "drv-output-substitution-goal.hh"
+#include "worker.hh"
+#include "substitution-goal.hh"
+
+namespace nix {
+
+DrvOutputSubstitutionGoal::DrvOutputSubstitutionGoal(const DrvOutput& id, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
+    : Goal(worker)
+    , id(id)
+{
+    state = &DrvOutputSubstitutionGoal::init;
+    name = fmt("substitution of '%s'", id.to_string());
+    trace("created");
+}
+
+
+void DrvOutputSubstitutionGoal::init()
+{
+    trace("init");
+
+    /* If the derivation already exists, we’re done */
+    if (worker.store.queryRealisation(id)) {
+        amDone(ecSuccess);
+        return;
+    }
+
+    subs = settings.useSubstitutes ? getDefaultSubstituters() : std::list<ref<Store>>();
+    tryNext();
+}
+
+void DrvOutputSubstitutionGoal::tryNext()
+{
+    trace("Trying next substituter");
+
+    if (subs.size() == 0) {
+        /* None left.  Terminate this goal and let someone else deal
+           with it. */
+        debug("drv output '%s' is required, but there is no substituter that can provide it", id.to_string());
+
+        /* Hack: don't indicate failure if there were no substituters.
+           In that case the calling derivation should just do a
+           build. */
+        amDone(substituterFailed ? ecFailed : ecNoSubstituters);
+
+        if (substituterFailed) {
+            worker.failedSubstitutions++;
+            worker.updateProgress();
+        }
+
+        return;
+    }
+
+    auto sub = subs.front();
+    subs.pop_front();
+
+    // FIXME: Make async
+    outputInfo = sub->queryRealisation(id);
+    if (!outputInfo) {
+        tryNext();
+        return;
+    }
+
+    for (const auto & [depId, depPath] : outputInfo->dependentRealisations) {
+        if (depId != id) {
+            if (auto localOutputInfo = worker.store.queryRealisation(depId);
+                localOutputInfo && localOutputInfo->outPath != depPath) {
+                warn(
+                    "substituter '%s' has an incompatible realisation for '%s', ignoring.\n"
+                    "Local:  %s\n"
+                    "Remote: %s",
+                    sub->getUri(),
+                    depId.to_string(),
+                    worker.store.printStorePath(localOutputInfo->outPath),
+                    worker.store.printStorePath(depPath)
+                );
+                tryNext();
+                return;
+            }
+            addWaitee(worker.makeDrvOutputSubstitutionGoal(depId));
+        }
+    }
+
+    addWaitee(worker.makePathSubstitutionGoal(outputInfo->outPath));
+
+    if (waitees.empty()) outPathValid();
+    else state = &DrvOutputSubstitutionGoal::outPathValid;
+}
+
+void DrvOutputSubstitutionGoal::outPathValid()
+{
+    assert(outputInfo);
+    trace("Output path substituted");
+
+    if (nrFailed > 0) {
+        debug("The output path of the derivation output '%s' could not be substituted", id.to_string());
+        amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure : ecFailed);
+        return;
+    }
+
+    worker.store.registerDrvOutput(*outputInfo);
+    finished();
+}
+
+void DrvOutputSubstitutionGoal::finished()
+{
+    trace("finished");
+    amDone(ecSuccess);
+}
+
+string DrvOutputSubstitutionGoal::key()
+{
+    /* "a$" ensures substitution goals happen before derivation
+       goals. */
+    return "a$" + std::string(id.to_string());
+}
+
+void DrvOutputSubstitutionGoal::work()
+{
+    (this->*state)();
+}
+
+}
diff --git a/src/libstore/build/drv-output-substitution-goal.hh b/src/libstore/build/drv-output-substitution-goal.hh
new file mode 100644
index 0000000000000000000000000000000000000000..63ab53d897b8ab9598bd68f70327657c13fdcdf5
--- /dev/null
+++ b/src/libstore/build/drv-output-substitution-goal.hh
@@ -0,0 +1,50 @@
+#pragma once
+
+#include "store-api.hh"
+#include "goal.hh"
+#include "realisation.hh"
+
+namespace nix {
+
+class Worker;
+
+// Substitution of a derivation output.
+// This is done in three steps:
+// 1. Fetch the output info from a substituter
+// 2. Substitute the corresponding output path
+// 3. Register the output info
+class DrvOutputSubstitutionGoal : public Goal {
+private:
+    // The drv output we're trying to substitute
+    DrvOutput id;
+
+    // The realisation corresponding to the given output id.
+    // Will be filled once we can get it.
+    std::optional<Realisation> outputInfo;
+
+    /* The remaining substituters. */
+    std::list<ref<Store>> subs;
+
+    /* Whether a substituter failed. */
+    bool substituterFailed = false;
+
+public:
+    DrvOutputSubstitutionGoal(const DrvOutput& id, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
+
+    typedef void (DrvOutputSubstitutionGoal::*GoalState)();
+    GoalState state;
+
+    void init();
+    void tryNext();
+    void outPathValid();
+    void finished();
+
+    void timedOut(Error && ex) override { abort(); };
+
+    string key() override;
+
+    void work() override;
+
+};
+
+}
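
Both this new goal and PathSubstitutionGoal (renamed below) drive themselves through a pointer-to-member state machine: work() simply calls (this->*state)() and each state installs its successor. A minimal standalone sketch of that idiom (toy class, not the Nix Goal hierarchy):

    #include <iostream>

    struct MiniGoal
    {
        typedef void (MiniGoal::*State)();
        State state = &MiniGoal::init;

        void work() { (this->*state)(); }   // dispatch to the current state

        void init()     { std::cout << "init\n";     state = &MiniGoal::tryNext; }
        void tryNext()  { std::cout << "tryNext\n";  state = &MiniGoal::finished; }
        void finished() { std::cout << "finished\n"; }
    };

    int main()
    {
        MiniGoal g;
        g.work();   // init
        g.work();   // tryNext
        g.work();   // finished
    }
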
diff --git a/src/libstore/build/entry-points.cc b/src/libstore/build/entry-points.cc
index 01a564abaec946b18ae2a13001d952ed615e341c..732d4785d6ee2c8dbdc91c159dd3a5cebda15179 100644
--- a/src/libstore/build/entry-points.cc
+++ b/src/libstore/build/entry-points.cc
@@ -6,16 +6,20 @@
 
 namespace nix {
 
-void Store::buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, BuildMode buildMode)
+void Store::buildPaths(const std::vector<DerivedPath> & reqs, BuildMode buildMode)
 {
     Worker worker(*this);
 
     Goals goals;
-    for (auto & path : drvPaths) {
-        if (path.path.isDerivation())
-            goals.insert(worker.makeDerivationGoal(path.path, path.outputs, buildMode));
-        else
-            goals.insert(worker.makeSubstitutionGoal(path.path, buildMode == bmRepair ? Repair : NoRepair));
+    for (auto & br : reqs) {
+        std::visit(overloaded {
+            [&](DerivedPath::Built bfd) {
+                goals.insert(worker.makeDerivationGoal(bfd.drvPath, bfd.outputs, buildMode));
+            },
+            [&](DerivedPath::Opaque bo) {
+                goals.insert(worker.makePathSubstitutionGoal(bo.path, buildMode == bmRepair ? Repair : NoRepair));
+            },
+        }, br.raw());
     }
 
     worker.run(goals);
@@ -31,7 +35,7 @@ void Store::buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, Build
         }
         if (i->exitCode != Goal::ecSuccess) {
             if (auto i2 = dynamic_cast<DerivationGoal *>(i.get())) failed.insert(i2->drvPath);
-            else if (auto i2 = dynamic_cast<SubstitutionGoal *>(i.get())) failed.insert(i2->storePath);
+            else if (auto i2 = dynamic_cast<PathSubstitutionGoal *>(i.get())) failed.insert(i2->storePath);
         }
     }
 
@@ -90,7 +94,7 @@ void Store::ensurePath(const StorePath & path)
     if (isValidPath(path)) return;
 
     Worker worker(*this);
-    GoalPtr goal = worker.makeSubstitutionGoal(path);
+    GoalPtr goal = worker.makePathSubstitutionGoal(path);
     Goals goals = {goal};
 
     worker.run(goals);
@@ -108,7 +112,7 @@ void Store::ensurePath(const StorePath & path)
 void LocalStore::repairPath(const StorePath & path)
 {
     Worker worker(*this);
-    GoalPtr goal = worker.makeSubstitutionGoal(path, Repair);
+    GoalPtr goal = worker.makePathSubstitutionGoal(path, Repair);
     Goals goals = {goal};
 
     worker.run(goals);
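
buildPaths() now dispatches on the DerivedPath variant with std::visit and the overloaded helper. A self-contained sketch of the same dispatch shape over a plain std::variant (the Built/Opaque structs here are stand-ins, not the Nix types):

    #include <iostream>
    #include <string>
    #include <variant>

    struct Built  { std::string drvPath; };
    struct Opaque { std::string path; };
    using Req = std::variant<Built, Opaque>;

    // Same shape as Nix's `overloaded`: inherit the call operators of the lambdas.
    template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
    template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

    int main()
    {
        Req reqs[] = { Built{"foo.drv"}, Opaque{"/nix/store/example"} };
        for (auto & req : reqs)
            std::visit(overloaded {
                [](const Built & b)  { std::cout << "make a derivation goal for "   << b.drvPath << "\n"; },
                [](const Opaque & o) { std::cout << "make a substitution goal for " << o.path    << "\n"; },
            }, req);
    }
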
diff --git a/src/libstore/build/goal.cc b/src/libstore/build/goal.cc
index 2dd7a4d3723a4c9439ea0e3e0ef659cd9e90f684..9de40bdf2ff98ec773143c7a6e37f8693e985c31 100644
--- a/src/libstore/build/goal.cc
+++ b/src/libstore/build/goal.cc
@@ -78,6 +78,8 @@ void Goal::amDone(ExitCode result, std::optional<Error> ex)
     }
     waiters.clear();
     worker.removeGoal(shared_from_this());
+
+    cleanup();
 }
 
 
diff --git a/src/libstore/build/goal.hh b/src/libstore/build/goal.hh
index fca4f2d00dc990642d599d21f60bc2db5f153b7a..e6bf628cbc109a30ddba22da65ce48d809a0ee9b 100644
--- a/src/libstore/build/goal.hh
+++ b/src/libstore/build/goal.hh
@@ -100,6 +100,8 @@ struct Goal : public std::enable_shared_from_this<Goal>
     virtual string key() = 0;
 
     void amDone(ExitCode result, std::optional<Error> ex = {});
+
+    virtual void cleanup() { }
 };
 
 void addToWeakGoals(WeakGoals & goals, GoalPtr p);
diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc
index 9c2f1dda63bfb0a800dd64ddee58fae7f8755503..ba0aca29c7699623fcc7c362c5ba6f5df3825c1c 100644
--- a/src/libstore/build/local-derivation-goal.cc
+++ b/src/libstore/build/local-derivation-goal.cc
@@ -153,6 +153,7 @@ void LocalDerivationGoal::killChild()
 void LocalDerivationGoal::tryLocalBuild() {
     unsigned int curBuilds = worker.getNrLocalBuilds();
     if (curBuilds >= settings.maxBuildJobs) {
+        state = &DerivationGoal::tryToBuild;
         worker.waitForBuildSlot(shared_from_this());
         outputLocks.unlock();
         return;
@@ -287,17 +288,17 @@ bool LocalDerivationGoal::cleanupDecideWhetherDiskFull()
        So instead, check if the disk is (nearly) full now.  If
        so, we don't mark this build as a permanent failure. */
 #if HAVE_STATVFS
-	{
+    {
         auto & localStore = getLocalStore();
         uint64_t required = 8ULL * 1024 * 1024; // FIXME: make configurable
         struct statvfs st;
-        if (statvfs(localStore.realStoreDir.c_str(), &st) == 0 &&
+        if (statvfs(localStore.realStoreDir.get().c_str(), &st) == 0 &&
             (uint64_t) st.f_bavail * st.f_bsize < required)
             diskFull = true;
         if (statvfs(tmpDir.c_str(), &st) == 0 &&
             (uint64_t) st.f_bavail * st.f_bsize < required)
             diskFull = true;
-	}
+    }
 #endif
 
     deleteTmpDir(false);
@@ -416,7 +417,7 @@ void LocalDerivationGoal::startBuilder()
     }
 
     auto & localStore = getLocalStore();
-    if (localStore.storeDir != localStore.realStoreDir) {
+    if (localStore.storeDir != localStore.realStoreDir.get()) {
         #if __linux__
             useChroot = true;
         #else
@@ -581,7 +582,9 @@ void LocalDerivationGoal::startBuilder()
                 throw Error("derivation '%s' requested impure path '%s', but it was not in allowed-impure-host-deps",
                     worker.store.printStorePath(drvPath), i);
 
-            dirsInChroot[i] = i;
+            /* Allow files in __impureHostDeps to be missing; e.g.
+               macOS 11+ has no /usr/lib/libSystem*.dylib */
+            dirsInChroot[i] = {i, true};
         }
 
 #if __linux__
@@ -1190,6 +1193,26 @@ void LocalDerivationGoal::writeStructuredAttrs()
     chownToBuilder(tmpDir + "/.attrs.sh");
 }
 
+
+static StorePath pathPartOfReq(const DerivedPath & req)
+{
+    return std::visit(overloaded {
+        [&](DerivedPath::Opaque bo) {
+            return bo.path;
+        },
+        [&](DerivedPath::Built bfd)  {
+            return bfd.drvPath;
+        },
+    }, req.raw());
+}
+
+
+bool LocalDerivationGoal::isAllowed(const DerivedPath & req)
+{
+    return this->isAllowed(pathPartOfReq(req));
+}
+
+
 struct RestrictedStoreConfig : virtual LocalFSStoreConfig
 {
     using LocalFSStoreConfig::LocalFSStoreConfig;
@@ -1310,33 +1333,52 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
     std::optional<const Realisation> queryRealisation(const DrvOutput & id) override
     // XXX: This should probably be allowed if the realisation corresponds to
     // an allowed derivation
-    { throw Error("queryRealisation"); }
+    {
+        if (!goal.isAllowed(id))
+            throw InvalidPath("cannot query an unknown output id '%s' in recursive Nix", id.to_string());
+        return next->queryRealisation(id);
+    }
 
-    void buildPaths(const std::vector<StorePathWithOutputs> & paths, BuildMode buildMode) override
+    void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode) override
     {
         if (buildMode != bmNormal) throw Error("unsupported build mode");
 
         StorePathSet newPaths;
+        std::set<Realisation> newRealisations;
 
-        for (auto & path : paths) {
-            if (!goal.isAllowed(path.path))
-                throw InvalidPath("cannot build unknown path '%s' in recursive Nix", printStorePath(path.path));
+        for (auto & req : paths) {
+            if (!goal.isAllowed(req))
+                throw InvalidPath("cannot build '%s' in recursive Nix because path is unknown", req.to_string(*next));
         }
 
         next->buildPaths(paths, buildMode);
 
         for (auto & path : paths) {
-            if (!path.path.isDerivation()) continue;
-            auto outputs = next->queryDerivationOutputMap(path.path);
-            for (auto & output : outputs)
-                if (wantOutput(output.first, path.outputs))
-                    newPaths.insert(output.second);
+            auto p =  std::get_if<DerivedPath::Built>(&path);
+            if (!p) continue;
+            auto & bfd = *p;
+            auto drv = readDerivation(bfd.drvPath);
+            auto drvHashes = staticOutputHashes(*this, drv);
+            auto outputs = next->queryDerivationOutputMap(bfd.drvPath);
+            for (auto & [outputName, outputPath] : outputs)
+                if (wantOutput(outputName, bfd.outputs)) {
+                    newPaths.insert(outputPath);
+                    if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+                        auto thisRealisation = next->queryRealisation(
+                            DrvOutput{drvHashes.at(outputName), outputName}
+                        );
+                        assert(thisRealisation);
+                        newRealisations.insert(*thisRealisation);
+                    }
+                }
         }
 
         StorePathSet closure;
         next->computeFSClosure(newPaths, closure);
         for (auto & path : closure)
             goal.addDependency(path);
+        for (auto & real : Realisation::closure(*next, newRealisations))
+            goal.addedDrvOutputs.insert(real.id);
     }
 
     BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
@@ -1358,7 +1400,7 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
     void addSignatures(const StorePath & storePath, const StringSet & sigs) override
     { unsupported("addSignatures"); }
 
-    void queryMissing(const std::vector<StorePathWithOutputs> & targets,
+    void queryMissing(const std::vector<DerivedPath> & targets,
         StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown,
         uint64_t & downloadSize, uint64_t & narSize) override
     {
@@ -1366,12 +1408,12 @@ struct RestrictedStore : public virtual RestrictedStoreConfig, public virtual Lo
            client about what paths will be built/substituted or are
            already present. Probably not a big deal. */
 
-        std::vector<StorePathWithOutputs> allowed;
-        for (auto & path : targets) {
-            if (goal.isAllowed(path.path))
-                allowed.emplace_back(path);
+        std::vector<DerivedPath> allowed;
+        for (auto & req : targets) {
+            if (goal.isAllowed(req))
+                allowed.emplace_back(req);
             else
-                unknown.insert(path.path);
+                unknown.insert(pathPartOfReq(req));
         }
 
         next->queryMissing(allowed, willBuild, willSubstitute,
@@ -1703,18 +1745,18 @@ void LocalDerivationGoal::runChild()
                network, so give them access to /etc/resolv.conf and so
                on. */
             if (derivationIsImpure(derivationType)) {
-                ss.push_back("/etc/resolv.conf");
-
                 // Only use nss functions to resolve hosts and
                 // services. Don’t use it for anything else that may
                 // be configured for this system. This limits the
                 // potential impurities introduced in fixed-outputs.
                 writeFile(chrootRootDir + "/etc/nsswitch.conf", "hosts: files dns\nservices: files\n");
 
-                ss.push_back("/etc/services");
-                ss.push_back("/etc/hosts");
-                if (pathExists("/var/run/nscd/socket"))
-                    ss.push_back("/var/run/nscd/socket");
+                /* N.B. it is realistic that these paths might not exist. It
+                   happens when testing Nix building fixed-output derivations
+                   within a pure derivation. */
+                for (auto & path : { "/etc/resolv.conf", "/etc/services", "/etc/hosts", "/var/run/nscd/socket" })
+                    if (pathExists(path))
+                        ss.push_back(path);
             }
 
             for (auto & i : ss) dirsInChroot.emplace(i, i);
@@ -2276,10 +2318,6 @@ void LocalDerivationGoal::registerOutputs()
                 sink.s = make_ref<std::string>(rewriteStrings(*sink.s, outputRewrites));
                 StringSource source(*sink.s);
                 restorePath(actualPath, source);
-
-                /* FIXME: set proper permissions in restorePath() so
-                   we don't have to do another traversal. */
-                canonicalisePathMetaData(actualPath, -1, inodesSeen);
             }
         };
 
@@ -2333,32 +2371,19 @@ void LocalDerivationGoal::registerOutputs()
             }
             auto got = caSink.finish().first;
             auto refs = rewriteRefs();
-            HashModuloSink narSink { htSHA256, oldHashPart };
-            dumpPath(actualPath, narSink);
-            auto narHashAndSize = narSink.finish();
-            ValidPathInfo newInfo0 {
-                worker.store.makeFixedOutputPath(
+
+            auto finalPath = worker.store.makeFixedOutputPath(
                     outputHash.method,
                     got,
                     outputPathName(drv->name, outputName),
                     refs.second,
-                    refs.first),
-                narHashAndSize.first,
-            };
-            newInfo0.narSize = narHashAndSize.second;
-            newInfo0.ca = FixedOutputHash {
-                .method = outputHash.method,
-                .hash = got,
-            };
-            newInfo0.references = refs.second;
-            if (refs.first)
-                newInfo0.references.insert(newInfo0.path);
-            if (scratchPath != newInfo0.path) {
+                    refs.first);
+            if (scratchPath != finalPath) {
                 // Also rewrite the output path
                 auto source = sinkToSource([&](Sink & nextSink) {
                     StringSink sink;
                     dumpPath(actualPath, sink);
-                    RewritingSink rsink2(oldHashPart, std::string(newInfo0.path.hashPart()), nextSink);
+                    RewritingSink rsink2(oldHashPart, std::string(finalPath.hashPart()), nextSink);
                     rsink2(*sink.s);
                     rsink2.flush();
                 });
@@ -2368,6 +2393,21 @@ void LocalDerivationGoal::registerOutputs()
                 movePath(tmpPath, actualPath);
             }
 
+            HashResult narHashAndSize = hashPath(htSHA256, actualPath);
+            ValidPathInfo newInfo0 {
+                finalPath,
+                narHashAndSize.first,
+            };
+
+            newInfo0.narSize = narHashAndSize.second;
+            newInfo0.ca = FixedOutputHash {
+                .method = outputHash.method,
+                .hash = got,
+            };
+            newInfo0.references = refs.second;
+            if (refs.first)
+                newInfo0.references.insert(newInfo0.path);
+
             assert(newInfo0.ca);
             return newInfo0;
         };
@@ -2428,6 +2468,10 @@ void LocalDerivationGoal::registerOutputs()
             },
         }, output.output);
 
+        /* FIXME: set proper permissions in restorePath() so
+            we don't have to do another traversal. */
+        canonicalisePathMetaData(actualPath, -1, inodesSeen);
+
         /* Calculate where we'll move the output files. In the checking case we
            will leave them where they are, for now, rather than move to
            their usual "final destination" */
@@ -2460,6 +2504,7 @@ void LocalDerivationGoal::registerOutputs()
                 assert(newInfo.ca);
             } else {
                 auto destPath = worker.store.toRealPath(finalDestPath);
+                deletePath(destPath);
                 movePath(actualPath, destPath);
                 actualPath = destPath;
             }
@@ -2615,13 +2660,22 @@ void LocalDerivationGoal::registerOutputs()
        but it's fine to do in all cases. */
 
     if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
-        for (auto& [outputName, newInfo] : infos)
-            worker.store.registerDrvOutput(Realisation{
-                .id = DrvOutput{initialOutputs.at(outputName).outputHash, outputName},
-                .outPath = newInfo.path});
+        for (auto& [outputName, newInfo] : infos) {
+            auto thisRealisation = Realisation{
+                .id = DrvOutput{initialOutputs.at(outputName).outputHash,
+                                outputName},
+                .outPath = newInfo.path};
+            signRealisation(thisRealisation);
+            worker.store.registerDrvOutput(thisRealisation);
+        }
     }
 }
 
+void LocalDerivationGoal::signRealisation(Realisation & realisation)
+{
+    getLocalStore().signRealisation(realisation);
+}
+
 
 void LocalDerivationGoal::checkOutputs(const std::map<Path, ValidPathInfo> & outputs)
 {
diff --git a/src/libstore/build/local-derivation-goal.hh b/src/libstore/build/local-derivation-goal.hh
index 4bbf27a1bf82c8c36c1105906f60c8e89f2fb371..088a5720964d09b5e049d509e9e9788bf86f0f60 100644
--- a/src/libstore/build/local-derivation-goal.hh
+++ b/src/libstore/build/local-derivation-goal.hh
@@ -108,6 +108,9 @@ struct LocalDerivationGoal : public DerivationGoal
     /* Paths that were added via recursive Nix calls. */
     StorePathSet addedPaths;
 
+    /* Realisations that were added via recursive Nix calls. */
+    std::set<DrvOutput> addedDrvOutputs;
+
     /* Recursive Nix calls are only allowed to build or realize paths
        in the original input closure or added via a recursive Nix call
        (so e.g. you can't do 'nix-store -r /nix/store/<bla>' where
@@ -116,6 +119,12 @@ struct LocalDerivationGoal : public DerivationGoal
     {
         return inputPaths.count(path) || addedPaths.count(path);
     }
+    bool isAllowed(const DrvOutput & id)
+    {
+        return addedDrvOutputs.count(id);
+    }
+
+    bool isAllowed(const DerivedPath & req);
 
     friend struct RestrictedStore;
 
@@ -161,6 +170,8 @@ struct LocalDerivationGoal : public DerivationGoal
        as valid. */
     void registerOutputs() override;
 
+    void signRealisation(Realisation &) override;
+
     /* Check that an output meets the requirements specified by the
        'outputChecks' attribute (or the legacy
        '{allowed,disallowed}{References,Requisites}' attributes). */
diff --git a/src/libstore/build/substitution-goal.cc b/src/libstore/build/substitution-goal.cc
index c4b0de78d37e78e1c4de8d79f2f54480c4c97257..e56cfadbea4a87e0d7d8feacc727c501e0cb0228 100644
--- a/src/libstore/build/substitution-goal.cc
+++ b/src/libstore/build/substitution-goal.cc
@@ -5,40 +5,32 @@
 
 namespace nix {
 
-SubstitutionGoal::SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
+PathSubstitutionGoal::PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair, std::optional<ContentAddress> ca)
     : Goal(worker)
     , storePath(storePath)
     , repair(repair)
     , ca(ca)
 {
-    state = &SubstitutionGoal::init;
+    state = &PathSubstitutionGoal::init;
     name = fmt("substitution of '%s'", worker.store.printStorePath(this->storePath));
     trace("created");
     maintainExpectedSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.expectedSubstitutions);
 }
 
 
-SubstitutionGoal::~SubstitutionGoal()
+PathSubstitutionGoal::~PathSubstitutionGoal()
 {
-    try {
-        if (thr.joinable()) {
-            // FIXME: signal worker thread to quit.
-            thr.join();
-            worker.childTerminated(this);
-        }
-    } catch (...) {
-        ignoreException();
-    }
+    cleanup();
 }
 
 
-void SubstitutionGoal::work()
+void PathSubstitutionGoal::work()
 {
     (this->*state)();
 }
 
 
-void SubstitutionGoal::init()
+void PathSubstitutionGoal::init()
 {
     trace("init");
 
@@ -59,10 +51,12 @@ void SubstitutionGoal::init()
 }
 
 
-void SubstitutionGoal::tryNext()
+void PathSubstitutionGoal::tryNext()
 {
     trace("trying next substituter");
 
+    cleanup();
+
     if (subs.size() == 0) {
         /* None left.  Terminate this goal and let someone else deal
            with it. */
@@ -142,7 +136,7 @@ void SubstitutionGoal::tryNext()
     /* Bail out early if this substituter lacks a valid
        signature. LocalStore::addToStore() also checks for this, but
        only after we've downloaded the path. */
-    if (!sub->isTrusted && worker.store.pathInfoIsTrusted(*info))
+    if (!sub->isTrusted && worker.store.pathInfoIsUntrusted(*info))
     {
         warn("substituter '%s' does not have a valid signature for path '%s'",
             sub->getUri(), worker.store.printStorePath(storePath));
@@ -154,16 +148,16 @@ void SubstitutionGoal::tryNext()
        paths referenced by this one. */
     for (auto & i : info->references)
         if (i != storePath) /* ignore self-references */
-            addWaitee(worker.makeSubstitutionGoal(i));
+            addWaitee(worker.makePathSubstitutionGoal(i));
 
     if (waitees.empty()) /* to prevent hang (no wake-up event) */
         referencesValid();
     else
-        state = &SubstitutionGoal::referencesValid;
+        state = &PathSubstitutionGoal::referencesValid;
 }
 
 
-void SubstitutionGoal::referencesValid()
+void PathSubstitutionGoal::referencesValid()
 {
     trace("all references realised");
 
@@ -177,12 +171,12 @@ void SubstitutionGoal::referencesValid()
         if (i != storePath) /* ignore self-references */
             assert(worker.store.isValidPath(i));
 
-    state = &SubstitutionGoal::tryToRun;
+    state = &PathSubstitutionGoal::tryToRun;
     worker.wakeUp(shared_from_this());
 }
 
 
-void SubstitutionGoal::tryToRun()
+void PathSubstitutionGoal::tryToRun()
 {
     trace("trying to run");
 
@@ -205,7 +199,7 @@ void SubstitutionGoal::tryToRun()
     thr = std::thread([this]() {
         try {
             /* Wake up the worker loop when we're done. */
-            Finally updateStats([this]() { outPipe.writeSide = -1; });
+            Finally updateStats([this]() { outPipe.writeSide.close(); });
 
             Activity act(*logger, actSubstitute, Logger::Fields{worker.store.printStorePath(storePath), sub->getUri()});
             PushActivity pact(act.id);
@@ -221,11 +215,11 @@ void SubstitutionGoal::tryToRun()
 
     worker.childStarted(shared_from_this(), {outPipe.readSide.get()}, true, false);
 
-    state = &SubstitutionGoal::finished;
+    state = &PathSubstitutionGoal::finished;
 }
 
 
-void SubstitutionGoal::finished()
+void PathSubstitutionGoal::finished()
 {
     trace("substitute finished");
 
@@ -249,7 +243,7 @@ void SubstitutionGoal::finished()
         }
 
         /* Try the next substitute. */
-        state = &SubstitutionGoal::tryNext;
+        state = &PathSubstitutionGoal::tryNext;
         worker.wakeUp(shared_from_this());
         return;
     }
@@ -278,14 +272,31 @@ void SubstitutionGoal::finished()
 }
 
 
-void SubstitutionGoal::handleChildOutput(int fd, const string & data)
+void PathSubstitutionGoal::handleChildOutput(int fd, const string & data)
 {
 }
 
 
-void SubstitutionGoal::handleEOF(int fd)
+void PathSubstitutionGoal::handleEOF(int fd)
 {
     if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
 }
 
+
+void PathSubstitutionGoal::cleanup()
+{
+    try {
+        if (thr.joinable()) {
+            // FIXME: signal worker thread to quit.
+            thr.join();
+            worker.childTerminated(this);
+        }
+
+        outPipe.close();
+    } catch (...) {
+        ignoreException();
+    }
+}
+
+
 }
diff --git a/src/libstore/build/substitution-goal.hh b/src/libstore/build/substitution-goal.hh
index dee2cecbf22d9407eea05f3bacade66068c09c79..70c806d2361f19238d0d3b4c38af6923fb43f46a 100644
--- a/src/libstore/build/substitution-goal.hh
+++ b/src/libstore/build/substitution-goal.hh
@@ -8,13 +8,13 @@ namespace nix {
 
 class Worker;
 
-struct SubstitutionGoal : public Goal
+struct PathSubstitutionGoal : public Goal
 {
     /* The store path that should be realised through a substitute. */
     StorePath storePath;
 
     /* The path the substituter refers to the path as. This will be
-     * different when the stores have different names. */
+       different when the stores have different names. */
     std::optional<StorePath> subPath;
 
     /* The remaining substituters. */
@@ -47,14 +47,15 @@ struct SubstitutionGoal : public Goal
     std::unique_ptr<MaintainCount<uint64_t>> maintainExpectedSubstitutions,
         maintainRunningSubstitutions, maintainExpectedNar, maintainExpectedDownload;
 
-    typedef void (SubstitutionGoal::*GoalState)();
+    typedef void (PathSubstitutionGoal::*GoalState)();
     GoalState state;
 
     /* Content address for recomputing store path */
     std::optional<ContentAddress> ca;
 
-    SubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
-    ~SubstitutionGoal();
+public:
+    PathSubstitutionGoal(const StorePath & storePath, Worker & worker, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
+    ~PathSubstitutionGoal();
 
     void timedOut(Error && ex) override { abort(); };
 
@@ -78,6 +79,8 @@ struct SubstitutionGoal : public Goal
     /* Callback used by the worker to write to the log. */
     void handleChildOutput(int fd, const string & data) override;
     void handleEOF(int fd) override;
+
+    void cleanup() override;
 };
 
 }
diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc
index b2223c3b6754068779156603693cff7203bc2432..0f2ade348d15c687f28fc642408e39b43da15073 100644
--- a/src/libstore/build/worker.cc
+++ b/src/libstore/build/worker.cc
@@ -1,6 +1,7 @@
 #include "machines.hh"
 #include "worker.hh"
 #include "substitution-goal.hh"
+#include "drv-output-substitution-goal.hh"
 #include "local-derivation-goal.hh"
 #include "hook-instance.hh"
 
@@ -78,20 +79,32 @@ std::shared_ptr<DerivationGoal> Worker::makeBasicDerivationGoal(const StorePath
 }
 
 
-std::shared_ptr<SubstitutionGoal> Worker::makeSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
+std::shared_ptr<PathSubstitutionGoal> Worker::makePathSubstitutionGoal(const StorePath & path, RepairFlag repair, std::optional<ContentAddress> ca)
 {
-    std::weak_ptr<SubstitutionGoal> & goal_weak = substitutionGoals[path];
+    std::weak_ptr<PathSubstitutionGoal> & goal_weak = substitutionGoals[path];
     auto goal = goal_weak.lock(); // FIXME
     if (!goal) {
-        goal = std::make_shared<SubstitutionGoal>(path, *this, repair, ca);
+        goal = std::make_shared<PathSubstitutionGoal>(path, *this, repair, ca);
         goal_weak = goal;
         wakeUp(goal);
     }
     return goal;
 }
 
-template<typename G>
-static void removeGoal(std::shared_ptr<G> goal, std::map<StorePath, std::weak_ptr<G>> & goalMap)
+std::shared_ptr<DrvOutputSubstitutionGoal> Worker::makeDrvOutputSubstitutionGoal(const DrvOutput& id, RepairFlag repair, std::optional<ContentAddress> ca)
+{
+    std::weak_ptr<DrvOutputSubstitutionGoal> & goal_weak = drvOutputSubstitutionGoals[id];
+    auto goal = goal_weak.lock(); // FIXME
+    if (!goal) {
+        goal = std::make_shared<DrvOutputSubstitutionGoal>(id, *this, repair, ca);
+        goal_weak = goal;
+        wakeUp(goal);
+    }
+    return goal;
+}
+
+template<typename K, typename G>
+static void removeGoal(std::shared_ptr<G> goal, std::map<K, std::weak_ptr<G>> & goalMap)
 {
     /* !!! inefficient */
     for (auto i = goalMap.begin();
@@ -109,10 +122,13 @@ void Worker::removeGoal(GoalPtr goal)
 {
     if (auto drvGoal = std::dynamic_pointer_cast<DerivationGoal>(goal))
         nix::removeGoal(drvGoal, derivationGoals);
-    else if (auto subGoal = std::dynamic_pointer_cast<SubstitutionGoal>(goal))
+    else if (auto subGoal = std::dynamic_pointer_cast<PathSubstitutionGoal>(goal))
         nix::removeGoal(subGoal, substitutionGoals);
+    else if (auto subGoal = std::dynamic_pointer_cast<DrvOutputSubstitutionGoal>(goal))
+        nix::removeGoal(subGoal, drvOutputSubstitutionGoals);
     else
         assert(false);
+
     if (topGoals.find(goal) != topGoals.end()) {
         topGoals.erase(goal);
         /* If a top-level goal failed, then kill all other goals
@@ -211,14 +227,14 @@ void Worker::waitForAWhile(GoalPtr goal)
 
 void Worker::run(const Goals & _topGoals)
 {
-    std::vector<nix::StorePathWithOutputs> topPaths;
+    std::vector<nix::DerivedPath> topPaths;
 
     for (auto & i : _topGoals) {
         topGoals.insert(i);
         if (auto goal = dynamic_cast<DerivationGoal *>(i.get())) {
-            topPaths.push_back({goal->drvPath, goal->wantedOutputs});
-        } else if (auto goal = dynamic_cast<SubstitutionGoal *>(i.get())) {
-            topPaths.push_back({goal->storePath});
+            topPaths.push_back(DerivedPath::Built{goal->drvPath, goal->wantedOutputs});
+        } else if (auto goal = dynamic_cast<PathSubstitutionGoal *>(i.get())) {
+            topPaths.push_back(DerivedPath::Opaque{goal->storePath});
         }
     }
 
@@ -471,7 +487,10 @@ void Worker::markContentsGood(const StorePath & path)
 }
 
 
-GoalPtr upcast_goal(std::shared_ptr<SubstitutionGoal> subGoal) {
+GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal) {
+    return subGoal;
+}
+GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal) {
     return subGoal;
 }
 
diff --git a/src/libstore/build/worker.hh b/src/libstore/build/worker.hh
index 82e711191e746c526b4a1236040be54baf173ca1..918de35f6dc5d7902c4b3681e540c9dce60e9713 100644
--- a/src/libstore/build/worker.hh
+++ b/src/libstore/build/worker.hh
@@ -4,6 +4,7 @@
 #include "lock.hh"
 #include "store-api.hh"
 #include "goal.hh"
+#include "realisation.hh"
 
 #include <future>
 #include <thread>
@@ -12,18 +13,20 @@ namespace nix {
 
 /* Forward definition. */
 struct DerivationGoal;
-struct SubstitutionGoal;
+struct PathSubstitutionGoal;
+class DrvOutputSubstitutionGoal;
 
 /* Workaround for not being able to declare a something like
 
-     class SubstitutionGoal : public Goal;
+     class PathSubstitutionGoal : public Goal;
 
    even when Goal is a complete type.
 
    This is still a static cast. The purpose of exporting it is to define it in
-   a place where `SubstitutionGoal` is concrete, and use it in a place where it
+   a place where `PathSubstitutionGoal` is concrete, and use it in a place where it
    is opaque. */
-GoalPtr upcast_goal(std::shared_ptr<SubstitutionGoal> subGoal);
+GoalPtr upcast_goal(std::shared_ptr<PathSubstitutionGoal> subGoal);
+GoalPtr upcast_goal(std::shared_ptr<DrvOutputSubstitutionGoal> subGoal);
 
 typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;
 
@@ -72,7 +75,8 @@ private:
     /* Maps used to prevent multiple instantiations of a goal for the
        same derivation / path. */
     std::map<StorePath, std::weak_ptr<DerivationGoal>> derivationGoals;
-    std::map<StorePath, std::weak_ptr<SubstitutionGoal>> substitutionGoals;
+    std::map<StorePath, std::weak_ptr<PathSubstitutionGoal>> substitutionGoals;
+    std::map<DrvOutput, std::weak_ptr<DrvOutputSubstitutionGoal>> drvOutputSubstitutionGoals;
 
     /* Goals waiting for busy paths to be unlocked. */
     WeakGoals waitingForAnyGoal;
@@ -146,7 +150,8 @@ public:
         const StringSet & wantedOutputs, BuildMode buildMode = bmNormal);
 
     /* substitution goal */
-    std::shared_ptr<SubstitutionGoal> makeSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
+    std::shared_ptr<PathSubstitutionGoal> makePathSubstitutionGoal(const StorePath & storePath, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
+    std::shared_ptr<DrvOutputSubstitutionGoal> makeDrvOutputSubstitutionGoal(const DrvOutput & id, RepairFlag repair = NoRepair, std::optional<ContentAddress> ca = std::nullopt);
 
     /* Remove a dead goal. */
     void removeGoal(GoalPtr goal);
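
The makePathSubstitutionGoal / makeDrvOutputSubstitutionGoal factories declared here (and defined in worker.cc above) memoise goals in the weak_ptr maps, so concurrent requests share one live goal without the map keeping it alive forever. A standalone sketch of that memoisation (toy Goal struct, not the Nix classes):

    #include <cassert>
    #include <map>
    #include <memory>
    #include <string>

    struct Goal { std::string key; };

    std::map<std::string, std::weak_ptr<Goal>> goals;

    std::shared_ptr<Goal> makeGoal(const std::string & key)
    {
        std::weak_ptr<Goal> & slot = goals[key];
        auto goal = slot.lock();            // reuse an existing live goal...
        if (!goal) {
            goal = std::make_shared<Goal>(Goal{key});
            slot = goal;                    // ...or create and remember a new one
        }
        return goal;
    }

    int main()
    {
        auto a = makeGoal("x");
        auto b = makeGoal("x");
        assert(a == b);                     // the same in-flight goal is shared
    }
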
diff --git a/src/libstore/ca-specific-schema.sql b/src/libstore/ca-specific-schema.sql
index 93c4428269010cc3457305d47627a5b976c91797..08af0cc1fda01924e3b8113c8c9758ff6a126569 100644
--- a/src/libstore/ca-specific-schema.sql
+++ b/src/libstore/ca-specific-schema.sql
@@ -3,9 +3,19 @@
 -- is enabled
 
 create table if not exists Realisations (
+    id integer primary key autoincrement not null,
     drvPath text not null,
     outputName text not null, -- symbolic output id, usually "out"
     outputPath integer not null,
-    primary key (drvPath, outputName),
+    signatures text, -- space-separated list
     foreign key (outputPath) references ValidPaths(id) on delete cascade
 );
+
+create index if not exists IndexRealisations on Realisations(drvPath, outputName);
+
+create table if not exists RealisationsRefs (
+    referrer integer not null,
+    realisationReference integer,
+    foreign key (referrer) references Realisations(id) on delete cascade,
+    foreign key (realisationReference) references Realisations(id) on delete restrict
+);
diff --git a/src/libstore/daemon.cc b/src/libstore/daemon.cc
index ba79592632c0bbd3621d4c89a1e061c3270d3dae..e06fb9ce2e790ebf728a8a7da3ce17abb793baff 100644
--- a/src/libstore/daemon.cc
+++ b/src/libstore/daemon.cc
@@ -2,6 +2,7 @@
 #include "monitor-fd.hh"
 #include "worker-protocol.hh"
 #include "store-api.hh"
+#include "path-with-outputs.hh"
 #include "finally.hh"
 #include "affinity.hh"
 #include "archive.hh"
@@ -259,6 +260,18 @@ static void writeValidPathInfo(
     }
 }
 
+static std::vector<DerivedPath> readDerivedPaths(Store & store, unsigned int clientVersion, Source & from)
+{
+    std::vector<DerivedPath> reqs;
+    if (GET_PROTOCOL_MINOR(clientVersion) >= 30) {
+        reqs = worker_proto::read(store, from, Phantom<std::vector<DerivedPath>> {});
+    } else {
+        for (auto & s : readStrings<Strings>(from))
+            reqs.push_back(parsePathWithOutputs(store, s).toDerivedPath());
+    }
+    return reqs;
+}
+
 static void performOp(TunnelLogger * logger, ref<Store> store,
     TrustedFlag trusted, RecursiveFlag recursive, unsigned int clientVersion,
     Source & from, BufferedSink & to, unsigned int op)
@@ -493,9 +506,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
     }
 
     case wopBuildPaths: {
-        std::vector<StorePathWithOutputs> drvs;
-        for (auto & s : readStrings<Strings>(from))
-            drvs.push_back(store->parsePathWithOutputs(s));
+        auto drvs = readDerivedPaths(*store, clientVersion, from);
         BuildMode mode = bmNormal;
         if (GET_PROTOCOL_MINOR(clientVersion) >= 15) {
             mode = (BuildMode) readInt(from);
@@ -575,7 +586,10 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         auto res = store->buildDerivation(drvPath, drv, buildMode);
         logger->stopWork();
         to << res.status << res.errorMsg;
-        if (GET_PROTOCOL_MINOR(clientVersion) >= 0xc) {
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 29) {
+            to << res.timesBuilt << res.isNonDeterministic << res.startTime << res.stopTime;
+        }
+        if (GET_PROTOCOL_MINOR(clientVersion) >= 28) {
             worker_proto::write(*store, to, res.builtOutputs);
         }
         break;
@@ -856,9 +870,7 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
     }
 
     case wopQueryMissing: {
-        std::vector<StorePathWithOutputs> targets;
-        for (auto & s : readStrings<Strings>(from))
-            targets.push_back(store->parsePathWithOutputs(s));
+        auto targets = readDerivedPaths(*store, clientVersion, from);
         logger->startWork();
         StorePathSet willBuild, willSubstitute, unknown;
         uint64_t downloadSize, narSize;
@@ -873,11 +885,15 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
 
     case wopRegisterDrvOutput: {
         logger->startWork();
-        auto outputId = DrvOutput::parse(readString(from));
-        auto outputPath = StorePath(readString(from));
-        auto resolvedDrv = StorePath(readString(from));
-        store->registerDrvOutput(Realisation{
-            .id = outputId, .outPath = outputPath});
+        if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
+            auto outputId = DrvOutput::parse(readString(from));
+            auto outputPath = StorePath(readString(from));
+            store->registerDrvOutput(Realisation{
+                .id = outputId, .outPath = outputPath});
+        } else {
+            auto realisation = worker_proto::read(*store, from, Phantom<Realisation>());
+            store->registerDrvOutput(realisation);
+        }
         logger->stopWork();
         break;
     }
@@ -887,9 +903,15 @@ static void performOp(TunnelLogger * logger, ref<Store> store,
         auto outputId = DrvOutput::parse(readString(from));
         auto info = store->queryRealisation(outputId);
         logger->stopWork();
-        std::set<StorePath> outPaths;
-        if (info) outPaths.insert(info->outPath);
-        worker_proto::write(*store, to, outPaths);
+        if (GET_PROTOCOL_MINOR(clientVersion) < 31) {
+            std::set<StorePath> outPaths;
+            if (info) outPaths.insert(info->outPath);
+            worker_proto::write(*store, to, outPaths);
+        } else {
+            std::set<Realisation> realisations;
+            if (info) realisations.insert(*info);
+            worker_proto::write(*store, to, realisations);
+        }
         break;
     }
 
diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc
index fe98182bb6c14ed5d039427fdde3f39646d72d8d..f6defd98f2d5f1ec601d38f4a707e2e5aa5add8d 100644
--- a/src/libstore/derivations.cc
+++ b/src/libstore/derivations.cc
@@ -590,14 +590,6 @@ std::map<std::string, Hash> staticOutputHashes(Store& store, const Derivation& d
 }
 
 
-std::string StorePathWithOutputs::to_string(const Store & store) const
-{
-    return outputs.empty()
-        ? store.printStorePath(path)
-        : store.printStorePath(path) + "!" + concatStringsSep(",", outputs);
-}
-
-
 bool wantOutput(const string & output, const std::set<string> & wanted)
 {
     return wanted.empty() || wanted.find(output) != wanted.end();
diff --git a/src/libstore/derivations.hh b/src/libstore/derivations.hh
index 061d70f69ba60932f6e73bd4eadfec7bf24dd1ec..2df440536d93084f751d6e5e1279e5aafc7e1de3 100644
--- a/src/libstore/derivations.hh
+++ b/src/libstore/derivations.hh
@@ -52,7 +52,7 @@ struct DerivationOutput
         DerivationOutputCAFloating,
         DerivationOutputDeferred
     > output;
-    std::optional<HashType> hashAlgoOpt(const Store & store) const;
+
     /* Note, when you use this function you should make sure that you're passing
        the right derivation name. When in doubt, you should use the safer
        interface provided by BasicDerivation::outputsAndOptPaths */
diff --git a/src/libstore/derived-path.cc b/src/libstore/derived-path.cc
new file mode 100644
index 0000000000000000000000000000000000000000..8da81d0accd00e54b781b3f83e7c7aa7f00e99c4
--- /dev/null
+++ b/src/libstore/derived-path.cc
@@ -0,0 +1,118 @@
+#include "derived-path.hh"
+#include "store-api.hh"
+
+#include <nlohmann/json.hpp>
+
+namespace nix {
+
+nlohmann::json DerivedPath::Opaque::toJSON(ref<Store> store) const {
+    nlohmann::json res;
+    res["path"] = store->printStorePath(path);
+    return res;
+}
+
+nlohmann::json BuiltPath::Built::toJSON(ref<Store> store) const {
+    nlohmann::json res;
+    res["drvPath"] = store->printStorePath(drvPath);
+    for (const auto& [output, path] : outputs) {
+        res["outputs"][output] = store->printStorePath(path);
+    }
+    return res;
+}
+
+StorePathSet BuiltPath::outPaths() const
+{
+    return std::visit(
+        overloaded{
+            [](BuiltPath::Opaque p) { return StorePathSet{p.path}; },
+            [](BuiltPath::Built b) {
+                StorePathSet res;
+                for (auto & [_, path] : b.outputs)
+                    res.insert(path);
+                return res;
+            },
+        }, raw()
+    );
+}
+
+nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store) {
+    auto res = nlohmann::json::array();
+    for (const BuiltPath & buildable : buildables) {
+        std::visit([&res, store](const auto & buildable) {
+            res.push_back(buildable.toJSON(store));
+        }, buildable.raw());
+    }
+    return res;
+}
+
+
+std::string DerivedPath::Opaque::to_string(const Store & store) const {
+    return store.printStorePath(path);
+}
+
+std::string DerivedPath::Built::to_string(const Store & store) const {
+    return store.printStorePath(drvPath)
+        + "!"
+        + (outputs.empty() ? std::string { "*" } : concatStringsSep(",", outputs));
+}
+
+std::string DerivedPath::to_string(const Store & store) const
+{
+    return std::visit(
+        [&](const auto & req) { return req.to_string(store); },
+        this->raw());
+}
+
+
+DerivedPath::Opaque DerivedPath::Opaque::parse(const Store & store, std::string_view s)
+{
+    return {store.parseStorePath(s)};
+}
+
+DerivedPath::Built DerivedPath::Built::parse(const Store & store, std::string_view s)
+{
+    size_t n = s.find("!");
+    assert(n != s.npos);
+    auto drvPath = store.parseStorePath(s.substr(0, n));
+    auto outputsS = s.substr(n + 1);
+    std::set<string> outputs;
+    if (outputsS != "*")
+        outputs = tokenizeString<std::set<string>>(outputsS, ",");
+    return {drvPath, outputs};
+}
+
+DerivedPath DerivedPath::parse(const Store & store, std::string_view s)
+{
+    size_t n = s.find("!");
+    return n == s.npos
+        ? (DerivedPath) DerivedPath::Opaque::parse(store, s)
+        : (DerivedPath) DerivedPath::Built::parse(store, s);
+}
+
+RealisedPath::Set BuiltPath::toRealisedPaths(Store & store) const
+{
+    RealisedPath::Set res;
+    std::visit(
+        overloaded{
+            [&](BuiltPath::Opaque p) { res.insert(p.path); },
+            [&](BuiltPath::Built p) {
+                auto drvHashes =
+                    staticOutputHashes(store, store.readDerivation(p.drvPath));
+                for (auto& [outputName, outputPath] : p.outputs) {
+                    if (settings.isExperimentalFeatureEnabled(
+                            "ca-derivations")) {
+                        auto thisRealisation = store.queryRealisation(
+                            DrvOutput{drvHashes.at(outputName), outputName});
+                        assert(thisRealisation);  // We’ve built it, so we must
+                                                  // have the realisation
+                        res.insert(*thisRealisation);
+                    } else {
+                        res.insert(outputPath);
+                    }
+                }
+            },
+        },
+        raw());
+    return res;
+}
+}
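A minimal usage sketch of the parse/to_string pair defined above, assuming a connected Store and a valid derivation path (the only extra headers needed are derived-path.hh, store-api.hh and <cassert>):

    // "<drv>!dev,out" selects the named outputs, "<drv>!*" means all outputs,
    // and a bare store path parses as DerivedPath::Opaque.
    DerivedPath selectOutputs(const Store & store, const StorePath & drvPath)
    {
        auto parsed = DerivedPath::parse(store,
            store.printStorePath(drvPath) + "!dev,out");
        // Round-trips: Built::to_string() re-serialises the same drv!outputs form.
        assert(parsed.to_string(store)
            == store.printStorePath(drvPath) + "!dev,out");
        return parsed;
    }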
diff --git a/src/libstore/derived-path.hh b/src/libstore/derived-path.hh
new file mode 100644
index 0000000000000000000000000000000000000000..9d6ace0696debfb8359b3a4c037a4bcdb3aef6ff
--- /dev/null
+++ b/src/libstore/derived-path.hh
@@ -0,0 +1,123 @@
+#pragma once
+
+#include "util.hh"
+#include "path.hh"
+#include "realisation.hh"
+
+#include <optional>
+
+#include <nlohmann/json_fwd.hpp>
+
+namespace nix {
+
+class Store;
+
+/**
+ * An opaque derived path.
+ *
+ * Opaque derived paths are just store paths, and fully evaluated. They
+ * cannot be simplified further. Since they are opaque, they cannot be
+ * built, but they can be fetched.
+ */
+struct DerivedPathOpaque {
+    StorePath path;
+
+    nlohmann::json toJSON(ref<Store> store) const;
+    std::string to_string(const Store & store) const;
+    static DerivedPathOpaque parse(const Store & store, std::string_view);
+};
+
+/**
+ * A derived path that is built from a derivation
+ *
+ * Built derived paths are pairs of a derivation and some output names.
+ * They are evaluated by building the derivation, and then replacing the
+ * output names with the resulting outputs.
+ *
+ * Note that this means a built derived path may evaluate to multiple
+ * opaque paths, which is sort of icky as expressions are supposed to
+ * evaluate to single values. Perhaps this should have just a single
+ * output name.
+ */
+struct DerivedPathBuilt {
+    StorePath drvPath;
+    std::set<std::string> outputs;
+
+    std::string to_string(const Store & store) const;
+    static DerivedPathBuilt parse(const Store & store, std::string_view);
+};
+
+using _DerivedPathRaw = std::variant<
+    DerivedPathOpaque,
+    DerivedPathBuilt
+>;
+
+/**
+ * A "derived path" is a very simple sort of expression that evaluates
+ * to a (concrete) store path. It is either:
+ *
+ * - opaque, in which case it is just a concrete store path with
+ *   possibly no known derivation
+ *
+ * - built, in which case it is a pair of a derivation path and a
+ *   set of output names.
+ */
+struct DerivedPath : _DerivedPathRaw {
+    using Raw = _DerivedPathRaw;
+    using Raw::Raw;
+
+    using Opaque = DerivedPathOpaque;
+    using Built = DerivedPathBuilt;
+
+    inline const Raw & raw() const {
+        return static_cast<const Raw &>(*this);
+    }
+
+    std::string to_string(const Store & store) const;
+    static DerivedPath parse(const Store & store, std::string_view);
+};
+
+/**
+ * A built derived path with hints in the form of concrete output paths.
+ *
+ * See 'BuiltPath' below for an explanation.
+ */
+struct BuiltPathBuilt {
+    StorePath drvPath;
+    std::map<std::string, StorePath> outputs;
+
+    nlohmann::json toJSON(ref<Store> store) const;
+    static BuiltPathBuilt parse(const Store & store, std::string_view);
+};
+
+using _BuiltPathRaw = std::variant<
+    DerivedPath::Opaque,
+    BuiltPathBuilt
+>;
+
+/**
+ * A built path. Similar to a `DerivedPath`, but enriched with the corresponding
+ * output path(s).
+ */
+struct BuiltPath : _BuiltPathRaw {
+    using Raw = _BuiltPathRaw;
+    using Raw::Raw;
+
+    using Opaque = DerivedPathOpaque;
+    using Built = BuiltPathBuilt;
+
+    inline const Raw & raw() const {
+        return static_cast<const Raw &>(*this);
+    }
+
+    StorePathSet outPaths() const;
+    RealisedPath::Set toRealisedPaths(Store & store) const;
+
+};
+
+typedef std::vector<DerivedPath> DerivedPaths;
+typedef std::vector<BuiltPath> BuiltPaths;
+
+nlohmann::json derivedPathsWithHintsToJSON(const BuiltPaths & buildables, ref<Store> store);
+
+}
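Consumers of these types dispatch on the underlying variant with std::visit, as BuiltPath::outPaths() does in derived-path.cc above. A small sketch of that pattern, assuming the overloaded helper from util.hh that the rest of this patch already uses:

    // Return the derivation behind a DerivedPath, if there is one.
    std::optional<StorePath> derivationOf(const DerivedPath & req)
    {
        return std::visit(overloaded {
            [](const DerivedPath::Opaque &) -> std::optional<StorePath> {
                return std::nullopt;   // plain store path, nothing to build
            },
            [](const DerivedPath::Built & b) -> std::optional<StorePath> {
                return b.drvPath;      // the derivation that produces the outputs
            },
        }, req.raw());
    }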
diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc
index 8ea5cdc9de5815cf96d384e76efd412ed5699cf9..2cf35ec83fe30393ab0226af466d44813ed9dc08 100644
--- a/src/libstore/filetransfer.cc
+++ b/src/libstore/filetransfer.cc
@@ -7,7 +7,7 @@
 #include "finally.hh"
 #include "callback.hh"
 
-#ifdef ENABLE_S3
+#if ENABLE_S3
 #include <aws/core/client/ClientConfiguration.h>
 #endif
 
@@ -148,7 +148,7 @@ struct curlFileTransfer : public FileTransfer
         }
 
         LambdaSink finalSink;
-        std::shared_ptr<CompressionSink> decompressionSink;
+        std::shared_ptr<FinishSink> decompressionSink;
         std::optional<StringSink> errorSink;
 
         std::exception_ptr writeException;
@@ -665,7 +665,7 @@ struct curlFileTransfer : public FileTransfer
         writeFull(wakeupPipe.writeSide.get(), " ");
     }
 
-#ifdef ENABLE_S3
+#if ENABLE_S3
     std::tuple<std::string, std::string, Store::Params> parseS3Uri(std::string uri)
     {
         auto [path, params] = splitUriAndParams(uri);
@@ -688,7 +688,7 @@ struct curlFileTransfer : public FileTransfer
         if (hasPrefix(request.uri, "s3://")) {
             // FIXME: do this on a worker thread
             try {
-#ifdef ENABLE_S3
+#if ENABLE_S3
                 auto [bucketName, key, params] = parseS3Uri(request.uri);
 
                 std::string profile = get(params, "profile").value_or("");
diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc
index bc692ca4276decce4d530c06715de4272a41378d..5a62c6529fa4d6ca39a6ea14e0db003bd26800d6 100644
--- a/src/libstore/gc.cc
+++ b/src/libstore/gc.cc
@@ -775,7 +775,7 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
 
         try {
 
-            AutoCloseDir dir(opendir(realStoreDir.c_str()));
+            AutoCloseDir dir(opendir(realStoreDir.get().c_str()));
             if (!dir) throw SysError("opening directory '%1%'", realStoreDir);
 
             /* Read the store and immediately delete all paths that
@@ -856,7 +856,7 @@ void LocalStore::autoGC(bool sync)
             return std::stoll(readFile(*fakeFreeSpaceFile));
 
         struct statvfs st;
-        if (statvfs(realStoreDir.c_str(), &st))
+        if (statvfs(realStoreDir.get().c_str(), &st))
             throw SysError("getting filesystem info about '%s'", realStoreDir);
 
         return (uint64_t) st.f_bavail * st.f_frsize;
diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc
index 8d44003f4ef960a0a896b0b27599969b43d41aad..d3b27d7be91fd6889c9aeb2242165154a565194c 100644
--- a/src/libstore/globals.cc
+++ b/src/libstore/globals.cc
@@ -81,7 +81,7 @@ void loadConfFile()
 
     /* We only want to send overrides to the daemon, i.e. stuff from
        ~/.nix/nix.conf or the command line. */
-    globalConfig.resetOverriden();
+    globalConfig.resetOverridden();
 
     auto files = settings.nixUserConfFiles;
     for (auto file = files.rbegin(); file != files.rend(); file++) {
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index a51d9c2f18bdad2b8e153030610421116a31f9f7..dd570cd63d63da9a8042ff1d0ba1afdd981af0b8 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -206,7 +206,10 @@ public:
 
     Setting<std::string> builders{
         this, "@" + nixConfDir + "/machines", "builders",
-        "A semicolon-separated list of build machines, in the format of `nix.machines`."};
+        R"(
+          A semicolon-separated list of build machines.
+          For the exact format and examples, see [the manual chapter on remote builds](../advanced-topics/distributed-builds.md).
+        )"};
 
     Setting<bool> buildersUseSubstitutes{
         this, false, "builders-use-substitutes",
@@ -614,8 +617,10 @@ public:
         Strings{"https://cache.nixos.org/"},
         "substituters",
         R"(
-          A list of URLs of substituters, separated by whitespace. The default
-          is `https://cache.nixos.org`.
+          A list of URLs of substituters, separated by whitespace. Substituters
+          are tried in order of their Priority value, which each substituter can
+          set independently. A lower value means a higher priority.
+          The default is `https://cache.nixos.org`, with a Priority of 40.
         )",
         {"binary-caches"}};
 
@@ -698,7 +703,7 @@ public:
           send a series of commands to modify various settings to stdout. The
           currently recognized commands are:
 
-            - `extra-sandbox-paths`  
+            - `extra-sandbox-paths`\
               Pass a list of files and directories to be included in the
               sandbox for this build. One entry per line, terminated by an
               empty line. Entries have the same format as `sandbox-paths`.
diff --git a/src/libstore/legacy-ssh-store.cc b/src/libstore/legacy-ssh-store.cc
index a9f53bad90907db97f20cbb440f2971a238deccd..edaf751367eed102089e1d89f166f125856e7726 100644
--- a/src/libstore/legacy-ssh-store.cc
+++ b/src/libstore/legacy-ssh-store.cc
@@ -3,6 +3,7 @@
 #include "remote-store.hh"
 #include "serve-protocol.hh"
 #include "store-api.hh"
+#include "path-with-outputs.hh"
 #include "worker-protocol.hh"
 #include "ssh.hh"
 #include "derivations.hh"
@@ -266,14 +267,23 @@ public:
         return status;
     }
 
-    void buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, BuildMode buildMode) override
+    void buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode) override
     {
         auto conn(connections->get());
 
         conn->to << cmdBuildPaths;
         Strings ss;
-        for (auto & p : drvPaths)
-            ss.push_back(p.to_string(*this));
+        for (auto & p : drvPaths) {
+            auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
+            std::visit(overloaded {
+                [&](StorePathWithOutputs s) {
+                    ss.push_back(s.to_string(*this));
+                },
+                [&](StorePath drvPath) {
+                    throw Error("wanted to fetch '%s' but the legacy ssh protocol doesn't support merely substituting drv files via the build paths command. It would build them instead. Try using ssh-ng://", printStorePath(drvPath));
+                },
+            }, sOrDrvPath);
+        }
         conn->to << ss;
 
         putBuildSettings(*conn);
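For orientation, the contract of StorePathWithOutputs::tryFromDerivedPath used above can be sketched as follows; this is only an illustration of the mapping implied by its use here and by the error message, not the actual implementation:

    // Built paths map back to the legacy representation; an opaque .drv path
    // has no equivalent and is handed back as a bare StorePath instead.
    std::variant<StorePathWithOutputs, StorePath> tryFromDerivedPathSketch(const DerivedPath & p)
    {
        return std::visit(overloaded {
            [](const DerivedPath::Opaque & bo) -> std::variant<StorePathWithOutputs, StorePath> {
                if (bo.path.isDerivation())
                    return bo.path;                          // cannot be expressed
                return StorePathWithOutputs { bo.path, {} };
            },
            [](const DerivedPath::Built & bfd) -> std::variant<StorePathWithOutputs, StorePath> {
                return StorePathWithOutputs { bfd.drvPath, bfd.outputs };
            },
        }, p.raw());
    }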
diff --git a/src/libstore/local-binary-cache-store.cc b/src/libstore/local-binary-cache-store.cc
index a58b7733f3ef4c3ac7fd80d3870c13ddb6eabafd..f93111fcec113e16a3caa4fb713a81e2d7eef185 100644
--- a/src/libstore/local-binary-cache-store.cc
+++ b/src/libstore/local-binary-cache-store.cc
@@ -2,6 +2,8 @@
 #include "globals.hh"
 #include "nar-info-disk-cache.hh"
 
+#include <atomic>
+
 namespace nix {
 
 struct LocalBinaryCacheStoreConfig : virtual BinaryCacheStoreConfig
@@ -50,7 +52,8 @@ protected:
         const std::string & mimeType) override
     {
         auto path2 = binaryCacheDir + "/" + path;
-        Path tmp = path2 + ".tmp." + std::to_string(getpid());
+        static std::atomic<int> counter{0};
+        Path tmp = fmt("%s.tmp.%d.%d", path2, getpid(), ++counter);
         AutoDelete del(tmp, false);
         StreamToSourceAdapter source(istream);
         writeFile(tmp, source);
@@ -90,7 +93,7 @@ protected:
 void LocalBinaryCacheStore::init()
 {
     createDirs(binaryCacheDir + "/nar");
-    createDirs(binaryCacheDir + realisationsPrefix);
+    createDirs(binaryCacheDir + "/" + realisationsPrefix);
     if (writeDebugInfo)
         createDirs(binaryCacheDir + "/debuginfo");
     BinaryCacheStore::init();
diff --git a/src/libstore/local-fs-store.hh b/src/libstore/local-fs-store.hh
index 55941b77178454531c557883cbdbe096c6a1382c..f8b19d00de3b9c0d0410cdcb4cdc036db35c4420 100644
--- a/src/libstore/local-fs-store.hh
+++ b/src/libstore/local-fs-store.hh
@@ -18,6 +18,9 @@ struct LocalFSStoreConfig : virtual StoreConfig
     const PathSetting logDir{(StoreConfig*) this, false,
         rootDir != "" ? rootDir + "/nix/var/log/nix" : settings.nixLogDir,
         "log", "directory where Nix will store state"};
+    const PathSetting realStoreDir{(StoreConfig*) this, false,
+        rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
+        "physical path to the Nix store"};
 };
 
 class LocalFSStore : public virtual LocalFSStoreConfig, public virtual Store
@@ -34,7 +37,7 @@ public:
     /* Register a permanent GC root. */
     Path addPermRoot(const StorePath & storePath, const Path & gcRoot);
 
-    virtual Path getRealStoreDir() { return storeDir; }
+    virtual Path getRealStoreDir() { return realStoreDir; }
 
     Path toRealPath(const Path & storePath) override
     {
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 90fb4a4bd70938af7e465d60798e3d9e9849f9da..d7c7f8e1d4c312b9a9b98c0c3d0f22b01adadeb6 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -53,12 +53,15 @@ struct LocalStore::State::Stmts {
     SQLiteStmt InvalidatePath;
     SQLiteStmt AddDerivationOutput;
     SQLiteStmt RegisterRealisedOutput;
+    SQLiteStmt UpdateRealisedOutput;
     SQLiteStmt QueryValidDerivers;
     SQLiteStmt QueryDerivationOutputs;
     SQLiteStmt QueryRealisedOutput;
     SQLiteStmt QueryAllRealisedOutputs;
     SQLiteStmt QueryPathFromHashPart;
     SQLiteStmt QueryValidPaths;
+    SQLiteStmt QueryRealisationReferences;
+    SQLiteStmt AddRealisationReference;
 };
 
 int getSchema(Path schemaPath)
@@ -76,7 +79,7 @@ int getSchema(Path schemaPath)
 
 void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
 {
-    const int nixCASchemaVersion = 1;
+    const int nixCASchemaVersion = 2;
     int curCASchema = getSchema(schemaPath);
     if (curCASchema != nixCASchemaVersion) {
         if (curCASchema > nixCASchemaVersion) {
@@ -94,7 +97,39 @@ void migrateCASchema(SQLite& db, Path schemaPath, AutoCloseFD& lockFd)
               #include "ca-specific-schema.sql.gen.hh"
                 ;
             db.exec(schema);
+            curCASchema = nixCASchemaVersion;
         }
+
+        if (curCASchema < 2) {
+            SQLiteTxn txn(db);
+            // Ugly little SQL dance to add a new `id` column and make it the primary key
+            db.exec(R"(
+                create table Realisations2 (
+                    id integer primary key autoincrement not null,
+                    drvPath text not null,
+                    outputName text not null, -- symbolic output id, usually "out"
+                    outputPath integer not null,
+                    signatures text, -- space-separated list
+                    foreign key (outputPath) references ValidPaths(id) on delete cascade
+                );
+                insert into Realisations2 (drvPath, outputName, outputPath, signatures)
+                    select drvPath, outputName, outputPath, signatures from Realisations;
+                drop table Realisations;
+                alter table Realisations2 rename to Realisations;
+            )");
+            db.exec(R"(
+                create index if not exists IndexRealisations on Realisations(drvPath, outputName);
+
+                create table if not exists RealisationsRefs (
+                    referrer integer not null,
+                    realisationReference integer,
+                    foreign key (referrer) references Realisations(id) on delete cascade,
+                    foreign key (realisationReference) references Realisations(id) on delete restrict
+                );
+            )");
+            txn.commit();
+        }
+
         writeFile(schemaPath, fmt("%d", nixCASchemaVersion));
         lockFile(lockFd.get(), ltRead, true);
     }
@@ -106,9 +141,6 @@ LocalStore::LocalStore(const Params & params)
     , LocalStoreConfig(params)
     , Store(params)
     , LocalFSStore(params)
-    , realStoreDir_{this, false, rootDir != "" ? rootDir + "/nix/store" : storeDir, "real",
-        "physical path to the Nix store"}
-    , realStoreDir(realStoreDir_)
     , dbDir(stateDir + "/db")
     , linksDir(realStoreDir + "/.links")
     , reservedPath(dbDir + "/reserved")
@@ -153,13 +185,13 @@ LocalStore::LocalStore(const Params & params)
             printError("warning: the group '%1%' specified in 'build-users-group' does not exist", settings.buildUsersGroup);
         else {
             struct stat st;
-            if (stat(realStoreDir.c_str(), &st))
+            if (stat(realStoreDir.get().c_str(), &st))
                 throw SysError("getting attributes of path '%1%'", realStoreDir);
 
             if (st.st_uid != 0 || st.st_gid != gr->gr_gid || (st.st_mode & ~S_IFMT) != perm) {
-                if (chown(realStoreDir.c_str(), 0, gr->gr_gid) == -1)
+                if (chown(realStoreDir.get().c_str(), 0, gr->gr_gid) == -1)
                     throw SysError("changing ownership of path '%1%'", realStoreDir);
-                if (chmod(realStoreDir.c_str(), perm) == -1)
+                if (chmod(realStoreDir.get().c_str(), perm) == -1)
                     throw SysError("changing permissions on path '%1%'", realStoreDir);
             }
         }
@@ -310,13 +342,22 @@ LocalStore::LocalStore(const Params & params)
     if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
         state->stmts->RegisterRealisedOutput.create(state->db,
             R"(
-                insert or replace into Realisations (drvPath, outputName, outputPath)
-                values (?, ?, (select id from ValidPaths where path = ?))
+                insert or replace into Realisations (drvPath, outputName, outputPath, signatures)
+                values (?, ?, (select id from ValidPaths where path = ?), ?)
+                ;
+            )");
+        state->stmts->UpdateRealisedOutput.create(state->db,
+            R"(
+                update Realisations
+                    set signatures = ?
+                where
+                    drvPath = ? and
+                    outputName = ?
                 ;
             )");
         state->stmts->QueryRealisedOutput.create(state->db,
             R"(
-                select Output.path from Realisations
+                select Realisations.id, Output.path, Realisations.signatures from Realisations
                     inner join ValidPaths as Output on Output.id = Realisations.outputPath
                     where drvPath = ? and outputName = ?
                     ;
@@ -328,6 +369,19 @@ LocalStore::LocalStore(const Params & params)
                     where drvPath = ?
                     ;
             )");
+        state->stmts->QueryRealisationReferences.create(state->db,
+            R"(
+                select drvPath, outputName from Realisations
+                    join RealisationsRefs on realisationReference = Realisations.id
+                    where referrer = ?;
+            )");
+        state->stmts->AddRealisationReference.create(state->db,
+            R"(
+                insert or replace into RealisationsRefs (referrer, realisationReference)
+                values (
+                    ?,
+                    (select id from Realisations where drvPath = ? and outputName = ?));
+            )");
     }
 }
 
@@ -437,14 +491,14 @@ void LocalStore::makeStoreWritable()
     if (getuid() != 0) return;
     /* Check if /nix/store is on a read-only mount. */
     struct statvfs stat;
-    if (statvfs(realStoreDir.c_str(), &stat) != 0)
+    if (statvfs(realStoreDir.get().c_str(), &stat) != 0)
         throw SysError("getting info about the Nix store mount point");
 
     if (stat.f_flag & ST_RDONLY) {
         if (unshare(CLONE_NEWNS) == -1)
             throw SysError("setting up a private mount namespace");
 
-        if (mount(0, realStoreDir.c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
+        if (mount(0, realStoreDir.get().c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1)
             throw SysError("remounting %1% writable", realStoreDir);
     }
 #endif
@@ -652,17 +706,66 @@ void LocalStore::checkDerivationOutputs(const StorePath & drvPath, const Derivat
     }
 }
 
+void LocalStore::registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs)
+{
+    settings.requireExperimentalFeature("ca-derivations");
+    if (checkSigs == NoCheckSigs || !realisationIsUntrusted(info))
+        registerDrvOutput(info);
+    else
+        throw Error("cannot register realisation '%s' because it lacks a valid signature", info.outPath.to_string());
+}
 
 void LocalStore::registerDrvOutput(const Realisation & info)
 {
     settings.requireExperimentalFeature("ca-derivations");
-    auto state(_state.lock());
     retrySQLite<void>([&]() {
-        state->stmts->RegisterRealisedOutput.use()
-            (info.id.strHash())
-            (info.id.outputName)
-            (printStorePath(info.outPath))
-            .exec();
+        auto state(_state.lock());
+        if (auto oldR = queryRealisation_(*state, info.id)) {
+            if (info.isCompatibleWith(*oldR)) {
+                auto combinedSignatures = oldR->signatures;
+                combinedSignatures.insert(info.signatures.begin(),
+                    info.signatures.end());
+                state->stmts->UpdateRealisedOutput.use()
+                    (concatStringsSep(" ", combinedSignatures))
+                    (info.id.strHash())
+                    (info.id.outputName)
+                    .exec();
+            } else {
+                throw Error("Trying to register a realisation of '%s', but we already "
+                            "have another one locally.\n"
+                            "Local:  %s\n"
+                            "Remote: %s",
+                    info.id.to_string(),
+                    printStorePath(oldR->outPath),
+                    printStorePath(info.outPath)
+                );
+            }
+        } else {
+            state->stmts->RegisterRealisedOutput.use()
+                (info.id.strHash())
+                (info.id.outputName)
+                (printStorePath(info.outPath))
+                (concatStringsSep(" ", info.signatures))
+                .exec();
+        }
+        uint64_t myId = state->db.getLastInsertedRowId();
+        for (auto & [outputId, depPath] : info.dependentRealisations) {
+            auto localRealisation = queryRealisationCore_(*state, outputId);
+            if (!localRealisation)
+                throw Error("unable to register the derivation '%s' as it "
+                            "depends on the non existent '%s'",
+                    info.id.to_string(), outputId.to_string());
+            if (localRealisation->second.outPath != depPath)
+                throw Error("unable to register the derivation '%s' as it "
+                            "depends on a realisation of '%s' that doesn’t"
+                            "match what we have locally",
+                    info.id.to_string(), outputId.to_string());
+            state->stmts->AddRealisationReference.use()
+                (myId)
+                (outputId.strHash())
+                (outputId.outputName)
+                .exec();
+        }
     });
 }
 
@@ -1102,15 +1205,20 @@ const PublicKeys & LocalStore::getPublicKeys()
     return *state->publicKeys;
 }
 
-bool LocalStore::pathInfoIsTrusted(const ValidPathInfo & info)
+bool LocalStore::pathInfoIsUntrusted(const ValidPathInfo & info)
 {
     return requireSigs && !info.checkSignatures(*this, getPublicKeys());
 }
 
+bool LocalStore::realisationIsUntrusted(const Realisation & realisation)
+{
+    return requireSigs && !realisation.checkSignatures(getPublicKeys());
+}
+
 void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
     RepairFlag repair, CheckSigsFlag checkSigs)
 {
-    if (checkSigs && pathInfoIsTrusted(info))
+    if (checkSigs && pathInfoIsUntrusted(info))
         throw Error("cannot add path '%s' because it lacks a valid signature", printStorePath(info.path));
 
     addTempRoot(info.path);
@@ -1138,17 +1246,13 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
 
             /* While restoring the path from the NAR, compute the hash
                of the NAR. */
-            std::unique_ptr<AbstractHashSink> hashSink;
-            if (!info.ca.has_value() || !info.references.count(info.path))
-                hashSink = std::make_unique<HashSink>(htSHA256);
-            else
-                hashSink = std::make_unique<HashModuloSink>(htSHA256, std::string(info.path.hashPart()));
+            HashSink hashSink(htSHA256);
 
-            TeeSource wrapperSource { source, *hashSink };
+            TeeSource wrapperSource { source, hashSink };
 
             restorePath(realPath, wrapperSource);
 
-            auto hashResult = hashSink->finish();
+            auto hashResult = hashSink.finish();
 
             if (hashResult.first != info.narHash)
                 throw Error("hash mismatch importing path '%s';\n  specified: %s\n  got:       %s",
@@ -1158,6 +1262,31 @@ void LocalStore::addToStore(const ValidPathInfo & info, Source & source,
                 throw Error("size mismatch importing path '%s';\n  specified: %s\n  got:       %s",
                     printStorePath(info.path), info.narSize, hashResult.second);
 
+            if (info.ca) {
+                if (auto foHash = std::get_if<FixedOutputHash>(&*info.ca)) {
+                    auto actualFoHash = hashCAPath(
+                        foHash->method,
+                        foHash->hash.type,
+                        info.path
+                    );
+                    if (foHash->hash != actualFoHash.hash) {
+                        throw Error("ca hash mismatch importing path '%s';\n  specified: %s\n  got:       %s",
+                            printStorePath(info.path),
+                            foHash->hash.to_string(Base32, true),
+                            actualFoHash.hash.to_string(Base32, true));
+                    }
+                }
+                if (auto textHash = std::get_if<TextHash>(&*info.ca)) {
+                    auto actualTextHash = hashString(htSHA256, readFile(realPath));
+                    if (textHash->hash != actualTextHash) {
+                        throw Error("ca hash mismatch importing path '%s';\n  specified: %s\n  got:       %s",
+                            printStorePath(info.path),
+                            textHash->hash.to_string(Base32, true),
+                            actualTextHash.to_string(Base32, true));
+                    }
+                }
+            }
+
             autoGC();
 
             canonicalisePathMetaData(realPath, -1);
@@ -1426,14 +1555,10 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
                 /* Check the content hash (optionally - slow). */
                 printMsg(lvlTalkative, "checking contents of '%s'", printStorePath(i));
 
-                std::unique_ptr<AbstractHashSink> hashSink;
-                if (!info->ca || !info->references.count(info->path))
-                    hashSink = std::make_unique<HashSink>(info->narHash.type);
-                else
-                    hashSink = std::make_unique<HashModuloSink>(info->narHash.type, std::string(info->path.hashPart()));
+                auto hashSink = HashSink(info->narHash.type);
 
-                dumpPath(Store::toRealPath(i), *hashSink);
-                auto current = hashSink->finish();
+                dumpPath(Store::toRealPath(i), hashSink);
+                auto current = hashSink.finish();
 
                 if (info->narHash != nullHash && info->narHash != current.first) {
                     printError("path '%s' was modified! expected hash '%s', got '%s'",
@@ -1612,6 +1737,18 @@ void LocalStore::addSignatures(const StorePath & storePath, const StringSet & si
 }
 
 
+void LocalStore::signRealisation(Realisation & realisation)
+{
+    // FIXME: keep secret keys in memory.
+
+    auto secretKeyFiles = settings.secretKeyFiles;
+
+    for (auto & secretKeyFile : secretKeyFiles.get()) {
+        SecretKey secretKey(readFile(secretKeyFile));
+        realisation.sign(secretKey);
+    }
+}
+
 void LocalStore::signPathInfo(ValidPathInfo & info)
 {
     // FIXME: keep secret keys in memory.
@@ -1639,18 +1776,97 @@ void LocalStore::createUser(const std::string & userName, uid_t userId)
     }
 }
 
-std::optional<const Realisation> LocalStore::queryRealisation(
-    const DrvOutput& id) {
-    typedef std::optional<const Realisation> Ret;
-    return retrySQLite<Ret>([&]() -> Ret {
+std::optional<std::pair<int64_t, Realisation>> LocalStore::queryRealisationCore_(
+        LocalStore::State & state,
+        const DrvOutput & id)
+{
+    auto useQueryRealisedOutput(
+            state.stmts->QueryRealisedOutput.use()
+                (id.strHash())
+                (id.outputName));
+    if (!useQueryRealisedOutput.next())
+        return std::nullopt;
+    auto realisationDbId = useQueryRealisedOutput.getInt(0);
+    auto outputPath = parseStorePath(useQueryRealisedOutput.getStr(1));
+    auto signatures =
+        tokenizeString<StringSet>(useQueryRealisedOutput.getStr(2));
+
+    return {{
+        realisationDbId,
+        Realisation{
+            .id = id,
+            .outPath = outputPath,
+            .signatures = signatures,
+        }
+    }};
+}
+
+std::optional<const Realisation> LocalStore::queryRealisation_(
+            LocalStore::State & state,
+            const DrvOutput & id)
+{
+    auto maybeCore = queryRealisationCore_(state, id);
+    if (!maybeCore)
+        return std::nullopt;
+    auto [realisationDbId, res] = *maybeCore;
+
+    std::map<DrvOutput, StorePath> dependentRealisations;
+    auto useRealisationRefs(
+        state.stmts->QueryRealisationReferences.use()
+            (realisationDbId));
+    while (useRealisationRefs.next()) {
+        auto depId = DrvOutput {
+            Hash::parseAnyPrefixed(useRealisationRefs.getStr(0)),
+            useRealisationRefs.getStr(1),
+        };
+        auto dependentRealisation = queryRealisationCore_(state, depId);
+        assert(dependentRealisation); // Enforced by the db schema
+        auto outputPath = dependentRealisation->second.outPath;
+        dependentRealisations.insert({depId, outputPath});
+    }
+
+    res.dependentRealisations = dependentRealisations;
+
+    return { res };
+}
+
+std::optional<const Realisation>
+LocalStore::queryRealisation(const DrvOutput & id)
+{
+    return retrySQLite<std::optional<const Realisation>>([&]() {
         auto state(_state.lock());
-        auto use(state->stmts->QueryRealisedOutput.use()(id.strHash())(
-            id.outputName));
-        if (!use.next())
-            return std::nullopt;
-        auto outputPath = parseStorePath(use.getStr(0));
-        return Ret{
-            Realisation{.id = id, .outPath = outputPath}};
+        return queryRealisation_(*state, id);
     });
 }
+
+FixedOutputHash LocalStore::hashCAPath(
+    const FileIngestionMethod & method, const HashType & hashType,
+    const StorePath & path)
+{
+    return hashCAPath(method, hashType, Store::toRealPath(path), path.hashPart());
+}
+
+FixedOutputHash LocalStore::hashCAPath(
+    const FileIngestionMethod & method,
+    const HashType & hashType,
+    const Path & path,
+    const std::string_view pathHash
+)
+{
+    HashModuloSink caSink(hashType, std::string(pathHash));
+    switch (method) {
+    case FileIngestionMethod::Recursive:
+        dumpPath(path, caSink);
+        break;
+    case FileIngestionMethod::Flat:
+        readFile(path, caSink);
+        break;
+    }
+    auto hash = caSink.finish().first;
+    return FixedOutputHash{
+        .method = method,
+        .hash = hash,
+    };
+}
+
 }  // namespace nix
diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh
index 03bb0218d5748fd5b2ad8e2abc4e60d98e3ff7d9..a01d48c4b99053432cdcca12a23f3683acd61280 100644
--- a/src/libstore/local-store.hh
+++ b/src/libstore/local-store.hh
@@ -83,9 +83,6 @@ private:
 
 public:
 
-    PathSetting realStoreDir_;
-
-    const Path realStoreDir;
     const Path dbDir;
     const Path linksDir;
     const Path reservedPath;
@@ -136,7 +133,8 @@ public:
     void querySubstitutablePathInfos(const StorePathCAMap & paths,
         SubstitutablePathInfos & infos) override;
 
-    bool pathInfoIsTrusted(const ValidPathInfo &) override;
+    bool pathInfoIsUntrusted(const ValidPathInfo &) override;
+    bool realisationIsUntrusted(const Realisation &) override;
 
     void addToStore(const ValidPathInfo & info, Source & source,
         RepairFlag repair, CheckSigsFlag checkSigs) override;
@@ -202,8 +200,11 @@ public:
     /* Register the store path 'output' as the output named 'outputName' of
        derivation 'deriver'. */
     void registerDrvOutput(const Realisation & info) override;
+    void registerDrvOutput(const Realisation & info, CheckSigsFlag checkSigs) override;
     void cacheDrvOutputMapping(State & state, const uint64_t deriver, const string & outputName, const StorePath & output);
 
+    std::optional<const Realisation> queryRealisation_(State & state, const DrvOutput & id);
+    std::optional<std::pair<int64_t, Realisation>> queryRealisationCore_(State & state, const DrvOutput & id);
     std::optional<const Realisation> queryRealisation(const DrvOutput&) override;
 
 private:
@@ -272,16 +273,30 @@ private:
     bool isValidPath_(State & state, const StorePath & path);
     void queryReferrers(State & state, const StorePath & path, StorePathSet & referrers);
 
-    /* Add signatures to a ValidPathInfo using the secret keys
+    /* Add signatures to a ValidPathInfo or Realisation using the secret keys
        specified by the ‘secret-key-files’ option. */
     void signPathInfo(ValidPathInfo & info);
-
-    Path getRealStoreDir() override { return realStoreDir; }
+    void signRealisation(Realisation &);
 
     void createUser(const std::string & userName, uid_t userId) override;
 
+    // XXX: Make a generic `Store` method
+    FixedOutputHash hashCAPath(
+        const FileIngestionMethod & method,
+        const HashType & hashType,
+        const StorePath & path);
+
+    FixedOutputHash hashCAPath(
+        const FileIngestionMethod & method,
+        const HashType & hashType,
+        const Path & path,
+        const std::string_view pathHash
+    );
+
     friend struct LocalDerivationGoal;
+    friend struct PathSubstitutionGoal;
     friend struct SubstitutionGoal;
+    friend struct DerivationGoal;
 };
 
 
diff --git a/src/libstore/local.mk b/src/libstore/local.mk
index cf0933705fc9f870c9c389d630529b319e8d865c..b6652984c3a980f272a152ded3390b259d724d11 100644
--- a/src/libstore/local.mk
+++ b/src/libstore/local.mk
@@ -9,7 +9,7 @@ libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc $(d)/build/*.cc)
 libstore_LIBS = libutil
 
 libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread
-ifneq ($(OS), FreeBSD)
+ifeq ($(OS), Linux)
  libstore_LDFLAGS += -ldl
 endif
 
diff --git a/src/libstore/misc.cc b/src/libstore/misc.cc
index f58816ad806da1da2a0266711873ff1dc3a627f0..b4929b44533d5ba06d63cfde55c3fdf0b743fb7f 100644
--- a/src/libstore/misc.cc
+++ b/src/libstore/misc.cc
@@ -6,98 +6,73 @@
 #include "thread-pool.hh"
 #include "topo-sort.hh"
 #include "callback.hh"
+#include "closure.hh"
 
 namespace nix {
 
-
 void Store::computeFSClosure(const StorePathSet & startPaths,
     StorePathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
 {
-    struct State
-    {
-        size_t pending;
-        StorePathSet & paths;
-        std::exception_ptr exc;
-    };
-
-    Sync<State> state_(State{0, paths_, 0});
-
-    std::function<void(const StorePath &)> enqueue;
-
-    std::condition_variable done;
-
-    enqueue = [&](const StorePath & path) -> void {
-        {
-            auto state(state_.lock());
-            if (state->exc) return;
-            if (!state->paths.insert(path).second) return;
-            state->pending++;
-        }
-
-        queryPathInfo(path, {[&](std::future<ref<const ValidPathInfo>> fut) {
-            // FIXME: calls to isValidPath() should be async
-
-            try {
-                auto info = fut.get();
-
-                if (flipDirection) {
-
-                    StorePathSet referrers;
-                    queryReferrers(path, referrers);
-                    for (auto & ref : referrers)
-                        if (ref != path)
-                            enqueue(ref);
-
-                    if (includeOutputs)
-                        for (auto & i : queryValidDerivers(path))
-                            enqueue(i);
-
-                    if (includeDerivers && path.isDerivation())
-                        for (auto & i : queryDerivationOutputs(path))
-                            if (isValidPath(i) && queryPathInfo(i)->deriver == path)
-                                enqueue(i);
-
-                } else {
-
-                    for (auto & ref : info->references)
-                        if (ref != path)
-                            enqueue(ref);
-
-                    if (includeOutputs && path.isDerivation())
-                        for (auto & i : queryDerivationOutputs(path))
-                            if (isValidPath(i)) enqueue(i);
-
-                    if (includeDerivers && info->deriver && isValidPath(*info->deriver))
-                        enqueue(*info->deriver);
-
-                }
-
-                {
-                    auto state(state_.lock());
-                    assert(state->pending);
-                    if (!--state->pending) done.notify_one();
-                }
-
-            } catch (...) {
-                auto state(state_.lock());
-                if (!state->exc) state->exc = std::current_exception();
-                assert(state->pending);
-                if (!--state->pending) done.notify_one();
-            };
-        }});
-    };
-
-    for (auto & startPath : startPaths)
-        enqueue(startPath);
-
-    {
-        auto state(state_.lock());
-        while (state->pending) state.wait(done);
-        if (state->exc) std::rethrow_exception(state->exc);
-    }
+    std::function<std::set<StorePath>(const StorePath & path, std::future<ref<const ValidPathInfo>> &)> queryDeps;
+    if (flipDirection)
+        queryDeps = [&](const StorePath& path,
+                        std::future<ref<const ValidPathInfo>> & fut) {
+            StorePathSet res;
+            StorePathSet referrers;
+            queryReferrers(path, referrers);
+            for (auto& ref : referrers)
+                if (ref != path)
+                    res.insert(ref);
+
+            if (includeOutputs)
+                for (auto& i : queryValidDerivers(path))
+                    res.insert(i);
+
+            if (includeDerivers && path.isDerivation())
+                for (auto& [_, maybeOutPath] : queryPartialDerivationOutputMap(path))
+                    if (maybeOutPath && isValidPath(*maybeOutPath))
+                        res.insert(*maybeOutPath);
+            return res;
+        };
+    else
+        queryDeps = [&](const StorePath& path,
+                        std::future<ref<const ValidPathInfo>> & fut) {
+            StorePathSet res;
+            auto info = fut.get();
+            for (auto& ref : info->references)
+                if (ref != path)
+                    res.insert(ref);
+
+            if (includeOutputs && path.isDerivation())
+                for (auto& [_, maybeOutPath] : queryPartialDerivationOutputMap(path))
+                    if (maybeOutPath && isValidPath(*maybeOutPath))
+                        res.insert(*maybeOutPath);
+
+            if (includeDerivers && info->deriver && isValidPath(*info->deriver))
+                res.insert(*info->deriver);
+            return res;
+        };
+
+    computeClosure<StorePath>(
+        startPaths, paths_,
+        [&](const StorePath& path,
+            std::function<void(std::promise<std::set<StorePath>>&)>
+                processEdges) {
+            std::promise<std::set<StorePath>> promise;
+            std::function<void(std::future<ref<const ValidPathInfo>>)>
+                getDependencies =
+                    [&](std::future<ref<const ValidPathInfo>> fut) {
+                        try {
+                            promise.set_value(queryDeps(path, fut));
+                        } catch (...) {
+                            promise.set_exception(std::current_exception());
+                        }
+                    };
+            queryPathInfo(path, getDependencies);
+            processEdges(promise);
+        });
 }
 
-
 void Store::computeFSClosure(const StorePath & startPath,
     StorePathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
 {
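The rewrite above delegates the graph traversal to the generic computeClosure helper from closure.hh (included at the top of this hunk). A rough sketch of that helper driven by a plain in-memory graph, with the signature inferred from the call site, so details may differ from the real header:

    #include <functional>
    #include <future>
    #include <map>
    #include <set>
    #include <string>
    #include "closure.hh"

    std::set<std::string> closureOf(
        const std::map<std::string, std::set<std::string>> & graph,
        const std::string & start)
    {
        std::set<std::string> res;
        nix::computeClosure<std::string>(
            {start}, res,
            // For each node, hand a promise of its edge set to processEdges;
            // asynchronous producers (like queryPathInfo above) fit the same shape.
            [&](const std::string & node,
                std::function<void(std::promise<std::set<std::string>> &)> processEdges)
            {
                std::promise<std::set<std::string>> promise;
                auto it = graph.find(node);
                promise.set_value(
                    it == graph.end() ? std::set<std::string>{} : it->second);
                processEdges(promise);
            });
        return res;
    }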
@@ -117,7 +92,7 @@ std::optional<ContentAddress> getDerivationCA(const BasicDerivation & drv)
     return std::nullopt;
 }
 
-void Store::queryMissing(const std::vector<StorePathWithOutputs> & targets,
+void Store::queryMissing(const std::vector<DerivedPath> & targets,
     StorePathSet & willBuild_, StorePathSet & willSubstitute_, StorePathSet & unknown_,
     uint64_t & downloadSize_, uint64_t & narSize_)
 {
@@ -145,7 +120,7 @@ void Store::queryMissing(const std::vector<StorePathWithOutputs> & targets,
 
     Sync<State> state_(State{{}, unknown_, willSubstitute_, willBuild_, downloadSize_, narSize_});
 
-    std::function<void(StorePathWithOutputs)> doPath;
+    std::function<void(DerivedPath)> doPath;
 
     auto mustBuildDrv = [&](const StorePath & drvPath, const Derivation & drv) {
         {
@@ -154,7 +129,7 @@ void Store::queryMissing(const std::vector<StorePathWithOutputs> & targets,
         }
 
         for (auto & i : drv.inputDrvs)
-            pool.enqueue(std::bind(doPath, StorePathWithOutputs { i.first, i.second }));
+            pool.enqueue(std::bind(doPath, DerivedPath::Built { i.first, i.second }));
     };
 
     auto checkOutput = [&](
@@ -177,24 +152,25 @@ void Store::queryMissing(const std::vector<StorePathWithOutputs> & targets,
                 drvState->outPaths.insert(outPath);
                 if (!drvState->left) {
                     for (auto & path : drvState->outPaths)
-                        pool.enqueue(std::bind(doPath, StorePathWithOutputs { path } ));
+                        pool.enqueue(std::bind(doPath, DerivedPath::Opaque { path } ));
                 }
             }
         }
     };
 
-    doPath = [&](const StorePathWithOutputs & path) {
+    doPath = [&](const DerivedPath & req) {
 
         {
             auto state(state_.lock());
-            if (!state->done.insert(path.to_string(*this)).second) return;
+            if (!state->done.insert(req.to_string(*this)).second) return;
         }
 
-        if (path.path.isDerivation()) {
-            if (!isValidPath(path.path)) {
+        std::visit(overloaded {
+          [&](DerivedPath::Built bfd) {
+            if (!isValidPath(bfd.drvPath)) {
                 // FIXME: we could try to substitute the derivation.
                 auto state(state_.lock());
-                state->unknown.insert(path.path);
+                state->unknown.insert(bfd.drvPath);
                 return;
             }
 
@@ -202,52 +178,54 @@ void Store::queryMissing(const std::vector<StorePathWithOutputs> & targets,
             /* true for regular derivations, and CA derivations for which we
                have a trust mapping for all wanted outputs. */
             auto knownOutputPaths = true;
-            for (auto & [outputName, pathOpt] : queryPartialDerivationOutputMap(path.path)) {
+            for (auto & [outputName, pathOpt] : queryPartialDerivationOutputMap(bfd.drvPath)) {
                 if (!pathOpt) {
                     knownOutputPaths = false;
                     break;
                 }
-                if (wantOutput(outputName, path.outputs) && !isValidPath(*pathOpt))
+                if (wantOutput(outputName, bfd.outputs) && !isValidPath(*pathOpt))
                     invalid.insert(*pathOpt);
             }
             if (knownOutputPaths && invalid.empty()) return;
 
-            auto drv = make_ref<Derivation>(derivationFromPath(path.path));
-            ParsedDerivation parsedDrv(StorePath(path.path), *drv);
+            auto drv = make_ref<Derivation>(derivationFromPath(bfd.drvPath));
+            ParsedDerivation parsedDrv(StorePath(bfd.drvPath), *drv);
 
             if (knownOutputPaths && settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
                 auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
                 for (auto & output : invalid)
-                    pool.enqueue(std::bind(checkOutput, path.path, drv, output, drvState));
+                    pool.enqueue(std::bind(checkOutput, bfd.drvPath, drv, output, drvState));
             } else
-                mustBuildDrv(path.path, *drv);
+                mustBuildDrv(bfd.drvPath, *drv);
 
-        } else {
+          },
+          [&](DerivedPath::Opaque bo) {
 
-            if (isValidPath(path.path)) return;
+            if (isValidPath(bo.path)) return;
 
             SubstitutablePathInfos infos;
-            querySubstitutablePathInfos({{path.path, std::nullopt}}, infos);
+            querySubstitutablePathInfos({{bo.path, std::nullopt}}, infos);
 
             if (infos.empty()) {
                 auto state(state_.lock());
-                state->unknown.insert(path.path);
+                state->unknown.insert(bo.path);
                 return;
             }
 
-            auto info = infos.find(path.path);
+            auto info = infos.find(bo.path);
             assert(info != infos.end());
 
             {
                 auto state(state_.lock());
-                state->willSubstitute.insert(path.path);
+                state->willSubstitute.insert(bo.path);
                 state->downloadSize += info->second.downloadSize;
                 state->narSize += info->second.narSize;
             }
 
             for (auto & ref : info->second.references)
-                pool.enqueue(std::bind(doPath, StorePathWithOutputs { ref }));
-        }
+                pool.enqueue(std::bind(doPath, DerivedPath::Opaque { ref }));
+          },
+        }, req.raw());
     };
 
     for (auto & path : targets)
@@ -276,5 +254,44 @@ StorePaths Store::topoSortPaths(const StorePathSet & paths)
         }});
 }
 
+std::map<DrvOutput, StorePath> drvOutputReferences(
+    const std::set<Realisation> & inputRealisations,
+    const StorePathSet & pathReferences)
+{
+    std::map<DrvOutput, StorePath> res;
+
+    for (const auto & input : inputRealisations) {
+        if (pathReferences.count(input.outPath)) {
+            res.insert({input.id, input.outPath});
+        }
+    }
+
+    return res;
+}
 
+std::map<DrvOutput, StorePath> drvOutputReferences(
+    Store & store,
+    const Derivation & drv,
+    const StorePath & outputPath)
+{
+    std::set<Realisation> inputRealisations;
+
+    for (const auto& [inputDrv, outputNames] : drv.inputDrvs) {
+        auto outputHashes =
+            staticOutputHashes(store, store.readDerivation(inputDrv));
+        for (const auto& outputName : outputNames) {
+            auto thisRealisation = store.queryRealisation(
+                DrvOutput{outputHashes.at(outputName), outputName});
+            if (!thisRealisation)
+                throw Error(
+                    "output '%s' of derivation '%s' isn’t built", outputName,
+                    store.printStorePath(inputDrv));
+            inputRealisations.insert(*thisRealisation);
+        }
+    }
+
+    auto info = store.queryPathInfo(outputPath);
+
+    return drvOutputReferences(Realisation::closure(store, inputRealisations), info->references);
+}
 }
diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc
index 1d8d2d57ec56bcf1e94b4f86aaee54bd2f467adb..9dd81ddfb9599ffe483f3450e358f8a5078ecef1 100644
--- a/src/libstore/nar-info-disk-cache.cc
+++ b/src/libstore/nar-info-disk-cache.cc
@@ -4,6 +4,7 @@
 #include "globals.hh"
 
 #include <sqlite3.h>
+#include <nlohmann/json.hpp>
 
 namespace nix {
 
@@ -38,6 +39,15 @@ create table if not exists NARs (
     foreign key (cache) references BinaryCaches(id) on delete cascade
 );
 
+create table if not exists Realisations (
+    cache integer not null,
+    outputId text not null,
+    content blob, -- JSON serialisation of the realisation, or null if the realisation is absent
+    timestamp        integer not null,
+    primary key (cache, outputId),
+    foreign key (cache) references BinaryCaches(id) on delete cascade
+);
+
 create table if not exists LastPurge (
     dummy            text primary key,
     value            integer
@@ -63,7 +73,9 @@ public:
     struct State
     {
         SQLite db;
-        SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, queryNAR, purgeCache;
+        SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR,
+            queryNAR, insertRealisation, insertMissingRealisation,
+            queryRealisation, purgeCache;
         std::map<std::string, Cache> caches;
     };
 
@@ -98,6 +110,26 @@ public:
         state->queryNAR.create(state->db,
             "select present, namePart, url, compression, fileHash, fileSize, narHash, narSize, refs, deriver, sigs, ca from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))");
 
+        state->insertRealisation.create(state->db,
+            R"(
+                insert or replace into Realisations(cache, outputId, content, timestamp)
+                    values (?, ?, ?, ?)
+            )");
+
+        state->insertMissingRealisation.create(state->db,
+            R"(
+                insert or replace into Realisations(cache, outputId, timestamp)
+                    values (?, ?, ?)
+            )");
+
+        state->queryRealisation.create(state->db,
+            R"(
+                select content from Realisations
+                    where cache = ? and outputId = ?  and
+                        ((content is null and timestamp > ?) or
+                         (content is not null and timestamp > ?))
+            )");
+
         /* Periodically purge expired entries from the database. */
         retrySQLite<void>([&]() {
             auto now = time(0);
@@ -212,6 +244,38 @@ public:
         });
     }
 
+    std::pair<Outcome, std::shared_ptr<Realisation>> lookupRealisation(
+        const std::string & uri, const DrvOutput & id) override
+    {
+        return retrySQLite<std::pair<Outcome, std::shared_ptr<Realisation>>>(
+            [&]() -> std::pair<Outcome, std::shared_ptr<Realisation>> {
+            auto state(_state.lock());
+
+            auto & cache(getCache(*state, uri));
+
+            auto now = time(0);
+
+            auto queryRealisation(state->queryRealisation.use()
+                (cache.id)
+                (id.to_string())
+                (now - settings.ttlNegativeNarInfoCache)
+                (now - settings.ttlPositiveNarInfoCache));
+
+            if (!queryRealisation.next())
+                return {oUnknown, 0};
+
+            if (queryRealisation.isNull(0))
+                return {oInvalid, 0};
+
+            auto realisation =
+                std::make_shared<Realisation>(Realisation::fromJSON(
+                    nlohmann::json::parse(queryRealisation.getStr(0)),
+                    "Local disk cache"));
+
+            return {oValid, realisation};
+        });
+    }
+
     void upsertNarInfo(
         const std::string & uri, const std::string & hashPart,
         std::shared_ptr<const ValidPathInfo> info) override
@@ -251,6 +315,39 @@ public:
             }
         });
     }
+
+    void upsertRealisation(
+        const std::string & uri,
+        const Realisation & realisation) override
+    {
+        retrySQLite<void>([&]() {
+            auto state(_state.lock());
+
+            auto & cache(getCache(*state, uri));
+
+            state->insertRealisation.use()
+                (cache.id)
+                (realisation.id.to_string())
+                (realisation.toJSON().dump())
+                (time(0)).exec();
+        });
+    }
+
+    virtual void upsertAbsentRealisation(
+        const std::string & uri,
+        const DrvOutput & id) override
+    {
+        retrySQLite<void>([&]() {
+            auto state(_state.lock());
+
+            auto & cache(getCache(*state, uri));
+            state->insertMissingRealisation.use()
+                (cache.id)
+                (id.to_string())
+                (time(0)).exec();
+        });
+    }
 };
 
 ref<NarInfoDiskCache> getNarInfoDiskCache()
diff --git a/src/libstore/nar-info-disk-cache.hh b/src/libstore/nar-info-disk-cache.hh
index 04de2c5eb3c7a71bad8ce53d291ac9b76e7abc49..2dcaa76a49036decf3a703443f962d6adc69d693 100644
--- a/src/libstore/nar-info-disk-cache.hh
+++ b/src/libstore/nar-info-disk-cache.hh
@@ -2,6 +2,7 @@
 
 #include "ref.hh"
 #include "nar-info.hh"
+#include "realisation.hh"
 
 namespace nix {
 
@@ -29,6 +30,15 @@ public:
     virtual void upsertNarInfo(
         const std::string & uri, const std::string & hashPart,
         std::shared_ptr<const ValidPathInfo> info) = 0;
+
+    virtual void upsertRealisation(
+        const std::string & uri,
+        const Realisation & realisation) = 0;
+    virtual void upsertAbsentRealisation(
+        const std::string & uri,
+        const DrvOutput & id) = 0;
+    virtual std::pair<Outcome, std::shared_ptr<Realisation>> lookupRealisation(
+        const std::string & uri, const DrvOutput & id) = 0;
 };
 
 /* Return a singleton cache object that can be used concurrently by
diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc
index 78d5871391c23123b6030a5d50bd009a40d3bc7e..d95e54af123ae1bd4e044a336984925826f7cb71 100644
--- a/src/libstore/optimise-store.cc
+++ b/src/libstore/optimise-store.cc
@@ -198,7 +198,7 @@ void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
     /* Make the containing directory writable, but only if it's not
        the store itself (we don't want or need to mess with its
        permissions). */
-    bool mustToggle = dirOf(path) != realStoreDir;
+    bool mustToggle = dirOf(path) != realStoreDir.get();
     if (mustToggle) makeWritable(dirOf(path));
 
     /* When we're done, make the directory read-only again and reset
diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc
index c5c3ae3dc56536814b3143645b8ea1a5a6a06be1..5e383a9a4bd1131702c1ca4ea991ed05349208e3 100644
--- a/src/libstore/parsed-derivations.cc
+++ b/src/libstore/parsed-derivations.cc
@@ -91,6 +91,8 @@ StringSet ParsedDerivation::getRequiredSystemFeatures() const
     StringSet res;
     for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
         res.insert(i);
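+    // Derivations whose output paths aren't statically known (i.e.
+    // content-addressed ones) can only be built by a builder that
+    // advertises the "ca-derivations" system feature.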
+    if (!derivationHasKnownOutputPaths(drv.type()))
+        res.insert("ca-derivations");
     return res;
 }
 
diff --git a/src/libstore/path-with-outputs.cc b/src/libstore/path-with-outputs.cc
new file mode 100644
index 0000000000000000000000000000000000000000..865d64cf28b195896b237b776ab6abcaf7611800
--- /dev/null
+++ b/src/libstore/path-with-outputs.cc
@@ -0,0 +1,71 @@
+#include "path-with-outputs.hh"
+#include "store-api.hh"
+
+namespace nix {
+
+std::string StorePathWithOutputs::to_string(const Store & store) const
+{
+    return outputs.empty()
+        ? store.printStorePath(path)
+        : store.printStorePath(path) + "!" + concatStringsSep(",", outputs);
+}
+
+
+DerivedPath StorePathWithOutputs::toDerivedPath() const
+{
+    if (!outputs.empty() || path.isDerivation())
+        return DerivedPath::Built { path, outputs };
+    else
+        return DerivedPath::Opaque { path };
+}
+
+
+std::vector<DerivedPath> toDerivedPaths(const std::vector<StorePathWithOutputs> ss)
+{
+    std::vector<DerivedPath> reqs;
+    for (auto & s : ss) reqs.push_back(s.toDerivedPath());
+    return reqs;
+}
+
+
+std::variant<StorePathWithOutputs, StorePath> StorePathWithOutputs::tryFromDerivedPath(const DerivedPath & p)
+{
+    return std::visit(overloaded {
+        [&](DerivedPath::Opaque bo) -> std::variant<StorePathWithOutputs, StorePath> {
+            if (bo.path.isDerivation()) {
+                // drv path gets interpreted as "build", not "get drv file itself"
+                return bo.path;
+            }
+            return StorePathWithOutputs { bo.path };
+        },
+        [&](DerivedPath::Built bfd) -> std::variant<StorePathWithOutputs, StorePath> {
+            return StorePathWithOutputs { bfd.drvPath, bfd.outputs };
+        },
+    }, p.raw());
+}
+
+
+std::pair<std::string_view, StringSet> parsePathWithOutputs(std::string_view s)
+{
+    size_t n = s.find("!");
+    return n == s.npos
+        ? std::make_pair(s, std::set<string>())
+        : std::make_pair(((std::string_view) s).substr(0, n),
+            tokenizeString<std::set<string>>(((std::string_view) s).substr(n + 1), ","));
+}
+
+
+StorePathWithOutputs parsePathWithOutputs(const Store & store, std::string_view pathWithOutputs)
+{
+    auto [path, outputs] = parsePathWithOutputs(pathWithOutputs);
+    return StorePathWithOutputs { store.parseStorePath(path), std::move(outputs) };
+}
+
+
+StorePathWithOutputs followLinksToStorePathWithOutputs(const Store & store, std::string_view pathWithOutputs)
+{
+    auto [path, outputs] = parsePathWithOutputs(pathWithOutputs);
+    return StorePathWithOutputs { store.followLinksToStorePath(path), std::move(outputs) };
+}
+
+}
diff --git a/src/libstore/path-with-outputs.hh b/src/libstore/path-with-outputs.hh
new file mode 100644
index 0000000000000000000000000000000000000000..4c4023dcb58de1b9a34ba7376a12bad7bab4622e
--- /dev/null
+++ b/src/libstore/path-with-outputs.hh
@@ -0,0 +1,35 @@
+#pragma once
+
+#include <variant>
+
+#include "path.hh"
+#include "derived-path.hh"
+
+namespace nix {
+
+struct StorePathWithOutputs
+{
+    StorePath path;
+    std::set<std::string> outputs;
+
+    std::string to_string(const Store & store) const;
+
+    DerivedPath toDerivedPath() const;
+
+    static std::variant<StorePathWithOutputs, StorePath> tryFromDerivedPath(const DerivedPath &);
+};
+
+std::vector<DerivedPath> toDerivedPaths(const std::vector<StorePathWithOutputs>);
+
+std::pair<std::string_view, StringSet> parsePathWithOutputs(std::string_view s);
+
+class Store;
+
+/* Split a string specifying a derivation and a set of outputs
+   (/nix/store/hash-foo!out1,out2,...) into the derivation path
+   and the outputs. */
+StorePathWithOutputs parsePathWithOutputs(const Store & store, std::string_view pathWithOutputs);
+
+StorePathWithOutputs followLinksToStorePathWithOutputs(const Store & store, std::string_view pathWithOutputs);
+
+}
diff --git a/src/libstore/path.cc b/src/libstore/path.cc
index dc9dc3897a21208218e7c710bd30a8dc2c99c18d..e642abcd52b90751d8aad0de7a674c14d12c2695 100644
--- a/src/libstore/path.cc
+++ b/src/libstore/path.cc
@@ -82,19 +82,4 @@ PathSet Store::printStorePathSet(const StorePathSet & paths) const
     return res;
 }
 
-std::pair<std::string_view, StringSet> parsePathWithOutputs(std::string_view s)
-{
-    size_t n = s.find("!");
-    return n == s.npos
-        ? std::make_pair(s, std::set<string>())
-        : std::make_pair(((std::string_view) s).substr(0, n),
-            tokenizeString<std::set<string>>(((std::string_view) s).substr(n + 1), ","));
-}
-
-StorePathWithOutputs Store::parsePathWithOutputs(const std::string & s)
-{
-    auto [path, outputs] = nix::parsePathWithOutputs(s);
-    return {parseStorePath(path), std::move(outputs)};
-}
-
 }
diff --git a/src/libstore/path.hh b/src/libstore/path.hh
index b03a0f69d9ec1e2d9f5a022cafe7fa2ef952db1e..06ba0663bfcc2872d4a2d0f87e938cabc250c64d 100644
--- a/src/libstore/path.hh
+++ b/src/libstore/path.hh
@@ -69,16 +69,6 @@ typedef std::map<StorePath, std::optional<ContentAddress>> StorePathCAMap;
 /* Extension of derivations in the Nix store. */
 const std::string drvExtension = ".drv";
 
-struct StorePathWithOutputs
-{
-    StorePath path;
-    std::set<std::string> outputs;
-
-    std::string to_string(const Store & store) const;
-};
-
-std::pair<std::string_view, StringSet> parsePathWithOutputs(std::string_view s);
-
 }
 
 namespace std {
diff --git a/src/libstore/realisation.cc b/src/libstore/realisation.cc
index cd74af4eed5d0623bfb214bbdc69fdfcf4d59efe..eadec594cc67f9e8aff1c17412d68ba418e0bdbb 100644
--- a/src/libstore/realisation.cc
+++ b/src/libstore/realisation.cc
@@ -1,5 +1,6 @@
 #include "realisation.hh"
 #include "store-api.hh"
+#include "closure.hh"
 #include <nlohmann/json.hpp>
 
 namespace nix {
@@ -21,35 +22,134 @@ std::string DrvOutput::to_string() const {
     return strHash() + "!" + outputName;
 }
 
+std::set<Realisation> Realisation::closure(Store & store, const std::set<Realisation> & startOutputs)
+{
+    std::set<Realisation> res;
+    Realisation::closure(store, startOutputs, res);
+    return res;
+}
+
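+/* Compute the closure of `startOutputs` over their `dependentRealisations`,
+   resolving every dependency through `store.queryRealisation` and throwing
+   if one of them isn't registered. */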
+void Realisation::closure(Store & store, const std::set<Realisation> & startOutputs, std::set<Realisation> & res)
+{
+    auto getDeps = [&](const Realisation& current) -> std::set<Realisation> {
+        std::set<Realisation> res;
+        for (auto& [currentDep, _] : current.dependentRealisations) {
+            if (auto currentRealisation = store.queryRealisation(currentDep))
+                res.insert(*currentRealisation);
+            else
+                throw Error(
+                    "Unrealised derivation '%s'", currentDep.to_string());
+        }
+        return res;
+    };
+
+    computeClosure<Realisation>(
+        startOutputs, res,
+        [&](const Realisation& current,
+            std::function<void(std::promise<std::set<Realisation>>&)>
+                processEdges) {
+            std::promise<std::set<Realisation>> promise;
+            try {
+                auto res = getDeps(current);
+                promise.set_value(res);
+            } catch (...) {
+                promise.set_exception(std::current_exception());
+            }
+            return processEdges(promise);
+        });
+}
+
 nlohmann::json Realisation::toJSON() const {
+    auto jsonDependentRealisations = nlohmann::json::object();
+    for (auto & [depId, depOutPath] : dependentRealisations)
+        jsonDependentRealisations.emplace(depId.to_string(), depOutPath.to_string());
     return nlohmann::json{
         {"id", id.to_string()},
         {"outPath", outPath.to_string()},
+        {"signatures", signatures},
+        {"dependentRealisations", jsonDependentRealisations},
     };
 }
 
 Realisation Realisation::fromJSON(
     const nlohmann::json& json,
     const std::string& whence) {
-    auto getField = [&](std::string fieldName) -> std::string {
+    auto getOptionalField = [&](std::string fieldName) -> std::optional<std::string> {
         auto fieldIterator = json.find(fieldName);
         if (fieldIterator == json.end())
+            return std::nullopt;
+        return *fieldIterator;
+    };
+    auto getField = [&](std::string fieldName) -> std::string {
+        if (auto field = getOptionalField(fieldName))
+            return *field;
+        else
             throw Error(
                 "Drv output info file '%1%' is corrupt, missing field %2%",
                 whence, fieldName);
-        return *fieldIterator;
     };
 
+    StringSet signatures;
+    if (auto signaturesIterator = json.find("signatures"); signaturesIterator != json.end())
+        signatures.insert(signaturesIterator->begin(), signaturesIterator->end());
+
+    std::map <DrvOutput, StorePath> dependentRealisations;
+    if (auto jsonDependencies = json.find("dependentRealisations"); jsonDependencies != json.end())
+        for (auto & [jsonDepId, jsonDepOutPath] : jsonDependencies->get<std::map<std::string, std::string>>())
+            dependentRealisations.insert({DrvOutput::parse(jsonDepId), StorePath(jsonDepOutPath)});
+
     return Realisation{
         .id = DrvOutput::parse(getField("id")),
         .outPath = StorePath(getField("outPath")),
+        .signatures = signatures,
+        .dependentRealisations = dependentRealisations,
     };
 }
 
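+/* The fingerprint over which realisations are signed: the JSON
+   serialisation with the `signatures` field stripped, so a signature
+   covers the id, the output path and the dependent realisations. */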
+std::string Realisation::fingerprint() const
+{
+    auto serialized = toJSON();
+    serialized.erase("signatures");
+    return serialized.dump();
+}
+
+void Realisation::sign(const SecretKey & secretKey)
+{
+    signatures.insert(secretKey.signDetached(fingerprint()));
+}
+
+bool Realisation::checkSignature(const PublicKeys & publicKeys, const std::string & sig) const
+{
+    return verifyDetached(fingerprint(), sig, publicKeys);
+}
+
+size_t Realisation::checkSignatures(const PublicKeys & publicKeys) const
+{
+    // FIXME: Maybe we should return `maxSigs` if the realisation corresponds to
+    // an input-addressed one − because in that case the drv is enough to check
+    // it − but we can't know that here.
+
+    size_t good = 0;
+    for (auto & sig : signatures)
+        if (checkSignature(publicKeys, sig))
+            good++;
+    return good;
+}
+
 StorePath RealisedPath::path() const {
     return std::visit([](auto && arg) { return arg.getPath(); }, raw);
 }
 
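+/* Two realisations for the same derivation output are considered
+   compatible iff they resolve to the same output path (in which case
+   their dependency maps are also expected to agree). */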
+bool Realisation::isCompatibleWith(const Realisation & other) const
+{
+    assert (id == other.id);
+    if (outPath == other.outPath) {
+        assert(dependentRealisations == other.dependentRealisations);
+        return true;
+    }
+    return false;
+}
+
 void RealisedPath::closure(
     Store& store,
     const RealisedPath::Set& startPaths,
diff --git a/src/libstore/realisation.hh b/src/libstore/realisation.hh
index fc92d3c171b1054662696eb2c57dc2250e7dfa30..05d2bc44f1d10762a092a939e1f637dea66eada7 100644
--- a/src/libstore/realisation.hh
+++ b/src/libstore/realisation.hh
@@ -3,6 +3,7 @@
 #include "path.hh"
 #include <nlohmann/json_fwd.hpp>
 #include "comparator.hh"
+#include "crypto.hh"
 
 namespace nix {
 
@@ -25,9 +26,29 @@ struct Realisation {
     DrvOutput id;
     StorePath outPath;
 
+    StringSet signatures;
+
+    /**
+     * The realisations that are required for the current one to be valid.
+     *
+     * When importing this realisation, the store will first check that all its
+     * dependencies exist and map to the correct output paths.
+     */
+    std::map<DrvOutput, StorePath> dependentRealisations;
+
     nlohmann::json toJSON() const;
     static Realisation fromJSON(const nlohmann::json& json, const std::string& whence);
 
+    std::string fingerprint() const;
+    void sign(const SecretKey &);
+    bool checkSignature(const PublicKeys & publicKeys, const std::string & sig) const;
+    size_t checkSignatures(const PublicKeys & publicKeys) const;
+
+    static std::set<Realisation> closure(Store &, const std::set<Realisation> &);
+    static void closure(Store &, const std::set<Realisation> &, std::set<Realisation>& res);
+
+    bool isCompatibleWith(const Realisation & other) const;
+
     StorePath getPath() const { return outPath; }
 
     GENERATE_CMP(Realisation, me->id, me->outPath);
diff --git a/src/libstore/remote-store.cc b/src/libstore/remote-store.cc
index 0d884389ad1c73513843b8959b59f6e437b15d0c..aec24363793c1bf5bf4b0a0bb6d98a3044000c7e 100644
--- a/src/libstore/remote-store.cc
+++ b/src/libstore/remote-store.cc
@@ -1,5 +1,6 @@
 #include "serialise.hh"
 #include "util.hh"
+#include "path-with-outputs.hh"
 #include "remote-fs-accessor.hh"
 #include "remote-store.hh"
 #include "worker-protocol.hh"
@@ -50,6 +51,19 @@ void write(const Store & store, Sink & out, const ContentAddress & ca)
     out << renderContentAddress(ca);
 }
 
+
+DerivedPath read(const Store & store, Source & from, Phantom<DerivedPath> _)
+{
+    auto s = readString(from);
+    return DerivedPath::parse(store, s);
+}
+
+void write(const Store & store, Sink & out, const DerivedPath & req)
+{
+    out << req.to_string(store);
+}
+
+
 Realisation read(const Store & store, Source & from, Phantom<Realisation> _)
 {
     std::string rawInput = readString(from);
@@ -58,13 +72,23 @@ Realisation read(const Store & store, Source & from, Phantom<Realisation> _)
         "remote-protocol"
     );
 }
+
 void write(const Store & store, Sink & out, const Realisation & realisation)
-{ out << realisation.toJSON().dump(); }
+{
+    out << realisation.toJSON().dump();
+}
+
 
 DrvOutput read(const Store & store, Source & from, Phantom<DrvOutput> _)
-{ return DrvOutput::parse(readString(from)); }
+{
+    return DrvOutput::parse(readString(from));
+}
+
 void write(const Store & store, Sink & out, const DrvOutput & drvOutput)
-{ out << drvOutput.to_string(); }
+{
+    out << drvOutput.to_string();
+}
+
 
 std::optional<StorePath> read(const Store & store, Source & from, Phantom<std::optional<StorePath>> _)
 {
@@ -629,8 +653,12 @@ void RemoteStore::registerDrvOutput(const Realisation & info)
 {
     auto conn(getConnection());
     conn->to << wopRegisterDrvOutput;
-    conn->to << info.id.to_string();
-    conn->to << std::string(info.outPath.to_string());
+    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
+        conn->to << info.id.to_string();
+        conn->to << std::string(info.outPath.to_string());
+    } else {
+        worker_proto::write(*this, conn->to, info);
+    }
     conn.processStderr();
 }
 
@@ -640,22 +668,49 @@ std::optional<const Realisation> RemoteStore::queryRealisation(const DrvOutput &
     conn->to << wopQueryRealisation;
     conn->to << id.to_string();
     conn.processStderr();
-    auto outPaths = worker_proto::read(*this, conn->from, Phantom<std::set<StorePath>>{});
-    if (outPaths.empty())
-        return std::nullopt;
-    return {Realisation{.id = id, .outPath = *outPaths.begin()}};
+    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 31) {
+        auto outPaths = worker_proto::read(*this, conn->from, Phantom<std::set<StorePath>>{});
+        if (outPaths.empty())
+            return std::nullopt;
+        return {Realisation{.id = id, .outPath = *outPaths.begin()}};
+    } else {
+        auto realisations = worker_proto::read(*this, conn->from, Phantom<std::set<Realisation>>{});
+        if (realisations.empty())
+            return std::nullopt;
+        return *realisations.begin();
+    }
 }
 
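+/* Serialise a list of build requests. Daemons speaking protocol >= 1.30
+   understand the DerivedPath encoding directly; for older daemons we fall
+   back to the legacy StorePathWithOutputs strings, which cannot express a
+   request for a plain .drv file. */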
+static void writeDerivedPaths(RemoteStore & store, ConnectionHandle & conn, const std::vector<DerivedPath> & reqs)
+{
+    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 30) {
+        worker_proto::write(store, conn->to, reqs);
+    } else {
+        Strings ss;
+        for (auto & p : reqs) {
+            auto sOrDrvPath = StorePathWithOutputs::tryFromDerivedPath(p);
+            std::visit(overloaded {
+                [&](StorePathWithOutputs s) {
+                    ss.push_back(s.to_string(store));
+                },
+                [&](StorePath drvPath) {
+                throw Error("trying to request '%s', but daemon protocol %d.%d is too old (< 1.30) to request a derivation file",
+                        store.printStorePath(drvPath),
+                        GET_PROTOCOL_MAJOR(conn->daemonVersion),
+                        GET_PROTOCOL_MINOR(conn->daemonVersion));
+                },
+            }, sOrDrvPath);
+        }
+        conn->to << ss;
+    }
+}
 
-void RemoteStore::buildPaths(const std::vector<StorePathWithOutputs> & drvPaths, BuildMode buildMode)
+void RemoteStore::buildPaths(const std::vector<DerivedPath> & drvPaths, BuildMode buildMode)
 {
     auto conn(getConnection());
     conn->to << wopBuildPaths;
     assert(GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13);
-    Strings ss;
-    for (auto & p : drvPaths)
-        ss.push_back(p.to_string(*this));
-    conn->to << ss;
+    writeDerivedPaths(*this, conn, drvPaths);
     if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15)
         conn->to << buildMode;
     else
@@ -677,10 +732,12 @@ BuildResult RemoteStore::buildDerivation(const StorePath & drvPath, const BasicD
     conn->to << buildMode;
     conn.processStderr();
     BuildResult res;
-    unsigned int status;
-    conn->from >> status >> res.errorMsg;
-    res.status = (BuildResult::Status) status;
-    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 0xc) {
+    res.status = (BuildResult::Status) readInt(conn->from);
+    conn->from >> res.errorMsg;
+    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 29) {
+        conn->from >> res.timesBuilt >> res.isNonDeterministic >> res.startTime >> res.stopTime;
+    }
+    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 28) {
         auto builtOutputs = worker_proto::read(*this, conn->from, Phantom<DrvOutputs> {});
         res.builtOutputs = builtOutputs;
     }
@@ -792,7 +849,7 @@ void RemoteStore::addSignatures(const StorePath & storePath, const StringSet & s
 }
 
 
-void RemoteStore::queryMissing(const std::vector<StorePathWithOutputs> & targets,
+void RemoteStore::queryMissing(const std::vector<DerivedPath> & targets,
     StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown,
     uint64_t & downloadSize, uint64_t & narSize)
 {
@@ -803,10 +860,7 @@ void RemoteStore::queryMissing(const std::vector<StorePathWithOutputs> & targets
             // to prevent a deadlock.
             goto fallback;
         conn->to << wopQueryMissing;
-        Strings ss;
-        for (auto & p : targets)
-            ss.push_back(p.to_string(*this));
-        conn->to << ss;
+        writeDerivedPaths(*this, conn, targets);
         conn.processStderr();
         willBuild = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
         willSubstitute = worker_proto::read(*this, conn->from, Phantom<StorePathSet> {});
diff --git a/src/libstore/remote-store.hh b/src/libstore/remote-store.hh
index b3a9910a36b62604c68424bbf6b731aae76b61e3..6cf76a46d3022717920c246873f53bd97304ca06 100644
--- a/src/libstore/remote-store.hh
+++ b/src/libstore/remote-store.hh
@@ -85,7 +85,7 @@ public:
 
     std::optional<const Realisation> queryRealisation(const DrvOutput &) override;
 
-    void buildPaths(const std::vector<StorePathWithOutputs> & paths, BuildMode buildMode) override;
+    void buildPaths(const std::vector<DerivedPath> & paths, BuildMode buildMode) override;
 
     BuildResult buildDerivation(const StorePath & drvPath, const BasicDerivation & drv,
         BuildMode buildMode) override;
@@ -108,7 +108,7 @@ public:
 
     void addSignatures(const StorePath & storePath, const StringSet & sigs) override;
 
-    void queryMissing(const std::vector<StorePathWithOutputs> & targets,
+    void queryMissing(const std::vector<DerivedPath> & targets,
         StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown,
         uint64_t & downloadSize, uint64_t & narSize) override;
 
diff --git a/src/libstore/sandbox-defaults.sb b/src/libstore/sandbox-defaults.sb
index 351037822fd1f131a9140477862a7bdc390def71..2bb1ea1301d90c4e7fd2b542024f585504d83ddc 100644
--- a/src/libstore/sandbox-defaults.sb
+++ b/src/libstore/sandbox-defaults.sb
@@ -32,7 +32,9 @@
        (literal "/tmp") (subpath TMPDIR))
 
 ; Some packages like to read the system version.
-(allow file-read* (literal "/System/Library/CoreServices/SystemVersion.plist"))
+(allow file-read*
+       (literal "/System/Library/CoreServices/SystemVersion.plist")
+       (literal "/System/Library/CoreServices/SystemVersionCompat.plist"))
 
 ; Without this line clang cannot write to /dev/null, breaking some configure tests.
 (allow file-read-metadata (literal "/dev"))
diff --git a/src/libstore/serve-protocol.hh b/src/libstore/serve-protocol.hh
index 0a17387cb1d686a3b059b7d084af687409849fe5..02d0810cc68c268e24db8406985fceffa3fdfff9 100644
--- a/src/libstore/serve-protocol.hh
+++ b/src/libstore/serve-protocol.hh
@@ -5,7 +5,7 @@ namespace nix {
 #define SERVE_MAGIC_1 0x390c9deb
 #define SERVE_MAGIC_2 0x5452eecb
 
-#define SERVE_PROTOCOL_VERSION 0x206
+#define SERVE_PROTOCOL_VERSION (2 << 8 | 6)
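+// Major version in the high byte, minor in the low byte (i.e. 2.6).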
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
 
diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc
index 235eed37a59dd17b113c241165b4053c8729eabc..93f72675d1091ef7beb425eeffcd6718f999ea73 100644
--- a/src/libstore/ssh.cc
+++ b/src/libstore/ssh.cc
@@ -50,7 +50,7 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
     options.dieWithParent = false;
 
     conn->sshPid = startProcess([&]() {
-        restoreSignals();
+        restoreProcessContext();
 
         close(in.writeSide.get());
         close(out.readSide.get());
@@ -110,7 +110,7 @@ Path SSHMaster::startMaster()
     options.dieWithParent = false;
 
     state->sshMaster = startProcess([&]() {
-        restoreSignals();
+        restoreProcessContext();
 
         close(out.readSide.get());
 
diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc
index 77c310988feab12f93f2b19745cc90b6bf1cd4e0..6736adb24e0b931328b011226b1782c09a6fc5ad 100644
--- a/src/libstore/store-api.cc
+++ b/src/libstore/store-api.cc
@@ -53,13 +53,6 @@ StorePath Store::followLinksToStorePath(std::string_view path) const
 }
 
 
-StorePathWithOutputs Store::followLinksToStorePathWithOutputs(std::string_view path) const
-{
-    auto [path2, outputs] = nix::parsePathWithOutputs(path);
-    return StorePathWithOutputs { followLinksToStorePath(path2), std::move(outputs) };
-}
-
-
 /* Store paths have the following form:
 
    <realized-path> = <store>/<h>-<name>
@@ -344,6 +337,13 @@ ValidPathInfo Store::addToStoreSlow(std::string_view name, const Path & srcPath,
     return info;
 }
 
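+/* Stores advertise the "ca-derivations" system feature by default whenever
+   the corresponding experimental feature is enabled, so content-addressed
+   derivations can be dispatched to them without extra configuration. */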
+StringSet StoreConfig::getDefaultSystemFeatures()
+{
+    auto res = settings.systemFeatures.get();
+    if (settings.isExperimentalFeatureEnabled("ca-derivations"))
+        res.insert("ca-derivations");
+    return res;
+}
 
 Store::Store(const Params & params)
     : StoreConfig(params)
@@ -536,10 +536,10 @@ void Store::queryPathInfo(const StorePath & storePath,
 
 void Store::substitutePaths(const StorePathSet & paths)
 {
-    std::vector<StorePathWithOutputs> paths2;
+    std::vector<DerivedPath> paths2;
     for (auto & path : paths)
         if (!path.isDerivation())
-            paths2.push_back({path});
+            paths2.push_back(DerivedPath::Opaque{path});
     uint64_t downloadSize, narSize;
     StorePathSet willBuild, willSubstitute, unknown;
     queryMissing(paths2,
@@ -547,8 +547,8 @@ void Store::substitutePaths(const StorePathSet & paths)
 
     if (!willSubstitute.empty())
         try {
-            std::vector<StorePathWithOutputs> subs;
-            for (auto & p : willSubstitute) subs.push_back({p});
+            std::vector<DerivedPath> subs;
+            for (auto & p : willSubstitute) subs.push_back(DerivedPath::Opaque{p});
             buildPaths(subs);
         } catch (Error & e) {
             logWarning(e.info());
@@ -787,20 +787,39 @@ std::map<StorePath, StorePath> copyPaths(ref<Store> srcStore, ref<Store> dstStor
     RepairFlag repair, CheckSigsFlag checkSigs, SubstituteFlag substitute)
 {
     StorePathSet storePaths;
-    std::set<Realisation> realisations;
+    std::set<Realisation> toplevelRealisations;
     for (auto & path : paths) {
         storePaths.insert(path.path());
         if (auto realisation = std::get_if<Realisation>(&path.raw)) {
             settings.requireExperimentalFeature("ca-derivations");
-            realisations.insert(*realisation);
+            toplevelRealisations.insert(*realisation);
         }
     }
     auto pathsMap = copyPaths(srcStore, dstStore, storePaths, repair, checkSigs, substitute);
+
+    ThreadPool pool;
+
     try {
-        for (auto & realisation : realisations) {
-            dstStore->registerDrvOutput(realisation);
-        }
-    } catch (MissingExperimentalFeature & e) {
+        // Copy the realisation closure
+        processGraph<Realisation>(
+            pool, Realisation::closure(*srcStore, toplevelRealisations),
+            [&](const Realisation& current) -> std::set<Realisation> {
+                std::set<Realisation> children;
+                for (const auto& [drvOutput, _] : current.dependentRealisations) {
+                    auto currentChild = srcStore->queryRealisation(drvOutput);
+                    if (!currentChild)
+                        throw Error(
+                            "Incomplete realisation closure: '%s' is a "
+                            "dependency of '%s' but isn’t registered",
+                            drvOutput.to_string(), current.id.to_string());
+                    children.insert(*currentChild);
+                }
+                return children;
+            },
+            [&](const Realisation& current) -> void {
+                dstStore->registerDrvOutput(current, checkSigs);
+            });
+    } catch (MissingExperimentalFeature& e) {
        // Don't fail if the remote doesn't support CA derivations, as it might
         // not be within our control to change that, and we might still want
         // to at least copy the output paths.
diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh
index 71a28eeb8a37965422b9c869ce4668fc2d18f4a3..9657d2adf61e31c4c8356ab9765b8631aff08796 100644
--- a/src/libstore/store-api.hh
+++ b/src/libstore/store-api.hh
@@ -2,6 +2,7 @@
 
 #include "realisation.hh"
 #include "path.hh"
+#include "derived-path.hh"
 #include "hash.hh"
 #include "content-address.hh"
 #include "serialise.hh"
@@ -179,6 +180,8 @@ struct StoreConfig : public Config
 
     StoreConfig() = delete;
 
+    StringSet getDefaultSystemFeatures();
+
     virtual ~StoreConfig() { }
 
     virtual const std::string name() = 0;
@@ -195,7 +198,7 @@ struct StoreConfig : public Config
 
     Setting<bool> wantMassQuery{this, false, "want-mass-query", "whether this substituter can be queried efficiently for path validity"};
 
-    Setting<StringSet> systemFeatures{this, settings.systemFeatures,
+    Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(),
         "system-features",
         "Optional features that the system this store builds on implements (like \"kvm\")."};
 
@@ -261,11 +264,6 @@ public:
 
     PathSet printStorePathSet(const StorePathSet & path) const;
 
-    /* Split a string specifying a derivation and a set of outputs
-       (/nix/store/hash-foo!out1,out2,...) into the derivation path
-       and the outputs. */
-    StorePathWithOutputs parsePathWithOutputs(const string & s);
-
     /* Display a set of paths in human-readable form (i.e., between quotes
        and separated by commas). */
     std::string showPaths(const StorePathSet & paths);
@@ -289,8 +287,6 @@ public:
        result. */
     StorePath followLinksToStorePath(std::string_view path) const;
 
-    StorePathWithOutputs followLinksToStorePathWithOutputs(std::string_view path) const;
-
     /* Constructs a unique store path name. */
     StorePath makeStorePath(std::string_view type,
         std::string_view hash, std::string_view name) const;
@@ -384,7 +380,12 @@ public:
        we don't really want to add the dependencies listed in a nar info we
       don't trust anyway.
        */
-    virtual bool pathInfoIsTrusted(const ValidPathInfo &)
+    virtual bool pathInfoIsUntrusted(const ValidPathInfo &)
+    {
+        return true;
+    }
+
+    virtual bool realisationIsUntrusted(const Realisation & )
     {
         return true;
     }
@@ -480,6 +481,8 @@ public:
      */
     virtual void registerDrvOutput(const Realisation & output)
     { unsupported("registerDrvOutput"); }
+    virtual void registerDrvOutput(const Realisation & output, CheckSigsFlag checkSigs)
+    { return registerDrvOutput(output); }
 
     /* Write a NAR dump of a store path. */
     virtual void narFromPath(const StorePath & path, Sink & sink) = 0;
@@ -493,7 +496,7 @@ public:
        recursively building any sub-derivations. For inputs that are
        not derivations, substitute them. */
     virtual void buildPaths(
-        const std::vector<StorePathWithOutputs> & paths,
+        const std::vector<DerivedPath> & paths,
         BuildMode buildMode = bmNormal);
 
     /* Build a single non-materialized derivation (i.e. not from an
@@ -655,7 +658,7 @@ public:
     /* Given a set of paths that are to be built, return the set of
        derivations that will be built, and the set of output paths
        that will be substituted. */
-    virtual void queryMissing(const std::vector<StorePathWithOutputs> & targets,
+    virtual void queryMissing(const std::vector<DerivedPath> & targets,
         StorePathSet & willBuild, StorePathSet & willSubstitute, StorePathSet & unknown,
         uint64_t & downloadSize, uint64_t & narSize);
 
@@ -863,4 +866,9 @@ std::pair<std::string, Store::Params> splitUriAndParams(const std::string & uri)
 
 std::optional<ContentAddress> getDerivationCA(const BasicDerivation & drv);
 
+std::map<DrvOutput, StorePath> drvOutputReferences(
+    Store & store,
+    const Derivation & drv,
+    const StorePath & outputPath);
+
 }
diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh
index 95f08bc9a40cf7a3f8d3f938e37aee079bc7e5c6..e89183d407f7ac48bd4d912e90926658e1c2cbe7 100644
--- a/src/libstore/worker-protocol.hh
+++ b/src/libstore/worker-protocol.hh
@@ -9,7 +9,7 @@ namespace nix {
 #define WORKER_MAGIC_1 0x6e697863
 #define WORKER_MAGIC_2 0x6478696f
 
-#define PROTOCOL_VERSION 0x11c
+#define PROTOCOL_VERSION (1 << 8 | 31)
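+// Major version in the high byte, minor in the low byte (i.e. 1.31).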
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
 
@@ -86,9 +86,11 @@ namespace worker_proto {
 MAKE_WORKER_PROTO(, std::string);
 MAKE_WORKER_PROTO(, StorePath);
 MAKE_WORKER_PROTO(, ContentAddress);
+MAKE_WORKER_PROTO(, DerivedPath);
 MAKE_WORKER_PROTO(, Realisation);
 MAKE_WORKER_PROTO(, DrvOutput);
 
+MAKE_WORKER_PROTO(template<typename T>, std::vector<T>);
 MAKE_WORKER_PROTO(template<typename T>, std::set<T>);
 
 #define X_ template<typename K, typename V>
@@ -113,6 +115,26 @@ MAKE_WORKER_PROTO(X_, Y_);
 MAKE_WORKER_PROTO(, std::optional<StorePath>);
 MAKE_WORKER_PROTO(, std::optional<ContentAddress>);
 
+template<typename T>
+std::vector<T> read(const Store & store, Source & from, Phantom<std::vector<T>> _)
+{
+    std::vector<T> resSet;
+    auto size = readNum<size_t>(from);
+    while (size--) {
+        resSet.push_back(read(store, from, Phantom<T> {}));
+    }
+    return resSet;
+}
+
+template<typename T>
+void write(const Store & store, Sink & out, const std::vector<T> & resSet)
+{
+    out << resSet.size();
+    for (auto & key : resSet) {
+        write(store, out, key);
+    }
+}
+
 template<typename T>
 std::set<T> read(const Store & store, Source & from, Phantom<std::set<T>> _)
 {
diff --git a/src/libutil/closure.hh b/src/libutil/closure.hh
new file mode 100644
index 0000000000000000000000000000000000000000..779b9b2d54a3e20ce2836d84ebbafeb056f6fe85
--- /dev/null
+++ b/src/libutil/closure.hh
@@ -0,0 +1,69 @@
+#include <set>
+#include <future>
+#include "sync.hh"
+
+using std::set;
+
+namespace nix {
+
+template<typename T>
+using GetEdgesAsync = std::function<void(const T &, std::function<void(std::promise<set<T>> &)>)>;
+
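+/* Compute the transitive closure of `startElts` under the edge relation
+   described by `getEdgesAsync` and accumulate it in `res`. For every
+   element, `getEdgesAsync` must eventually invoke the continuation it is
+   given with a promise holding that element's direct dependencies (or an
+   exception); each dependency is then enqueued in turn. The call returns
+   once all pending work is done, rethrowing the first exception seen. */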
+template<typename T>
+void computeClosure(
+    const set<T> startElts,
+    set<T> & res,
+    GetEdgesAsync<T> getEdgesAsync
+)
+{
+    struct State
+    {
+        size_t pending;
+        set<T> & res;
+        std::exception_ptr exc;
+    };
+
+    Sync<State> state_(State{0, res, 0});
+
+    std::function<void(const T &)> enqueue;
+
+    std::condition_variable done;
+
+    enqueue = [&](const T & current) -> void {
+        {
+            auto state(state_.lock());
+            if (state->exc) return;
+            if (!state->res.insert(current).second) return;
+            state->pending++;
+        }
+
+        getEdgesAsync(current, [&](std::promise<set<T>> & prom) {
+            try {
+                auto children = prom.get_future().get();
+                for (auto & child : children)
+                    enqueue(child);
+                {
+                    auto state(state_.lock());
+                    assert(state->pending);
+                    if (!--state->pending) done.notify_one();
+                }
+            } catch (...) {
+                auto state(state_.lock());
+                if (!state->exc) state->exc = std::current_exception();
+                assert(state->pending);
+                if (!--state->pending) done.notify_one();
+            };
+        });
+    };
+
+    for (auto & startElt : startElts)
+        enqueue(startElt);
+
+    {
+        auto state(state_.lock());
+        while (state->pending) state.wait(done);
+        if (state->exc) std::rethrow_exception(state->exc);
+    }
+}
+
+}
diff --git a/src/libutil/comparator.hh b/src/libutil/comparator.hh
index 0315dc506948eda5413fef96cb139e17950a70df..eecd5b819c6317f16b9ffcd8eb4a3ba43a93fbb5 100644
--- a/src/libutil/comparator.hh
+++ b/src/libutil/comparator.hh
@@ -25,6 +25,8 @@
     }
 #define GENERATE_EQUAL(args...) GENERATE_ONE_CMP(==, args)
 #define GENERATE_LEQ(args...) GENERATE_ONE_CMP(<, args)
+#define GENERATE_NEQ(args...) GENERATE_ONE_CMP(!=, args)
 #define GENERATE_CMP(args...) \
     GENERATE_EQUAL(args) \
-    GENERATE_LEQ(args)
+    GENERATE_LEQ(args) \
+    GENERATE_NEQ(args)
diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc
index 986ba297615259387374da44bd42d9c7d4378fcd..7e725cae10c20442c1a468f08a042d43c5e2d072 100644
--- a/src/libutil/compression.cc
+++ b/src/libutil/compression.cc
@@ -1,10 +1,11 @@
 #include "compression.hh"
+#include "tarfile.hh"
 #include "util.hh"
 #include "finally.hh"
 #include "logging.hh"
 
-#include <lzma.h>
-#include <bzlib.h>
+#include <archive.h>
+#include <archive_entry.h>
 #include <cstdio>
 #include <cstring>
 
@@ -27,7 +28,7 @@ struct ChunkedCompressionSink : CompressionSink
         const size_t CHUNK_SIZE = sizeof(outbuf) << 2;
         while (!data.empty()) {
             size_t n = std::min(CHUNK_SIZE, data.size());
-            writeInternal(data);
+            writeInternal(data.substr(0, n));
             data.remove_prefix(n);
         }
     }
@@ -35,177 +36,101 @@ struct ChunkedCompressionSink : CompressionSink
     virtual void writeInternal(std::string_view data) = 0;
 };
 
-struct NoneSink : CompressionSink
+struct ArchiveDecompressionSource : Source
 {
-    Sink & nextSink;
-    NoneSink(Sink & nextSink) : nextSink(nextSink) { }
-    void finish() override { flush(); }
-    void write(std::string_view data) override { nextSink(data); }
-};
-
-struct GzipDecompressionSink : CompressionSink
-{
-    Sink & nextSink;
-    z_stream strm;
-    bool finished = false;
-    uint8_t outbuf[BUFSIZ];
-
-    GzipDecompressionSink(Sink & nextSink) : nextSink(nextSink)
-    {
-        strm.zalloc = Z_NULL;
-        strm.zfree = Z_NULL;
-        strm.opaque = Z_NULL;
-        strm.avail_in = 0;
-        strm.next_in = Z_NULL;
-        strm.next_out = outbuf;
-        strm.avail_out = sizeof(outbuf);
-
-        // Enable gzip and zlib decoding (+32) with 15 windowBits
-        int ret = inflateInit2(&strm,15+32);
-        if (ret != Z_OK)
-            throw CompressionError("unable to initialise gzip encoder");
-    }
-
-    ~GzipDecompressionSink()
-    {
-        inflateEnd(&strm);
-    }
-
-    void finish() override
-    {
-        CompressionSink::flush();
-        write({});
-    }
-
-    void write(std::string_view data) override
-    {
-        assert(data.size() <= std::numeric_limits<decltype(strm.avail_in)>::max());
-
-        strm.next_in = (Bytef *) data.data();
-        strm.avail_in = data.size();
-
-        while (!finished && (!data.data() || strm.avail_in)) {
-            checkInterrupt();
-
-            int ret = inflate(&strm,Z_SYNC_FLUSH);
-            if (ret != Z_OK && ret != Z_STREAM_END)
-                throw CompressionError("error while decompressing gzip file: %d (%d, %d)",
-                    zError(ret), data.size(), strm.avail_in);
-
-            finished = ret == Z_STREAM_END;
-
-            if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) {
-                nextSink({(char *) outbuf, sizeof(outbuf) - strm.avail_out});
-                strm.next_out = (Bytef *) outbuf;
-                strm.avail_out = sizeof(outbuf);
+    std::unique_ptr<TarArchive> archive = 0;
+    Source & src;
+    ArchiveDecompressionSource(Source & src) : src(src) {}
+    ~ArchiveDecompressionSource() override {}
+    size_t read(char * data, size_t len) override {
+        struct archive_entry * ae;
+        if (!archive) {
+            archive = std::make_unique<TarArchive>(src, true);
+            this->archive->check(archive_read_next_header(this->archive->archive, &ae),
+                "failed to read header (%s)");
+            if (archive_filter_count(this->archive->archive) < 2) {
+                throw CompressionError("input compression not recognized");
             }
         }
+        ssize_t result = archive_read_data(this->archive->archive, data, len);
+        if (result > 0) return result;
+        if (result == 0) {
+            throw EndOfFile("reached end of compressed file");
+        }
+        this->archive->check(result, "failed to read compressed data (%s)");
+        return result;
     }
 };
 
-struct XzDecompressionSink : CompressionSink
+struct ArchiveCompressionSink : CompressionSink
 {
     Sink & nextSink;
-    uint8_t outbuf[BUFSIZ];
-    lzma_stream strm = LZMA_STREAM_INIT;
-    bool finished = false;
-
-    XzDecompressionSink(Sink & nextSink) : nextSink(nextSink)
-    {
-        lzma_ret ret = lzma_stream_decoder(
-            &strm, UINT64_MAX, LZMA_CONCATENATED);
-        if (ret != LZMA_OK)
-            throw CompressionError("unable to initialise lzma decoder");
-
-        strm.next_out = outbuf;
-        strm.avail_out = sizeof(outbuf);
+    struct archive * archive;
+
+    ArchiveCompressionSink(Sink & nextSink, std::string format, bool parallel) : nextSink(nextSink) {
+        archive = archive_write_new();
+        if (!archive) throw Error("failed to initialize libarchive");
+        check(archive_write_add_filter_by_name(archive, format.c_str()), "couldn't initialize compression (%s)");
+        check(archive_write_set_format_raw(archive));
+        if (format == "xz" && parallel) {
+            check(archive_write_set_filter_option(archive, format.c_str(), "threads", "0"));
+        }
+        // disable internal buffering
+        check(archive_write_set_bytes_per_block(archive, 0));
+        // disable output padding
+        check(archive_write_set_bytes_in_last_block(archive, 1));
+        open();
     }
 
-    ~XzDecompressionSink()
+    ~ArchiveCompressionSink() override
     {
-        lzma_end(&strm);
+        if (archive) archive_write_free(archive);
     }
 
     void finish() override
     {
-        CompressionSink::flush();
-        write({});
+        flush();
+        check(archive_write_close(archive));
     }
 
-    void write(std::string_view data) override
+    void check(int err, const std::string & reason = "failed to compress (%s)")
     {
-        strm.next_in = (const unsigned char *) data.data();
-        strm.avail_in = data.size();
-
-        while (!finished && (!data.data() || strm.avail_in)) {
-            checkInterrupt();
-
-            lzma_ret ret = lzma_code(&strm, data.data() ? LZMA_RUN : LZMA_FINISH);
-            if (ret != LZMA_OK && ret != LZMA_STREAM_END)
-                throw CompressionError("error %d while decompressing xz file", ret);
-
-            finished = ret == LZMA_STREAM_END;
-
-            if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) {
-                nextSink({(char *) outbuf, sizeof(outbuf) - strm.avail_out});
-                strm.next_out = outbuf;
-                strm.avail_out = sizeof(outbuf);
-            }
-        }
+        if (err == ARCHIVE_EOF)
+            throw EndOfFile("reached end of archive");
+        else if (err != ARCHIVE_OK)
+            throw Error(reason, archive_error_string(this->archive));
     }
-};
-
-struct BzipDecompressionSink : ChunkedCompressionSink
-{
-    Sink & nextSink;
-    bz_stream strm;
-    bool finished = false;
 
-    BzipDecompressionSink(Sink & nextSink) : nextSink(nextSink)
+    void write(std::string_view data) override
     {
-        memset(&strm, 0, sizeof(strm));
-        int ret = BZ2_bzDecompressInit(&strm, 0, 0);
-        if (ret != BZ_OK)
-            throw CompressionError("unable to initialise bzip2 decoder");
-
-        strm.next_out = (char *) outbuf;
-        strm.avail_out = sizeof(outbuf);
+        ssize_t result = archive_write_data(archive, data.data(), data.length());
+        if (result <= 0) check(result);
     }
 
-    ~BzipDecompressionSink()
+private:
+    void open()
     {
-        BZ2_bzDecompressEnd(&strm);
+        check(archive_write_open(archive, this, nullptr, ArchiveCompressionSink::callback_write, nullptr));
+        auto ae = archive_entry_new();
+        archive_entry_set_filetype(ae, AE_IFREG);
+        check(archive_write_header(archive, ae));
+        archive_entry_free(ae);
     }
 
-    void finish() override
+    static ssize_t callback_write(struct archive * archive, void * _self, const void * buffer, size_t length)
     {
-        flush();
-        write({});
+        auto self = (ArchiveCompressionSink *) _self;
+        self->nextSink({(const char *) buffer, length});
+        return length;
     }
+};
 
-    void writeInternal(std::string_view data) override
-    {
-        assert(data.size() <= std::numeric_limits<decltype(strm.avail_in)>::max());
-
-        strm.next_in = (char *) data.data();
-        strm.avail_in = data.size();
-
-        while (strm.avail_in) {
-            checkInterrupt();
-
-            int ret = BZ2_bzDecompress(&strm);
-            if (ret != BZ_OK && ret != BZ_STREAM_END)
-                throw CompressionError("error while decompressing bzip2 file");
-
-            finished = ret == BZ_STREAM_END;
-
-            if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) {
-                nextSink({(char *) outbuf, sizeof(outbuf) - strm.avail_out});
-                strm.next_out = (char *) outbuf;
-                strm.avail_out = sizeof(outbuf);
-            }
-        }
-    }
+struct NoneSink : CompressionSink
+{
+    Sink & nextSink;
+    NoneSink(Sink & nextSink) : nextSink(nextSink) { }
+    void finish() override { flush(); }
+    void write(std::string_view data) override { nextSink(data); }
 };
 
 struct BrotliDecompressionSink : ChunkedCompressionSink
@@ -268,159 +193,24 @@ ref<std::string> decompress(const std::string & method, const std::string & in)
     return ssink.s;
 }
 
-ref<CompressionSink> makeDecompressionSink(const std::string & method, Sink & nextSink)
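+/* Brotli and "none" are handled by dedicated sinks; every other method is
+   fed through libarchive, which auto-detects the compression filter. */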
+std::unique_ptr<FinishSink> makeDecompressionSink(const std::string & method, Sink & nextSink)
 {
     if (method == "none" || method == "")
-        return make_ref<NoneSink>(nextSink);
-    else if (method == "xz")
-        return make_ref<XzDecompressionSink>(nextSink);
-    else if (method == "bzip2")
-        return make_ref<BzipDecompressionSink>(nextSink);
-    else if (method == "gzip")
-        return make_ref<GzipDecompressionSink>(nextSink);
+        return std::make_unique<NoneSink>(nextSink);
     else if (method == "br")
-        return make_ref<BrotliDecompressionSink>(nextSink);
+        return std::make_unique<BrotliDecompressionSink>(nextSink);
     else
-        throw UnknownCompressionMethod("unknown compression method '%s'", method);
+        return sourceToSink([&](Source & source) {
+            auto decompressionSource = std::make_unique<ArchiveDecompressionSource>(source);
+            decompressionSource->drainInto(nextSink);
+        });
 }
 
-struct XzCompressionSink : CompressionSink
-{
-    Sink & nextSink;
-    uint8_t outbuf[BUFSIZ];
-    lzma_stream strm = LZMA_STREAM_INIT;
-    bool finished = false;
-
-    XzCompressionSink(Sink & nextSink, bool parallel) : nextSink(nextSink)
-    {
-        lzma_ret ret;
-        bool done = false;
-
-        if (parallel) {
-#ifdef HAVE_LZMA_MT
-            lzma_mt mt_options = {};
-            mt_options.flags = 0;
-            mt_options.timeout = 300; // Using the same setting as the xz cmd line
-            mt_options.preset = LZMA_PRESET_DEFAULT;
-            mt_options.filters = NULL;
-            mt_options.check = LZMA_CHECK_CRC64;
-            mt_options.threads = lzma_cputhreads();
-            mt_options.block_size = 0;
-            if (mt_options.threads == 0)
-                mt_options.threads = 1;
-            // FIXME: maybe use lzma_stream_encoder_mt_memusage() to control the
-            // number of threads.
-            ret = lzma_stream_encoder_mt(&strm, &mt_options);
-            done = true;
-#else
-            printMsg(lvlError, "warning: parallel XZ compression requested but not supported, falling back to single-threaded compression");
-#endif
-        }
-
-        if (!done)
-            ret = lzma_easy_encoder(&strm, 6, LZMA_CHECK_CRC64);
-
-        if (ret != LZMA_OK)
-            throw CompressionError("unable to initialise lzma encoder");
-
-        // FIXME: apply the x86 BCJ filter?
-
-        strm.next_out = outbuf;
-        strm.avail_out = sizeof(outbuf);
-    }
-
-    ~XzCompressionSink()
-    {
-        lzma_end(&strm);
-    }
-
-    void finish() override
-    {
-        CompressionSink::flush();
-        write({});
-    }
-
-    void write(std::string_view data) override
-    {
-        strm.next_in = (const unsigned char *) data.data();
-        strm.avail_in = data.size();
-
-        while (!finished && (!data.data() || strm.avail_in)) {
-            checkInterrupt();
-
-            lzma_ret ret = lzma_code(&strm, data.data() ? LZMA_RUN : LZMA_FINISH);
-            if (ret != LZMA_OK && ret != LZMA_STREAM_END)
-                throw CompressionError("error %d while compressing xz file", ret);
-
-            finished = ret == LZMA_STREAM_END;
-
-            if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) {
-                nextSink({(const char *) outbuf, sizeof(outbuf) - strm.avail_out});
-                strm.next_out = outbuf;
-                strm.avail_out = sizeof(outbuf);
-            }
-        }
-    }
-};
-
-struct BzipCompressionSink : ChunkedCompressionSink
-{
-    Sink & nextSink;
-    bz_stream strm;
-    bool finished = false;
-
-    BzipCompressionSink(Sink & nextSink) : nextSink(nextSink)
-    {
-        memset(&strm, 0, sizeof(strm));
-        int ret = BZ2_bzCompressInit(&strm, 9, 0, 30);
-        if (ret != BZ_OK)
-            throw CompressionError("unable to initialise bzip2 encoder");
-
-        strm.next_out = (char *) outbuf;
-        strm.avail_out = sizeof(outbuf);
-    }
-
-    ~BzipCompressionSink()
-    {
-        BZ2_bzCompressEnd(&strm);
-    }
-
-    void finish() override
-    {
-        flush();
-        writeInternal({});
-    }
-
-    void writeInternal(std::string_view data) override
-    {
-        assert(data.size() <= std::numeric_limits<decltype(strm.avail_in)>::max());
-
-        strm.next_in = (char *) data.data();
-        strm.avail_in = data.size();
-
-        while (!finished && (!data.data() || strm.avail_in)) {
-            checkInterrupt();
-
-            int ret = BZ2_bzCompress(&strm, data.data() ? BZ_RUN : BZ_FINISH);
-            if (ret != BZ_RUN_OK && ret != BZ_FINISH_OK && ret != BZ_STREAM_END)
-                throw CompressionError("error %d while compressing bzip2 file", ret);
-
-            finished = ret == BZ_STREAM_END;
-
-            if (strm.avail_out < sizeof(outbuf) || strm.avail_in == 0) {
-                nextSink({(const char *) outbuf, sizeof(outbuf) - strm.avail_out});
-                strm.next_out = (char *) outbuf;
-                strm.avail_out = sizeof(outbuf);
-            }
-        }
-    }
-};
-
 struct BrotliCompressionSink : ChunkedCompressionSink
 {
     Sink & nextSink;
     uint8_t outbuf[BUFSIZ];
-    BrotliEncoderState *state;
+    BrotliEncoderState * state;
     bool finished = false;
 
     BrotliCompressionSink(Sink & nextSink) : nextSink(nextSink)
@@ -471,12 +261,14 @@ struct BrotliCompressionSink : ChunkedCompressionSink
 
 ref<CompressionSink> makeCompressionSink(const std::string & method, Sink & nextSink, const bool parallel)
 {
+    std::vector<std::string> la_supports = {
+        "bzip2", "compress", "grzip", "gzip", "lrzip", "lz4", "lzip", "lzma", "lzop", "xz", "zstd"
+    };
+    if (std::find(la_supports.begin(), la_supports.end(), method) != la_supports.end()) {
+        return make_ref<ArchiveCompressionSink>(nextSink, method, parallel);
+    }
     if (method == "none")
         return make_ref<NoneSink>(nextSink);
-    else if (method == "xz")
-        return make_ref<XzCompressionSink>(nextSink, parallel);
-    else if (method == "bzip2")
-        return make_ref<BzipCompressionSink>(nextSink);
     else if (method == "br")
         return make_ref<BrotliCompressionSink>(nextSink);
     else
diff --git a/src/libutil/compression.hh b/src/libutil/compression.hh
index dd666a4e19fdc2816bafc18ea3e84eff455ff2e8..338a0d9f2becf024334a160f7f7cec3b634ac828 100644
--- a/src/libutil/compression.hh
+++ b/src/libutil/compression.hh
@@ -8,14 +8,16 @@
 
 namespace nix {
 
-struct CompressionSink : BufferedSink
+struct CompressionSink : BufferedSink, FinishSink
 {
-    virtual void finish() = 0;
+    using BufferedSink::operator ();
+    using BufferedSink::write;
+    using FinishSink::finish;
 };
 
 ref<std::string> decompress(const std::string & method, const std::string & in);
 
-ref<CompressionSink> makeDecompressionSink(const std::string & method, Sink & nextSink);
+std::unique_ptr<FinishSink> makeDecompressionSink(const std::string & method, Sink & nextSink);
 
 ref<std::string> compress(const std::string & method, const std::string & in, const bool parallel = false);
 
diff --git a/src/libutil/config.cc b/src/libutil/config.cc
index 7467e5ac09bee034dbbed785baa67ee326c2f103..bda07cd55a80dc8ea8b94e6ab316e9b00602df86 100644
--- a/src/libutil/config.cc
+++ b/src/libutil/config.cc
@@ -20,7 +20,7 @@ bool Config::set(const std::string & name, const std::string & value)
             return false;
     }
     i->second.setting->set(value, append);
-    i->second.setting->overriden = true;
+    i->second.setting->overridden = true;
     return true;
 }
 
@@ -35,7 +35,7 @@ void Config::addSetting(AbstractSetting * setting)
     auto i = unknownSettings.find(setting->name);
     if (i != unknownSettings.end()) {
         setting->set(i->second);
-        setting->overriden = true;
+        setting->overridden = true;
         unknownSettings.erase(i);
         set = true;
     }
@@ -48,7 +48,7 @@ void Config::addSetting(AbstractSetting * setting)
                     alias, setting->name);
             else {
                 setting->set(i->second);
-                setting->overriden = true;
+                setting->overridden = true;
                 unknownSettings.erase(i);
                 set = true;
             }
@@ -69,10 +69,10 @@ void AbstractConfig::reapplyUnknownSettings()
         set(s.first, s.second);
 }
 
-void Config::getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly)
+void Config::getSettings(std::map<std::string, SettingInfo> & res, bool overriddenOnly)
 {
     for (auto & opt : _settings)
-        if (!opt.second.isAlias && (!overridenOnly || opt.second.setting->overriden))
+        if (!opt.second.isAlias && (!overriddenOnly || opt.second.setting->overridden))
             res.emplace(opt.first, SettingInfo{opt.second.setting->to_string(), opt.second.setting->description});
 }
 
@@ -136,10 +136,10 @@ void AbstractConfig::applyConfigFile(const Path & path)
     } catch (SysError &) { }
 }
 
-void Config::resetOverriden()
+void Config::resetOverridden()
 {
     for (auto & s : _settings)
-        s.second.setting->overriden = false;
+        s.second.setting->overridden = false;
 }
 
 nlohmann::json Config::toJSON()
@@ -169,7 +169,7 @@ AbstractSetting::AbstractSetting(
 
 void AbstractSetting::setDefault(const std::string & str)
 {
-    if (!overriden) set(str);
+    if (!overridden) set(str);
 }
 
 nlohmann::json AbstractSetting::toJSON()
@@ -203,7 +203,7 @@ void BaseSetting<T>::convertToArg(Args & args, const std::string & category)
         .description = fmt("Set the `%s` setting.", name),
         .category = category,
         .labels = {"value"},
-        .handler = {[=](std::string s) { overriden = true; set(s); }},
+        .handler = {[=](std::string s) { overridden = true; set(s); }},
     });
 
     if (isAppendable())
@@ -212,7 +212,7 @@ void BaseSetting<T>::convertToArg(Args & args, const std::string & category)
             .description = fmt("Append to the `%s` setting.", name),
             .category = category,
             .labels = {"value"},
-            .handler = {[=](std::string s) { overriden = true; set(s, true); }},
+            .handler = {[=](std::string s) { overridden = true; set(s, true); }},
         });
 }
 
@@ -365,16 +365,16 @@ bool GlobalConfig::set(const std::string & name, const std::string & value)
     return false;
 }
 
-void GlobalConfig::getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly)
+void GlobalConfig::getSettings(std::map<std::string, SettingInfo> & res, bool overriddenOnly)
 {
     for (auto & config : *configRegistrations)
-        config->getSettings(res, overridenOnly);
+        config->getSettings(res, overriddenOnly);
 }
 
-void GlobalConfig::resetOverriden()
+void GlobalConfig::resetOverridden()
 {
     for (auto & config : *configRegistrations)
-        config->resetOverriden();
+        config->resetOverridden();
 }
 
 nlohmann::json GlobalConfig::toJSON()
diff --git a/src/libutil/config.hh b/src/libutil/config.hh
index 71e31656de847fb1c12c3db301192986971a5e33..bf81b4892fe0d0a7ab15e120a4b5d5e85e8b239e 100644
--- a/src/libutil/config.hh
+++ b/src/libutil/config.hh
@@ -71,9 +71,9 @@ public:
     /**
      * Adds the currently known settings to the given result map `res`.
      * - res: map to store settings in
-     * - overridenOnly: when set to true only overridden settings will be added to `res`
+     * - overriddenOnly: when set to true only overridden settings will be added to `res`
      */
-    virtual void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) = 0;
+    virtual void getSettings(std::map<std::string, SettingInfo> & res, bool overriddenOnly = false) = 0;
 
     /**
      * Parses the configuration in `contents` and applies it
@@ -91,7 +91,7 @@ public:
     /**
      * Resets the `overridden` flag of all Settings
      */
-    virtual void resetOverriden() = 0;
+    virtual void resetOverridden() = 0;
 
     /**
      * Outputs all settings to JSON
@@ -127,7 +127,7 @@ public:
 
      MyClass() : Config(readConfigFile("/etc/my-app.conf"))
      {
-       std::cout << foo << "\n"; // will print 123 unless overriden
+       std::cout << foo << "\n"; // will print 123 unless overridden
      }
    };
 */
@@ -163,9 +163,9 @@ public:
 
     void addSetting(AbstractSetting * setting);
 
-    void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) override;
+    void getSettings(std::map<std::string, SettingInfo> & res, bool overriddenOnly = false) override;
 
-    void resetOverriden() override;
+    void resetOverridden() override;
 
     nlohmann::json toJSON() override;
 
@@ -184,7 +184,7 @@ public:
 
     int created = 123;
 
-    bool overriden = false;
+    bool overridden = false;
 
     void setDefault(const std::string & str);
 
@@ -215,7 +215,7 @@ protected:
 
     virtual void convertToArg(Args & args, const std::string & category);
 
-    bool isOverriden() const { return overriden; }
+    bool isOverridden() const { return overridden; }
 };
 
 /* A setting of type T. */
@@ -252,7 +252,7 @@ public:
 
     virtual void override(const T & v)
     {
-        overriden = true;
+        overridden = true;
         value = v;
     }
 
@@ -324,9 +324,9 @@ struct GlobalConfig : public AbstractConfig
 
     bool set(const std::string & name, const std::string & value) override;
 
-    void getSettings(std::map<std::string, SettingInfo> & res, bool overridenOnly = false) override;
+    void getSettings(std::map<std::string, SettingInfo> & res, bool overriddenOnly = false) override;
 
-    void resetOverriden() override;
+    void resetOverridden() override;
 
     nlohmann::json toJSON() override;
 
diff --git a/src/libutil/local.mk b/src/libutil/local.mk
index 5341c58e60ae900971ef7a662326d427a084d628..3a6415ee3de95c03dc157243c91ba572acb28a58 100644
--- a/src/libutil/local.mk
+++ b/src/libutil/local.mk
@@ -6,7 +6,7 @@ libutil_DIR := $(d)
 
 libutil_SOURCES := $(wildcard $(d)/*.cc)
 
-libutil_LDFLAGS = $(LIBLZMA_LIBS) -lbz2 -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) $(LIBARCHIVE_LIBS) $(BOOST_LDFLAGS) -lboost_context
+libutil_LDFLAGS = -pthread $(OPENSSL_LIBS) $(LIBBROTLI_LIBS) $(LIBARCHIVE_LIBS) $(BOOST_LDFLAGS) -lboost_context
 
 ifeq ($(HAVE_LIBCPUID), 1)
 	libutil_LDFLAGS += -lcpuid
diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc
index d1a16b6bafbfed16fb2f80fe4876cee3b574d4e8..374b48d796b6048f7bbd5a83016f231cd76a7b14 100644
--- a/src/libutil/serialise.cc
+++ b/src/libutil/serialise.cc
@@ -201,6 +201,61 @@ static DefaultStackAllocator defaultAllocatorSingleton;
 StackAllocator *StackAllocator::defaultAllocator = &defaultAllocatorSingleton;
 
 
+std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun)
+{
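+    // Run `fun` inside a coroutine: every chunk written to this sink is
+    // handed to `fun` through a LambdaSource, and finish() signals that no
+    // more data will follow.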
+    struct SourceToSink : FinishSink
+    {
+        typedef boost::coroutines2::coroutine<bool> coro_t;
+
+        std::function<void(Source &)> fun;
+        std::optional<coro_t::push_type> coro;
+
+        SourceToSink(std::function<void(Source &)> fun) : fun(fun)
+        {
+        }
+
+        std::string_view cur;
+
+        void operator () (std::string_view in) override
+        {
+            if (in.empty()) return;
+            cur = in;
+
+            if (!coro)
+                coro = coro_t::push_type(VirtualStackAllocator{}, [&](coro_t::pull_type & yield) {
+                    LambdaSource source([&](char *out, size_t out_len) {
+                        if (cur.empty()) {
+                            yield();
+                            if (yield.get()) {
+                                return (size_t)0;
+                            }
+                        }
+
+                        size_t n = std::min(cur.size(), out_len);
+                        memcpy(out, cur.data(), n);
+                        cur.remove_prefix(n);
+                        return n;
+                    });
+                    fun(source);
+                });
+
+            if (!*coro) { abort(); }
+
+            if (!cur.empty()) (*coro)(false);
+        }
+
+        void finish() {
+            if (!coro) return;
+            if (!*coro) abort();
+            (*coro)(true);
+            if (*coro) abort();
+        }
+    };
+
+    return std::make_unique<SourceToSink>(fun);
+}
+
+
 std::unique_ptr<Source> sinkToSource(
     std::function<void(Sink &)> fun,
     std::function<void()> eof)
@@ -212,7 +267,6 @@ std::unique_ptr<Source> sinkToSource(
         std::function<void(Sink &)> fun;
         std::function<void()> eof;
         std::optional<coro_t::pull_type> coro;
-        bool started = false;
 
         SinkToSource(std::function<void(Sink &)> fun, std::function<void()> eof)
             : fun(fun), eof(eof)
diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh
index 5bbbc7ce3fd7b1a7addd897e52b228c5a68c4670..0fe6e8332b8ecf8f8e864528d618fac799d87b30 100644
--- a/src/libutil/serialise.hh
+++ b/src/libutil/serialise.hh
@@ -25,6 +25,13 @@ struct NullSink : Sink
     { }
 };
 
+
+struct FinishSink : virtual Sink
+{
+    virtual void finish() = 0;
+};
+
+
 /* A buffered abstract sink. Warning: a BufferedSink should not be
    used from multiple threads concurrently. */
 struct BufferedSink : virtual Sink
@@ -281,6 +288,7 @@ struct ChainSource : Source
     size_t read(char * data, size_t len) override;
 };
 
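+/* Convert a function that consumes data from a Source into a Sink. Data
+   written to the Sink is made available to the function through a Source;
+   call finish() once all data has been written. */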
+std::unique_ptr<FinishSink> sourceToSink(std::function<void(Source &)> fun);
 
 /* Convert a function that feeds data into a Sink into a Source. The
    Source executes the function as a coroutine. */
diff --git a/src/libutil/tarfile.cc b/src/libutil/tarfile.cc
index 2da169ba731007b0183a0e458fbe7c28cc977568..24905130da93bd90f76e3da164f229ab61165c43 100644
--- a/src/libutil/tarfile.cc
+++ b/src/libutil/tarfile.cc
@@ -2,83 +2,78 @@
 #include <archive_entry.h>
 
 #include "serialise.hh"
+#include "tarfile.hh"
 
 namespace nix {
 
-struct TarArchive {
-    struct archive * archive;
-    Source * source;
-    std::vector<unsigned char> buffer;
-
-    void check(int err, const char * reason = "failed to extract archive: %s")
-    {
-        if (err == ARCHIVE_EOF)
-            throw EndOfFile("reached end of archive");
-        else if (err != ARCHIVE_OK)
-            throw Error(reason, archive_error_string(this->archive));
+static int callback_open(struct archive *, void * self)
+{
+    return ARCHIVE_OK;
+}
+
+static ssize_t callback_read(struct archive * archive, void * _self, const void * * buffer)
+{
+    auto self = (TarArchive *) _self;
+    *buffer = self->buffer.data();
+
+    try {
+        return self->source->read((char *) self->buffer.data(), 4096);
+    } catch (EndOfFile &) {
+        return 0;
+    } catch (std::exception & err) {
+        archive_set_error(archive, EIO, "Source threw exception: %s", err.what());
+        return -1;
     }
+}
 
-    TarArchive(Source & source) : buffer(4096)
-    {
-        this->archive = archive_read_new();
-        this->source = &source;
+static int callback_close(struct archive *, void * self)
+{
+    return ARCHIVE_OK;
+}
 
-        archive_read_support_filter_all(archive);
-        archive_read_support_format_all(archive);
-        check(archive_read_open(archive,
-                (void *)this,
-                TarArchive::callback_open,
-                TarArchive::callback_read,
-                TarArchive::callback_close),
-            "failed to open archive: %s");
-    }
+void TarArchive::check(int err, const std::string & reason)
+{
+    if (err == ARCHIVE_EOF)
+        throw EndOfFile("reached end of archive");
+    else if (err != ARCHIVE_OK)
+        throw Error(reason, archive_error_string(this->archive));
+}
 
-    TarArchive(const Path & path)
-    {
-        this->archive = archive_read_new();
+TarArchive::TarArchive(Source & source, bool raw) : buffer(4096)
+{
+    this->archive = archive_read_new();
+    this->source = &source;
 
+    if (!raw) {
         archive_read_support_filter_all(archive);
         archive_read_support_format_all(archive);
-        check(archive_read_open_filename(archive, path.c_str(), 16384), "failed to open archive: %s");
-    }
-
-    TarArchive(const TarArchive &) = delete;
-
-    void close()
-    {
-        check(archive_read_close(archive), "failed to close archive: %s");
+    } else {
+        archive_read_support_filter_all(archive);
+        archive_read_support_format_raw(archive);
+        archive_read_support_format_empty(archive);
     }
+    check(archive_read_open(archive, (void *)this, callback_open, callback_read, callback_close), "Failed to open archive (%s)");
+}
 
-    ~TarArchive()
-    {
-        if (this->archive) archive_read_free(this->archive);
-    }
 
-private:
+TarArchive::TarArchive(const Path & path)
+{
+    this->archive = archive_read_new();
 
-    static int callback_open(struct archive *, void * self) {
-        return ARCHIVE_OK;
-    }
+    archive_read_support_filter_all(archive);
+    archive_read_support_format_all(archive);
+    check(archive_read_open_filename(archive, path.c_str(), 16384), "failed to open archive: %s");
+}
 
-    static ssize_t callback_read(struct archive * archive, void * _self, const void * * buffer)
-    {
-        auto self = (TarArchive *)_self;
-        *buffer = self->buffer.data();
-
-        try {
-            return self->source->read((char *) self->buffer.data(), 4096);
-        } catch (EndOfFile &) {
-            return 0;
-        } catch (std::exception & err) {
-            archive_set_error(archive, EIO, "source threw exception: %s", err.what());
-            return -1;
-        }
-    }
+void TarArchive::close()
+{
+    check(archive_read_close(this->archive), "Failed to close archive (%s)");
+}
 
-    static int callback_close(struct archive *, void * self) {
-        return ARCHIVE_OK;
-    }
-};
+TarArchive::~TarArchive()
+{
+    if (this->archive) archive_read_free(this->archive);
+}
 
 static void extract_archive(TarArchive & archive, const Path & destDir)
 {
diff --git a/src/libutil/tarfile.hh b/src/libutil/tarfile.hh
index 89a024f1d103c623fd38de0c71401180ca046739..4d9141fd458afd9c63ed983def11c018dcbade1b 100644
--- a/src/libutil/tarfile.hh
+++ b/src/libutil/tarfile.hh
@@ -1,7 +1,26 @@
 #include "serialise.hh"
+#include <archive.h>
 
 namespace nix {
 
+struct TarArchive {
+    struct archive * archive;
+    Source * source;
+    std::vector<unsigned char> buffer;
+
+    void check(int err, const std::string & reason = "failed to extract archive (%s)");
+
+    TarArchive(Source & source, bool raw = false);
+
+    TarArchive(const Path & path);
+
+    // disable copy constructor
+    TarArchive(const TarArchive &) = delete;
+
+    void close();
+
+    ~TarArchive();
+};
 void unpackTarfile(Source & source, const Path & destDir);
 
 void unpackTarfile(const Path & tarFile, const Path & destDir);
diff --git a/src/libutil/tests/closure.cc b/src/libutil/tests/closure.cc
new file mode 100644
index 0000000000000000000000000000000000000000..7597e78073b4a05a48fde155dc3b952d2e97e974
--- /dev/null
+++ b/src/libutil/tests/closure.cc
@@ -0,0 +1,70 @@
+#include "closure.hh"
+#include <gtest/gtest.h>
+
+namespace nix {
+
+using namespace std;
+
+map<string, set<string>> testGraph = {
+    { "A", { "B", "C", "G" } },
+    { "B", { "A" } }, // Loops back to A
+    { "C", { "F" } }, // Indirect reference
+    { "D", { "A" } }, // Not reachable, but has backreferences
+    { "E", {} }, // Just not reachable
+    { "F", {} },
+    { "G", { "G" } }, // Self reference
+};
+
+TEST(closure, correctClosure) {
+    set<string> aClosure;
+    set<string> expectedClosure = {"A", "B", "C", "F", "G"};
+    computeClosure<string>(
+        {"A"},
+        aClosure,
+        [&](const string currentNode, function<void(promise<set<string>> &)> processEdges) {
+            promise<set<string>> promisedNodes;
+            promisedNodes.set_value(testGraph[currentNode]);
+            processEdges(promisedNodes);
+        }
+    );
+
+    ASSERT_EQ(aClosure, expectedClosure);
+}
+
+TEST(closure, properlyHandlesDirectExceptions) {
+    struct TestExn {};
+    set<string> aClosure;
+    EXPECT_THROW(
+        computeClosure<string>(
+            {"A"},
+            aClosure,
+            [&](const string currentNode, function<void(promise<set<string>> &)> processEdges) {
+                throw TestExn();
+            }
+        ),
+        TestExn
+    );
+}
+
+TEST(closure, properlyHandlesExceptionsInPromise) {
+    struct TestExn {};
+    set<string> aClosure;
+    EXPECT_THROW(
+        computeClosure<string>(
+            {"A"},
+            aClosure,
+            [&](const string currentNode, function<void(promise<set<string>> &)> processEdges) {
+                promise<set<string>> promise;
+                try {
+                    throw TestExn();
+                } catch (...) {
+                    promise.set_exception(std::current_exception());
+                }
+                processEdges(promise);
+            }
+        ),
+        TestExn
+    );
+}
+
+}
diff --git a/src/libutil/tests/compression.cc b/src/libutil/tests/compression.cc
index 5b7a2c5b96998ca48b081c15131ed6c1dda3df3a..2efa3266b4dba5945fba5c18fe5730c8fee7d0b5 100644
--- a/src/libutil/tests/compression.cc
+++ b/src/libutil/tests/compression.cc
@@ -17,6 +17,24 @@ namespace nix {
         ASSERT_EQ(*o, "this-is-a-test");
     }
 
+    TEST(decompress, decompressNoneCompressed) {
+        auto method = "none";
+        auto str = "slfja;sljfklsa;jfklsjfkl;sdjfkl;sadjfkl;sdjf;lsdfjsadlf";
+        ref<std::string> o = decompress(method, str);
+
+        ASSERT_EQ(*o, str);
+    }
+
+    TEST(decompress, decompressEmptyCompressed) {
+        // Empty-method decompression used e.g. by S3 store
+        // (Content-Encoding == "").
+        auto method = "";
+        auto str = "slfja;sljfklsa;jfklsjfkl;sdjfkl;sadjfkl;sdjf;lsdfjsadlf";
+        ref<std::string> o = decompress(method, str);
+
+        ASSERT_EQ(*o, str);
+    }
+
     TEST(decompress, decompressXzCompressed) {
         auto method = "xz";
         auto str = "slfja;sljfklsa;jfklsjfkl;sdjfkl;sadjfkl;sdjf;lsdfjsadlf";
diff --git a/src/libutil/tests/config.cc b/src/libutil/tests/config.cc
index c305af9f5057cc3a99e4904255ab66a1027b677b..0ebdaf3db9ca7aaa7a1e820d52cec0bff8772121 100644
--- a/src/libutil/tests/config.cc
+++ b/src/libutil/tests/config.cc
@@ -29,20 +29,20 @@ namespace nix {
         std::map<std::string, Config::SettingInfo> settings;
         Setting<std::string> foo{&config, value, "name-of-the-setting", "description"};
 
-        config.getSettings(settings, /* overridenOnly = */ false);
+        config.getSettings(settings, /* overriddenOnly = */ false);
         const auto iter = settings.find("name-of-the-setting");
         ASSERT_NE(iter, settings.end());
         ASSERT_EQ(iter->second.value, "");
         ASSERT_EQ(iter->second.description, "description\n");
     }
 
-    TEST(Config, getDefinedOverridenSettingNotSet) {
+    TEST(Config, getDefinedOverriddenSettingNotSet) {
         Config config;
         std::string value;
         std::map<std::string, Config::SettingInfo> settings;
         Setting<std::string> foo{&config, value, "name-of-the-setting", "description"};
 
-        config.getSettings(settings, /* overridenOnly = */ true);
+        config.getSettings(settings, /* overriddenOnly = */ true);
         const auto e = settings.find("name-of-the-setting");
         ASSERT_EQ(e, settings.end());
     }
@@ -55,7 +55,7 @@ namespace nix {
 
         setting.assign("value");
 
-        config.getSettings(settings, /* overridenOnly = */ false);
+        config.getSettings(settings, /* overriddenOnly = */ false);
         const auto iter = settings.find("name-of-the-setting");
         ASSERT_NE(iter, settings.end());
         ASSERT_EQ(iter->second.value, "value");
@@ -69,7 +69,7 @@ namespace nix {
 
         ASSERT_TRUE(config.set("name-of-the-setting", "value"));
 
-        config.getSettings(settings, /* overridenOnly = */ false);
+        config.getSettings(settings, /* overriddenOnly = */ false);
         const auto e = settings.find("name-of-the-setting");
         ASSERT_NE(e, settings.end());
         ASSERT_EQ(e->second.value, "value");
@@ -100,7 +100,7 @@ namespace nix {
 
         {
             std::map<std::string, Config::SettingInfo> settings;
-            config.getSettings(settings, /* overridenOnly = */ false);
+            config.getSettings(settings, /* overriddenOnly = */ false);
             ASSERT_EQ(settings.find("key"), settings.end());
         }
 
@@ -108,17 +108,17 @@ namespace nix {
 
         {
             std::map<std::string, Config::SettingInfo> settings;
-            config.getSettings(settings, /* overridenOnly = */ false);
+            config.getSettings(settings, /* overriddenOnly = */ false);
             ASSERT_EQ(settings["key"].value, "value");
         }
     }
 
-    TEST(Config, resetOverriden) {
+    TEST(Config, resetOverridden) {
         Config config;
-        config.resetOverriden();
+        config.resetOverridden();
     }
 
-    TEST(Config, resetOverridenWithSetting) {
+    TEST(Config, resetOverriddenWithSetting) {
         Config config;
         Setting<std::string> setting{&config, "", "name-of-the-setting", "description"};
 
@@ -127,7 +127,7 @@ namespace nix {
 
             setting.set("foo");
             ASSERT_EQ(setting.get(), "foo");
-            config.getSettings(settings, /* overridenOnly = */ true);
+            config.getSettings(settings, /* overriddenOnly = */ true);
             ASSERT_TRUE(settings.empty());
         }
 
@@ -135,18 +135,18 @@ namespace nix {
             std::map<std::string, Config::SettingInfo> settings;
 
             setting.override("bar");
-            ASSERT_TRUE(setting.overriden);
+            ASSERT_TRUE(setting.overridden);
             ASSERT_EQ(setting.get(), "bar");
-            config.getSettings(settings, /* overridenOnly = */ true);
+            config.getSettings(settings, /* overriddenOnly = */ true);
             ASSERT_FALSE(settings.empty());
         }
 
         {
             std::map<std::string, Config::SettingInfo> settings;
 
-            config.resetOverriden();
-            ASSERT_FALSE(setting.overriden);
-            config.getSettings(settings, /* overridenOnly = */ true);
+            config.resetOverridden();
+            ASSERT_FALSE(setting.overridden);
+            config.getSettings(settings, /* overriddenOnly = */ true);
             ASSERT_TRUE(settings.empty());
         }
     }
diff --git a/src/libutil/tests/url.cc b/src/libutil/tests/url.cc
index 80646ad3e32595f7451d8d08463060464155b3ae..aff58e9ee25fae19c08f8d979bcc352a130d573a 100644
--- a/src/libutil/tests/url.cc
+++ b/src/libutil/tests/url.cc
@@ -117,6 +117,24 @@ namespace nix {
         ASSERT_EQ(parsed, expected);
     }
 
+    TEST(parseURL, parseScopedRFC4007IPv6Address) {
+        auto s = "http://[fe80::818c:da4d:8975:415c\%enp0s25]:8080";
+        auto parsed = parseURL(s);
+
+        ParsedURL expected {
+            .url = "http://[fe80::818c:da4d:8975:415c\%enp0s25]:8080",
+            .base = "http://[fe80::818c:da4d:8975:415c\%enp0s25]:8080",
+            .scheme = "http",
+            .authority = "[fe80::818c:da4d:8975:415c\%enp0s25]:8080",
+            .path = "",
+            .query = (StringMap) { },
+            .fragment = "",
+        };
+
+        ASSERT_EQ(parsed, expected);
+
+    }
+
     TEST(parseURL, parseIPv6Address) {
         auto s = "http://[2a02:8071:8192:c100:311d:192d:81ac:11ea]:8080";
         auto parsed = parseURL(s);
diff --git a/src/libutil/url-parts.hh b/src/libutil/url-parts.hh
index 862d9fa6eaabe8bbe98158b76484997e1ea3660a..da10a6bbc9e3d3a2d362856574de2632978c82ce 100644
--- a/src/libutil/url-parts.hh
+++ b/src/libutil/url-parts.hh
@@ -8,7 +8,7 @@ namespace nix {
 // URI stuff.
 const static std::string pctEncoded = "(?:%[0-9a-fA-F][0-9a-fA-F])";
 const static std::string schemeRegex = "(?:[a-z][a-z0-9+.-]*)";
-const static std::string ipv6AddressSegmentRegex = "[0-9a-fA-F:]+";
+const static std::string ipv6AddressSegmentRegex = "[0-9a-fA-F:]+(?:%\\w+)?";
 const static std::string ipv6AddressRegex = "(?:\\[" + ipv6AddressSegmentRegex + "\\]|" + ipv6AddressSegmentRegex + ")";
 const static std::string unreservedRegex = "(?:[a-zA-Z0-9-._~])";
 const static std::string subdelimsRegex = "(?:[!$&'\"()*+,;=])";
diff --git a/src/libutil/util.cc b/src/libutil/util.cc
index ef37275ac85fd1f5ca996fd58fa559f298ba5c4d..7e57fd7ca2a335bad90f56cfa56809f74194b23b 100644
--- a/src/libutil/util.cc
+++ b/src/libutil/util.cc
@@ -32,6 +32,7 @@
 
 #ifdef __linux__
 #include <sys/prctl.h>
+#include <sys/resource.h>
 #endif
 
 
@@ -143,16 +144,21 @@ Path canonPath(const Path & path, bool resolveSymlinks)
             s += '/';
             while (i != end && *i != '/') s += *i++;
 
-            /* If s points to a symlink, resolve it and restart (since
-               the symlink target might contain new symlinks). */
+            /* If s points to a symlink, resolve it and continue from there */
             if (resolveSymlinks && isLink(s)) {
                 if (++followCount >= maxFollow)
                     throw Error("infinite symlink recursion in path '%1%'", path);
-                temp = absPath(readLink(s), dirOf(s))
-                    + string(i, end);
-                i = temp.begin(); /* restart */
+                temp = readLink(s) + string(i, end);
+                i = temp.begin();
                 end = temp.end();
-                s = "";
+                if (!temp.empty() && temp[0] == '/') {
+                    s.clear();  /* restart for symlinks pointing to absolute path */
+                } else {
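+                    /* relative symlink: continue from the directory containing the symlink */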
+                    s = dirOf(s);
+                    if (s == "/") {  // we don’t want trailing slashes here, which dirOf only produces if s = /
+                        s.clear();
+                    }
+                }
             }
         }
     }
@@ -752,13 +758,13 @@ AutoCloseFD::AutoCloseFD() : fd{-1} {}
 AutoCloseFD::AutoCloseFD(int fd) : fd{fd} {}
 
 
-AutoCloseFD::AutoCloseFD(AutoCloseFD&& that) : fd{that.fd}
+AutoCloseFD::AutoCloseFD(AutoCloseFD && that) : fd{that.fd}
 {
     that.fd = -1;
 }
 
 
-AutoCloseFD& AutoCloseFD::operator =(AutoCloseFD&& that)
+AutoCloseFD & AutoCloseFD::operator =(AutoCloseFD && that)
 {
     close();
     fd = that.fd;
@@ -789,6 +795,7 @@ void AutoCloseFD::close()
         if (::close(fd) == -1)
             /* This should never happen. */
             throw SysError("closing file descriptor %1%", fd);
+        fd = -1;
     }
 }
 
@@ -822,6 +829,12 @@ void Pipe::create()
 }
 
 
+void Pipe::close()
+{
+    readSide.close();
+    writeSide.close();
+}
+
 
 //////////////////////////////////////////////////////////////////////
 
@@ -1109,7 +1122,7 @@ void runProgram2(const RunOptions & options)
         Strings args_(options.args);
         args_.push_front(options.program);
 
-        restoreSignals();
+        restoreProcessContext();
 
         if (options.searchPath)
             execvp(options.program.c_str(), stringsToCharPtrs(args_).data());
@@ -1121,7 +1134,7 @@ void runProgram2(const RunOptions & options)
         throw SysError("executing '%1%'", options.program);
     }, processOptions);
 
-    out.writeSide = -1;
+    out.writeSide.close();
 
     std::thread writerThread;
 
@@ -1134,7 +1147,7 @@ void runProgram2(const RunOptions & options)
 
 
     if (source) {
-        in.readSide = -1;
+        in.readSide.close();
         writerThread = std::thread([&]() {
             try {
                 std::vector<char> buf(8 * 1024);
@@ -1151,7 +1164,7 @@ void runProgram2(const RunOptions & options)
             } catch (...) {
                 promise.set_exception(std::current_exception());
             }
-            in.writeSide = -1;
+            in.writeSide.close();
         });
     }
 
@@ -1590,7 +1603,7 @@ void startSignalHandlerThread()
     updateWindowSize();
 
     if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask))
-        throw SysError("quering signal mask");
+        throw SysError("querying signal mask");
 
     sigset_t set;
     sigemptyset(&set);
@@ -1605,12 +1618,45 @@ void startSignalHandlerThread()
     std::thread(signalHandlerThread, set).detach();
 }
 
-void restoreSignals()
+static void restoreSignals()
 {
     if (sigprocmask(SIG_SETMASK, &savedSignalMask, nullptr))
         throw SysError("restoring signals");
 }
 
+#if __linux__
+rlim_t savedStackSize = 0;
+#endif
+
+void setStackSize(size_t stackSize)
+{
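+    // On Linux, raise the soft stack limit to `stackSize` if it is currently
+    // lower, remembering the old limit so that restoreProcessContext() can
+    // put it back. On other platforms this is a no-op.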
+    #if __linux__
+    struct rlimit limit;
+    if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur < stackSize) {
+        savedStackSize = limit.rlim_cur;
+        limit.rlim_cur = stackSize;
+        setrlimit(RLIMIT_STACK, &limit);
+    }
+    #endif
+}
+
+void restoreProcessContext()
+{
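+    // Restore the signal mask and CPU affinity the process originally
+    // inherited, and (on Linux) undo any stack-limit change made by
+    // setStackSize().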
+    restoreSignals();
+
+    restoreAffinity();
+
+    #if __linux__
+    if (savedStackSize) {
+        struct rlimit limit;
+        if (getrlimit(RLIMIT_STACK, &limit) == 0) {
+            limit.rlim_cur = savedStackSize;
+            setrlimit(RLIMIT_STACK, &limit);
+        }
+    }
+    #endif
+}
+
 /* RAII helper to automatically deregister a callback. */
 struct InterruptCallbackImpl : InterruptCallback
 {
@@ -1673,10 +1719,11 @@ string showBytes(uint64_t bytes)
 }
 
 
+// FIXME: move to libstore/build
 void commonChildInit(Pipe & logPipe)
 {
     const static string pathNullDevice = "/dev/null";
-    restoreSignals();
+    restoreProcessContext();
 
     /* Put the child in a separate session (and thus a separate
        process group) so that it has no controlling terminal (meaning
diff --git a/src/libutil/util.hh b/src/libutil/util.hh
index ad49c65b38bd029cfdd533c7c5f340a8ef62d28c..f84d0fb31138368b9b024454b486557f4c758fed 100644
--- a/src/libutil/util.hh
+++ b/src/libutil/util.hh
@@ -188,7 +188,6 @@ public:
 class AutoCloseFD
 {
     int fd;
-    void close();
 public:
     AutoCloseFD();
     AutoCloseFD(int fd);
@@ -200,6 +199,7 @@ public:
     int get() const;
     explicit operator bool() const;
     int release();
+    void close();
 };
 
 
@@ -216,6 +216,7 @@ class Pipe
 public:
     AutoCloseFD readSide, writeSide;
     void create();
+    void close();
 };
 
 
@@ -299,6 +300,15 @@ std::pair<int, std::string> runProgram(const RunOptions & options);
 void runProgram2(const RunOptions & options);
 
 
+/* Change the stack size. */
+void setStackSize(size_t stackSize);
+
+
+/* Restore the original inherited Unix process context (such as signal
+   masks, stack size, CPU affinity). */
+void restoreProcessContext();
+
+
 class ExecError : public Error
 {
 public:
@@ -512,9 +522,6 @@ class Callback;
    on the current thread (and thus any threads created by it). */
 void startSignalHandlerThread();
 
-/* Restore default signal handling. */
-void restoreSignals();
-
 struct InterruptCallback
 {
     virtual ~InterruptCallback() { };
diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc
index 65b85b304af10524a3fd693b20b1b0a72c9891c9..3fec2c06cbee767dc5b29056e8f87a2e5fec2031 100755
--- a/src/nix-build/nix-build.cc
+++ b/src/nix-build/nix-build.cc
@@ -12,6 +12,7 @@
 #include "affinity.hh"
 #include "util.hh"
 #include "shared.hh"
+#include "path-with-outputs.hh"
 #include "eval.hh"
 #include "eval-inline.hh"
 #include "get-drvs.hh"
@@ -321,7 +322,8 @@ static void main_nix_build(int argc, char * * argv)
 
     state->printStats();
 
-    auto buildPaths = [&](const std::vector<StorePathWithOutputs> & paths) {
+    auto buildPaths = [&](const std::vector<StorePathWithOutputs> & paths0) {
+        auto paths = toDerivedPaths(paths0);
         /* Note: we do this even when !printMissing to efficiently
            fetch binary cache data. */
         uint64_t downloadSize, narSize;
@@ -385,6 +387,12 @@ static void main_nix_build(int argc, char * * argv)
 
         if (dryRun) return;
 
+        if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
+            auto resolvedDrv = drv.tryResolve(*store);
+            assert(resolvedDrv && "Successfully resolved the derivation");
+            drv = *resolvedDrv;
+        }
+
         // Set the environment.
         auto env = getEnv();
 
@@ -420,8 +428,6 @@ static void main_nix_build(int argc, char * * argv)
             } else
                 env[var.first] = var.second;
 
-        restoreAffinity();
-
         /* Run a shell using the derivation's environment.  For
            convenience, source $stdenv/setup to setup additional
            environment variables and shell functions.  Also don't
@@ -471,7 +477,7 @@ static void main_nix_build(int argc, char * * argv)
 
         auto argPtrs = stringsToCharPtrs(args);
 
-        restoreSignals();
+        restoreProcessContext();
 
         logger->stop();
 
diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc
index 0f10a4cbb529e8b0eeefa59b63ba9282e26553bc..e04954d45c8f236b4b5482c79f960d0c659e52cf 100644
--- a/src/nix-env/nix-env.cc
+++ b/src/nix-env/nix-env.cc
@@ -6,6 +6,7 @@
 #include "globals.hh"
 #include "names.hh"
 #include "profiles.hh"
+#include "path-with-outputs.hh"
 #include "shared.hh"
 #include "store-api.hh"
 #include "local-fs-store.hh"
@@ -418,13 +419,13 @@ static void queryInstSources(EvalState & state,
 
 static void printMissing(EvalState & state, DrvInfos & elems)
 {
-    std::vector<StorePathWithOutputs> targets;
+    std::vector<DerivedPath> targets;
     for (auto & i : elems) {
         Path drvPath = i.queryDrvPath();
         if (drvPath != "")
-            targets.push_back({state.store->parseStorePath(drvPath)});
+            targets.push_back(DerivedPath::Built{state.store->parseStorePath(drvPath)});
         else
-            targets.push_back({state.store->parseStorePath(i.queryOutPath())});
+            targets.push_back(DerivedPath::Opaque{state.store->parseStorePath(i.queryOutPath())});
     }
 
     printMissing(state.store, targets);
@@ -693,17 +694,18 @@ static void opSet(Globals & globals, Strings opFlags, Strings opArgs)
     if (globals.forceName != "")
         drv.setName(globals.forceName);
 
-    if (drv.queryDrvPath() != "") {
-        std::vector<StorePathWithOutputs> paths{{globals.state->store->parseStorePath(drv.queryDrvPath())}};
-        printMissing(globals.state->store, paths);
-        if (globals.dryRun) return;
-        globals.state->store->buildPaths(paths, globals.state->repair ? bmRepair : bmNormal);
-    } else {
-        printMissing(globals.state->store,
-            {{globals.state->store->parseStorePath(drv.queryOutPath())}});
-        if (globals.dryRun) return;
-        globals.state->store->ensurePath(globals.state->store->parseStorePath(drv.queryOutPath()));
-    }
+    std::vector<DerivedPath> paths {
+        (drv.queryDrvPath() != "")
+        ? (DerivedPath) (DerivedPath::Built {
+                globals.state->store->parseStorePath(drv.queryDrvPath())
+            })
+        : (DerivedPath) (DerivedPath::Opaque {
+                globals.state->store->parseStorePath(drv.queryOutPath())
+            }),
+    };
+    printMissing(globals.state->store, paths);
+    if (globals.dryRun) return;
+    globals.state->store->buildPaths(paths, globals.state->repair ? bmRepair : bmNormal);
 
     debug(format("switching to new user environment"));
     Path generation = createGeneration(
diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc
index 168ac492ba912abf8bc2bae05cc866250f16b5b2..5ceb2ae67269bc6dbed84030991e6b7e3a3f119e 100644
--- a/src/nix-env/user-env.cc
+++ b/src/nix-env/user-env.cc
@@ -2,6 +2,7 @@
 #include "util.hh"
 #include "derivations.hh"
 #include "store-api.hh"
+#include "path-with-outputs.hh"
 #include "local-fs-store.hh"
 #include "globals.hh"
 #include "shared.hh"
@@ -41,7 +42,9 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
             drvsToBuild.push_back({state.store->parseStorePath(i.queryDrvPath())});
 
     debug(format("building user environment dependencies"));
-    state.store->buildPaths(drvsToBuild, state.repair ? bmRepair : bmNormal);
+    state.store->buildPaths(
+        toDerivedPaths(drvsToBuild),
+        state.repair ? bmRepair : bmNormal);
 
     /* Construct the whole top level derivation. */
     StorePathSet references;
@@ -136,7 +139,9 @@ bool createUserEnv(EvalState & state, DrvInfos & elems,
     debug("building user environment");
     std::vector<StorePathWithOutputs> topLevelDrvs;
     topLevelDrvs.push_back({topLevelDrv});
-    state.store->buildPaths(topLevelDrvs, state.repair ? bmRepair : bmNormal);
+    state.store->buildPaths(
+        toDerivedPaths(topLevelDrvs),
+        state.repair ? bmRepair : bmNormal);
 
     /* Switch the current user environment to the output path. */
     auto store2 = state.store.dynamic_pointer_cast<LocalFSStore>();
diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc
index 94d4881dd5bcf2de66bc12f7ec53a3183cb43e87..b327793e7a327686c502115f49ff7e697c8c9393 100644
--- a/src/nix-store/nix-store.cc
+++ b/src/nix-store/nix-store.cc
@@ -10,6 +10,7 @@
 #include "worker-protocol.hh"
 #include "graphml.hh"
 #include "legacy.hh"
+#include "path-with-outputs.hh"
 
 #include <iostream>
 #include <algorithm>
@@ -62,7 +63,7 @@ static PathSet realisePath(StorePathWithOutputs path, bool build = true)
     auto store2 = std::dynamic_pointer_cast<LocalFSStore>(store);
 
     if (path.path.isDerivation()) {
-        if (build) store->buildPaths({path});
+        if (build) store->buildPaths({path.toDerivedPath()});
         auto outputPaths = store->queryDerivationOutputMap(path.path);
         Derivation drv = store->derivationFromPath(path.path);
         rootNr++;
@@ -128,11 +129,13 @@ static void opRealise(Strings opFlags, Strings opArgs)
 
     std::vector<StorePathWithOutputs> paths;
     for (auto & i : opArgs)
-        paths.push_back(store->followLinksToStorePathWithOutputs(i));
+        paths.push_back(followLinksToStorePathWithOutputs(*store, i));
 
     uint64_t downloadSize, narSize;
     StorePathSet willBuild, willSubstitute, unknown;
-    store->queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, narSize);
+    store->queryMissing(
+        toDerivedPaths(paths),
+        willBuild, willSubstitute, unknown, downloadSize, narSize);
 
     if (ignoreUnknown) {
         std::vector<StorePathWithOutputs> paths2;
@@ -148,7 +151,7 @@ static void opRealise(Strings opFlags, Strings opArgs)
     if (dryRun) return;
 
     /* Build all paths at the same time to exploit parallelism. */
-    store->buildPaths(paths, buildMode);
+    store->buildPaths(toDerivedPaths(paths), buildMode);
 
     if (!ignoreUnknown)
         for (auto & i : paths) {
@@ -873,13 +876,13 @@ static void opServe(Strings opFlags, Strings opArgs)
 
                 std::vector<StorePathWithOutputs> paths;
                 for (auto & s : readStrings<Strings>(in))
-                    paths.push_back(store->parsePathWithOutputs(s));
+                    paths.push_back(parsePathWithOutputs(*store, s));
 
                 getBuildSettings();
 
                 try {
                     MonitorFdHup monitor(in.fd);
-                    store->buildPaths(paths);
+                    store->buildPaths(toDerivedPaths(paths));
                     out << 0;
                 } catch (Error & e) {
                     assert(e.status);
@@ -905,7 +908,7 @@ static void opServe(Strings opFlags, Strings opArgs)
 
                 if (GET_PROTOCOL_MINOR(clientVersion) >= 3)
                     out << status.timesBuilt << status.isNonDeterministic << status.startTime << status.stopTime;
-                if (GET_PROTOCOL_MINOR(clientVersion >= 5)) {
+                if (GET_PROTOCOL_MINOR(clientVersion) >= 6) {
                     worker_proto::write(*store, out, status.builtOutputs);
                 }
 
diff --git a/src/nix/app.cc b/src/nix/app.cc
index cf147c6315321c9c1181bd06cc242a3cd0c99c03..01a0064db89be7f78c1f925d2e65cae8b85e6943 100644
--- a/src/nix/app.cc
+++ b/src/nix/app.cc
@@ -3,34 +3,79 @@
 #include "eval-inline.hh"
 #include "eval-cache.hh"
 #include "names.hh"
+#include "command.hh"
 
 namespace nix {
 
-App Installable::toApp(EvalState & state)
+struct InstallableDerivedPath : Installable
 {
-    auto [cursor, attrPath] = getCursor(state);
+    ref<Store> store;
+    const DerivedPath derivedPath;
 
-    auto type = cursor->getAttr("type")->getString();
+    InstallableDerivedPath(ref<Store> store, const DerivedPath & derivedPath)
+        : store(store)
+        , derivedPath(derivedPath)
+    {
+    }
+
+
+    std::string what() override { return derivedPath.to_string(*store); }
+
+    DerivedPaths toDerivedPaths() override
+    {
+        return {derivedPath};
+    }
 
-    auto checkProgram = [&](const Path & program)
+    std::optional<StorePath> getStorePath() override
     {
-        if (!state.store->isInStore(program))
-            throw Error("app program '%s' is not in the Nix store", program);
-    };
+        return std::nullopt;
+    }
+};
+
+/**
+ * Return the rewrites that are needed to resolve a string whose context is
+ * included in `dependencies`
+ */
+StringPairs resolveRewrites(Store & store, const BuiltPaths dependencies)
+{
+    StringPairs res;
+    for (auto & dep : dependencies)
+        if (auto drvDep = std::get_if<BuiltPathBuilt>(&dep))
+            for (auto & [ outputName, outputPath ] : drvDep->outputs)
+                res.emplace(
+                    downstreamPlaceholder(store, drvDep->drvPath, outputName),
+                    store.printStorePath(outputPath)
+                );
+    return res;
+}
+
+/**
+ * Resolve the given string assuming the given context
+ */
+std::string resolveString(Store & store, const std::string & toResolve, const BuiltPaths dependencies)
+{
+    auto rewrites = resolveRewrites(store, dependencies);
+    return rewriteStrings(toResolve, rewrites);
+}
+
+UnresolvedApp Installable::toApp(EvalState & state)
+{
+    auto [cursor, attrPath] = getCursor(state);
+
+    auto type = cursor->getAttr("type")->getString();
 
     if (type == "app") {
         auto [program, context] = cursor->getAttr("program")->getStringWithContext();
 
-        checkProgram(program);
 
         std::vector<StorePathWithOutputs> context2;
         for (auto & [path, name] : context)
             context2.push_back({state.store->parseStorePath(path), {name}});
 
-        return App {
+        return UnresolvedApp{App {
             .context = std::move(context2),
             .program = program,
-        };
+        }};
     }
 
     else if (type == "derivation") {
@@ -45,15 +90,32 @@ App Installable::toApp(EvalState & state)
             ? aMainProgram->getString()
             : DrvName(name).name;
         auto program = outPath + "/bin/" + mainProgram;
-        checkProgram(program);
-        return App {
+        return UnresolvedApp { App {
             .context = { { drvPath, {outputName} } },
             .program = program,
-        };
+        }};
     }
 
     else
         throw Error("attribute '%s' has unsupported type '%s'", attrPath, type);
 }
 
+App UnresolvedApp::resolve(ref<Store> store)
+{
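+    // Build the store paths referenced by the app's context, then rewrite
+    // any placeholders in the program string to the built output paths.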
+    auto res = unresolved;
+
+    std::vector<std::shared_ptr<Installable>> installableContext;
+
+    for (auto & ctxElt : unresolved.context)
+        installableContext.push_back(
+            std::make_shared<InstallableDerivedPath>(store, ctxElt.toDerivedPath()));
+
+    auto builtContext = build(store, Realise::Outputs, installableContext);
+    res.program = resolveString(*store, unresolved.program, builtContext);
+    if (!store->isInStore(res.program))
+        throw Error("app program '%s' is not in the Nix store", res.program);
+
+    return res;
+}
+
 }
diff --git a/src/nix/build.cc b/src/nix/build.cc
index 724ce9d79bb41971c963613f26a39838726074ed..15923ebc3d9f4dbc4332f9260ca7411397bf1435 100644
--- a/src/nix/build.cc
+++ b/src/nix/build.cc
@@ -54,6 +54,8 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
     {
         auto buildables = build(store, dryRun ? Realise::Nothing : Realise::Outputs, installables, buildMode);
 
+        if (json) logger->cout("%s", derivedPathsWithHintsToJSON(buildables, store).dump());
+
         if (dryRun) return;
 
         if (outLink != "")
@@ -61,26 +63,23 @@ struct CmdBuild : InstallablesCommand, MixDryRun, MixJSON, MixProfile
                 for (const auto & [_i, buildable] : enumerate(buildables)) {
                     auto i = _i;
                     std::visit(overloaded {
-                        [&](BuildableOpaque bo) {
+                        [&](BuiltPath::Opaque bo) {
                             std::string symlink = outLink;
                             if (i) symlink += fmt("-%d", i);
                             store2->addPermRoot(bo.path, absPath(symlink));
                         },
-                        [&](BuildableFromDrv bfd) {
-                            auto builtOutputs = store->queryDerivationOutputMap(bfd.drvPath);
-                            for (auto & output : builtOutputs) {
+                        [&](BuiltPath::Built bfd) {
+                            for (auto & output : bfd.outputs) {
                                 std::string symlink = outLink;
                                 if (i) symlink += fmt("-%d", i);
                                 if (output.first != "out") symlink += fmt("-%s", output.first);
                                 store2->addPermRoot(output.second, absPath(symlink));
                             }
                         },
-                    }, buildable);
+                    }, buildable.raw());
                 }
 
         updateProfile(buildables);
-
-        if (json) logger->cout("%s", buildablesToJSON(buildables, store).dump());
     }
 };
 
diff --git a/src/nix/build.md b/src/nix/build.md
index c2f3e387ad98ec324bd8226a241b6cb5e47005cb..20138b7e0c3e0ccec367141c132f3cdfa17d2963 100644
--- a/src/nix/build.md
+++ b/src/nix/build.md
@@ -81,7 +81,7 @@ path installables are substituted.
 
 Unless `--no-link` is specified, after a successful build, it creates
 symlinks to the store paths of the installables. These symlinks have
-the prefix `./result` by default; this can be overriden using the
+the prefix `./result` by default; this can be overridden using the
 `--out-link` option. Each symlink has a suffix `-<N>-<outname>`, where
 *N* is the index of the installable (with the left-most installable
 having index 0), and *outname* is the symbolic derivation output name
diff --git a/src/nix/bundle.cc b/src/nix/bundle.cc
index 48f4eb6e3ce316fda9829d0af14eee4199b87797..88bc3d1d1c8c2923b099ad79c34873e79912e7f5 100644
--- a/src/nix/bundle.cc
+++ b/src/nix/bundle.cc
@@ -69,8 +69,7 @@ struct CmdBundle : InstallableCommand
     {
         auto evalState = getEvalState();
 
-        auto app = installable->toApp(*evalState);
-        store->buildPaths(app.context);
+        auto app = installable->toApp(*evalState).resolve(store);
 
         auto [bundlerFlakeRef, bundlerName] = parseFlakeRefWithFragment(bundler, absPath("."));
         const flake::LockFlags lockFlags{ .writeLockFile = false };
@@ -110,7 +109,7 @@ struct CmdBundle : InstallableCommand
 
         StorePath outPath = store->parseStorePath(evalState->coerceToPath(*attr2->pos, *attr2->value, context2));
 
-        store->buildPaths({{drvPath}});
+        store->buildPaths({ DerivedPath::Built { drvPath } });
 
         auto outPathS = store->printStorePath(outPath);
 
diff --git a/src/nix/copy.cc b/src/nix/copy.cc
index f59f7c76bce357fe29ea40330649fe95767a3f74..674cce4b48ac60a662c3d8a50552e41b1878733e 100644
--- a/src/nix/copy.cc
+++ b/src/nix/copy.cc
@@ -8,7 +8,7 @@
 
 using namespace nix;
 
-struct CmdCopy : RealisedPathsCommand
+struct CmdCopy : BuiltPathsCommand
 {
     std::string srcUri, dstUri;
 
@@ -16,10 +16,10 @@ struct CmdCopy : RealisedPathsCommand
 
     SubstituteFlag substitute = NoSubstitute;
 
-    using RealisedPathsCommand::run;
+    using BuiltPathsCommand::run;
 
     CmdCopy()
-        : RealisedPathsCommand(true)
+        : BuiltPathsCommand(true)
     {
         addFlag({
             .longName = "from",
@@ -75,16 +75,22 @@ struct CmdCopy : RealisedPathsCommand
         if (srcUri.empty() && dstUri.empty())
             throw UsageError("you must pass '--from' and/or '--to'");
 
-        RealisedPathsCommand::run(store);
+        BuiltPathsCommand::run(store);
     }
 
-    void run(ref<Store> srcStore, std::vector<RealisedPath> paths) override
+    void run(ref<Store> srcStore, BuiltPaths paths) override
     {
         ref<Store> dstStore = dstUri.empty() ? openStore() : openStore(dstUri);
 
+        RealisedPath::Set stuffToCopy;
+
+        for (auto & builtPath : paths) {
+            auto theseRealisations = builtPath.toRealisedPaths(*srcStore);
+            stuffToCopy.insert(theseRealisations.begin(), theseRealisations.end());
+        }
+
         copyPaths(
-            srcStore, dstStore, RealisedPath::Set(paths.begin(), paths.end()),
-            NoRepair, checkSigs, substitute);
+            srcStore, dstStore, stuffToCopy, NoRepair, checkSigs, substitute);
     }
 };
 
diff --git a/src/nix/develop.cc b/src/nix/develop.cc
index d0b1405700cafd7d43a0be140a7e5276b85eecce..d77ff52d7a44ab9a1c78d457cba325459dc1fc4b 100644
--- a/src/nix/develop.cc
+++ b/src/nix/develop.cc
@@ -3,6 +3,7 @@
 #include "common-args.hh"
 #include "shared.hh"
 #include "store-api.hh"
+#include "path-with-outputs.hh"
 #include "derivations.hh"
 #include "affinity.hh"
 #include "progress-bar.hh"
@@ -143,26 +144,34 @@ StorePath getDerivationEnvironment(ref<Store> store, const StorePath & drvPath)
     /* Rehash and write the derivation. FIXME: would be nice to use
        'buildDerivation', but that's privileged. */
     drv.name += "-env";
-    for (auto & output : drv.outputs) {
-        output.second = { .output = DerivationOutputInputAddressed { .path = StorePath::dummy } };
-        drv.env[output.first] = "";
-    }
     drv.inputSrcs.insert(std::move(getEnvShPath));
-    Hash h = std::get<0>(hashDerivationModulo(*store, drv, true));
+    if (settings.isExperimentalFeatureEnabled("ca-derivations")) {
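+        // Output paths of content-addressed derivations are not known in
+        // advance, so mark them as deferred and use hash placeholders in the
+        // environment.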
+        for (auto & output : drv.outputs) {
+            output.second = {
+                .output = DerivationOutputDeferred{},
+            };
+            drv.env[output.first] = hashPlaceholder(output.first);
+        }
+    } else {
+        for (auto & output : drv.outputs) {
+            output.second = { .output = DerivationOutputInputAddressed { .path = StorePath::dummy } };
+            drv.env[output.first] = "";
+        }
+        Hash h = std::get<0>(hashDerivationModulo(*store, drv, true));
 
-    for (auto & output : drv.outputs) {
-        auto outPath = store->makeOutputPath(output.first, h, drv.name);
-        output.second = { .output = DerivationOutputInputAddressed { .path = outPath } };
-        drv.env[output.first] = store->printStorePath(outPath);
+        for (auto & output : drv.outputs) {
+            auto outPath = store->makeOutputPath(output.first, h, drv.name);
+            output.second = { .output = DerivationOutputInputAddressed { .path = outPath } };
+            drv.env[output.first] = store->printStorePath(outPath);
+        }
     }
 
     auto shellDrvPath = writeDerivation(*store, drv);
 
     /* Build the derivation. */
-    store->buildPaths({{shellDrvPath}});
+    store->buildPaths({DerivedPath::Built{shellDrvPath}});
 
-    for (auto & [_0, outputAndOptPath] : drv.outputsAndOptPaths(*store)) {
-        auto & [_1, optPath] = outputAndOptPath;
+    for (auto & [_0, optPath] : store->queryPartialDerivationOutputMap(shellDrvPath)) {
         assert(optPath);
         auto & outPath = *optPath;
         assert(store->isValidPath(outPath));
@@ -184,6 +193,7 @@ struct Common : InstallableCommand, MixProfile
         "NIX_BUILD_TOP",
         "NIX_ENFORCE_PURITY",
         "NIX_LOG_FD",
+        "NIX_REMOTE",
         "PPID",
         "PWD",
         "SHELLOPTS",
@@ -264,9 +274,9 @@ struct Common : InstallableCommand, MixProfile
         for (auto & [installable_, dir_] : redirects) {
             auto dir = absPath(dir_);
             auto installable = parseInstallable(store, installable_);
-            auto buildable = installable->toBuildable();
-            auto doRedirect = [&](const StorePath & path)
-            {
+            auto builtPaths = toStorePaths(
+                store, Realise::Nothing, OperateOn::Output, {installable});
+            for (auto & path: builtPaths) {
                 auto from = store->printStorePath(path);
                 if (script.find(from) == std::string::npos)
                     warn("'%s' (path '%s') is not used by this build environment", installable->what(), from);
@@ -274,16 +284,7 @@ struct Common : InstallableCommand, MixProfile
                     printInfo("redirecting '%s' to '%s'", from, dir);
                     rewrites.insert({from, dir});
                 }
-            };
-            std::visit(overloaded {
-                [&](const BuildableOpaque & bo) {
-                    doRedirect(bo.path);
-                },
-                [&](const BuildableFromDrv & bfd) {
-                    for (auto & [outputName, path] : bfd.outputs)
-                        if (path) doRedirect(*path);
-                },
-            }, buildable);
+            }
         }
 
         return rewriteStrings(script, rewrites);
@@ -403,7 +404,7 @@ struct CmdDevelop : Common, MixEnvironment
         if (verbosity >= lvlDebug)
             script += "set -x\n";
 
-        script += fmt("rm -f '%s'\n", rcFilePath);
+        script += fmt("command rm -f '%s'\n", rcFilePath);
 
         if (phase) {
             if (!command.empty())
@@ -422,7 +423,7 @@ struct CmdDevelop : Common, MixEnvironment
         }
 
         else {
-            script += "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;\n";
+            script = "[ -n \"$PS1\" ] && [ -e ~/.bashrc ] && source ~/.bashrc;\n" + script;
             if (developSettings.bashPrompt != "")
                 script += fmt("[ -n \"$PS1\" ] && PS1=%s;\n", shellEscape(developSettings.bashPrompt));
             if (developSettings.bashPromptSuffix != "")
@@ -461,8 +462,7 @@ struct CmdDevelop : Common, MixEnvironment
         auto args = phase || !command.empty() ? Strings{std::string(baseNameOf(shell)), rcFilePath}
             : Strings{std::string(baseNameOf(shell)), "--rcfile", rcFilePath};
 
-        restoreAffinity();
-        restoreSignals();
+        restoreProcessContext();
 
         execvp(shell.c_str(), stringsToCharPtrs(args).data());
 
diff --git a/src/nix/edit.cc b/src/nix/edit.cc
index 6472dd27a092a31f8d87ac5f97e8fd5bff73fada..b26417b18c12fc61ba94930e5dc48ffbe0cb40fe 100644
--- a/src/nix/edit.cc
+++ b/src/nix/edit.cc
@@ -42,7 +42,8 @@ struct CmdEdit : InstallableCommand
 
         auto args = editorFor(pos);
 
-        restoreSignals();
+        restoreProcessContext();
+
         execvp(args.front().c_str(), stringsToCharPtrs(args).data());
 
         std::string command;
diff --git a/src/nix/flake-check.md b/src/nix/flake-check.md
index dc079ba0ca96f9fd348e883e0f6be29f74480c29..8ef93295421b0cc28e75a8a0c05b0638ac5f764f 100644
--- a/src/nix/flake-check.md
+++ b/src/nix/flake-check.md
@@ -22,9 +22,13 @@ This command verifies that the flake specified by flake reference
 that the derivations specified by the flake's `checks` output can be
 built successfully.
 
+If the `keep-going` option is set to `true`, Nix will keep evaluating as much
+as it can and report the errors as it encounters them. Otherwise it will stop
+at the first error.
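+
+For example, assuming the current directory contains a flake, all of its
+checks can be evaluated without stopping at the first failure (illustrative
+invocation):
+
+```console
+# nix flake check --keep-going
+```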
+
 # Evaluation checks
 
-This following flake output attributes must be derivations:
+The following flake output attributes must be derivations:
 
 * `checks.`*system*`.`*name*
 * `defaultPackage.`*system*
diff --git a/src/nix/flake-init.md b/src/nix/flake-init.md
index c66154ad55c4b8e0a5b0e1069bb1c65f6c8d3770..890038016f135d089c55e68a1740f0ee9bb1873f 100644
--- a/src/nix/flake-init.md
+++ b/src/nix/flake-init.md
@@ -24,7 +24,7 @@ R""(
 
 This command creates a flake in the current directory by copying the
 files of a template. It will not overwrite existing files. The default
-template is `templates#defaultTemplate`, but this can be overriden
+template is `templates#defaultTemplate`, but this can be overridden
 using `-t`.
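+
+For example, to initialise a flake from an explicitly chosen template (the
+template name below is only illustrative):
+
+```console
+# nix flake init -t templates#trivial
+```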
 
 # Template definitions
diff --git a/src/nix/flake-list-inputs.md b/src/nix/flake-list-inputs.md
deleted file mode 100644
index 250e13be01a58984fbca1eb42412118bed657334..0000000000000000000000000000000000000000
--- a/src/nix/flake-list-inputs.md
+++ /dev/null
@@ -1,23 +0,0 @@
-R""(
-
-# Examples
-
-* Show the inputs of the `hydra` flake:
-
-  ```console
-  # nix flake list-inputs github:NixOS/hydra
-  github:NixOS/hydra/bde8d81876dfc02143e5070e42c78d8f0d83d6f7
-  ├───nix: github:NixOS/nix/79aa7d95183cbe6c0d786965f0dbff414fd1aa67
-  │   ├───lowdown-src: github:kristapsdz/lowdown/1705b4a26fbf065d9574dce47a94e8c7c79e052f
-  │   └───nixpkgs: github:NixOS/nixpkgs/ad0d20345219790533ebe06571f82ed6b034db31
-  └───nixpkgs follows input 'nix/nixpkgs'
-  ```
-
-# Description
-
-This command shows the inputs of the flake specified by the flake
-referenced *flake-url*. Since it prints the locked inputs that result
-from generating or updating the lock file, this command essentially
-displays the contents of the flake's lock file in human-readable form.
-
-)""
diff --git a/src/nix/flake-info.md b/src/nix/flake-metadata.md
similarity index 75%
rename from src/nix/flake-info.md
rename to src/nix/flake-metadata.md
index fda3171db12e5da15f039b4e27a8eae4344b538b..5a009409b5c61119b921d85c3047b9d70c6fb38f 100644
--- a/src/nix/flake-info.md
+++ b/src/nix/flake-metadata.md
@@ -5,19 +5,24 @@ R""(
 * Show what `nixpkgs` resolves to:
 
   ```console
-  # nix flake info nixpkgs
-  Resolved URL:  github:NixOS/nixpkgs
-  Locked URL:    github:NixOS/nixpkgs/b67ba0bfcc714453cdeb8d713e35751eb8b4c8f4
-  Description:   A collection of packages for the Nix package manager
-  Path:          /nix/store/23qapccs6cfmwwrlq8kr41vz5vdmns3r-source
-  Revision:      b67ba0bfcc714453cdeb8d713e35751eb8b4c8f4
-  Last modified: 2020-12-23 12:36:12
+  # nix flake metadata nixpkgs
+  Resolved URL:  github:edolstra/dwarffs
+  Locked URL:    github:edolstra/dwarffs/f691e2c991e75edb22836f1dbe632c40324215c5
+  Description:   A filesystem that fetches DWARF debug info from the Internet on demand
+  Path:          /nix/store/769s05vjydmc2lcf6b02az28wsa9ixh1-source
+  Revision:      f691e2c991e75edb22836f1dbe632c40324215c5
+  Last modified: 2021-01-21 15:41:26
+  Inputs:
+  ├───nix: github:NixOS/nix/6254b1f5d298ff73127d7b0f0da48f142bdc753c
+  │   ├───lowdown-src: github:kristapsdz/lowdown/1705b4a26fbf065d9574dce47a94e8c7c79e052f
+  │   └───nixpkgs: github:NixOS/nixpkgs/ad0d20345219790533ebe06571f82ed6b034db31
+  └───nixpkgs follows input 'nix/nixpkgs'
   ```
 
 * Show information about `dwarffs` in JSON format:
 
   ```console
-  # nix flake info dwarffs --json | jq .
+  # nix flake metadata dwarffs --json | jq .
   {
     "description": "A filesystem that fetches DWARF debug info from the Internet on demand",
     "lastModified": 1597153508,
@@ -29,6 +34,7 @@ R""(
       "rev": "d181d714fd36eb06f4992a1997cd5601e26db8f5",
       "type": "github"
     },
+    "locks": { ... },
     "original": {
       "id": "dwarffs",
       "type": "indirect"
@@ -75,6 +81,9 @@ data. This includes:
   time of the commit of the locked flake; for tarball flakes, it's the
   most recent timestamp of any file inside the tarball.
 
+* `Inputs`: The flake inputs with their corresponding lock file
+  entries.
+
 With `--json`, the output is a JSON object with the following fields:
 
 * `original` and `originalUrl`: The flake reference specified by the
@@ -96,4 +105,6 @@ With `--json`, the output is a JSON object with the following fields:
 
 * `lastModified`: See `Last modified` above.
 
+* `locks`: The contents of `flake.lock`.
+
 )""
diff --git a/src/nix/flake.cc b/src/nix/flake.cc
index 2f0c468a89544d0f7579c61cf1d000256bf1d649..64fcfc0005ce67bd2bd56f56080723bd5a7af82d 100644
--- a/src/nix/flake.cc
+++ b/src/nix/flake.cc
@@ -7,6 +7,7 @@
 #include "get-drvs.hh"
 #include "store-api.hh"
 #include "derivations.hh"
+#include "path-with-outputs.hh"
 #include "attr-path.hh"
 #include "fetchers.hh"
 #include "registry.hh"
@@ -43,12 +44,6 @@ public:
         return parseFlakeRef(flakeUrl, absPath(".")); //FIXME
     }
 
-    Flake getFlake()
-    {
-        auto evalState = getEvalState();
-        return flake::getFlake(*evalState, getFlakeRef(), lockFlags.useRegistries);
-    }
-
     LockedFlake lockFlake()
     {
         return flake::lockFlake(*getEvalState(), getFlakeRef(), lockFlags);
@@ -60,43 +55,6 @@ public:
     }
 };
 
-static void printFlakeInfo(const Store & store, const Flake & flake)
-{
-    logger->cout("Resolved URL:  %s", flake.resolvedRef.to_string());
-    logger->cout("Locked URL:    %s", flake.lockedRef.to_string());
-    if (flake.description)
-        logger->cout("Description:   %s", *flake.description);
-    logger->cout("Path:          %s", store.printStorePath(flake.sourceInfo->storePath));
-    if (auto rev = flake.lockedRef.input.getRev())
-        logger->cout("Revision:      %s", rev->to_string(Base16, false));
-    if (auto revCount = flake.lockedRef.input.getRevCount())
-        logger->cout("Revisions:     %s", *revCount);
-    if (auto lastModified = flake.lockedRef.input.getLastModified())
-        logger->cout("Last modified: %s",
-            std::put_time(std::localtime(&*lastModified), "%F %T"));
-}
-
-static nlohmann::json flakeToJSON(const Store & store, const Flake & flake)
-{
-    nlohmann::json j;
-    if (flake.description)
-        j["description"] = *flake.description;
-    j["originalUrl"] = flake.originalRef.to_string();
-    j["original"] = fetchers::attrsToJSON(flake.originalRef.toAttrs());
-    j["resolvedUrl"] = flake.resolvedRef.to_string();
-    j["resolved"] = fetchers::attrsToJSON(flake.resolvedRef.toAttrs());
-    j["url"] = flake.lockedRef.to_string(); // FIXME: rename to lockedUrl
-    j["locked"] = fetchers::attrsToJSON(flake.lockedRef.toAttrs());
-    if (auto rev = flake.lockedRef.input.getRev())
-        j["revision"] = rev->to_string(Base16, false);
-    if (auto revCount = flake.lockedRef.input.getRevCount())
-        j["revCount"] = *revCount;
-    if (auto lastModified = flake.lockedRef.input.getLastModified())
-        j["lastModified"] = *lastModified;
-    j["path"] = store.printStorePath(flake.sourceInfo->storePath);
-    return j;
-}
-
 struct CmdFlakeUpdate : FlakeCommand
 {
     std::string description() override
@@ -110,6 +68,7 @@ struct CmdFlakeUpdate : FlakeCommand
         removeFlag("recreate-lock-file");
         removeFlag("update-input");
         removeFlag("no-update-lock-file");
+        removeFlag("no-write-lock-file");
     }
 
     std::string doc() override
@@ -124,6 +83,7 @@ struct CmdFlakeUpdate : FlakeCommand
         settings.tarballTtl = 0;
 
         lockFlags.recreateLockFile = true;
+        lockFlags.writeLockFile = true;
 
         lockFlake();
     }
@@ -136,6 +96,12 @@ struct CmdFlakeLock : FlakeCommand
         return "create missing lock file entries";
     }
 
+    CmdFlakeLock()
+    {
+        /* Remove flags that don't make sense. */
+        removeFlag("no-write-lock-file");
+    }
+
     std::string doc() override
     {
         return
@@ -147,6 +113,8 @@ struct CmdFlakeLock : FlakeCommand
     {
         settings.tarballTtl = 0;
 
+        lockFlags.writeLockFile = true;
+
         lockFlake();
     }
 };
@@ -165,54 +133,72 @@ static void enumerateOutputs(EvalState & state, Value & vFlake,
         callback(attr.name, *attr.value, *attr.pos);
 }
 
-struct CmdFlakeInfo : FlakeCommand, MixJSON
+struct CmdFlakeMetadata : FlakeCommand, MixJSON
 {
     std::string description() override
     {
-        return "list info about a given flake";
+        return "show flake metadata";
     }
 
     std::string doc() override
     {
         return
-          #include "flake-info.md"
+          #include "flake-metadata.md"
           ;
     }
 
     void run(nix::ref<nix::Store> store) override
     {
-        auto flake = getFlake();
+        auto lockedFlake = lockFlake();
+        auto & flake = lockedFlake.flake;
 
         if (json) {
-            auto json = flakeToJSON(*store, flake);
-            logger->cout("%s", json.dump());
-        } else
-            printFlakeInfo(*store, flake);
-    }
-};
-
-struct CmdFlakeListInputs : FlakeCommand, MixJSON
-{
-    std::string description() override
-    {
-        return "list flake inputs";
-    }
-
-    std::string doc() override
-    {
-        return
-          #include "flake-list-inputs.md"
-          ;
-    }
-
-    void run(nix::ref<nix::Store> store) override
-    {
-        auto flake = lockFlake();
-
-        if (json)
-            logger->cout("%s", flake.lockFile.toJSON());
-        else {
-            logger->cout("%s", flake.flake.lockedRef);
+            nlohmann::json j;
+            if (flake.description)
+                j["description"] = *flake.description;
+            j["originalUrl"] = flake.originalRef.to_string();
+            j["original"] = fetchers::attrsToJSON(flake.originalRef.toAttrs());
+            j["resolvedUrl"] = flake.resolvedRef.to_string();
+            j["resolved"] = fetchers::attrsToJSON(flake.resolvedRef.toAttrs());
+            j["url"] = flake.lockedRef.to_string(); // FIXME: rename to lockedUrl
+            j["locked"] = fetchers::attrsToJSON(flake.lockedRef.toAttrs());
+            if (auto rev = flake.lockedRef.input.getRev())
+                j["revision"] = rev->to_string(Base16, false);
+            if (auto revCount = flake.lockedRef.input.getRevCount())
+                j["revCount"] = *revCount;
+            if (auto lastModified = flake.lockedRef.input.getLastModified())
+                j["lastModified"] = *lastModified;
+            j["path"] = store->printStorePath(flake.sourceInfo->storePath);
+            j["locks"] = lockedFlake.lockFile.toJSON();
+            logger->cout("%s", j.dump());
+        } else {
+            logger->cout(
+                ANSI_BOLD "Resolved URL:" ANSI_NORMAL "  %s",
+                flake.resolvedRef.to_string());
+            logger->cout(
+                ANSI_BOLD "Locked URL:" ANSI_NORMAL "    %s",
+                flake.lockedRef.to_string());
+            if (flake.description)
+                logger->cout(
+                    ANSI_BOLD "Description:" ANSI_NORMAL "   %s",
+                    *flake.description);
+            logger->cout(
+                ANSI_BOLD "Path:" ANSI_NORMAL "          %s",
+                store->printStorePath(flake.sourceInfo->storePath));
+            if (auto rev = flake.lockedRef.input.getRev())
+                logger->cout(
+                    ANSI_BOLD "Revision:" ANSI_NORMAL "      %s",
+                    rev->to_string(Base16, false));
+            if (auto revCount = flake.lockedRef.input.getRevCount())
+                logger->cout(
+                    ANSI_BOLD "Revisions:" ANSI_NORMAL "     %s",
+                    *revCount);
+            if (auto lastModified = flake.lockedRef.input.getLastModified())
+                logger->cout(
+                    ANSI_BOLD "Last modified:" ANSI_NORMAL " %s",
+                    std::put_time(std::localtime(&*lastModified), "%F %T"));
+
+            logger->cout(ANSI_BOLD "Inputs:" ANSI_NORMAL);
 
             std::unordered_set<std::shared_ptr<Node>> visited;
 
@@ -226,7 +212,7 @@ struct CmdFlakeListInputs : FlakeCommand, MixJSON
                     if (auto lockedNode = std::get_if<0>(&input.second)) {
                         logger->cout("%s" ANSI_BOLD "%s" ANSI_NORMAL ": %s",
                             prefix + (last ? treeLast : treeConn), input.first,
-                            *lockedNode ? (*lockedNode)->lockedRef : flake.flake.lockedRef);
+                            *lockedNode ? (*lockedNode)->lockedRef : flake.lockedRef);
 
                         bool firstVisit = visited.insert(*lockedNode).second;
 
@@ -239,12 +225,21 @@ struct CmdFlakeListInputs : FlakeCommand, MixJSON
                 }
             };
 
-            visited.insert(flake.lockFile.root);
-            recurse(*flake.lockFile.root, "");
+            visited.insert(lockedFlake.lockFile.root);
+            recurse(*lockedFlake.lockFile.root, "");
         }
     }
 };
 
+struct CmdFlakeInfo : CmdFlakeMetadata
+{
+    void run(nix::ref<nix::Store> store) override
+    {
+        warn("'nix flake info' is a deprecated alias for 'nix flake metadata'");
+        CmdFlakeMetadata::run(store);
+    }
+};
+
 struct CmdFlakeCheck : FlakeCommand
 {
     bool build = true;
@@ -277,28 +272,43 @@ struct CmdFlakeCheck : FlakeCommand
         auto state = getEvalState();
         auto flake = lockFlake();
 
+        bool hasErrors = false;
+        auto reportError = [&](const Error & e) {
+            try {
+                throw e;
+            } catch (Error & e) {
+                if (settings.keepGoing) {
+                    ignoreException();
+                    hasErrors = true;
+                }
+                else
+                    throw;
+            }
+        };
+
         // FIXME: rewrite to use EvalCache.
 
         auto checkSystemName = [&](const std::string & system, const Pos & pos) {
             // FIXME: what's the format of "system"?
             if (system.find('-') == std::string::npos)
-                throw Error("'%s' is not a valid system type, at %s", system, pos);
+                reportError(Error("'%s' is not a valid system type, at %s", system, pos));
         };
 
-        auto checkDerivation = [&](const std::string & attrPath, Value & v, const Pos & pos) {
+        auto checkDerivation = [&](const std::string & attrPath, Value & v, const Pos & pos) -> std::optional<StorePath> {
             try {
                 auto drvInfo = getDerivation(*state, v, false);
                 if (!drvInfo)
                     throw Error("flake attribute '%s' is not a derivation", attrPath);
                 // FIXME: check meta attributes
-                return store->parseStorePath(drvInfo->queryDrvPath());
+                return std::make_optional(store->parseStorePath(drvInfo->queryDrvPath()));
             } catch (Error & e) {
                 e.addTrace(pos, hintfmt("while checking the derivation '%s'", attrPath));
-                throw;
+                reportError(e);
             }
+            return std::nullopt;
         };
 
-        std::vector<StorePathWithOutputs> drvPaths;
+        std::vector<DerivedPath> drvPaths;
 
         auto checkApp = [&](const std::string & attrPath, Value & v, const Pos & pos) {
             try {
@@ -312,7 +322,7 @@ struct CmdFlakeCheck : FlakeCommand
                 #endif
             } catch (Error & e) {
                 e.addTrace(pos, hintfmt("while checking the app definition '%s'", attrPath));
-                throw;
+                reportError(e);
             }
         };
 
@@ -328,7 +338,7 @@ struct CmdFlakeCheck : FlakeCommand
                 // evaluate the overlay.
             } catch (Error & e) {
                 e.addTrace(pos, hintfmt("while checking the overlay '%s'", attrPath));
-                throw;
+                reportError(e);
             }
         };
 
@@ -352,7 +362,7 @@ struct CmdFlakeCheck : FlakeCommand
                 // check the module.
             } catch (Error & e) {
                 e.addTrace(pos, hintfmt("while checking the NixOS module '%s'", attrPath));
-                throw;
+                reportError(e);
             }
         };
 
@@ -374,7 +384,7 @@ struct CmdFlakeCheck : FlakeCommand
 
             } catch (Error & e) {
                 e.addTrace(pos, hintfmt("while checking the Hydra jobset '%s'", attrPath));
-                throw;
+                reportError(e);
             }
         };
 
@@ -389,7 +399,7 @@ struct CmdFlakeCheck : FlakeCommand
                     throw Error("attribute 'config.system.build.toplevel' is not a derivation");
             } catch (Error & e) {
                 e.addTrace(pos, hintfmt("while checking the NixOS configuration '%s'", attrPath));
-                throw;
+                reportError(e);
             }
         };
 
@@ -423,7 +433,7 @@ struct CmdFlakeCheck : FlakeCommand
                 }
             } catch (Error & e) {
                 e.addTrace(pos, hintfmt("while checking the template '%s'", attrPath));
-                throw;
+                reportError(e);
             }
         };
 
@@ -438,7 +448,7 @@ struct CmdFlakeCheck : FlakeCommand
                     throw Error("bundler must take formal arguments 'program' and 'system'");
             } catch (Error & e) {
                 e.addTrace(pos, hintfmt("while checking the template '%s'", attrPath));
-                throw;
+                reportError(e);
             }
         };
 
@@ -466,8 +476,8 @@ struct CmdFlakeCheck : FlakeCommand
                                     auto drvPath = checkDerivation(
                                         fmt("%s.%s.%s", name, attr.name, attr2.name),
                                         *attr2.value, *attr2.pos);
-                                    if ((std::string) attr.name == settings.thisSystem.get())
-                                        drvPaths.push_back({drvPath});
+                                    if (drvPath && (std::string) attr.name == settings.thisSystem.get())
+                                        drvPaths.push_back(DerivedPath::Built{*drvPath});
                                 }
                             }
                         }
@@ -579,7 +589,7 @@ struct CmdFlakeCheck : FlakeCommand
 
                     } catch (Error & e) {
                         e.addTrace(pos, hintfmt("while checking flake output '%s'", name));
-                        throw;
+                        reportError(e);
                     }
                 });
         }
@@ -588,6 +598,8 @@ struct CmdFlakeCheck : FlakeCommand
             Activity act(*logger, lvlInfo, actUnknown, "running flake checks");
             store->buildPaths(drvPaths);
         }
+        if (hasErrors)
+            throw Error("Some errors were encountered during the evaluation");
     }
 };
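
> The `reportError` helper above downgrades hard failures to recorded ones when keep-going is enabled (`settings.keepGoing`), and the final `hasErrors` check turns them back into a single error at the end. A minimal standalone sketch of that pattern, with illustrative names rather than Nix's API:

```cpp
#include <functional>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Run every check; with keepGoing, report failures and continue,
// then fail once at the end if anything went wrong.
void runChecks(const std::vector<std::function<void()>> & checks, bool keepGoing)
{
    bool hasErrors = false;
    for (auto & check : checks) {
        try {
            check();
        } catch (const std::exception & e) {
            if (!keepGoing) throw;
            std::cerr << "error (ignored): " << e.what() << "\n";
            hasErrors = true;
        }
    }
    if (hasErrors)
        throw std::runtime_error("some checks failed");
}

int main()
{
    try {
        runChecks({
            [] { /* ok */ },
            [] { throw std::runtime_error("bad derivation"); },
            [] { /* ok */ },
        }, /* keepGoing */ true);
    } catch (const std::exception & e) {
        std::cerr << "final: " << e.what() << "\n";  // summary error, as in CmdFlakeCheck
        return 1;
    }
}
```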
 
@@ -1038,8 +1050,8 @@ struct CmdFlake : NixMultiCommand
         : MultiCommand({
                 {"update", []() { return make_ref<CmdFlakeUpdate>(); }},
                 {"lock", []() { return make_ref<CmdFlakeLock>(); }},
+                {"metadata", []() { return make_ref<CmdFlakeMetadata>(); }},
                 {"info", []() { return make_ref<CmdFlakeInfo>(); }},
-                {"list-inputs", []() { return make_ref<CmdFlakeListInputs>(); }},
                 {"check", []() { return make_ref<CmdFlakeCheck>(); }},
                 {"init", []() { return make_ref<CmdFlakeInit>(); }},
                 {"new", []() { return make_ref<CmdFlakeNew>(); }},
diff --git a/src/nix/flake.md b/src/nix/flake.md
index 440c45dd13b246b7f2aa43da19b84bdbd84f03c8..3d273100b6203068ac430b5a4cf01171c2d0963e 100644
--- a/src/nix/flake.md
+++ b/src/nix/flake.md
@@ -70,7 +70,7 @@ Here are some examples of flake references in their URL-like representation:
 * `/home/alice/src/patchelf`: A flake in some other directory.
 * `nixpkgs`: The `nixpkgs` entry in the flake registry.
 * `nixpkgs/a3a3dda3bacf61e8a39258a0ed9c924eeca8e293`: The `nixpkgs`
-  entry in the flake registry, with its Git revision overriden to a
+  entry in the flake registry, with its Git revision overridden to a
   specific value.
 * `github:NixOS/nixpkgs`: The `master` branch of the `NixOS/nixpkgs`
   repository on GitHub.
@@ -186,8 +186,8 @@ Currently the `type` attribute can be one of the following:
   attribute `url`.
 
   In URL form, the schema must be `http://`, `https://` or `file://`
-  URLs and the extension must be `.zip`, `.tar`, `.tar.gz`, `.tar.xz`
-  or `.tar.bz2`.
+  URLs and the extension must be `.zip`, `.tar`, `.tar.gz`, `.tar.xz`,
+  `.tar.bz2` or `.tar.zst`.
 
 * `github`: A more efficient way to fetch repositories from
   GitHub. The following attributes are required:
@@ -377,7 +377,7 @@ outputs = { self, nixpkgs, grcov }: {
 };
 ```
 
-Transitive inputs can be overriden from a `flake.nix` file. For
+Transitive inputs can be overridden from a `flake.nix` file. For
 example, the following overrides the `nixpkgs` input of the `nixops`
 input:
 
@@ -395,7 +395,7 @@ the `nixpkgs` input of the top-level flake to be equal to the
 `nixpkgs` input of the `dwarffs` input of the top-level flake:
 
 ```nix
-inputs.nixops.follows = "dwarffs/nixpkgs";
+inputs.nixpkgs.follows = "dwarffs/nixpkgs";
 ```
 
 The value of the `follows` attribute is a `/`-separated sequence of
diff --git a/src/nix/log.cc b/src/nix/log.cc
index 67d3742d6565b25a3a5a93046107f69b3a56592e..962c47525ce0b4089ffad5a4fbe1ff316425ac4e 100644
--- a/src/nix/log.cc
+++ b/src/nix/log.cc
@@ -30,18 +30,18 @@ struct CmdLog : InstallableCommand
 
         subs.push_front(store);
 
-        auto b = installable->toBuildable();
+        auto b = installable->toDerivedPath();
 
         RunPager pager;
         for (auto & sub : subs) {
             auto log = std::visit(overloaded {
-                [&](BuildableOpaque bo) {
+                [&](DerivedPath::Opaque bo) {
                     return sub->getBuildLog(bo.path);
                 },
-                [&](BuildableFromDrv bfd) {
+                [&](DerivedPath::Built bfd) {
                     return sub->getBuildLog(bfd.drvPath);
                 },
-            }, b);
+            }, b.raw());
             if (!log) continue;
             stopProgressBar();
             printInfo("got build log for '%s' from '%s'", installable->what(), sub->getUri());
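
> The log command above visits the new `DerivedPath` via `std::visit` with the `overloaded { ... }` helper. A self-contained sketch of that C++17 idiom over a plain `std::variant`; the `Opaque`/`Built` structs here are simplified stand-ins, not Nix's types:

```cpp
#include <iostream>
#include <set>
#include <string>
#include <variant>

// The usual "overloaded" helper: inherit the call operators of all lambdas.
template<class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

struct Opaque { std::string path; };                                    // a plain store path
struct Built  { std::string drvPath; std::set<std::string> outputs; };  // a derivation plus outputs

using Request = std::variant<Opaque, Built>;

int main()
{
    Request r = Built{"/nix/store/xxxxxxxx-hello.drv", {"out"}};  // made-up example path

    std::visit(overloaded {
        [](const Opaque & o) { std::cout << "opaque: " << o.path    << "\n"; },
        [](const Built  & b) { std::cout << "built: "  << b.drvPath << "\n"; },
    }, r);
}
```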
diff --git a/src/nix/main.cc b/src/nix/main.cc
index 06e221682ae232d5a96eae81dd8401738c28e90c..008482be3299b91136d95b58048804f105b63b06 100644
--- a/src/nix/main.cc
+++ b/src/nix/main.cc
@@ -17,10 +17,6 @@
 #include <netdb.h>
 #include <netinet/in.h>
 
-#if __linux__
-#include <sys/resource.h>
-#endif
-
 #include <nlohmann/json.hpp>
 
 extern std::string chrootHelperName;
@@ -309,13 +305,13 @@ void mainWrapped(int argc, char * * argv)
 
     if (!args.useNet) {
         // FIXME: should check for command line overrides only.
-        if (!settings.useSubstitutes.overriden)
+        if (!settings.useSubstitutes.overridden)
             settings.useSubstitutes = false;
-        if (!settings.tarballTtl.overriden)
+        if (!settings.tarballTtl.overridden)
             settings.tarballTtl = std::numeric_limits<unsigned int>::max();
-        if (!fileTransferSettings.tries.overriden)
+        if (!fileTransferSettings.tries.overridden)
             fileTransferSettings.tries = 0;
-        if (!fileTransferSettings.connectTimeout.overriden)
+        if (!fileTransferSettings.connectTimeout.overridden)
             fileTransferSettings.connectTimeout = 1;
     }
 
@@ -335,14 +331,7 @@ int main(int argc, char * * argv)
 {
     // Increase the default stack size for the evaluator and for
     // libstdc++'s std::regex.
-    #if __linux__
-    rlim_t stackSize = 64 * 1024 * 1024;
-    struct rlimit limit;
-    if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur < stackSize) {
-        limit.rlim_cur = stackSize;
-        setrlimit(RLIMIT_STACK, &limit);
-    }
-    #endif
+    nix::setStackSize(64 * 1024 * 1024);
 
     return nix::handleExceptions(argv[0], [&]() {
         nix::mainWrapped(argc, argv);
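
> The inline `getrlimit`/`setrlimit` block removed above is replaced by a call to `nix::setStackSize`, which presumably wraps much the same logic in a shared helper. A sketch of such a function, reconstructed from the removed lines (Linux-only, as before):

```cpp
#include <cstddef>
#if __linux__
#include <sys/resource.h>
#endif

// Raise the soft stack limit to at least `stackSize` bytes if it is lower,
// mirroring the rlimit code that previously lived directly in main().
void setStackSize(std::size_t stackSize)
{
#if __linux__
    rlim_t desired = stackSize;
    struct rlimit limit;
    if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur < desired) {
        limit.rlim_cur = desired;
        setrlimit(RLIMIT_STACK, &limit);
    }
#else
    (void) stackSize;  // no-op elsewhere in this sketch
#endif
}

int main()
{
    setStackSize(64 * 1024 * 1024);  // same 64 MiB value as in the diff
}
```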
diff --git a/src/nix/profile-upgrade.md b/src/nix/profile-upgrade.md
index 2bd5d256d6e852358e3a53ade8967da7eb048933..e06e74abee8f54e0955cf51769ec6ba2ec761252 100644
--- a/src/nix/profile-upgrade.md
+++ b/src/nix/profile-upgrade.md
@@ -18,7 +18,7 @@ R""(
 * Upgrade a specific profile element by number:
 
   ```console
-  # nix profile info
+  # nix profile list
   0 flake:nixpkgs#legacyPackages.x86_64-linux.spotify …
 
   # nix profile upgrade 0
diff --git a/src/nix/profile.cc b/src/nix/profile.cc
index 4d275f577c68f8682571367da641c7c628384b55..511771f89e531ca092f298e46e0f4d555d8c7866 100644
--- a/src/nix/profile.cc
+++ b/src/nix/profile.cc
@@ -233,7 +233,7 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile
     {
         ProfileManifest manifest(*getEvalState(), *profile);
 
-        std::vector<StorePathWithOutputs> pathsToBuild;
+        std::vector<DerivedPath> pathsToBuild;
 
         for (auto & installable : installables) {
             if (auto installable2 = std::dynamic_pointer_cast<InstallableFlake>(installable)) {
@@ -249,7 +249,7 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile
                     attrPath,
                 };
 
-                pathsToBuild.push_back({drv.drvPath, StringSet{drv.outputName}});
+                pathsToBuild.push_back(DerivedPath::Built{drv.drvPath, StringSet{drv.outputName}});
 
                 manifest.elements.emplace_back(std::move(element));
             } else {
@@ -259,17 +259,20 @@ struct CmdProfileInstall : InstallablesCommand, MixDefaultProfile
                     ProfileElement element;
 
                     std::visit(overloaded {
-                        [&](BuildableOpaque bo) {
-                            pathsToBuild.push_back({bo.path, {}});
+                        [&](BuiltPath::Opaque bo) {
+                            pathsToBuild.push_back(bo);
                             element.storePaths.insert(bo.path);
                         },
-                        [&](BuildableFromDrv bfd) {
+                        [&](BuiltPath::Built bfd) {
+                            // TODO: Why are we querying if we know the output
+                            // names already? Is it just to figure out what the
+                            // default one is?
                             for (auto & output : store->queryDerivationOutputMap(bfd.drvPath)) {
-                                pathsToBuild.push_back({bfd.drvPath, {output.first}});
+                                pathsToBuild.push_back(DerivedPath::Built{bfd.drvPath, {output.first}});
                                 element.storePaths.insert(output.second);
                             }
                         },
-                    }, buildable);
+                    }, buildable.raw());
 
                     manifest.elements.emplace_back(std::move(element));
                 }
@@ -388,7 +391,7 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf
         auto matchers = getMatchers(store);
 
         // FIXME: code duplication
-        std::vector<StorePathWithOutputs> pathsToBuild;
+        std::vector<DerivedPath> pathsToBuild;
 
         for (size_t i = 0; i < manifest.elements.size(); ++i) {
             auto & element(manifest.elements[i]);
@@ -423,7 +426,7 @@ struct CmdProfileUpgrade : virtual SourceExprCommand, MixDefaultProfile, MixProf
                     attrPath,
                 };
 
-                pathsToBuild.push_back({drv.drvPath, StringSet{"out"}}); // FIXME
+                pathsToBuild.push_back(DerivedPath::Built{drv.drvPath, {drv.outputName}});
             }
         }
 
diff --git a/src/nix/realisation.cc b/src/nix/realisation.cc
new file mode 100644
index 0000000000000000000000000000000000000000..d59e594df812702651d199b10f537f9db9cb1168
--- /dev/null
+++ b/src/nix/realisation.cc
@@ -0,0 +1,85 @@
+#include "command.hh"
+#include "common-args.hh"
+
+#include <nlohmann/json.hpp>
+
+using namespace nix;
+
+struct CmdRealisation : virtual NixMultiCommand
+{
+    CmdRealisation() : MultiCommand(RegisterCommand::getCommandsFor({"realisation"}))
+    { }
+
+    std::string description() override
+    {
+        return "manipulate a Nix realisation";
+    }
+
+    Category category() override { return catUtility; }
+
+    void run() override
+    {
+        if (!command)
+            throw UsageError("'nix realisation' requires a sub-command.");
+        command->second->prepare();
+        command->second->run();
+    }
+};
+
+static auto rCmdRealisation = registerCommand<CmdRealisation>("realisation");
+
+struct CmdRealisationInfo : BuiltPathsCommand, MixJSON
+{
+    std::string description() override
+    {
+        return "query information about one or several realisations";
+    }
+
+    std::string doc() override
+    {
+        return
+            #include "realisation/info.md"
+            ;
+    }
+
+    Category category() override { return catSecondary; }
+
+    void run(ref<Store> store, BuiltPaths paths) override
+    {
+        settings.requireExperimentalFeature("ca-derivations");
+        RealisedPath::Set realisations;
+
+        for (auto & builtPath : paths) {
+            auto theseRealisations = builtPath.toRealisedPaths(*store);
+            realisations.insert(theseRealisations.begin(), theseRealisations.end());
+        }
+
+        if (json) {
+            nlohmann::json res = nlohmann::json::array();
+            for (auto & path : realisations) {
+                nlohmann::json currentPath;
+                if (auto realisation = std::get_if<Realisation>(&path.raw))
+                    currentPath = realisation->toJSON();
+                else
+                    currentPath["opaquePath"] = store->printStorePath(path.path());
+
+                res.push_back(currentPath);
+            }
+            std::cout << res.dump();
+        }
+        else {
+            for (auto & path : realisations) {
+                if (auto realisation = std::get_if<Realisation>(&path.raw)) {
+                    std::cout <<
+                        realisation->id.to_string() << " " <<
+                        store->printStorePath(realisation->outPath);
+                } else
+                    std::cout << store->printStorePath(path.path());
+
+                std::cout << std::endl;
+            }
+        }
+    }
+};
+
+static auto rCmdRealisationInfo = registerCommand2<CmdRealisationInfo>({"realisation", "info"});
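
> The `--json` branch of `CmdRealisationInfo` assembles an array of per-realisation objects with `nlohmann::json`. A minimal illustration of that construction pattern, using the `id`/`outPath` field names from the example in `realisation/info.md` below and placeholder values:

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json res = nlohmann::json::array();

    // One object per realisation, as in `nix realisation info --json`.
    nlohmann::json entry;
    entry["id"] = "sha256:...!out";            // placeholder realisation id
    entry["outPath"] = "/nix/store/...-hello"; // placeholder store path
    res.push_back(entry);

    std::cout << res.dump() << std::endl;
}
```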
diff --git a/src/nix/realisation/info.md b/src/nix/realisation/info.md
new file mode 100644
index 0000000000000000000000000000000000000000..852240f442f4c7d218c921d98df2f4ffcdf2d27a
--- /dev/null
+++ b/src/nix/realisation/info.md
@@ -0,0 +1,15 @@
+R"MdBoundary(
+# Description
+
+Display some information about the given realisation.
+
+# Examples
+
+Show some information about the realisation of the `hello` package:
+
+```console
+$ nix realisation info nixpkgs#hello --json
+[{"id":"sha256:3d382378a00588e064ee30be96dd0fa7e7df7cf3fbcace85a0e7b7dada1eef25!out","outPath":"fd3m7xawvrqcg98kgz5hc2vk3x9q0lh7-hello"}]
+```
+
+)MdBoundary"
diff --git a/src/nix/repl.cc b/src/nix/repl.cc
index bce8d31dc4630aabc21244343c1ebd3cf0eed8ab..eed79c33262d39484d8cd92e3d913d58b9330162 100644
--- a/src/nix/repl.cc
+++ b/src/nix/repl.cc
@@ -343,24 +343,6 @@ StringSet NixRepl::completePrefix(string prefix)
 }
 
 
-static int runProgram(const string & program, const Strings & args)
-{
-    Strings args2(args);
-    args2.push_front(program);
-
-    Pid pid;
-    pid = fork();
-    if (pid == -1) throw SysError("forking");
-    if (pid == 0) {
-        restoreAffinity();
-        execvp(program.c_str(), stringsToCharPtrs(args2).data());
-        _exit(1);
-    }
-
-    return pid.wait();
-}
-
-
 bool isVarName(const string & s)
 {
     if (s.size() == 0) return false;
@@ -462,7 +444,7 @@ bool NixRepl::processLine(string line)
         auto args = editorFor(pos);
         auto editor = args.front();
         args.pop_front();
-        runProgram(editor, args);
+        runProgram(editor, true, args);
 
         // Reload right after exiting the editor
         state->resetFileCache();
@@ -481,7 +463,7 @@ bool NixRepl::processLine(string line)
         state->callFunction(f, v, result, Pos());
 
         StorePath drvPath = getDerivationPath(result);
-        runProgram(settings.nixBinDir + "/nix-shell", Strings{state->store->printStorePath(drvPath)});
+        runProgram(settings.nixBinDir + "/nix-shell", true, {state->store->printStorePath(drvPath)});
     }
 
     else if (command == ":b" || command == ":i" || command == ":s") {
@@ -494,16 +476,18 @@ bool NixRepl::processLine(string line)
             /* We could do the build in this process using buildPaths(),
                but doing it in a child makes it easier to recover from
                problems / SIGINT. */
-            if (runProgram(settings.nixBinDir + "/nix", Strings{"build", "--no-link", drvPathRaw}) == 0) {
+            try {
+                runProgram(settings.nixBinDir + "/nix", true, {"build", "--no-link", drvPathRaw});
                 auto drv = state->store->readDerivation(drvPath);
                 std::cout << std::endl << "this derivation produced the following outputs:" << std::endl;
                 for (auto & i : drv.outputsAndOptPaths(*state->store))
                     std::cout << fmt("  %s -> %s\n", i.first, state->store->printStorePath(*i.second.second));
+            } catch (ExecError &) {
             }
         } else if (command == ":i") {
-            runProgram(settings.nixBinDir + "/nix-env", Strings{"-i", drvPathRaw});
+            runProgram(settings.nixBinDir + "/nix-env", true, {"-i", drvPathRaw});
         } else {
-            runProgram(settings.nixBinDir + "/nix-shell", Strings{drvPathRaw});
+            runProgram(settings.nixBinDir + "/nix-shell", true, {drvPathRaw});
         }
     }
 
diff --git a/src/nix/run.cc b/src/nix/run.cc
index ec93882341a05f42a347101d3ddd813bc14b88d9..c0ba05a3e82246c64597bc2ac8787525a9c3bd21 100644
--- a/src/nix/run.cc
+++ b/src/nix/run.cc
@@ -31,9 +31,7 @@ struct RunCommon : virtual Command
     {
         stopProgressBar();
 
-        restoreSignals();
-
-        restoreAffinity();
+        restoreProcessContext();
 
         /* If this is a diverted store (i.e. its "logical" location
            (typically /nix/store) differs from its "physical" location
@@ -45,8 +43,8 @@ struct RunCommon : virtual Command
            helper program (chrootHelper() below) to do the work. */
         auto store2 = store.dynamic_pointer_cast<LocalStore>();
 
-        if (store2 && store->storeDir != store2->realStoreDir) {
-            Strings helperArgs = { chrootHelperName, store->storeDir, store2->realStoreDir, program };
+        if (store2 && store->storeDir != store2->getRealStoreDir()) {
+            Strings helperArgs = { chrootHelperName, store->storeDir, store2->getRealStoreDir(), program };
             for (auto & arg : args) helperArgs.push_back(arg);
 
             execv(readLink("/proc/self/exe").c_str(), stringsToCharPtrs(helperArgs).data());
@@ -180,9 +178,7 @@ struct CmdRun : InstallableCommand, RunCommon
     {
         auto state = getEvalState();
 
-        auto app = installable->toApp(*state);
-
-        state->store->buildPaths(app.context);
+        auto app = installable->toApp(*state).resolve(store);
 
         Strings allArgs{app.program};
         for (auto & i : args) allArgs.push_back(i);
diff --git a/src/nix/store-prefetch-file.md b/src/nix/store-prefetch-file.md
index 1663b847b9187b465b3acf6c9972703aa6b8caf0..f9fdcbc57d0bd145365747c7c16cca6b2ac9255a 100644
--- a/src/nix/store-prefetch-file.md
+++ b/src/nix/store-prefetch-file.md
@@ -27,6 +27,6 @@ the resulting store path and the cryptographic hash of the contents of
 the file.
 
 The name component of the store path defaults to the last component of
-*url*, but this can be overriden using `--name`.
+*url*, but this can be overridden using `--name`.
 
 )""
diff --git a/src/nix/verify.cc b/src/nix/verify.cc
index 1721c7f166455d60ae2724dc86a774031c17c223..f5a5760644746e4f8ff8abff02eb211a4828d0e0 100644
--- a/src/nix/verify.cc
+++ b/src/nix/verify.cc
@@ -97,15 +97,11 @@ struct CmdVerify : StorePathsCommand
 
                 if (!noContents) {
 
-                    std::unique_ptr<AbstractHashSink> hashSink;
-                    if (!info->ca)
-                        hashSink = std::make_unique<HashSink>(info->narHash.type);
-                    else
-                        hashSink = std::make_unique<HashModuloSink>(info->narHash.type, std::string(info->path.hashPart()));
+                    auto hashSink = HashSink(info->narHash.type);
 
-                    store->narFromPath(info->path, *hashSink);
+                    store->narFromPath(info->path, hashSink);
 
-                    auto hash = hashSink->finish();
+                    auto hash = hashSink.finish();
 
                     if (hash.first != info->narHash) {
                         corrupted++;
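
> `CmdVerify` now always streams the NAR into a single `HashSink` and reads the digest with `finish()`, rather than picking a sink implementation at runtime. As a toy illustration of that streaming-hash pattern only (a tiny FNV-1a hasher, not Nix's `HashSink`):

```cpp
#include <cstdint>
#include <iostream>
#include <string_view>

// A minimal streaming hasher: feed chunks, then read the final digest.
struct Fnv1aSink {
    std::uint64_t state = 0xcbf29ce484222325ULL;  // FNV-1a 64-bit offset basis

    void operator()(std::string_view chunk) {
        for (unsigned char c : chunk) {
            state ^= c;
            state *= 0x100000001b3ULL;            // FNV-1a 64-bit prime
        }
    }

    std::uint64_t finish() const { return state; }
};

int main()
{
    Fnv1aSink sink;
    sink("nar: chunk one, ");
    sink("chunk two");                            // data arrives incrementally
    std::cout << std::hex << sink.finish() << "\n";
}
```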
diff --git a/src/nlohmann/json.hpp b/src/nlohmann/json.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..c9af0bed36d6852de735b8d0b5d034614aef0e54
--- /dev/null
+++ b/src/nlohmann/json.hpp
@@ -0,0 +1,20406 @@
+/*
+    __ _____ _____ _____
+ __|  |   __|     |   | |  JSON for Modern C++
+|  |  |__   |  |  | | | |  version 3.5.0
+|_____|_____|_____|_|___|  https://github.com/nlohmann/json
+
+Licensed under the MIT License <http://opensource.org/licenses/MIT>.
+SPDX-License-Identifier: MIT
+Copyright (c) 2013-2018 Niels Lohmann <http://nlohmann.me>.
+
+Permission is hereby  granted, free of charge, to any  person obtaining a copy
+of this software and associated  documentation files (the "Software"), to deal
+in the Software  without restriction, including without  limitation the rights
+to  use, copy,  modify, merge,  publish, distribute,  sublicense, and/or  sell
+copies  of  the Software,  and  to  permit persons  to  whom  the Software  is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE  IS PROVIDED "AS  IS", WITHOUT WARRANTY  OF ANY KIND,  EXPRESS OR
+IMPLIED,  INCLUDING BUT  NOT  LIMITED TO  THE  WARRANTIES OF  MERCHANTABILITY,
+FITNESS FOR  A PARTICULAR PURPOSE AND  NONINFRINGEMENT. IN NO EVENT  SHALL THE
+AUTHORS  OR COPYRIGHT  HOLDERS  BE  LIABLE FOR  ANY  CLAIM,  DAMAGES OR  OTHER
+LIABILITY, WHETHER IN AN ACTION OF  CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE  OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+#ifndef NLOHMANN_JSON_HPP
+#define NLOHMANN_JSON_HPP
+
+#define NLOHMANN_JSON_VERSION_MAJOR 3
+#define NLOHMANN_JSON_VERSION_MINOR 5
+#define NLOHMANN_JSON_VERSION_PATCH 0
+
+#include <algorithm> // all_of, find, for_each
+#include <cassert> // assert
+#include <ciso646> // and, not, or
+#include <cstddef> // nullptr_t, ptrdiff_t, size_t
+#include <functional> // hash, less
+#include <initializer_list> // initializer_list
+#include <iosfwd> // istream, ostream
+#include <iterator> // random_access_iterator_tag
+#include <numeric> // accumulate
+#include <string> // string, stoi, to_string
+#include <utility> // declval, forward, move, pair, swap
+
+// #include <nlohmann/json_fwd.hpp>
+#ifndef NLOHMANN_JSON_FWD_HPP
+#define NLOHMANN_JSON_FWD_HPP
+
+#include <cstdint> // int64_t, uint64_t
+#include <map> // map
+#include <memory> // allocator
+#include <string> // string
+#include <vector> // vector
+
+/*!
+@brief namespace for Niels Lohmann
+@see https://github.com/nlohmann
+@since version 1.0.0
+*/
+namespace nlohmann
+{
+/*!
+@brief default JSONSerializer template argument
+
+This serializer ignores the template arguments and uses ADL
+([argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl))
+for serialization.
+*/
+template<typename T = void, typename SFINAE = void>
+struct adl_serializer;
+
+template<template<typename U, typename V, typename... Args> class ObjectType =
+         std::map,
+         template<typename U, typename... Args> class ArrayType = std::vector,
+         class StringType = std::string, class BooleanType = bool,
+         class NumberIntegerType = std::int64_t,
+         class NumberUnsignedType = std::uint64_t,
+         class NumberFloatType = double,
+         template<typename U> class AllocatorType = std::allocator,
+         template<typename T, typename SFINAE = void> class JSONSerializer =
+         adl_serializer>
+class basic_json;
+
+/*!
+@brief JSON Pointer
+
+A JSON pointer defines a string syntax for identifying a specific value
+within a JSON document. It can be used with functions `at` and
+`operator[]`. Furthermore, JSON pointers are the base for JSON patches.
+
+@sa [RFC 6901](https://tools.ietf.org/html/rfc6901)
+
+@since version 2.0.0
+*/
+template<typename BasicJsonType>
+class json_pointer;
+
+/*!
+@brief default JSON class
+
+This type is the default specialization of the @ref basic_json class which
+uses the standard template types.
+
+@since version 1.0.0
+*/
+using json = basic_json<>;
+}  // namespace nlohmann
+
+#endif
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+// This file contains all internal macro definitions
+// You MUST include macro_unscope.hpp at the end of json.hpp to undef all of them
+
+// exclude unsupported compilers
+#if !defined(JSON_SKIP_UNSUPPORTED_COMPILER_CHECK)
+    #if defined(__clang__)
+        #if (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) < 30400
+            #error "unsupported Clang version - see https://github.com/nlohmann/json#supported-compilers"
+        #endif
+    #elif defined(__GNUC__) && !(defined(__ICC) || defined(__INTEL_COMPILER))
+        #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) < 40800
+            #error "unsupported GCC version - see https://github.com/nlohmann/json#supported-compilers"
+        #endif
+    #endif
+#endif
+
+// disable float-equal warnings on GCC/clang
+#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+
+// disable documentation warnings on clang
+#if defined(__clang__)
+    #pragma GCC diagnostic push
+    #pragma GCC diagnostic ignored "-Wdocumentation"
+#endif
+
+// allow for portable deprecation warnings
+#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
+    #define JSON_DEPRECATED __attribute__((deprecated))
+#elif defined(_MSC_VER)
+    #define JSON_DEPRECATED __declspec(deprecated)
+#else
+    #define JSON_DEPRECATED
+#endif
+
+// allow to disable exceptions
+#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(JSON_NOEXCEPTION)
+    #define JSON_THROW(exception) throw exception
+    #define JSON_TRY try
+    #define JSON_CATCH(exception) catch(exception)
+    #define JSON_INTERNAL_CATCH(exception) catch(exception)
+#else
+    #define JSON_THROW(exception) std::abort()
+    #define JSON_TRY if(true)
+    #define JSON_CATCH(exception) if(false)
+    #define JSON_INTERNAL_CATCH(exception) if(false)
+#endif
+
+// override exception macros
+#if defined(JSON_THROW_USER)
+    #undef JSON_THROW
+    #define JSON_THROW JSON_THROW_USER
+#endif
+#if defined(JSON_TRY_USER)
+    #undef JSON_TRY
+    #define JSON_TRY JSON_TRY_USER
+#endif
+#if defined(JSON_CATCH_USER)
+    #undef JSON_CATCH
+    #define JSON_CATCH JSON_CATCH_USER
+    #undef JSON_INTERNAL_CATCH
+    #define JSON_INTERNAL_CATCH JSON_CATCH_USER
+#endif
+#if defined(JSON_INTERNAL_CATCH_USER)
+    #undef JSON_INTERNAL_CATCH
+    #define JSON_INTERNAL_CATCH JSON_INTERNAL_CATCH_USER
+#endif
+
+// manual branch prediction
+#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
+    #define JSON_LIKELY(x)      __builtin_expect(!!(x), 1)
+    #define JSON_UNLIKELY(x)    __builtin_expect(!!(x), 0)
+#else
+    #define JSON_LIKELY(x)      x
+    #define JSON_UNLIKELY(x)    x
+#endif
+
+// C++ language standard detection
+#if (defined(__cplusplus) && __cplusplus >= 201703L) || (defined(_HAS_CXX17) && _HAS_CXX17 == 1) // fix for issue #464
+    #define JSON_HAS_CPP_17
+    #define JSON_HAS_CPP_14
+#elif (defined(__cplusplus) && __cplusplus >= 201402L) || (defined(_HAS_CXX14) && _HAS_CXX14 == 1)
+    #define JSON_HAS_CPP_14
+#endif
+
+/*!
+@brief macro to briefly define a mapping between an enum and JSON
+@def NLOHMANN_JSON_SERIALIZE_ENUM
+@since version 3.4.0
+*/
+#define NLOHMANN_JSON_SERIALIZE_ENUM(ENUM_TYPE, ...)                                           \
+    template<typename BasicJsonType>                                                           \
+    inline void to_json(BasicJsonType& j, const ENUM_TYPE& e)                                  \
+    {                                                                                          \
+        static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!");         \
+        static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__;                    \
+        auto it = std::find_if(std::begin(m), std::end(m),                                     \
+                               [e](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
+        {                                                                                      \
+            return ej_pair.first == e;                                                         \
+        });                                                                                    \
+        j = ((it != std::end(m)) ? it : std::begin(m))->second;                                \
+    }                                                                                          \
+    template<typename BasicJsonType>                                                           \
+    inline void from_json(const BasicJsonType& j, ENUM_TYPE& e)                                \
+    {                                                                                          \
+        static_assert(std::is_enum<ENUM_TYPE>::value, #ENUM_TYPE " must be an enum!");         \
+        static const std::pair<ENUM_TYPE, BasicJsonType> m[] = __VA_ARGS__;                    \
+        auto it = std::find_if(std::begin(m), std::end(m),                                     \
+                               [j](const std::pair<ENUM_TYPE, BasicJsonType>& ej_pair) -> bool \
+        {                                                                                      \
+            return ej_pair.second == j;                                                        \
+        });                                                                                    \
+        e = ((it != std::end(m)) ? it : std::begin(m))->first;                                 \
+    }
+
+// Ugly macros to avoid uglier copy-paste when specializing basic_json. They
+// may be removed in the future once the class is split.
+
+#define NLOHMANN_BASIC_JSON_TPL_DECLARATION                                \
+    template<template<typename, typename, typename...> class ObjectType,   \
+             template<typename, typename...> class ArrayType,              \
+             class StringType, class BooleanType, class NumberIntegerType, \
+             class NumberUnsignedType, class NumberFloatType,              \
+             template<typename> class AllocatorType,                       \
+             template<typename, typename = void> class JSONSerializer>
+
+#define NLOHMANN_BASIC_JSON_TPL                                            \
+    basic_json<ObjectType, ArrayType, StringType, BooleanType,             \
+    NumberIntegerType, NumberUnsignedType, NumberFloatType,                \
+    AllocatorType, JSONSerializer>
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+
+#include <ciso646> // not
+#include <cstddef> // size_t
+#include <type_traits> // conditional, enable_if, false_type, integral_constant, is_constructible, is_integral, is_same, remove_cv, remove_reference, true_type
+
+namespace nlohmann
+{
+namespace detail
+{
+// alias templates to reduce boilerplate
+template<bool B, typename T = void>
+using enable_if_t = typename std::enable_if<B, T>::type;
+
+template<typename T>
+using uncvref_t = typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+
+// implementation of C++14 index_sequence and affiliates
+// source: https://stackoverflow.com/a/32223343
+template<std::size_t... Ints>
+struct index_sequence
+{
+    using type = index_sequence;
+    using value_type = std::size_t;
+    static constexpr std::size_t size() noexcept
+    {
+        return sizeof...(Ints);
+    }
+};
+
+template<class Sequence1, class Sequence2>
+struct merge_and_renumber;
+
+template<std::size_t... I1, std::size_t... I2>
+struct merge_and_renumber<index_sequence<I1...>, index_sequence<I2...>>
+        : index_sequence < I1..., (sizeof...(I1) + I2)... > {};
+
+template<std::size_t N>
+struct make_index_sequence
+    : merge_and_renumber < typename make_index_sequence < N / 2 >::type,
+      typename make_index_sequence < N - N / 2 >::type > {};
+
+template<> struct make_index_sequence<0> : index_sequence<> {};
+template<> struct make_index_sequence<1> : index_sequence<0> {};
+
+template<typename... Ts>
+using index_sequence_for = make_index_sequence<sizeof...(Ts)>;
+
+// dispatch utility (taken from ranges-v3)
+template<unsigned N> struct priority_tag : priority_tag < N - 1 > {};
+template<> struct priority_tag<0> {};
+
+// taken from ranges-v3
+template<typename T>
+struct static_const
+{
+    static constexpr T value{};
+};
+
+template<typename T>
+constexpr T static_const<T>::value;
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+
+#include <ciso646> // not
+#include <limits> // numeric_limits
+#include <type_traits> // false_type, is_constructible, is_integral, is_same, true_type
+#include <utility> // declval
+
+// #include <nlohmann/json_fwd.hpp>
+
+// #include <nlohmann/detail/iterators/iterator_traits.hpp>
+
+
+#include <iterator> // random_access_iterator_tag
+
+// #include <nlohmann/detail/meta/void_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template <typename ...Ts> struct make_void
+{
+    using type = void;
+};
+template <typename ...Ts> using void_t = typename make_void<Ts...>::type;
+} // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template <typename It, typename = void>
+struct iterator_types {};
+
+template <typename It>
+struct iterator_types <
+    It,
+    void_t<typename It::difference_type, typename It::value_type, typename It::pointer,
+    typename It::reference, typename It::iterator_category >>
+{
+    using difference_type = typename It::difference_type;
+    using value_type = typename It::value_type;
+    using pointer = typename It::pointer;
+    using reference = typename It::reference;
+    using iterator_category = typename It::iterator_category;
+};
+
+// This is required as some compilers implement std::iterator_traits in a way that
+// doesn't work with SFINAE. See https://github.com/nlohmann/json/issues/1341.
+template <typename T, typename = void>
+struct iterator_traits
+{
+};
+
+template <typename T>
+struct iterator_traits < T, enable_if_t < !std::is_pointer<T>::value >>
+            : iterator_types<T>
+{
+};
+
+template <typename T>
+struct iterator_traits<T*, enable_if_t<std::is_object<T>::value>>
+{
+    using iterator_category = std::random_access_iterator_tag;
+    using value_type = T;
+    using difference_type = ptrdiff_t;
+    using pointer = T*;
+    using reference = T&;
+};
+}
+}
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/meta/detected.hpp>
+
+
+#include <type_traits>
+
+// #include <nlohmann/detail/meta/void_t.hpp>
+
+
+// http://en.cppreference.com/w/cpp/experimental/is_detected
+namespace nlohmann
+{
+namespace detail
+{
+struct nonesuch
+{
+    nonesuch() = delete;
+    ~nonesuch() = delete;
+    nonesuch(nonesuch const&) = delete;
+    void operator=(nonesuch const&) = delete;
+};
+
+template <class Default,
+          class AlwaysVoid,
+          template <class...> class Op,
+          class... Args>
+struct detector
+{
+    using value_t = std::false_type;
+    using type = Default;
+};
+
+template <class Default, template <class...> class Op, class... Args>
+struct detector<Default, void_t<Op<Args...>>, Op, Args...>
+{
+    using value_t = std::true_type;
+    using type = Op<Args...>;
+};
+
+template <template <class...> class Op, class... Args>
+using is_detected = typename detector<nonesuch, void, Op, Args...>::value_t;
+
+template <template <class...> class Op, class... Args>
+using detected_t = typename detector<nonesuch, void, Op, Args...>::type;
+
+template <class Default, template <class...> class Op, class... Args>
+using detected_or = detector<Default, void, Op, Args...>;
+
+template <class Default, template <class...> class Op, class... Args>
+using detected_or_t = typename detected_or<Default, Op, Args...>::type;
+
+template <class Expected, template <class...> class Op, class... Args>
+using is_detected_exact = std::is_same<Expected, detected_t<Op, Args...>>;
+
+template <class To, template <class...> class Op, class... Args>
+using is_detected_convertible =
+    std::is_convertible<detected_t<Op, Args...>, To>;
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+namespace nlohmann
+{
+/*!
+@brief detail namespace with internal helper functions
+
+This namespace collects functions that should not be exposed,
+implementations of some @ref basic_json methods, and meta-programming helpers.
+
+@since version 2.1.0
+*/
+namespace detail
+{
+/////////////
+// helpers //
+/////////////
+
+// Note to maintainers:
+//
+// Every trait in this file expects a non CV-qualified type.
+// The only exceptions are in the 'aliases for detected' section
+// (i.e. those of the form: decltype(T::member_function(std::declval<T>())))
+//
+// In this case, T has to be properly CV-qualified to constraint the function arguments
+// (e.g. to_json(BasicJsonType&, const T&))
+
+template<typename> struct is_basic_json : std::false_type {};
+
+NLOHMANN_BASIC_JSON_TPL_DECLARATION
+struct is_basic_json<NLOHMANN_BASIC_JSON_TPL> : std::true_type {};
+
+//////////////////////////
+// aliases for detected //
+//////////////////////////
+
+template <typename T>
+using mapped_type_t = typename T::mapped_type;
+
+template <typename T>
+using key_type_t = typename T::key_type;
+
+template <typename T>
+using value_type_t = typename T::value_type;
+
+template <typename T>
+using difference_type_t = typename T::difference_type;
+
+template <typename T>
+using pointer_t = typename T::pointer;
+
+template <typename T>
+using reference_t = typename T::reference;
+
+template <typename T>
+using iterator_category_t = typename T::iterator_category;
+
+template <typename T>
+using iterator_t = typename T::iterator;
+
+template <typename T, typename... Args>
+using to_json_function = decltype(T::to_json(std::declval<Args>()...));
+
+template <typename T, typename... Args>
+using from_json_function = decltype(T::from_json(std::declval<Args>()...));
+
+template <typename T, typename U>
+using get_template_function = decltype(std::declval<T>().template get<U>());
+
+// trait checking if JSONSerializer<T>::from_json(json const&, udt&) exists
+template <typename BasicJsonType, typename T, typename = void>
+struct has_from_json : std::false_type {};
+
+template <typename BasicJsonType, typename T>
+struct has_from_json<BasicJsonType, T,
+           enable_if_t<not is_basic_json<T>::value>>
+{
+    using serializer = typename BasicJsonType::template json_serializer<T, void>;
+
+    static constexpr bool value =
+        is_detected_exact<void, from_json_function, serializer,
+        const BasicJsonType&, T&>::value;
+};
+
+// This trait checks if JSONSerializer<T>::from_json(json const&) exists
+// this overload is used for non-default-constructible user-defined-types
+template <typename BasicJsonType, typename T, typename = void>
+struct has_non_default_from_json : std::false_type {};
+
+template<typename BasicJsonType, typename T>
+struct has_non_default_from_json<BasicJsonType, T, enable_if_t<not is_basic_json<T>::value>>
+{
+    using serializer = typename BasicJsonType::template json_serializer<T, void>;
+
+    static constexpr bool value =
+        is_detected_exact<T, from_json_function, serializer,
+        const BasicJsonType&>::value;
+};
+
+// This trait checks if BasicJsonType::json_serializer<T>::to_json exists
+// Do not evaluate the trait when T is a basic_json type, to avoid template instantiation infinite recursion.
+template <typename BasicJsonType, typename T, typename = void>
+struct has_to_json : std::false_type {};
+
+template <typename BasicJsonType, typename T>
+struct has_to_json<BasicJsonType, T, enable_if_t<not is_basic_json<T>::value>>
+{
+    using serializer = typename BasicJsonType::template json_serializer<T, void>;
+
+    static constexpr bool value =
+        is_detected_exact<void, to_json_function, serializer, BasicJsonType&,
+        T>::value;
+};
+
+
+///////////////////
+// is_ functions //
+///////////////////
+
+template <typename T, typename = void>
+struct is_iterator_traits : std::false_type {};
+
+template <typename T>
+struct is_iterator_traits<iterator_traits<T>>
+{
+  private:
+    using traits = iterator_traits<T>;
+
+  public:
+    static constexpr auto value =
+        is_detected<value_type_t, traits>::value &&
+        is_detected<difference_type_t, traits>::value &&
+        is_detected<pointer_t, traits>::value &&
+        is_detected<iterator_category_t, traits>::value &&
+        is_detected<reference_t, traits>::value;
+};
+
+// source: https://stackoverflow.com/a/37193089/4116453
+
+template <typename T, typename = void>
+struct is_complete_type : std::false_type {};
+
+template <typename T>
+struct is_complete_type<T, decltype(void(sizeof(T)))> : std::true_type {};
+
+template <typename BasicJsonType, typename CompatibleObjectType,
+          typename = void>
+struct is_compatible_object_type_impl : std::false_type {};
+
+template <typename BasicJsonType, typename CompatibleObjectType>
+struct is_compatible_object_type_impl <
+    BasicJsonType, CompatibleObjectType,
+    enable_if_t<is_detected<mapped_type_t, CompatibleObjectType>::value and
+    is_detected<key_type_t, CompatibleObjectType>::value >>
+{
+
+    using object_t = typename BasicJsonType::object_t;
+
+    // macOS's is_constructible does not play well with nonesuch...
+    static constexpr bool value =
+        std::is_constructible<typename object_t::key_type,
+        typename CompatibleObjectType::key_type>::value and
+        std::is_constructible<typename object_t::mapped_type,
+        typename CompatibleObjectType::mapped_type>::value;
+};
+
+template <typename BasicJsonType, typename CompatibleObjectType>
+struct is_compatible_object_type
+    : is_compatible_object_type_impl<BasicJsonType, CompatibleObjectType> {};
+
+template <typename BasicJsonType, typename ConstructibleObjectType,
+          typename = void>
+struct is_constructible_object_type_impl : std::false_type {};
+
+template <typename BasicJsonType, typename ConstructibleObjectType>
+struct is_constructible_object_type_impl <
+    BasicJsonType, ConstructibleObjectType,
+    enable_if_t<is_detected<mapped_type_t, ConstructibleObjectType>::value and
+    is_detected<key_type_t, ConstructibleObjectType>::value >>
+{
+    using object_t = typename BasicJsonType::object_t;
+
+    static constexpr bool value =
+        (std::is_constructible<typename ConstructibleObjectType::key_type, typename object_t::key_type>::value and
+         std::is_same<typename object_t::mapped_type, typename ConstructibleObjectType::mapped_type>::value) or
+        (has_from_json<BasicJsonType, typename ConstructibleObjectType::mapped_type>::value or
+         has_non_default_from_json<BasicJsonType, typename ConstructibleObjectType::mapped_type >::value);
+};
+
+template <typename BasicJsonType, typename ConstructibleObjectType>
+struct is_constructible_object_type
+    : is_constructible_object_type_impl<BasicJsonType,
+      ConstructibleObjectType> {};
+
+template <typename BasicJsonType, typename CompatibleStringType,
+          typename = void>
+struct is_compatible_string_type_impl : std::false_type {};
+
+template <typename BasicJsonType, typename CompatibleStringType>
+struct is_compatible_string_type_impl <
+    BasicJsonType, CompatibleStringType,
+    enable_if_t<is_detected_exact<typename BasicJsonType::string_t::value_type,
+    value_type_t, CompatibleStringType>::value >>
+{
+    static constexpr auto value =
+        std::is_constructible<typename BasicJsonType::string_t, CompatibleStringType>::value;
+};
+
+template <typename BasicJsonType, typename ConstructibleStringType>
+struct is_compatible_string_type
+    : is_compatible_string_type_impl<BasicJsonType, ConstructibleStringType> {};
+
+template <typename BasicJsonType, typename ConstructibleStringType,
+          typename = void>
+struct is_constructible_string_type_impl : std::false_type {};
+
+template <typename BasicJsonType, typename ConstructibleStringType>
+struct is_constructible_string_type_impl <
+    BasicJsonType, ConstructibleStringType,
+    enable_if_t<is_detected_exact<typename BasicJsonType::string_t::value_type,
+    value_type_t, ConstructibleStringType>::value >>
+{
+    static constexpr auto value =
+        std::is_constructible<ConstructibleStringType,
+        typename BasicJsonType::string_t>::value;
+};
+
+template <typename BasicJsonType, typename ConstructibleStringType>
+struct is_constructible_string_type
+    : is_constructible_string_type_impl<BasicJsonType, ConstructibleStringType> {};
+
+template <typename BasicJsonType, typename CompatibleArrayType, typename = void>
+struct is_compatible_array_type_impl : std::false_type {};
+
+template <typename BasicJsonType, typename CompatibleArrayType>
+struct is_compatible_array_type_impl <
+    BasicJsonType, CompatibleArrayType,
+    enable_if_t<is_detected<value_type_t, CompatibleArrayType>::value and
+    is_detected<iterator_t, CompatibleArrayType>::value and
+// This is needed because json_reverse_iterator has a ::iterator type...
+// Therefore it is detected as a CompatibleArrayType.
+// The real fix would be to have an Iterable concept.
+    not is_iterator_traits<
+    iterator_traits<CompatibleArrayType>>::value >>
+{
+    static constexpr bool value =
+        std::is_constructible<BasicJsonType,
+        typename CompatibleArrayType::value_type>::value;
+};
+
+template <typename BasicJsonType, typename CompatibleArrayType>
+struct is_compatible_array_type
+    : is_compatible_array_type_impl<BasicJsonType, CompatibleArrayType> {};
+
+template <typename BasicJsonType, typename ConstructibleArrayType, typename = void>
+struct is_constructible_array_type_impl : std::false_type {};
+
+template <typename BasicJsonType, typename ConstructibleArrayType>
+struct is_constructible_array_type_impl <
+    BasicJsonType, ConstructibleArrayType,
+    enable_if_t<std::is_same<ConstructibleArrayType,
+    typename BasicJsonType::value_type>::value >>
+            : std::true_type {};
+
+template <typename BasicJsonType, typename ConstructibleArrayType>
+struct is_constructible_array_type_impl <
+    BasicJsonType, ConstructibleArrayType,
+    enable_if_t<not std::is_same<ConstructibleArrayType,
+    typename BasicJsonType::value_type>::value and
+    is_detected<value_type_t, ConstructibleArrayType>::value and
+    is_detected<iterator_t, ConstructibleArrayType>::value and
+    is_complete_type<
+    detected_t<value_type_t, ConstructibleArrayType>>::value >>
+{
+    static constexpr bool value =
+        // This is needed because json_reverse_iterator has a ::iterator type;
+        // furthermore, std::back_insert_iterator (and other iterators) have a base class `iterator`...
+        // Therefore it is detected as a ConstructibleArrayType.
+        // The real fix would be to have an Iterable concept.
+        not is_iterator_traits <
+        iterator_traits<ConstructibleArrayType >>::value and
+
+        (std::is_same<typename ConstructibleArrayType::value_type, typename BasicJsonType::array_t::value_type>::value or
+         has_from_json<BasicJsonType,
+         typename ConstructibleArrayType::value_type>::value or
+         has_non_default_from_json <
+         BasicJsonType, typename ConstructibleArrayType::value_type >::value);
+};
+
+template <typename BasicJsonType, typename ConstructibleArrayType>
+struct is_constructible_array_type
+    : is_constructible_array_type_impl<BasicJsonType, ConstructibleArrayType> {};
+
+template <typename RealIntegerType, typename CompatibleNumberIntegerType,
+          typename = void>
+struct is_compatible_integer_type_impl : std::false_type {};
+
+template <typename RealIntegerType, typename CompatibleNumberIntegerType>
+struct is_compatible_integer_type_impl <
+    RealIntegerType, CompatibleNumberIntegerType,
+    enable_if_t<std::is_integral<RealIntegerType>::value and
+    std::is_integral<CompatibleNumberIntegerType>::value and
+    not std::is_same<bool, CompatibleNumberIntegerType>::value >>
+{
+    // is there an assert somewhere on overflows?
+    using RealLimits = std::numeric_limits<RealIntegerType>;
+    using CompatibleLimits = std::numeric_limits<CompatibleNumberIntegerType>;
+
+    static constexpr auto value =
+        std::is_constructible<RealIntegerType,
+        CompatibleNumberIntegerType>::value and
+        CompatibleLimits::is_integer and
+        RealLimits::is_signed == CompatibleLimits::is_signed;
+};
+
+template <typename RealIntegerType, typename CompatibleNumberIntegerType>
+struct is_compatible_integer_type
+    : is_compatible_integer_type_impl<RealIntegerType,
+      CompatibleNumberIntegerType> {};
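+
+// A minimal sketch of what this trait accepts (illustrative only): two
+// integer types count as compatible when both are integral, the source is
+// not bool, and their signedness matches.
+//
+//     static_assert(is_compatible_integer_type<long long, int>::value,
+//                   "same signedness: compatible");
+//     static_assert(not is_compatible_integer_type<long long, unsigned int>::value,
+//                   "signed/unsigned mismatch: not compatible");
+//     static_assert(not is_compatible_integer_type<long long, bool>::value,
+//                   "bool is never treated as a number type");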
+
+template <typename BasicJsonType, typename CompatibleType, typename = void>
+struct is_compatible_type_impl: std::false_type {};
+
+template <typename BasicJsonType, typename CompatibleType>
+struct is_compatible_type_impl <
+    BasicJsonType, CompatibleType,
+    enable_if_t<is_complete_type<CompatibleType>::value >>
+{
+    static constexpr bool value =
+        has_to_json<BasicJsonType, CompatibleType>::value;
+};
+
+template <typename BasicJsonType, typename CompatibleType>
+struct is_compatible_type
+    : is_compatible_type_impl<BasicJsonType, CompatibleType> {};
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+
+#include <exception> // exception
+#include <stdexcept> // runtime_error
+#include <string> // to_string
+
+// #include <nlohmann/detail/input/position_t.hpp>
+
+
+#include <cstddef> // size_t
+
+namespace nlohmann
+{
+namespace detail
+{
+/// struct to capture the start position of the current token
+struct position_t
+{
+    /// the total number of characters read
+    std::size_t chars_read_total = 0;
+    /// the number of characters read in the current line
+    std::size_t chars_read_current_line = 0;
+    /// the number of lines read
+    std::size_t lines_read = 0;
+
+    /// conversion to size_t to preserve SAX interface
+    constexpr operator size_t() const
+    {
+        return chars_read_total;
+    }
+};
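+
+// Usage sketch (illustrative): the lexer fills in a position_t while
+// reading input; the implicit conversion lets code that still expects a
+// plain byte offset (the SAX interface) keep working unchanged.
+//
+//     position_t pos;
+//     pos.chars_read_total = 12;
+//     std::size_t byte_offset = pos;   // byte_offset == 12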
+
+}
+}
+
+
+namespace nlohmann
+{
+namespace detail
+{
+////////////////
+// exceptions //
+////////////////
+
+/*!
+@brief general exception of the @ref basic_json class
+
+This class is an extension of `std::exception` objects with a member @a id for
+exception ids. It is used as the base class for all exceptions thrown by the
+@ref basic_json class. This class can hence be used as a "wildcard" to catch
+exceptions.
+
+Subclasses:
+- @ref parse_error for exceptions indicating a parse error
+- @ref invalid_iterator for exceptions indicating errors with iterators
+- @ref type_error for exceptions indicating executing a member function with
+                  a wrong type
+- @ref out_of_range for exceptions indicating access out of the defined range
+- @ref other_error for exceptions indicating other library errors
+
+@internal
+@note To have nothrow-copy-constructible exceptions, we internally use
+      `std::runtime_error` which can cope with arbitrary-length error messages.
+      Intermediate strings are built with static functions and then passed to
+      the actual constructor.
+@endinternal
+
+@liveexample{The following code shows how arbitrary library exceptions can be
+caught.,exception}
+
+@since version 3.0.0
+*/
+class exception : public std::exception
+{
+  public:
+    /// returns the explanatory string
+    const char* what() const noexcept override
+    {
+        return m.what();
+    }
+
+    /// the id of the exception
+    const int id;
+
+  protected:
+    exception(int id_, const char* what_arg) : id(id_), m(what_arg) {}
+
+    static std::string name(const std::string& ename, int id_)
+    {
+        return "[json.exception." + ename + "." + std::to_string(id_) + "] ";
+    }
+
+  private:
+    /// an exception object as storage for error messages
+    std::runtime_error m;
+};
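+
+// Usage sketch (assuming the single header is included as
+// <nlohmann/json.hpp> and the default nlohmann::json alias is used):
+// because all library exceptions derive from this class, one handler can
+// catch them all and inspect the numeric id.
+//
+//     #include <iostream>
+//     #include <nlohmann/json.hpp>
+//
+//     int main()
+//     {
+//         try
+//         {
+//             auto j = nlohmann::json::parse("[1, 2,");   // malformed input
+//         }
+//         catch (const nlohmann::json::exception& e)
+//         {
+//             std::cerr << e.what() << " (id " << e.id << ")\n";
+//         }
+//     }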
+
+/*!
+@brief exception indicating a parse error
+
+This exception is thrown by the library when a parse error occurs. Parse errors
+can occur during the deserialization of JSON text, CBOR, MessagePack, as well
+as when using JSON Patch.
+
+Member @a byte holds the byte index of the last read character in the input
+file.
+
+Exceptions have ids 1xx.
+
+name / id                      | example message | description
+------------------------------ | --------------- | -------------------------
+json.exception.parse_error.101 | parse error at 2: unexpected end of input; expected string literal | This error indicates a syntax error while deserializing a JSON text. The error message describes that an unexpected token (character) was encountered, and the member @a byte indicates the error position.
+json.exception.parse_error.102 | parse error at 14: missing or wrong low surrogate | JSON uses the `\uxxxx` format to describe Unicode characters. Code points above 0xFFFF are split into two `\uxxxx` entries ("surrogate pairs"). This error indicates that the surrogate pair is incomplete or contains an invalid code point.
+json.exception.parse_error.103 | parse error: code points above 0x10FFFF are invalid | Unicode supports code points up to 0x10FFFF. Code points above 0x10FFFF are invalid.
+json.exception.parse_error.104 | parse error: JSON patch must be an array of objects | [RFC 6902](https://tools.ietf.org/html/rfc6902) requires a JSON Patch document to be a JSON document that represents an array of objects.
+json.exception.parse_error.105 | parse error: operation must have string member 'op' | An operation of a JSON Patch document must contain exactly one "op" member, whose value indicates the operation to perform. Its value must be one of "add", "remove", "replace", "move", "copy", or "test"; other values are errors.
+json.exception.parse_error.106 | parse error: array index '01' must not begin with '0' | An array index in a JSON Pointer ([RFC 6901](https://tools.ietf.org/html/rfc6901)) may be `0` or any number without a leading `0`.
+json.exception.parse_error.107 | parse error: JSON pointer must be empty or begin with '/' - was: 'foo' | A JSON Pointer must be a Unicode string containing a sequence of zero or more reference tokens, each prefixed by a `/` character.
+json.exception.parse_error.108 | parse error: escape character '~' must be followed with '0' or '1' | In a JSON Pointer, only `~0` and `~1` are valid escape sequences.
+json.exception.parse_error.109 | parse error: array index 'one' is not a number | A JSON Pointer array index must be a number.
+json.exception.parse_error.110 | parse error at 1: cannot read 2 bytes from vector | When parsing CBOR or MessagePack, the byte vector ends before the complete value has been read.
+json.exception.parse_error.112 | parse error at 1: error reading CBOR; last byte: 0xF8 | Not all types of CBOR or MessagePack are supported. This exception occurs if an unsupported byte was read.
+json.exception.parse_error.113 | parse error at 2: expected a CBOR string; last byte: 0x98 | While parsing a map key, a value that is not a string has been read.
+json.exception.parse_error.114 | parse error: Unsupported BSON record type 0x0F | The parsing of the corresponding BSON record type is not implemented (yet).
+
+@note For an input with n bytes, 1 is the index of the first character and n+1
+      is the index of the terminating null byte or the end of file. This also
+      holds true when reading a byte vector (CBOR or MessagePack).
+
+@liveexample{The following code shows how a `parse_error` exception can be
+caught.,parse_error}
+
+@sa @ref exception for the base class of the library exceptions
+@sa @ref invalid_iterator for exceptions indicating errors with iterators
+@sa @ref type_error for exceptions indicating executing a member function with
+                    a wrong type
+@sa @ref out_of_range for exceptions indicating access out of the defined range
+@sa @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class parse_error : public exception
+{
+  public:
+    /*!
+    @brief create a parse error exception
+    @param[in] id_       the id of the exception
+    @param[in] position  the position where the error occurred (or with
+                         chars_read_total=0 if the position cannot be
+                         determined)
+    @param[in] what_arg  the explanatory string
+    @return parse_error object
+    */
+    static parse_error create(int id_, const position_t& pos, const std::string& what_arg)
+    {
+        std::string w = exception::name("parse_error", id_) + "parse error" +
+                        position_string(pos) + ": " + what_arg;
+        return parse_error(id_, pos.chars_read_total, w.c_str());
+    }
+
+    static parse_error create(int id_, std::size_t byte_, const std::string& what_arg)
+    {
+        std::string w = exception::name("parse_error", id_) + "parse error" +
+                        (byte_ != 0 ? (" at byte " + std::to_string(byte_)) : "") +
+                        ": " + what_arg;
+        return parse_error(id_, byte_, w.c_str());
+    }
+
+    /*!
+    @brief byte index of the parse error
+
+    The byte index of the last read character in the input file.
+
+    @note For an input with n bytes, 1 is the index of the first character and
+          n+1 is the index of the terminating null byte or the end of file.
+          This also holds true when reading a byte vector (CBOR or MessagePack).
+    */
+    const std::size_t byte;
+
+  private:
+    parse_error(int id_, std::size_t byte_, const char* what_arg)
+        : exception(id_, what_arg), byte(byte_) {}
+
+    static std::string position_string(const position_t& pos)
+    {
+        return " at line " + std::to_string(pos.lines_read + 1) +
+               ", column " + std::to_string(pos.chars_read_current_line);
+    }
+};
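+
+// Usage sketch (illustrative): catching parse_error specifically gives
+// access to the byte offset of the failure in addition to the message.
+//
+//     try
+//     {
+//         nlohmann::json::parse(R"({"key": })");          // syntax error
+//     }
+//     catch (const nlohmann::json::parse_error& e)
+//     {
+//         // e.id is 101 for syntax errors; e.byte is the 1-based offset
+//         std::cerr << e.what() << " at byte " << e.byte << '\n';
+//     }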
+
+/*!
+@brief exception indicating errors with iterators
+
+This exception is thrown if iterators passed to a library function do not match
+the expected semantics.
+
+Exceptions have ids 2xx.
+
+name / id                           | example message | description
+----------------------------------- | --------------- | -------------------------
+json.exception.invalid_iterator.201 | iterators are not compatible | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid.
+json.exception.invalid_iterator.202 | iterator does not fit current value | In an erase or insert function, the passed iterator @a pos does not belong to the JSON value for which the function was called. It hence does not define a valid position for the deletion/insertion.
+json.exception.invalid_iterator.203 | iterators do not fit current value | Either iterator passed to function @ref erase(IteratorType first, IteratorType last) does not belong to the JSON value from which values shall be erased. It hence does not define a valid range to delete values from.
+json.exception.invalid_iterator.204 | iterators out of range | When an iterator range for a primitive type (number, boolean, or string) is passed to a constructor or an erase function, this range has to be exactly (@ref begin(), @ref end()), because this is the only way the single stored value is expressed. All other ranges are invalid.
+json.exception.invalid_iterator.205 | iterator out of range | When an iterator for a primitive type (number, boolean, or string) is passed to an erase function, the iterator has to be the @ref begin() iterator, because it is the only way to address the stored value. All other iterators are invalid.
+json.exception.invalid_iterator.206 | cannot construct with iterators from null | The iterators passed to constructor @ref basic_json(InputIT first, InputIT last) belong to a JSON null value and hence do not define a valid range.
+json.exception.invalid_iterator.207 | cannot use key() for non-object iterators | The key() member function can only be used on iterators belonging to a JSON object, because other types do not have a concept of a key.
+json.exception.invalid_iterator.208 | cannot use operator[] for object iterators | The operator[] to specify a concrete offset cannot be used on iterators belonging to a JSON object, because JSON objects are unordered.
+json.exception.invalid_iterator.209 | cannot use offsets with object iterators | The offset operators (+, -, +=, -=) cannot be used on iterators belonging to a JSON object, because JSON objects are unordered.
+json.exception.invalid_iterator.210 | iterators do not fit | The iterators of the range passed to the insert function are not compatible, meaning they do not belong to the same container. Therefore, the range (@a first, @a last) is invalid.
+json.exception.invalid_iterator.211 | passed iterators may not belong to container | The iterator range passed to the insert function must not be a subrange of the container to insert to.
+json.exception.invalid_iterator.212 | cannot compare iterators of different containers | When two iterators are compared, they must belong to the same container.
+json.exception.invalid_iterator.213 | cannot compare order of object iterators | The order of object iterators cannot be compared, because JSON objects are unordered.
+json.exception.invalid_iterator.214 | cannot get value | Cannot get value for iterator: Either the iterator belongs to a null value or it is an iterator to a primitive type (number, boolean, or string), but the iterator is different from @ref begin().
+
+@liveexample{The following code shows how an `invalid_iterator` exception can be
+caught.,invalid_iterator}
+
+@sa @ref exception for the base class of the library exceptions
+@sa @ref parse_error for exceptions indicating a parse error
+@sa @ref type_error for exceptions indicating executing a member function with
+                    a wrong type
+@sa @ref out_of_range for exceptions indicating access out of the defined range
+@sa @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class invalid_iterator : public exception
+{
+  public:
+    static invalid_iterator create(int id_, const std::string& what_arg)
+    {
+        std::string w = exception::name("invalid_iterator", id_) + what_arg;
+        return invalid_iterator(id_, w.c_str());
+    }
+
+  private:
+    invalid_iterator(int id_, const char* what_arg)
+        : exception(id_, what_arg) {}
+};
+
+/*!
+@brief exception indicating executing a member function with a wrong type
+
+This exception is thrown in case of a type error; that is, a library function is
+executed on a JSON value whose type does not match the expected semantics.
+
+Exceptions have ids 3xx.
+
+name / id                     | example message | description
+----------------------------- | --------------- | -------------------------
+json.exception.type_error.301 | cannot create object from initializer list | To create an object from an initializer list, the initializer list must consist only of a list of pairs whose first element is a string. When this constraint is violated, an array is created instead.
+json.exception.type_error.302 | type must be object, but is array | During implicit or explicit value conversion, the JSON type must be compatible to the target type. For instance, a JSON string can only be converted into string types, but not into numbers or boolean types.
+json.exception.type_error.303 | incompatible ReferenceType for get_ref, actual type is object | To retrieve a reference to a value stored in a @ref basic_json object with @ref get_ref, the type of the reference must match the value type. For instance, for a JSON array, the @a ReferenceType must be @ref array_t&.
+json.exception.type_error.304 | cannot use at() with string | The @ref at() member functions can only be executed for certain JSON types.
+json.exception.type_error.305 | cannot use operator[] with string | The @ref operator[] member functions can only be executed for certain JSON types.
+json.exception.type_error.306 | cannot use value() with string | The @ref value() member functions can only be executed for certain JSON types.
+json.exception.type_error.307 | cannot use erase() with string | The @ref erase() member functions can only be executed for certain JSON types.
+json.exception.type_error.308 | cannot use push_back() with string | The @ref push_back() and @ref operator+= member functions can only be executed for certain JSON types.
+json.exception.type_error.309 | cannot use insert() with | The @ref insert() member functions can only be executed for certain JSON types.
+json.exception.type_error.310 | cannot use swap() with number | The @ref swap() member functions can only be executed for certain JSON types.
+json.exception.type_error.311 | cannot use emplace_back() with string | The @ref emplace_back() member function can only be executed for certain JSON types.
+json.exception.type_error.312 | cannot use update() with string | The @ref update() member functions can only be executed for certain JSON types.
+json.exception.type_error.313 | invalid value to unflatten | The @ref unflatten function converts an object whose keys are JSON Pointers back into an arbitrary nested JSON value. The JSON Pointers must not overlap, because then the resulting value would not be well defined.
+json.exception.type_error.314 | only objects can be unflattened | The @ref unflatten function only works for an object whose keys are JSON Pointers.
+json.exception.type_error.315 | values in object must be primitive | The @ref unflatten function only works for an object whose keys are JSON Pointers and whose values are primitive.
+json.exception.type_error.316 | invalid UTF-8 byte at index 10: 0x7E | The @ref dump function only works with UTF-8 encoded strings; that is, if you assign a `std::string` to a JSON value, make sure it is UTF-8 encoded. |
+json.exception.type_error.317 | JSON value cannot be serialized to requested format | The dynamic type of the object cannot be represented in the requested serialization format (e.g. a raw `true` or `null` JSON object cannot be serialized to BSON) |
+
+@liveexample{The following code shows how a `type_error` exception can be
+caught.,type_error}
+
+@sa @ref exception for the base class of the library exceptions
+@sa @ref parse_error for exceptions indicating a parse error
+@sa @ref invalid_iterator for exceptions indicating errors with iterators
+@sa @ref out_of_range for exceptions indicating access out of the defined range
+@sa @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class type_error : public exception
+{
+  public:
+    static type_error create(int id_, const std::string& what_arg)
+    {
+        std::string w = exception::name("type_error", id_) + what_arg;
+        return type_error(id_, w.c_str());
+    }
+
+  private:
+    type_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
+};
+
+/*!
+@brief exception indicating access out of the defined range
+
+This exception is thrown in case a library function is called on an input
+parameter that exceeds the expected range, for instance in case of array
+indices or nonexistent object keys.
+
+Exceptions have ids 4xx.
+
+name / id                       | example message | description
+------------------------------- | --------------- | -------------------------
+json.exception.out_of_range.401 | array index 3 is out of range | The provided array index @a i is larger than @a size-1.
+json.exception.out_of_range.402 | array index '-' (3) is out of range | The special array index `-` in a JSON Pointer never describes a valid element of the array, but the index past the end. That is, it can only be used to add elements at this position, but not to read it.
+json.exception.out_of_range.403 | key 'foo' not found | The provided key was not found in the JSON object.
+json.exception.out_of_range.404 | unresolved reference token 'foo' | A reference token in a JSON Pointer could not be resolved.
+json.exception.out_of_range.405 | JSON pointer has no parent | The JSON Patch operations 'remove' and 'add' cannot be applied to the root element of the JSON value.
+json.exception.out_of_range.406 | number overflow parsing '10E1000' | A parsed number could not be stored without changing it to NaN or INF.
+json.exception.out_of_range.407 | number overflow serializing '9223372036854775808' | UBJSON and BSON only support integer numbers up to 9223372036854775807. |
+json.exception.out_of_range.408 | excessive array size: 8658170730974374167 | The size (following `#`) of an UBJSON array or object exceeds the maximal capacity. |
+json.exception.out_of_range.409 | BSON key cannot contain code point U+0000 (at byte 2) | Key identifiers to be serialized to BSON cannot contain code point U+0000, since the key is stored as a zero-terminated C string |
+
+@liveexample{The following code shows how an `out_of_range` exception can be
+caught.,out_of_range}
+
+@sa @ref exception for the base class of the library exceptions
+@sa @ref parse_error for exceptions indicating a parse error
+@sa @ref invalid_iterator for exceptions indicating errors with iterators
+@sa @ref type_error for exceptions indicating executing a member function with
+                    a wrong type
+@sa @ref other_error for exceptions indicating other library errors
+
+@since version 3.0.0
+*/
+class out_of_range : public exception
+{
+  public:
+    static out_of_range create(int id_, const std::string& what_arg)
+    {
+        std::string w = exception::name("out_of_range", id_) + what_arg;
+        return out_of_range(id_, w.c_str());
+    }
+
+  private:
+    out_of_range(int id_, const char* what_arg) : exception(id_, what_arg) {}
+};
+
+/*!
+@brief exception indicating other library errors
+
+This exception is thrown in case of errors that cannot be classified with the
+other exception types.
+
+Exceptions have ids 5xx.
+
+name / id                      | example message | description
+------------------------------ | --------------- | -------------------------
+json.exception.other_error.501 | unsuccessful: {"op":"test","path":"/baz", "value":"bar"} | A JSON Patch operation 'test' failed. The unsuccessful operation is also printed.
+
+@sa @ref exception for the base class of the library exceptions
+@sa @ref parse_error for exceptions indicating a parse error
+@sa @ref invalid_iterator for exceptions indicating errors with iterators
+@sa @ref type_error for exceptions indicating executing a member function with
+                    a wrong type
+@sa @ref out_of_range for exceptions indicating access out of the defined range
+
+@liveexample{The following code shows how an `other_error` exception can be
+caught.,other_error}
+
+@since version 3.0.0
+*/
+class other_error : public exception
+{
+  public:
+    static other_error create(int id_, const std::string& what_arg)
+    {
+        std::string w = exception::name("other_error", id_) + what_arg;
+        return other_error(id_, w.c_str());
+    }
+
+  private:
+    other_error(int id_, const char* what_arg) : exception(id_, what_arg) {}
+};
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+#include <array> // array
+#include <ciso646> // and
+#include <cstddef> // size_t
+#include <cstdint> // uint8_t
+
+namespace nlohmann
+{
+namespace detail
+{
+///////////////////////////
+// JSON type enumeration //
+///////////////////////////
+
+/*!
+@brief the JSON type enumeration
+
+This enumeration collects the different JSON types. It is internally used to
+distinguish the stored values, and the functions @ref basic_json::is_null(),
+@ref basic_json::is_object(), @ref basic_json::is_array(),
+@ref basic_json::is_string(), @ref basic_json::is_boolean(),
+@ref basic_json::is_number() (with @ref basic_json::is_number_integer(),
+@ref basic_json::is_number_unsigned(), and @ref basic_json::is_number_float()),
+@ref basic_json::is_discarded(), @ref basic_json::is_primitive(), and
+@ref basic_json::is_structured() rely on it.
+
+@note There are three enumeration entries (number_integer, number_unsigned, and
+number_float), because the library distinguishes these three types for numbers:
+@ref basic_json::number_unsigned_t is used for unsigned integers,
+@ref basic_json::number_integer_t is used for signed integers, and
+@ref basic_json::number_float_t is used for floating-point numbers or to
+approximate integers which do not fit in the limits of their respective type.
+
+@sa @ref basic_json::basic_json(const value_t value_type) -- create a JSON
+value with the default value for a given type
+
+@since version 1.0.0
+*/
+enum class value_t : std::uint8_t
+{
+    null,             ///< null value
+    object,           ///< object (unordered set of name/value pairs)
+    array,            ///< array (ordered collection of values)
+    string,           ///< string value
+    boolean,          ///< boolean value
+    number_integer,   ///< number value (signed integer)
+    number_unsigned,  ///< number value (unsigned integer)
+    number_float,     ///< number value (floating-point)
+    discarded         ///< discarded by the parser callback function
+};
+
+/*!
+@brief comparison operator for JSON types
+
+Returns an ordering that is similar to Python:
+- order: null < boolean < number < object < array < string
+- furthermore, each type is not smaller than itself
+- discarded values are not comparable
+
+@since version 1.0.0
+*/
+inline bool operator<(const value_t lhs, const value_t rhs) noexcept
+{
+    static constexpr std::array<std::uint8_t, 8> order = {{
+            0 /* null */, 3 /* object */, 4 /* array */, 5 /* string */,
+            1 /* boolean */, 2 /* integer */, 2 /* unsigned */, 2 /* float */
+        }
+    };
+
+    const auto l_index = static_cast<std::size_t>(lhs);
+    const auto r_index = static_cast<std::size_t>(rhs);
+    return l_index < order.size() and r_index < order.size() and order[l_index] < order[r_index];
+}
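+
+// Illustrative consequence of this ordering (a sketch, not part of the
+// interface): when two basic_json values of *different* types are compared
+// with operator<, the comparison falls back to this type order, e.g.
+//
+//     nlohmann::json(nullptr) < nlohmann::json(false)     // true: null < boolean
+//     nlohmann::json({1, 2})  < nlohmann::json("text")    // true: array < string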
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/conversions/from_json.hpp>
+
+
+#include <algorithm> // transform
+#include <array> // array
+#include <ciso646> // and, not
+#include <forward_list> // forward_list
+#include <iterator> // inserter, front_inserter, end
+#include <map> // map
+#include <string> // string
+#include <tuple> // tuple, make_tuple
+#include <type_traits> // is_arithmetic, is_same, is_enum, underlying_type, is_convertible
+#include <unordered_map> // unordered_map
+#include <utility> // pair, declval
+#include <valarray> // valarray
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename std::nullptr_t& n)
+{
+    if (JSON_UNLIKELY(not j.is_null()))
+    {
+        JSON_THROW(type_error::create(302, "type must be null, but is " + std::string(j.type_name())));
+    }
+    n = nullptr;
+}
+
+// overloads for basic_json template parameters
+template<typename BasicJsonType, typename ArithmeticType,
+         enable_if_t<std::is_arithmetic<ArithmeticType>::value and
+                     not std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
+                     int> = 0>
+void get_arithmetic_value(const BasicJsonType& j, ArithmeticType& val)
+{
+    switch (static_cast<value_t>(j))
+    {
+        case value_t::number_unsigned:
+        {
+            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>());
+            break;
+        }
+        case value_t::number_integer:
+        {
+            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>());
+            break;
+        }
+        case value_t::number_float:
+        {
+            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>());
+            break;
+        }
+
+        default:
+            JSON_THROW(type_error::create(302, "type must be number, but is " + std::string(j.type_name())));
+    }
+}
+
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::boolean_t& b)
+{
+    if (JSON_UNLIKELY(not j.is_boolean()))
+    {
+        JSON_THROW(type_error::create(302, "type must be boolean, but is " + std::string(j.type_name())));
+    }
+    b = *j.template get_ptr<const typename BasicJsonType::boolean_t*>();
+}
+
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::string_t& s)
+{
+    if (JSON_UNLIKELY(not j.is_string()))
+    {
+        JSON_THROW(type_error::create(302, "type must be string, but is " + std::string(j.type_name())));
+    }
+    s = *j.template get_ptr<const typename BasicJsonType::string_t*>();
+}
+
+template <
+    typename BasicJsonType, typename ConstructibleStringType,
+    enable_if_t <
+        is_constructible_string_type<BasicJsonType, ConstructibleStringType>::value and
+        not std::is_same<typename BasicJsonType::string_t,
+                         ConstructibleStringType>::value,
+        int > = 0 >
+void from_json(const BasicJsonType& j, ConstructibleStringType& s)
+{
+    if (JSON_UNLIKELY(not j.is_string()))
+    {
+        JSON_THROW(type_error::create(302, "type must be string, but is " + std::string(j.type_name())));
+    }
+
+    s = *j.template get_ptr<const typename BasicJsonType::string_t*>();
+}
+
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::number_float_t& val)
+{
+    get_arithmetic_value(j, val);
+}
+
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::number_unsigned_t& val)
+{
+    get_arithmetic_value(j, val);
+}
+
+template<typename BasicJsonType>
+void from_json(const BasicJsonType& j, typename BasicJsonType::number_integer_t& val)
+{
+    get_arithmetic_value(j, val);
+}
+
+template<typename BasicJsonType, typename EnumType,
+         enable_if_t<std::is_enum<EnumType>::value, int> = 0>
+void from_json(const BasicJsonType& j, EnumType& e)
+{
+    typename std::underlying_type<EnumType>::type val;
+    get_arithmetic_value(j, val);
+    e = static_cast<EnumType>(val);
+}
+
+// forward_list doesn't have an insert method
+template<typename BasicJsonType, typename T, typename Allocator,
+         enable_if_t<std::is_convertible<BasicJsonType, T>::value, int> = 0>
+void from_json(const BasicJsonType& j, std::forward_list<T, Allocator>& l)
+{
+    if (JSON_UNLIKELY(not j.is_array()))
+    {
+        JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
+    }
+    std::transform(j.rbegin(), j.rend(),
+                   std::front_inserter(l), [](const BasicJsonType & i)
+    {
+        return i.template get<T>();
+    });
+}
+
+// valarray doesn't have an insert method
+template<typename BasicJsonType, typename T,
+         enable_if_t<std::is_convertible<BasicJsonType, T>::value, int> = 0>
+void from_json(const BasicJsonType& j, std::valarray<T>& l)
+{
+    if (JSON_UNLIKELY(not j.is_array()))
+    {
+        JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
+    }
+    l.resize(j.size());
+    std::copy(j.m_value.array->begin(), j.m_value.array->end(), std::begin(l));
+}
+
+template<typename BasicJsonType>
+void from_json_array_impl(const BasicJsonType& j, typename BasicJsonType::array_t& arr, priority_tag<3> /*unused*/)
+{
+    arr = *j.template get_ptr<const typename BasicJsonType::array_t*>();
+}
+
+template <typename BasicJsonType, typename T, std::size_t N>
+auto from_json_array_impl(const BasicJsonType& j, std::array<T, N>& arr,
+                          priority_tag<2> /*unused*/)
+-> decltype(j.template get<T>(), void())
+{
+    for (std::size_t i = 0; i < N; ++i)
+    {
+        arr[i] = j.at(i).template get<T>();
+    }
+}
+
+template<typename BasicJsonType, typename ConstructibleArrayType>
+auto from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr, priority_tag<1> /*unused*/)
+-> decltype(
+    arr.reserve(std::declval<typename ConstructibleArrayType::size_type>()),
+    j.template get<typename ConstructibleArrayType::value_type>(),
+    void())
+{
+    using std::end;
+
+    arr.reserve(j.size());
+    std::transform(j.begin(), j.end(),
+                   std::inserter(arr, end(arr)), [](const BasicJsonType & i)
+    {
+        // get<BasicJsonType>() returns *this; it won't call a from_json
+        // method when value_type is BasicJsonType
+        return i.template get<typename ConstructibleArrayType::value_type>();
+    });
+}
+
+template <typename BasicJsonType, typename ConstructibleArrayType>
+void from_json_array_impl(const BasicJsonType& j, ConstructibleArrayType& arr,
+                          priority_tag<0> /*unused*/)
+{
+    using std::end;
+
+    std::transform(
+        j.begin(), j.end(), std::inserter(arr, end(arr)),
+        [](const BasicJsonType & i)
+    {
+        // get<BasicJsonType>() returns *this; it won't call a from_json
+        // method when value_type is BasicJsonType
+        return i.template get<typename ConstructibleArrayType::value_type>();
+    });
+}
+
+template <typename BasicJsonType, typename ConstructibleArrayType,
+          enable_if_t <
+              is_constructible_array_type<BasicJsonType, ConstructibleArrayType>::value and
+              not is_constructible_object_type<BasicJsonType, ConstructibleArrayType>::value and
+              not is_constructible_string_type<BasicJsonType, ConstructibleArrayType>::value and
+              not is_basic_json<ConstructibleArrayType>::value,
+              int > = 0 >
+
+auto from_json(const BasicJsonType& j, ConstructibleArrayType& arr)
+-> decltype(from_json_array_impl(j, arr, priority_tag<3> {}),
+j.template get<typename ConstructibleArrayType::value_type>(),
+void())
+{
+    if (JSON_UNLIKELY(not j.is_array()))
+    {
+        JSON_THROW(type_error::create(302, "type must be array, but is " +
+                                      std::string(j.type_name())));
+    }
+
+    from_json_array_impl(j, arr, priority_tag<3> {});
+}
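+
+// Usage sketch (illustrative): the overloads above are what make get<>()
+// work for standard containers; the priority_tag dispatch simply prefers
+// the most specific implementation that compiles (array_t, std::array,
+// reserve-capable containers, then the generic inserter fallback).
+//
+//     nlohmann::json j = {1, 2, 3};
+//     auto v = j.get<std::vector<int>>();     // uses the reserving overload
+//     auto a = j.get<std::array<int, 3>>();   // uses the std::array overload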
+
+template<typename BasicJsonType, typename ConstructibleObjectType,
+         enable_if_t<is_constructible_object_type<BasicJsonType, ConstructibleObjectType>::value, int> = 0>
+void from_json(const BasicJsonType& j, ConstructibleObjectType& obj)
+{
+    if (JSON_UNLIKELY(not j.is_object()))
+    {
+        JSON_THROW(type_error::create(302, "type must be object, but is " + std::string(j.type_name())));
+    }
+
+    auto inner_object = j.template get_ptr<const typename BasicJsonType::object_t*>();
+    using value_type = typename ConstructibleObjectType::value_type;
+    std::transform(
+        inner_object->begin(), inner_object->end(),
+        std::inserter(obj, obj.begin()),
+        [](typename BasicJsonType::object_t::value_type const & p)
+    {
+        return value_type(p.first, p.second.template get<typename ConstructibleObjectType::mapped_type>());
+    });
+}
+
+// overload for arithmetic types, not chosen for basic_json template arguments
+// (BooleanType, etc.); note: Is it really necessary to provide explicit
+// overloads for boolean_t etc. in case of a custom BooleanType which is not
+// an arithmetic type?
+template<typename BasicJsonType, typename ArithmeticType,
+         enable_if_t <
+             std::is_arithmetic<ArithmeticType>::value and
+             not std::is_same<ArithmeticType, typename BasicJsonType::number_unsigned_t>::value and
+             not std::is_same<ArithmeticType, typename BasicJsonType::number_integer_t>::value and
+             not std::is_same<ArithmeticType, typename BasicJsonType::number_float_t>::value and
+             not std::is_same<ArithmeticType, typename BasicJsonType::boolean_t>::value,
+             int> = 0>
+void from_json(const BasicJsonType& j, ArithmeticType& val)
+{
+    switch (static_cast<value_t>(j))
+    {
+        case value_t::number_unsigned:
+        {
+            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_unsigned_t*>());
+            break;
+        }
+        case value_t::number_integer:
+        {
+            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_integer_t*>());
+            break;
+        }
+        case value_t::number_float:
+        {
+            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::number_float_t*>());
+            break;
+        }
+        case value_t::boolean:
+        {
+            val = static_cast<ArithmeticType>(*j.template get_ptr<const typename BasicJsonType::boolean_t*>());
+            break;
+        }
+
+        default:
+            JSON_THROW(type_error::create(302, "type must be number, but is " + std::string(j.type_name())));
+    }
+}
+
+template<typename BasicJsonType, typename A1, typename A2>
+void from_json(const BasicJsonType& j, std::pair<A1, A2>& p)
+{
+    p = {j.at(0).template get<A1>(), j.at(1).template get<A2>()};
+}
+
+template<typename BasicJsonType, typename Tuple, std::size_t... Idx>
+void from_json_tuple_impl(const BasicJsonType& j, Tuple& t, index_sequence<Idx...> /*unused*/)
+{
+    t = std::make_tuple(j.at(Idx).template get<typename std::tuple_element<Idx, Tuple>::type>()...);
+}
+
+template<typename BasicJsonType, typename... Args>
+void from_json(const BasicJsonType& j, std::tuple<Args...>& t)
+{
+    from_json_tuple_impl(j, t, index_sequence_for<Args...> {});
+}
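+
+// Usage sketch (illustrative): pairs and tuples are read element-wise from
+// a JSON array of matching length.
+//
+//     nlohmann::json j = {1, "two", 3.5};
+//     auto t = j.get<std::tuple<int, std::string, double>>();
+//     auto p = nlohmann::json({1, "one"}).get<std::pair<int, std::string>>();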
+
+template <typename BasicJsonType, typename Key, typename Value, typename Compare, typename Allocator,
+          typename = enable_if_t<not std::is_constructible<
+                                     typename BasicJsonType::string_t, Key>::value>>
+void from_json(const BasicJsonType& j, std::map<Key, Value, Compare, Allocator>& m)
+{
+    if (JSON_UNLIKELY(not j.is_array()))
+    {
+        JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
+    }
+    for (const auto& p : j)
+    {
+        if (JSON_UNLIKELY(not p.is_array()))
+        {
+            JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(p.type_name())));
+        }
+        m.emplace(p.at(0).template get<Key>(), p.at(1).template get<Value>());
+    }
+}
+
+template <typename BasicJsonType, typename Key, typename Value, typename Hash, typename KeyEqual, typename Allocator,
+          typename = enable_if_t<not std::is_constructible<
+                                     typename BasicJsonType::string_t, Key>::value>>
+void from_json(const BasicJsonType& j, std::unordered_map<Key, Value, Hash, KeyEqual, Allocator>& m)
+{
+    if (JSON_UNLIKELY(not j.is_array()))
+    {
+        JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(j.type_name())));
+    }
+    for (const auto& p : j)
+    {
+        if (JSON_UNLIKELY(not p.is_array()))
+        {
+            JSON_THROW(type_error::create(302, "type must be array, but is " + std::string(p.type_name())));
+        }
+        m.emplace(p.at(0).template get<Key>(), p.at(1).template get<Value>());
+    }
+}
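+
+// Usage sketch (illustrative): these overloads only apply when the map key
+// cannot be built from a JSON string, in which case the JSON representation
+// is an array of [key, value] pairs rather than an object.
+//
+//     nlohmann::json j = {{1, "one"}, {2, "two"}};   // becomes [[1,"one"],[2,"two"]]
+//     auto m = j.get<std::map<int, std::string>>();  // {1: "one", 2: "two"}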
+
+struct from_json_fn
+{
+    template<typename BasicJsonType, typename T>
+    auto operator()(const BasicJsonType& j, T& val) const
+    noexcept(noexcept(from_json(j, val)))
+    -> decltype(from_json(j, val), void())
+    {
+        return from_json(j, val);
+    }
+};
+}  // namespace detail
+
+/// namespace to hold default `from_json` function
+/// to see why this is required:
+/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4381.html
+namespace
+{
+constexpr const auto& from_json = detail::static_const<detail::from_json_fn>::value;
+} // namespace
+}  // namespace nlohmann
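+
+// Usage sketch (illustrative; the type ns::person and its members are
+// placeholders): user-defined types opt in to conversion by providing a
+// from_json overload in their own namespace, which is then found via
+// argument-dependent lookup.
+//
+//     namespace ns
+//     {
+//         struct person { std::string name; int age; };
+//
+//         void from_json(const nlohmann::json& j, person& p)
+//         {
+//             p.name = j.at("name").get<std::string>();
+//             p.age  = j.at("age").get<int>();
+//         }
+//     }
+//
+//     // auto p = nlohmann::json::parse(R"({"name":"Ada","age":36})").get<ns::person>();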
+
+// #include <nlohmann/detail/conversions/to_json.hpp>
+
+
+#include <ciso646> // or, and, not
+#include <iterator> // begin, end
+#include <tuple> // tuple, get
+#include <type_traits> // is_same, is_constructible, is_floating_point, is_enum, underlying_type
+#include <utility> // move, forward, declval, pair
+#include <valarray> // valarray
+#include <vector> // vector
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+// #include <nlohmann/detail/iterators/iteration_proxy.hpp>
+
+
+#include <cstddef> // size_t
+#include <string> // string, to_string
+#include <iterator> // input_iterator_tag
+#include <tuple> // tuple_size, get, tuple_element
+
+// #include <nlohmann/detail/value_t.hpp>
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template <typename IteratorType> class iteration_proxy_value
+{
+  public:
+    using difference_type = std::ptrdiff_t;
+    using value_type = iteration_proxy_value;
+    using pointer = value_type * ;
+    using reference = value_type & ;
+    using iterator_category = std::input_iterator_tag;
+
+  private:
+    /// the iterator
+    IteratorType anchor;
+    /// an index for arrays (used to create key names)
+    std::size_t array_index = 0;
+    /// last stringified array index
+    mutable std::size_t array_index_last = 0;
+    /// a string representation of the array index
+    mutable std::string array_index_str = "0";
+    /// an empty string (to return a reference for primitive values)
+    const std::string empty_str = "";
+
+  public:
+    explicit iteration_proxy_value(IteratorType it) noexcept : anchor(it) {}
+
+    /// dereference operator (needed for range-based for)
+    iteration_proxy_value& operator*()
+    {
+        return *this;
+    }
+
+    /// increment operator (needed for range-based for)
+    iteration_proxy_value& operator++()
+    {
+        ++anchor;
+        ++array_index;
+
+        return *this;
+    }
+
+    /// equality operator (needed for InputIterator)
+    bool operator==(const iteration_proxy_value& o) const noexcept
+    {
+        return anchor == o.anchor;
+    }
+
+    /// inequality operator (needed for range-based for)
+    bool operator!=(const iteration_proxy_value& o) const noexcept
+    {
+        return anchor != o.anchor;
+    }
+
+    /// return key of the iterator
+    const std::string& key() const
+    {
+        assert(anchor.m_object != nullptr);
+
+        switch (anchor.m_object->type())
+        {
+            // use integer array index as key
+            case value_t::array:
+            {
+                if (array_index != array_index_last)
+                {
+                    array_index_str = std::to_string(array_index);
+                    array_index_last = array_index;
+                }
+                return array_index_str;
+            }
+
+            // use key from the object
+            case value_t::object:
+                return anchor.key();
+
+            // use an empty key for all primitive types
+            default:
+                return empty_str;
+        }
+    }
+
+    /// return value of the iterator
+    typename IteratorType::reference value() const
+    {
+        return anchor.value();
+    }
+};
+
+/// proxy class for the items() function
+template<typename IteratorType> class iteration_proxy
+{
+  private:
+    /// the container to iterate
+    typename IteratorType::reference container;
+
+  public:
+    /// construct iteration proxy from a container
+    explicit iteration_proxy(typename IteratorType::reference cont) noexcept
+        : container(cont) {}
+
+    /// return iterator begin (needed for range-based for)
+    iteration_proxy_value<IteratorType> begin() noexcept
+    {
+        return iteration_proxy_value<IteratorType>(container.begin());
+    }
+
+    /// return iterator end (needed for range-based for)
+    iteration_proxy_value<IteratorType> end() noexcept
+    {
+        return iteration_proxy_value<IteratorType>(container.end());
+    }
+};
+// Structured Bindings Support
+// For further reference see https://blog.tartanllama.xyz/structured-bindings/
+// And see https://github.com/nlohmann/json/pull/1391
+template <std::size_t N, typename IteratorType, enable_if_t<N == 0, int> = 0>
+auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.key())
+{
+    return i.key();
+}
+// Structured Bindings Support
+// For further reference see https://blog.tartanllama.xyz/structured-bindings/
+// And see https://github.com/nlohmann/json/pull/1391
+template <std::size_t N, typename IteratorType, enable_if_t<N == 1, int> = 0>
+auto get(const nlohmann::detail::iteration_proxy_value<IteratorType>& i) -> decltype(i.value())
+{
+    return i.value();
+}
+}  // namespace detail
+}  // namespace nlohmann
+
+// The addition to the std namespace is required to add
+// structured bindings support to the iteration_proxy_value class
+// For further reference see https://blog.tartanllama.xyz/structured-bindings/
+// And see https://github.com/nlohmann/json/pull/1391
+namespace std
+{
+template <typename IteratorType>
+class tuple_size<::nlohmann::detail::iteration_proxy_value<IteratorType>>
+            : public std::integral_constant<std::size_t, 2> {};
+
+template <std::size_t N, typename IteratorType>
+class tuple_element<N, ::nlohmann::detail::iteration_proxy_value<IteratorType >>
+{
+  public:
+    using type = decltype(
+                     get<N>(std::declval <
+                            ::nlohmann::detail::iteration_proxy_value<IteratorType >> ()));
+};
+}
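+
+// Usage sketch (illustrative, requires C++17; includes omitted): with the
+// std specializations above, the value returned by items() can be unpacked
+// with structured bindings.
+//
+//     nlohmann::json j = {{"one", 1}, {"two", 2}};
+//     for (auto& [key, value] : j.items())
+//     {
+//         std::cout << key << " : " << value << '\n';
+//     }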
+
+namespace nlohmann
+{
+namespace detail
+{
+//////////////////
+// constructors //
+//////////////////
+
+template<value_t> struct external_constructor;
+
+template<>
+struct external_constructor<value_t::boolean>
+{
+    template<typename BasicJsonType>
+    static void construct(BasicJsonType& j, typename BasicJsonType::boolean_t b) noexcept
+    {
+        j.m_type = value_t::boolean;
+        j.m_value = b;
+        j.assert_invariant();
+    }
+};
+
+template<>
+struct external_constructor<value_t::string>
+{
+    template<typename BasicJsonType>
+    static void construct(BasicJsonType& j, const typename BasicJsonType::string_t& s)
+    {
+        j.m_type = value_t::string;
+        j.m_value = s;
+        j.assert_invariant();
+    }
+
+    template<typename BasicJsonType>
+    static void construct(BasicJsonType& j, typename BasicJsonType::string_t&& s)
+    {
+        j.m_type = value_t::string;
+        j.m_value = std::move(s);
+        j.assert_invariant();
+    }
+
+    template<typename BasicJsonType, typename CompatibleStringType,
+             enable_if_t<not std::is_same<CompatibleStringType, typename BasicJsonType::string_t>::value,
+                         int> = 0>
+    static void construct(BasicJsonType& j, const CompatibleStringType& str)
+    {
+        j.m_type = value_t::string;
+        j.m_value.string = j.template create<typename BasicJsonType::string_t>(str);
+        j.assert_invariant();
+    }
+};
+
+template<>
+struct external_constructor<value_t::number_float>
+{
+    template<typename BasicJsonType>
+    static void construct(BasicJsonType& j, typename BasicJsonType::number_float_t val) noexcept
+    {
+        j.m_type = value_t::number_float;
+        j.m_value = val;
+        j.assert_invariant();
+    }
+};
+
+template<>
+struct external_constructor<value_t::number_unsigned>
+{
+    template<typename BasicJsonType>
+    static void construct(BasicJsonType& j, typename BasicJsonType::number_unsigned_t val) noexcept
+    {
+        j.m_type = value_t::number_unsigned;
+        j.m_value = val;
+        j.assert_invariant();
+    }
+};
+
+template<>
+struct external_constructor<value_t::number_integer>
+{
+    template<typename BasicJsonType>
+    static void construct(BasicJsonType& j, typename BasicJsonType::number_integer_t val) noexcept
+    {
+        j.m_type = value_t::number_integer;
+        j.m_value = val;
+        j.assert_invariant();
+    }
+};
+
+template<>
+struct external_constructor<value_t::array>
+{
+    template<typename BasicJsonType>
+    static void construct(BasicJsonType& j, const typename BasicJsonType::array_t& arr)
+    {
+        j.m_type = value_t::array;
+        j.m_value = arr;
+        j.assert_invariant();
+    }
+
+    template<typename BasicJsonType>
+    static void construct(BasicJsonType& j, typename BasicJsonType::array_t&& arr)
+    {
+        j.m_type = value_t::array;
+        j.m_value = std::move(arr);
+        j.assert_invariant();
+    }
+
+    template<typename BasicJsonType, typename CompatibleArrayType,
+             enable_if_t<not std::is_same<CompatibleArrayType, typename BasicJsonType::array_t>::value,
+                         int> = 0>
+    static void construct(BasicJsonType& j, const CompatibleArrayType& arr)
+    {
+        using std::begin;
+        using std::end;
+        j.m_type = value_t::array;
+        j.m_value.array = j.template create<typename BasicJsonType::array_t>(begin(arr), end(arr));
+        j.assert_invariant();
+    }
+
+    template<typename BasicJsonType>
+    static void construct(BasicJsonType& j, const std::vector<bool>& arr)
+    {
+        j.m_type = value_t::array;
+        j.m_value = value_t::array;
+        j.m_value.array->reserve(arr.size());
+        for (const bool x : arr)
+        {
+            j.m_value.array->push_back(x);
+        }
+        j.assert_invariant();
+    }
+
+    template<typename BasicJsonType, typename T,
+             enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0>
+    static void construct(BasicJsonType& j, const std::valarray<T>& arr)
+    {
+        j.m_type = value_t::array;
+        j.m_value = value_t::array;
+        j.m_value.array->resize(arr.size());
+        std::copy(std::begin(arr), std::end(arr), j.m_value.array->begin());
+        j.assert_invariant();
+    }
+};
+
+template<>
+struct external_constructor<value_t::object>
+{
+    template<typename BasicJsonType>
+    static void construct(BasicJsonType& j, const typename BasicJsonType::object_t& obj)
+    {
+        j.m_type = value_t::object;
+        j.m_value = obj;
+        j.assert_invariant();
+    }
+
+    template<typename BasicJsonType>
+    static void construct(BasicJsonType& j, typename BasicJsonType::object_t&& obj)
+    {
+        j.m_type = value_t::object;
+        j.m_value = std::move(obj);
+        j.assert_invariant();
+    }
+
+    template<typename BasicJsonType, typename CompatibleObjectType,
+             enable_if_t<not std::is_same<CompatibleObjectType, typename BasicJsonType::object_t>::value, int> = 0>
+    static void construct(BasicJsonType& j, const CompatibleObjectType& obj)
+    {
+        using std::begin;
+        using std::end;
+
+        j.m_type = value_t::object;
+        j.m_value.object = j.template create<typename BasicJsonType::object_t>(begin(obj), end(obj));
+        j.assert_invariant();
+    }
+};
+
+/////////////
+// to_json //
+/////////////
+
+template<typename BasicJsonType, typename T,
+         enable_if_t<std::is_same<T, typename BasicJsonType::boolean_t>::value, int> = 0>
+void to_json(BasicJsonType& j, T b) noexcept
+{
+    external_constructor<value_t::boolean>::construct(j, b);
+}
+
+template<typename BasicJsonType, typename CompatibleString,
+         enable_if_t<std::is_constructible<typename BasicJsonType::string_t, CompatibleString>::value, int> = 0>
+void to_json(BasicJsonType& j, const CompatibleString& s)
+{
+    external_constructor<value_t::string>::construct(j, s);
+}
+
+template<typename BasicJsonType>
+void to_json(BasicJsonType& j, typename BasicJsonType::string_t&& s)
+{
+    external_constructor<value_t::string>::construct(j, std::move(s));
+}
+
+template<typename BasicJsonType, typename FloatType,
+         enable_if_t<std::is_floating_point<FloatType>::value, int> = 0>
+void to_json(BasicJsonType& j, FloatType val) noexcept
+{
+    external_constructor<value_t::number_float>::construct(j, static_cast<typename BasicJsonType::number_float_t>(val));
+}
+
+template<typename BasicJsonType, typename CompatibleNumberUnsignedType,
+         enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_unsigned_t, CompatibleNumberUnsignedType>::value, int> = 0>
+void to_json(BasicJsonType& j, CompatibleNumberUnsignedType val) noexcept
+{
+    external_constructor<value_t::number_unsigned>::construct(j, static_cast<typename BasicJsonType::number_unsigned_t>(val));
+}
+
+template<typename BasicJsonType, typename CompatibleNumberIntegerType,
+         enable_if_t<is_compatible_integer_type<typename BasicJsonType::number_integer_t, CompatibleNumberIntegerType>::value, int> = 0>
+void to_json(BasicJsonType& j, CompatibleNumberIntegerType val) noexcept
+{
+    external_constructor<value_t::number_integer>::construct(j, static_cast<typename BasicJsonType::number_integer_t>(val));
+}
+
+template<typename BasicJsonType, typename EnumType,
+         enable_if_t<std::is_enum<EnumType>::value, int> = 0>
+void to_json(BasicJsonType& j, EnumType e) noexcept
+{
+    using underlying_type = typename std::underlying_type<EnumType>::type;
+    external_constructor<value_t::number_integer>::construct(j, static_cast<underlying_type>(e));
+}
+
+template<typename BasicJsonType>
+void to_json(BasicJsonType& j, const std::vector<bool>& e)
+{
+    external_constructor<value_t::array>::construct(j, e);
+}
+
+template <typename BasicJsonType, typename CompatibleArrayType,
+          enable_if_t<is_compatible_array_type<BasicJsonType,
+                      CompatibleArrayType>::value and
+                      not is_compatible_object_type<
+                          BasicJsonType, CompatibleArrayType>::value and
+                      not is_compatible_string_type<BasicJsonType, CompatibleArrayType>::value and
+                      not is_basic_json<CompatibleArrayType>::value,
+                      int> = 0>
+void to_json(BasicJsonType& j, const CompatibleArrayType& arr)
+{
+    external_constructor<value_t::array>::construct(j, arr);
+}
+
+template<typename BasicJsonType, typename T,
+         enable_if_t<std::is_convertible<T, BasicJsonType>::value, int> = 0>
+void to_json(BasicJsonType& j, const std::valarray<T>& arr)
+{
+    external_constructor<value_t::array>::construct(j, arr);
+}
+
+template<typename BasicJsonType>
+void to_json(BasicJsonType& j, typename BasicJsonType::array_t&& arr)
+{
+    external_constructor<value_t::array>::construct(j, std::move(arr));
+}
+
+template<typename BasicJsonType, typename CompatibleObjectType,
+         enable_if_t<is_compatible_object_type<BasicJsonType, CompatibleObjectType>::value and not is_basic_json<CompatibleObjectType>::value, int> = 0>
+void to_json(BasicJsonType& j, const CompatibleObjectType& obj)
+{
+    external_constructor<value_t::object>::construct(j, obj);
+}
+
+template<typename BasicJsonType>
+void to_json(BasicJsonType& j, typename BasicJsonType::object_t&& obj)
+{
+    external_constructor<value_t::object>::construct(j, std::move(obj));
+}
+
+template <
+    typename BasicJsonType, typename T, std::size_t N,
+    enable_if_t<not std::is_constructible<typename BasicJsonType::string_t,
+                const T(&)[N]>::value,
+                int> = 0 >
+void to_json(BasicJsonType& j, const T(&arr)[N])
+{
+    external_constructor<value_t::array>::construct(j, arr);
+}
+
+template<typename BasicJsonType, typename... Args>
+void to_json(BasicJsonType& j, const std::pair<Args...>& p)
+{
+    j = { p.first, p.second };
+}
+
+// for https://github.com/nlohmann/json/pull/1134
+template < typename BasicJsonType, typename T,
+           enable_if_t<std::is_same<T, iteration_proxy_value<typename BasicJsonType::iterator>>::value, int> = 0>
+void to_json(BasicJsonType& j, const T& b)
+{
+    j = { {b.key(), b.value()} };
+}
+
+template<typename BasicJsonType, typename Tuple, std::size_t... Idx>
+void to_json_tuple_impl(BasicJsonType& j, const Tuple& t, index_sequence<Idx...> /*unused*/)
+{
+    j = { std::get<Idx>(t)... };
+}
+
+template<typename BasicJsonType, typename... Args>
+void to_json(BasicJsonType& j, const std::tuple<Args...>& t)
+{
+    to_json_tuple_impl(j, t, index_sequence_for<Args...> {});
+}
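+
+// For illustration: with the overloads above, to_json on
+// std::make_tuple(1, "foo", true) produces the JSON array [1, "foo", true],
+// and a std::pair is serialized analogously as a two-element array.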
+
+struct to_json_fn
+{
+    template<typename BasicJsonType, typename T>
+    auto operator()(BasicJsonType& j, T&& val) const noexcept(noexcept(to_json(j, std::forward<T>(val))))
+    -> decltype(to_json(j, std::forward<T>(val)), void())
+    {
+        return to_json(j, std::forward<T>(val));
+    }
+};
+}  // namespace detail
+
+/// namespace to hold default `to_json` function
+namespace
+{
+constexpr const auto& to_json = detail::static_const<detail::to_json_fn>::value;
+} // namespace
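+
+// Usage sketch (illustrative only; `nlohmann::json` is the default
+// basic_json specialization defined later in this header):
+//
+//   nlohmann::json j;
+//   nlohmann::to_json(j, std::vector<int> {1, 2, 3});  // j holds the array [1,2,3]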
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/input/input_adapters.hpp>
+
+
+#include <cassert> // assert
+#include <cstddef> // size_t
+#include <cstring> // strlen
+#include <istream> // istream
+#include <iterator> // begin, end, iterator_traits, random_access_iterator_tag, distance, next
+#include <memory> // shared_ptr, make_shared, addressof
+#include <numeric> // accumulate
+#include <string> // string, char_traits
+#include <type_traits> // enable_if, is_base_of, is_pointer, is_integral, remove_pointer
+#include <utility> // pair, declval
+#include <cstdio> //FILE *
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+/// the supported input formats
+enum class input_format_t { json, cbor, msgpack, ubjson, bson };
+
+////////////////////
+// input adapters //
+////////////////////
+
+/*!
+@brief abstract input adapter interface
+
+Produces a stream of std::char_traits<char>::int_type characters from a
+std::istream, a buffer, or some other input type. The int_type characters
+returned consist of all valid char values as positive values (typically
+unsigned char), plus an EOF value outside that range, specified by the value
+of the function std::char_traits<char>::eof(). This value is typically -1, but
+could be any arbitrary value which is not a valid char value.
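+For example, the byte 0xFF is returned as the positive value 255 (via
+std::char_traits<char>::to_int_type), so it never collides with the EOF value.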
+*/
+struct input_adapter_protocol
+{
+    /// get a character [0,255] or std::char_traits<char>::eof().
+    virtual std::char_traits<char>::int_type get_character() = 0;
+    virtual ~input_adapter_protocol() = default;
+};
+
+/// a type to simplify interfaces
+using input_adapter_t = std::shared_ptr<input_adapter_protocol>;
+
+/*!
+Input adapter for stdio file access. This adapter reads only 1 byte at a time
+and does not use any buffer. It is a very low-level adapter.
+*/
+class file_input_adapter : public input_adapter_protocol
+{
+  public:
+    explicit file_input_adapter(std::FILE* f)  noexcept
+        : m_file(f)
+    {}
+
+    std::char_traits<char>::int_type get_character() noexcept override
+    {
+        return std::fgetc(m_file);
+    }
+  private:
+    /// the file pointer to read from
+    std::FILE* m_file;
+};
+
+
+/*!
+Input adapter for a (caching) istream. Ignores a UTF Byte Order Mark at
+beginning of input. Does not support changing the underlying std::streambuf
+in mid-input. Maintains underlying std::istream and std::streambuf to support
+subsequent use of standard std::istream operations to process any input
+characters following those used in parsing the JSON input.  Clears the
+std::istream flags; any input errors (e.g., EOF) will be detected by the first
+subsequent call for input from the std::istream.
+*/
+class input_stream_adapter : public input_adapter_protocol
+{
+  public:
+    ~input_stream_adapter() override
+    {
+        // clear stream flags; we use underlying streambuf I/O, do not
+        // maintain ifstream flags, except eof
+        is.clear(is.rdstate() & std::ios::eofbit);
+    }
+
+    explicit input_stream_adapter(std::istream& i)
+        : is(i), sb(*i.rdbuf())
+    {}
+
+    // delete because of pointer members
+    input_stream_adapter(const input_stream_adapter&) = delete;
+    input_stream_adapter& operator=(input_stream_adapter&) = delete;
+    input_stream_adapter(input_stream_adapter&&) = delete;
+    input_stream_adapter& operator=(input_stream_adapter&&) = delete;
+
+    // std::istream/std::streambuf use std::char_traits<char>::to_int_type, to
+    // ensure that std::char_traits<char>::eof() and the character 0xFF do not
+    // end up as the same value, e.g. 0xFFFFFFFF.
+    std::char_traits<char>::int_type get_character() override
+    {
+        auto res = sb.sbumpc();
+        // set eof manually, as we don't use the istream interface.
+        if (res == EOF)
+        {
+            is.clear(is.rdstate() | std::ios::eofbit);
+        }
+        return res;
+    }
+
+  private:
+    /// the associated input stream
+    std::istream& is;
+    std::streambuf& sb;
+};
+
+/// input adapter for buffer input
+class input_buffer_adapter : public input_adapter_protocol
+{
+  public:
+    input_buffer_adapter(const char* b, const std::size_t l) noexcept
+        : cursor(b), limit(b + l)
+    {}
+
+    // delete because of pointer members
+    input_buffer_adapter(const input_buffer_adapter&) = delete;
+    input_buffer_adapter& operator=(input_buffer_adapter&) = delete;
+    input_buffer_adapter(input_buffer_adapter&&) = delete;
+    input_buffer_adapter& operator=(input_buffer_adapter&&) = delete;
+    ~input_buffer_adapter() override = default;
+
+    std::char_traits<char>::int_type get_character() noexcept override
+    {
+        if (JSON_LIKELY(cursor < limit))
+        {
+            return std::char_traits<char>::to_int_type(*(cursor++));
+        }
+
+        return std::char_traits<char>::eof();
+    }
+
+  private:
+    /// pointer to the current character
+    const char* cursor;
+    /// pointer past the last character
+    const char* const limit;
+};
+
+template<typename WideStringType, size_t T>
+struct wide_string_input_helper
+{
+    // UTF-32
+    static void fill_buffer(const WideStringType& str, size_t& current_wchar, std::array<std::char_traits<char>::int_type, 4>& utf8_bytes, size_t& utf8_bytes_index, size_t& utf8_bytes_filled)
+    {
+        utf8_bytes_index = 0;
+
+        if (current_wchar == str.size())
+        {
+            utf8_bytes[0] = std::char_traits<char>::eof();
+            utf8_bytes_filled = 1;
+        }
+        else
+        {
+            // get the current character
+            const auto wc = static_cast<int>(str[current_wchar++]);
+
+            // UTF-32 to UTF-8 encoding
+            if (wc < 0x80)
+            {
+                utf8_bytes[0] = wc;
+                utf8_bytes_filled = 1;
+            }
+            else if (wc <= 0x7FF)
+            {
+                utf8_bytes[0] = 0xC0 | ((wc >> 6) & 0x1F);
+                utf8_bytes[1] = 0x80 | (wc & 0x3F);
+                utf8_bytes_filled = 2;
+            }
+            else if (wc <= 0xFFFF)
+            {
+                utf8_bytes[0] = 0xE0 | ((wc >> 12) & 0x0F);
+                utf8_bytes[1] = 0x80 | ((wc >> 6) & 0x3F);
+                utf8_bytes[2] = 0x80 | (wc & 0x3F);
+                utf8_bytes_filled = 3;
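+                // e.g. U+20AC (EURO SIGN) is encoded as 0xE2 0x82 0xAC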
+            }
+            else if (wc <= 0x10FFFF)
+            {
+                utf8_bytes[0] = 0xF0 | ((wc >> 18) & 0x07);
+                utf8_bytes[1] = 0x80 | ((wc >> 12) & 0x3F);
+                utf8_bytes[2] = 0x80 | ((wc >> 6) & 0x3F);
+                utf8_bytes[3] = 0x80 | (wc & 0x3F);
+                utf8_bytes_filled = 4;
+            }
+            else
+            {
+                // unknown character
+                utf8_bytes[0] = wc;
+                utf8_bytes_filled = 1;
+            }
+        }
+    }
+};
+
+template<typename WideStringType>
+struct wide_string_input_helper<WideStringType, 2>
+{
+    // UTF-16
+    static void fill_buffer(const WideStringType& str, size_t& current_wchar, std::array<std::char_traits<char>::int_type, 4>& utf8_bytes, size_t& utf8_bytes_index, size_t& utf8_bytes_filled)
+    {
+        utf8_bytes_index = 0;
+
+        if (current_wchar == str.size())
+        {
+            utf8_bytes[0] = std::char_traits<char>::eof();
+            utf8_bytes_filled = 1;
+        }
+        else
+        {
+            // get the current character
+            const auto wc = static_cast<int>(str[current_wchar++]);
+
+            // UTF-16 to UTF-8 encoding
+            if (wc < 0x80)
+            {
+                utf8_bytes[0] = wc;
+                utf8_bytes_filled = 1;
+            }
+            else if (wc <= 0x7FF)
+            {
+                utf8_bytes[0] = 0xC0 | ((wc >> 6));
+                utf8_bytes[1] = 0x80 | (wc & 0x3F);
+                utf8_bytes_filled = 2;
+            }
+            else if (0xD800 > wc or wc >= 0xE000)
+            {
+                utf8_bytes[0] = 0xE0 | ((wc >> 12));
+                utf8_bytes[1] = 0x80 | ((wc >> 6) & 0x3F);
+                utf8_bytes[2] = 0x80 | (wc & 0x3F);
+                utf8_bytes_filled = 3;
+            }
+            else
+            {
+                if (current_wchar < str.size())
+                {
+                    const auto wc2 = static_cast<int>(str[current_wchar++]);
+                    const int charcode = 0x10000 + (((wc & 0x3FF) << 10) | (wc2 & 0x3FF));
+                    utf8_bytes[0] = 0xf0 | (charcode >> 18);
+                    utf8_bytes[1] = 0x80 | ((charcode >> 12) & 0x3F);
+                    utf8_bytes[2] = 0x80 | ((charcode >> 6) & 0x3F);
+                    utf8_bytes[3] = 0x80 | (charcode & 0x3F);
+                    utf8_bytes_filled = 4;
+                }
+                else
+                {
+                    // unknown character
+                    ++current_wchar;
+                    utf8_bytes[0] = wc;
+                    utf8_bytes_filled = 1;
+                }
+            }
+        }
+    }
+};
+
+template<typename WideStringType>
+class wide_string_input_adapter : public input_adapter_protocol
+{
+  public:
+    explicit wide_string_input_adapter(const WideStringType& w)  noexcept
+        : str(w)
+    {}
+
+    std::char_traits<char>::int_type get_character() noexcept override
+    {
+        // check if buffer needs to be filled
+        if (utf8_bytes_index == utf8_bytes_filled)
+        {
+            fill_buffer<sizeof(typename WideStringType::value_type)>();
+
+            assert(utf8_bytes_filled > 0);
+            assert(utf8_bytes_index == 0);
+        }
+
+        // use buffer
+        assert(utf8_bytes_filled > 0);
+        assert(utf8_bytes_index < utf8_bytes_filled);
+        return utf8_bytes[utf8_bytes_index++];
+    }
+
+  private:
+    template<size_t T>
+    void fill_buffer()
+    {
+        wide_string_input_helper<WideStringType, T>::fill_buffer(str, current_wchar, utf8_bytes, utf8_bytes_index, utf8_bytes_filled);
+    }
+
+    /// the wstring to process
+    const WideStringType& str;
+
+    /// index of the current wchar in str
+    std::size_t current_wchar = 0;
+
+    /// a buffer for UTF-8 bytes
+    std::array<std::char_traits<char>::int_type, 4> utf8_bytes = {{0, 0, 0, 0}};
+
+    /// index into the utf8_bytes array for the next valid byte
+    std::size_t utf8_bytes_index = 0;
+    /// number of valid bytes in the utf8_bytes array
+    std::size_t utf8_bytes_filled = 0;
+};
+
+class input_adapter
+{
+  public:
+    // native support
+    input_adapter(std::FILE* file)
+        : ia(std::make_shared<file_input_adapter>(file)) {}
+    /// input adapter for input stream
+    input_adapter(std::istream& i)
+        : ia(std::make_shared<input_stream_adapter>(i)) {}
+
+    /// input adapter for input stream
+    input_adapter(std::istream&& i)
+        : ia(std::make_shared<input_stream_adapter>(i)) {}
+
+    input_adapter(const std::wstring& ws)
+        : ia(std::make_shared<wide_string_input_adapter<std::wstring>>(ws)) {}
+
+    input_adapter(const std::u16string& ws)
+        : ia(std::make_shared<wide_string_input_adapter<std::u16string>>(ws)) {}
+
+    input_adapter(const std::u32string& ws)
+        : ia(std::make_shared<wide_string_input_adapter<std::u32string>>(ws)) {}
+
+    /// input adapter for buffer
+    template<typename CharT,
+             typename std::enable_if<
+                 std::is_pointer<CharT>::value and
+                 std::is_integral<typename std::remove_pointer<CharT>::type>::value and
+                 sizeof(typename std::remove_pointer<CharT>::type) == 1,
+                 int>::type = 0>
+    input_adapter(CharT b, std::size_t l)
+        : ia(std::make_shared<input_buffer_adapter>(reinterpret_cast<const char*>(b), l)) {}
+
+    // derived support
+
+    /// input adapter for string literal
+    template<typename CharT,
+             typename std::enable_if<
+                 std::is_pointer<CharT>::value and
+                 std::is_integral<typename std::remove_pointer<CharT>::type>::value and
+                 sizeof(typename std::remove_pointer<CharT>::type) == 1,
+                 int>::type = 0>
+    input_adapter(CharT b)
+        : input_adapter(reinterpret_cast<const char*>(b),
+                        std::strlen(reinterpret_cast<const char*>(b))) {}
+
+    /// input adapter for iterator range with contiguous storage
+    template<class IteratorType,
+             typename std::enable_if<
+                 std::is_same<typename iterator_traits<IteratorType>::iterator_category, std::random_access_iterator_tag>::value,
+                 int>::type = 0>
+    input_adapter(IteratorType first, IteratorType last)
+    {
+#ifndef NDEBUG
+        // assertion to check that the iterator range is indeed contiguous,
+        // see http://stackoverflow.com/a/35008842/266378 for more discussion
+        const auto is_contiguous = std::accumulate(
+                                       first, last, std::pair<bool, int>(true, 0),
+                                       [&first](std::pair<bool, int> res, decltype(*first) val)
+        {
+            res.first &= (val == *(std::next(std::addressof(*first), res.second++)));
+            return res;
+        }).first;
+        assert(is_contiguous);
+#endif
+
+        // assertion to check that each element is 1 byte long
+        static_assert(
+            sizeof(typename iterator_traits<IteratorType>::value_type) == 1,
+            "each element in the iterator range must have the size of 1 byte");
+
+        const auto len = static_cast<size_t>(std::distance(first, last));
+        if (JSON_LIKELY(len > 0))
+        {
+            // there is at least one element: use the address of first
+            ia = std::make_shared<input_buffer_adapter>(reinterpret_cast<const char*>(&(*first)), len);
+        }
+        else
+        {
+            // the address of first cannot be used: use nullptr
+            ia = std::make_shared<input_buffer_adapter>(nullptr, len);
+        }
+    }
+
+    /// input adapter for array
+    template<class T, std::size_t N>
+    input_adapter(T (&array)[N])
+        : input_adapter(std::begin(array), std::end(array)) {}
+
+    /// input adapter for contiguous container
+    template<class ContiguousContainer, typename
+             std::enable_if<not std::is_pointer<ContiguousContainer>::value and
+                            std::is_base_of<std::random_access_iterator_tag, typename iterator_traits<decltype(std::begin(std::declval<ContiguousContainer const>()))>::iterator_category>::value,
+                            int>::type = 0>
+    input_adapter(const ContiguousContainer& c)
+        : input_adapter(std::begin(c), std::end(c)) {}
+
+    operator input_adapter_t()
+    {
+        return ia;
+    }
+
+  private:
+    /// the actual adapter
+    input_adapter_t ia = nullptr;
+};
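+
+// Illustrative use (not part of the library): an input_adapter is normally
+// constructed implicitly from the argument given to parse(); both of the
+// following end up wrapping the same bytes in an input_buffer_adapter:
+//
+//   std::string s = "[1,2,3]";
+//   detail::input_adapter ia1(s.data(), s.size());   // pointer + length
+//   detail::input_adapter ia2(s.begin(), s.end());   // contiguous iterator range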
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/input/lexer.hpp>
+
+
+#include <clocale> // localeconv
+#include <cstddef> // size_t
+#include <cstdlib> // strtof, strtod, strtold, strtoll, strtoull
+#include <cstdio> // snprintf
+#include <initializer_list> // initializer_list
+#include <string> // char_traits, string
+#include <vector> // vector
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/input/input_adapters.hpp>
+
+// #include <nlohmann/detail/input/position_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+///////////
+// lexer //
+///////////
+
+/*!
+@brief lexical analysis
+
+This class organizes the lexical analysis during JSON deserialization.
+*/
+template<typename BasicJsonType>
+class lexer
+{
+    using number_integer_t = typename BasicJsonType::number_integer_t;
+    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+    using number_float_t = typename BasicJsonType::number_float_t;
+    using string_t = typename BasicJsonType::string_t;
+
+  public:
+    /// token types for the parser
+    enum class token_type
+    {
+        uninitialized,    ///< indicating the scanner is uninitialized
+        literal_true,     ///< the `true` literal
+        literal_false,    ///< the `false` literal
+        literal_null,     ///< the `null` literal
+        value_string,     ///< a string -- use get_string() for actual value
+        value_unsigned,   ///< an unsigned integer -- use get_number_unsigned() for actual value
+        value_integer,    ///< a signed integer -- use get_number_integer() for actual value
+        value_float,      ///< a floating-point number -- use get_number_float() for actual value
+        begin_array,      ///< the character for array begin `[`
+        begin_object,     ///< the character for object begin `{`
+        end_array,        ///< the character for array end `]`
+        end_object,       ///< the character for object end `}`
+        name_separator,   ///< the name separator `:`
+        value_separator,  ///< the value separator `,`
+        parse_error,      ///< indicating a parse error
+        end_of_input,     ///< indicating the end of the input buffer
+        literal_or_value  ///< a literal or the begin of a value (only for diagnostics)
+    };
+
+    /// return name of values of type token_type (only used for errors)
+    static const char* token_type_name(const token_type t) noexcept
+    {
+        switch (t)
+        {
+            case token_type::uninitialized:
+                return "<uninitialized>";
+            case token_type::literal_true:
+                return "true literal";
+            case token_type::literal_false:
+                return "false literal";
+            case token_type::literal_null:
+                return "null literal";
+            case token_type::value_string:
+                return "string literal";
+            case token_type::value_unsigned:
+            case token_type::value_integer:
+            case token_type::value_float:
+                return "number literal";
+            case token_type::begin_array:
+                return "'['";
+            case token_type::begin_object:
+                return "'{'";
+            case token_type::end_array:
+                return "']'";
+            case token_type::end_object:
+                return "'}'";
+            case token_type::name_separator:
+                return "':'";
+            case token_type::value_separator:
+                return "','";
+            case token_type::parse_error:
+                return "<parse error>";
+            case token_type::end_of_input:
+                return "end of input";
+            case token_type::literal_or_value:
+                return "'[', '{', or a literal";
+            // LCOV_EXCL_START
+            default: // catch non-enum values
+                return "unknown token";
+                // LCOV_EXCL_STOP
+        }
+    }
+
+    explicit lexer(detail::input_adapter_t&& adapter)
+        : ia(std::move(adapter)), decimal_point_char(get_decimal_point()) {}
+
+    // delete because of pointer members
+    lexer(const lexer&) = delete;
+    lexer(lexer&&) = delete;
+    lexer& operator=(lexer&) = delete;
+    lexer& operator=(lexer&&) = delete;
+    ~lexer() = default;
+
+  private:
+    /////////////////////
+    // locales
+    /////////////////////
+
+    /// return the locale-dependent decimal point
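+    /// (for example ',' under a German locale such as de_DE, '.' under the C locale)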
+    static char get_decimal_point() noexcept
+    {
+        const auto loc = localeconv();
+        assert(loc != nullptr);
+        return (loc->decimal_point == nullptr) ? '.' : *(loc->decimal_point);
+    }
+
+    /////////////////////
+    // scan functions
+    /////////////////////
+
+    /*!
+    @brief get codepoint from 4 hex characters following `\u`
+
+    For input "\u c1 c2 c3 c4" the codepoint is:
+      (c1 * 0x1000) + (c2 * 0x0100) + (c3 * 0x0010) + c4
+    = (c1 << 12) + (c2 << 8) + (c3 << 4) + (c4 << 0)
+
+    Furthermore, the possible characters '0'..'9', 'A'..'F', and 'a'..'f'
+    must be converted to the integers 0x0..0x9, 0xA..0xF, 0xA..0xF, resp. The
+    conversion is done by subtracting the offset (0x30, 0x37, and 0x57)
+    between the ASCII value of the character and the desired integer value.
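+
+    For example, the four hex characters "00E9" (read after `\u`) yield
+    (0x0 << 12) + (0x0 << 8) + (0xE << 4) + 0x9 = 0x00E9.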
+
+    @return codepoint (0x0000..0xFFFF) or -1 in case of an error (e.g. EOF or
+            non-hex character)
+    */
+    int get_codepoint()
+    {
+        // this function only makes sense after reading `\u`
+        assert(current == 'u');
+        int codepoint = 0;
+
+        const auto factors = { 12, 8, 4, 0 };
+        for (const auto factor : factors)
+        {
+            get();
+
+            if (current >= '0' and current <= '9')
+            {
+                codepoint += ((current - 0x30) << factor);
+            }
+            else if (current >= 'A' and current <= 'F')
+            {
+                codepoint += ((current - 0x37) << factor);
+            }
+            else if (current >= 'a' and current <= 'f')
+            {
+                codepoint += ((current - 0x57) << factor);
+            }
+            else
+            {
+                return -1;
+            }
+        }
+
+        assert(0x0000 <= codepoint and codepoint <= 0xFFFF);
+        return codepoint;
+    }
+
+    /*!
+    @brief check if the next byte(s) are inside a given range
+
+    Adds the current byte and, for each passed range, reads a new byte and
+    checks if it is inside the range. If a violation was detected, set up an
+    error message and return false. Otherwise, return true.
+
+    @param[in] ranges  list of integers; interpreted as list of pairs of
+                       inclusive lower and upper bound, respectively
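+
+    For example, after reading the lead byte 0xE0, the caller passes
+    {0xA0, 0xBF, 0x80, 0xBF} to check that the next two bytes lie in
+    A0..BF and 80..BF, respectively.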
+
+    @pre The passed list @a ranges must have 2, 4, or 6 elements; that is,
+         1, 2, or 3 pairs. This precondition is enforced by an assertion.
+
+    @return true if and only if no range violation was detected
+    */
+    bool next_byte_in_range(std::initializer_list<int> ranges)
+    {
+        assert(ranges.size() == 2 or ranges.size() == 4 or ranges.size() == 6);
+        add(current);
+
+        for (auto range = ranges.begin(); range != ranges.end(); ++range)
+        {
+            get();
+            if (JSON_LIKELY(*range <= current and current <= *(++range)))
+            {
+                add(current);
+            }
+            else
+            {
+                error_message = "invalid string: ill-formed UTF-8 byte";
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    /*!
+    @brief scan a string literal
+
+    This function scans a string according to Sect. 7 of RFC 7159. While
+    scanning, bytes are escaped and copied into the buffer token_buffer. When the
+    function returns successfully, token_buffer is *not* null-terminated (as it
+    may contain \0 bytes), and token_buffer.size() is the number of bytes in the
+    string.
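+
+    For example, for the JSON string input "a\n" (five characters including
+    the quotes), token_buffer ends up holding the two bytes 'a' and 0x0A.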
+
+    @return token_type::value_string if string could be successfully scanned,
+            token_type::parse_error otherwise
+
+    @note In case of errors, variable error_message contains a textual
+          description.
+    */
+    token_type scan_string()
+    {
+        // reset token_buffer (ignore opening quote)
+        reset();
+
+        // we entered the function by reading an open quote
+        assert(current == '\"');
+
+        while (true)
+        {
+            // get next character
+            switch (get())
+            {
+                // end of file while parsing string
+                case std::char_traits<char>::eof():
+                {
+                    error_message = "invalid string: missing closing quote";
+                    return token_type::parse_error;
+                }
+
+                // closing quote
+                case '\"':
+                {
+                    return token_type::value_string;
+                }
+
+                // escapes
+                case '\\':
+                {
+                    switch (get())
+                    {
+                        // quotation mark
+                        case '\"':
+                            add('\"');
+                            break;
+                        // reverse solidus
+                        case '\\':
+                            add('\\');
+                            break;
+                        // solidus
+                        case '/':
+                            add('/');
+                            break;
+                        // backspace
+                        case 'b':
+                            add('\b');
+                            break;
+                        // form feed
+                        case 'f':
+                            add('\f');
+                            break;
+                        // line feed
+                        case 'n':
+                            add('\n');
+                            break;
+                        // carriage return
+                        case 'r':
+                            add('\r');
+                            break;
+                        // tab
+                        case 't':
+                            add('\t');
+                            break;
+
+                        // unicode escapes
+                        case 'u':
+                        {
+                            const int codepoint1 = get_codepoint();
+                            int codepoint = codepoint1; // start with codepoint1
+
+                            if (JSON_UNLIKELY(codepoint1 == -1))
+                            {
+                                error_message = "invalid string: '\\u' must be followed by 4 hex digits";
+                                return token_type::parse_error;
+                            }
+
+                            // check if code point is a high surrogate
+                            if (0xD800 <= codepoint1 and codepoint1 <= 0xDBFF)
+                            {
+                                // expect next \uxxxx entry
+                                if (JSON_LIKELY(get() == '\\' and get() == 'u'))
+                                {
+                                    const int codepoint2 = get_codepoint();
+
+                                    if (JSON_UNLIKELY(codepoint2 == -1))
+                                    {
+                                        error_message = "invalid string: '\\u' must be followed by 4 hex digits";
+                                        return token_type::parse_error;
+                                    }
+
+                                    // check if codepoint2 is a low surrogate
+                                    if (JSON_LIKELY(0xDC00 <= codepoint2 and codepoint2 <= 0xDFFF))
+                                    {
+                                        // overwrite codepoint
+                                        codepoint =
+                                            // high surrogate occupies the most significant 22 bits
+                                            (codepoint1 << 10)
+                                            // low surrogate occupies the least significant 15 bits
+                                            + codepoint2
+                                            // there is still the 0xD800, 0xDC00 and 0x10000 noise
+                                            // in the result so we have to subtract with:
+                                            // (0xD800 << 10) + 0xDC00 - 0x10000 = 0x35FDC00
+                                            - 0x35FDC00;
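+                                        // e.g. codepoint1 == 0xD83D and codepoint2 == 0xDE00
+                                        // combine to (0xD83D << 10) + 0xDE00 - 0x35FDC00
+                                        // = 0x1F600 (U+1F600)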
+                                    }
+                                    else
+                                    {
+                                        error_message = "invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF";
+                                        return token_type::parse_error;
+                                    }
+                                }
+                                else
+                                {
+                                    error_message = "invalid string: surrogate U+D800..U+DBFF must be followed by U+DC00..U+DFFF";
+                                    return token_type::parse_error;
+                                }
+                            }
+                            else
+                            {
+                                if (JSON_UNLIKELY(0xDC00 <= codepoint1 and codepoint1 <= 0xDFFF))
+                                {
+                                    error_message = "invalid string: surrogate U+DC00..U+DFFF must follow U+D800..U+DBFF";
+                                    return token_type::parse_error;
+                                }
+                            }
+
+                            // result of the above calculation yields a proper codepoint
+                            assert(0x00 <= codepoint and codepoint <= 0x10FFFF);
+
+                            // translate codepoint into bytes
+                            if (codepoint < 0x80)
+                            {
+                                // 1-byte characters: 0xxxxxxx (ASCII)
+                                add(codepoint);
+                            }
+                            else if (codepoint <= 0x7FF)
+                            {
+                                // 2-byte characters: 110xxxxx 10xxxxxx
+                                add(0xC0 | (codepoint >> 6));
+                                add(0x80 | (codepoint & 0x3F));
+                            }
+                            else if (codepoint <= 0xFFFF)
+                            {
+                                // 3-byte characters: 1110xxxx 10xxxxxx 10xxxxxx
+                                add(0xE0 | (codepoint >> 12));
+                                add(0x80 | ((codepoint >> 6) & 0x3F));
+                                add(0x80 | (codepoint & 0x3F));
+                            }
+                            else
+                            {
+                                // 4-byte characters: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+                                add(0xF0 | (codepoint >> 18));
+                                add(0x80 | ((codepoint >> 12) & 0x3F));
+                                add(0x80 | ((codepoint >> 6) & 0x3F));
+                                add(0x80 | (codepoint & 0x3F));
+                            }
+
+                            break;
+                        }
+
+                        // other characters after escape
+                        default:
+                            error_message = "invalid string: forbidden character after backslash";
+                            return token_type::parse_error;
+                    }
+
+                    break;
+                }
+
+                // invalid control characters
+                case 0x00:
+                {
+                    error_message = "invalid string: control character U+0000 (NUL) must be escaped to \\u0000";
+                    return token_type::parse_error;
+                }
+
+                case 0x01:
+                {
+                    error_message = "invalid string: control character U+0001 (SOH) must be escaped to \\u0001";
+                    return token_type::parse_error;
+                }
+
+                case 0x02:
+                {
+                    error_message = "invalid string: control character U+0002 (STX) must be escaped to \\u0002";
+                    return token_type::parse_error;
+                }
+
+                case 0x03:
+                {
+                    error_message = "invalid string: control character U+0003 (ETX) must be escaped to \\u0003";
+                    return token_type::parse_error;
+                }
+
+                case 0x04:
+                {
+                    error_message = "invalid string: control character U+0004 (EOT) must be escaped to \\u0004";
+                    return token_type::parse_error;
+                }
+
+                case 0x05:
+                {
+                    error_message = "invalid string: control character U+0005 (ENQ) must be escaped to \\u0005";
+                    return token_type::parse_error;
+                }
+
+                case 0x06:
+                {
+                    error_message = "invalid string: control character U+0006 (ACK) must be escaped to \\u0006";
+                    return token_type::parse_error;
+                }
+
+                case 0x07:
+                {
+                    error_message = "invalid string: control character U+0007 (BEL) must be escaped to \\u0007";
+                    return token_type::parse_error;
+                }
+
+                case 0x08:
+                {
+                    error_message = "invalid string: control character U+0008 (BS) must be escaped to \\u0008 or \\b";
+                    return token_type::parse_error;
+                }
+
+                case 0x09:
+                {
+                    error_message = "invalid string: control character U+0009 (HT) must be escaped to \\u0009 or \\t";
+                    return token_type::parse_error;
+                }
+
+                case 0x0A:
+                {
+                    error_message = "invalid string: control character U+000A (LF) must be escaped to \\u000A or \\n";
+                    return token_type::parse_error;
+                }
+
+                case 0x0B:
+                {
+                    error_message = "invalid string: control character U+000B (VT) must be escaped to \\u000B";
+                    return token_type::parse_error;
+                }
+
+                case 0x0C:
+                {
+                    error_message = "invalid string: control character U+000C (FF) must be escaped to \\u000C or \\f";
+                    return token_type::parse_error;
+                }
+
+                case 0x0D:
+                {
+                    error_message = "invalid string: control character U+000D (CR) must be escaped to \\u000D or \\r";
+                    return token_type::parse_error;
+                }
+
+                case 0x0E:
+                {
+                    error_message = "invalid string: control character U+000E (SO) must be escaped to \\u000E";
+                    return token_type::parse_error;
+                }
+
+                case 0x0F:
+                {
+                    error_message = "invalid string: control character U+000F (SI) must be escaped to \\u000F";
+                    return token_type::parse_error;
+                }
+
+                case 0x10:
+                {
+                    error_message = "invalid string: control character U+0010 (DLE) must be escaped to \\u0010";
+                    return token_type::parse_error;
+                }
+
+                case 0x11:
+                {
+                    error_message = "invalid string: control character U+0011 (DC1) must be escaped to \\u0011";
+                    return token_type::parse_error;
+                }
+
+                case 0x12:
+                {
+                    error_message = "invalid string: control character U+0012 (DC2) must be escaped to \\u0012";
+                    return token_type::parse_error;
+                }
+
+                case 0x13:
+                {
+                    error_message = "invalid string: control character U+0013 (DC3) must be escaped to \\u0013";
+                    return token_type::parse_error;
+                }
+
+                case 0x14:
+                {
+                    error_message = "invalid string: control character U+0014 (DC4) must be escaped to \\u0014";
+                    return token_type::parse_error;
+                }
+
+                case 0x15:
+                {
+                    error_message = "invalid string: control character U+0015 (NAK) must be escaped to \\u0015";
+                    return token_type::parse_error;
+                }
+
+                case 0x16:
+                {
+                    error_message = "invalid string: control character U+0016 (SYN) must be escaped to \\u0016";
+                    return token_type::parse_error;
+                }
+
+                case 0x17:
+                {
+                    error_message = "invalid string: control character U+0017 (ETB) must be escaped to \\u0017";
+                    return token_type::parse_error;
+                }
+
+                case 0x18:
+                {
+                    error_message = "invalid string: control character U+0018 (CAN) must be escaped to \\u0018";
+                    return token_type::parse_error;
+                }
+
+                case 0x19:
+                {
+                    error_message = "invalid string: control character U+0019 (EM) must be escaped to \\u0019";
+                    return token_type::parse_error;
+                }
+
+                case 0x1A:
+                {
+                    error_message = "invalid string: control character U+001A (SUB) must be escaped to \\u001A";
+                    return token_type::parse_error;
+                }
+
+                case 0x1B:
+                {
+                    error_message = "invalid string: control character U+001B (ESC) must be escaped to \\u001B";
+                    return token_type::parse_error;
+                }
+
+                case 0x1C:
+                {
+                    error_message = "invalid string: control character U+001C (FS) must be escaped to \\u001C";
+                    return token_type::parse_error;
+                }
+
+                case 0x1D:
+                {
+                    error_message = "invalid string: control character U+001D (GS) must be escaped to \\u001D";
+                    return token_type::parse_error;
+                }
+
+                case 0x1E:
+                {
+                    error_message = "invalid string: control character U+001E (RS) must be escaped to \\u001E";
+                    return token_type::parse_error;
+                }
+
+                case 0x1F:
+                {
+                    error_message = "invalid string: control character U+001F (US) must be escaped to \\u001F";
+                    return token_type::parse_error;
+                }
+
+                // U+0020..U+007F (except U+0022 (quote) and U+005C (backslash))
+                case 0x20:
+                case 0x21:
+                case 0x23:
+                case 0x24:
+                case 0x25:
+                case 0x26:
+                case 0x27:
+                case 0x28:
+                case 0x29:
+                case 0x2A:
+                case 0x2B:
+                case 0x2C:
+                case 0x2D:
+                case 0x2E:
+                case 0x2F:
+                case 0x30:
+                case 0x31:
+                case 0x32:
+                case 0x33:
+                case 0x34:
+                case 0x35:
+                case 0x36:
+                case 0x37:
+                case 0x38:
+                case 0x39:
+                case 0x3A:
+                case 0x3B:
+                case 0x3C:
+                case 0x3D:
+                case 0x3E:
+                case 0x3F:
+                case 0x40:
+                case 0x41:
+                case 0x42:
+                case 0x43:
+                case 0x44:
+                case 0x45:
+                case 0x46:
+                case 0x47:
+                case 0x48:
+                case 0x49:
+                case 0x4A:
+                case 0x4B:
+                case 0x4C:
+                case 0x4D:
+                case 0x4E:
+                case 0x4F:
+                case 0x50:
+                case 0x51:
+                case 0x52:
+                case 0x53:
+                case 0x54:
+                case 0x55:
+                case 0x56:
+                case 0x57:
+                case 0x58:
+                case 0x59:
+                case 0x5A:
+                case 0x5B:
+                case 0x5D:
+                case 0x5E:
+                case 0x5F:
+                case 0x60:
+                case 0x61:
+                case 0x62:
+                case 0x63:
+                case 0x64:
+                case 0x65:
+                case 0x66:
+                case 0x67:
+                case 0x68:
+                case 0x69:
+                case 0x6A:
+                case 0x6B:
+                case 0x6C:
+                case 0x6D:
+                case 0x6E:
+                case 0x6F:
+                case 0x70:
+                case 0x71:
+                case 0x72:
+                case 0x73:
+                case 0x74:
+                case 0x75:
+                case 0x76:
+                case 0x77:
+                case 0x78:
+                case 0x79:
+                case 0x7A:
+                case 0x7B:
+                case 0x7C:
+                case 0x7D:
+                case 0x7E:
+                case 0x7F:
+                {
+                    add(current);
+                    break;
+                }
+
+                // U+0080..U+07FF: bytes C2..DF 80..BF
+                case 0xC2:
+                case 0xC3:
+                case 0xC4:
+                case 0xC5:
+                case 0xC6:
+                case 0xC7:
+                case 0xC8:
+                case 0xC9:
+                case 0xCA:
+                case 0xCB:
+                case 0xCC:
+                case 0xCD:
+                case 0xCE:
+                case 0xCF:
+                case 0xD0:
+                case 0xD1:
+                case 0xD2:
+                case 0xD3:
+                case 0xD4:
+                case 0xD5:
+                case 0xD6:
+                case 0xD7:
+                case 0xD8:
+                case 0xD9:
+                case 0xDA:
+                case 0xDB:
+                case 0xDC:
+                case 0xDD:
+                case 0xDE:
+                case 0xDF:
+                {
+                    if (JSON_UNLIKELY(not next_byte_in_range({0x80, 0xBF})))
+                    {
+                        return token_type::parse_error;
+                    }
+                    break;
+                }
+
+                // U+0800..U+0FFF: bytes E0 A0..BF 80..BF
+                case 0xE0:
+                {
+                    if (JSON_UNLIKELY(not (next_byte_in_range({0xA0, 0xBF, 0x80, 0xBF}))))
+                    {
+                        return token_type::parse_error;
+                    }
+                    break;
+                }
+
+                // U+1000..U+CFFF: bytes E1..EC 80..BF 80..BF
+                // U+E000..U+FFFF: bytes EE..EF 80..BF 80..BF
+                case 0xE1:
+                case 0xE2:
+                case 0xE3:
+                case 0xE4:
+                case 0xE5:
+                case 0xE6:
+                case 0xE7:
+                case 0xE8:
+                case 0xE9:
+                case 0xEA:
+                case 0xEB:
+                case 0xEC:
+                case 0xEE:
+                case 0xEF:
+                {
+                    if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0xBF, 0x80, 0xBF}))))
+                    {
+                        return token_type::parse_error;
+                    }
+                    break;
+                }
+
+                // U+D000..U+D7FF: bytes ED 80..9F 80..BF
+                case 0xED:
+                {
+                    if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0x9F, 0x80, 0xBF}))))
+                    {
+                        return token_type::parse_error;
+                    }
+                    break;
+                }
+
+                // U+10000..U+3FFFF F0 90..BF 80..BF 80..BF
+                case 0xF0:
+                {
+                    if (JSON_UNLIKELY(not (next_byte_in_range({0x90, 0xBF, 0x80, 0xBF, 0x80, 0xBF}))))
+                    {
+                        return token_type::parse_error;
+                    }
+                    break;
+                }
+
+                // U+40000..U+FFFFF F1..F3 80..BF 80..BF 80..BF
+                case 0xF1:
+                case 0xF2:
+                case 0xF3:
+                {
+                    if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0xBF, 0x80, 0xBF, 0x80, 0xBF}))))
+                    {
+                        return token_type::parse_error;
+                    }
+                    break;
+                }
+
+                // U+100000..U+10FFFF F4 80..8F 80..BF 80..BF
+                case 0xF4:
+                {
+                    if (JSON_UNLIKELY(not (next_byte_in_range({0x80, 0x8F, 0x80, 0xBF, 0x80, 0xBF}))))
+                    {
+                        return token_type::parse_error;
+                    }
+                    break;
+                }
+
+                // remaining bytes (80..C1 and F5..FF) are ill-formed
+                default:
+                {
+                    error_message = "invalid string: ill-formed UTF-8 byte";
+                    return token_type::parse_error;
+                }
+            }
+        }
+    }
+
+    static void strtof(float& f, const char* str, char** endptr) noexcept
+    {
+        f = std::strtof(str, endptr);
+    }
+
+    static void strtof(double& f, const char* str, char** endptr) noexcept
+    {
+        f = std::strtod(str, endptr);
+    }
+
+    static void strtof(long double& f, const char* str, char** endptr) noexcept
+    {
+        f = std::strtold(str, endptr);
+    }
+
+    /*!
+    @brief scan a number literal
+
+    This function scans a string according to Sect. 6 of RFC 7159.
+
+    The function is realized with a deterministic finite state machine derived
+    from the grammar described in RFC 7159. Starting in state "init", the
+    input is read and used to determine the next state. Only state "done"
+    accepts the number. State "error" is a trap state to model errors. In the
+    table below, "anything" means any character but the ones listed before.
+
+    state    | 0        | 1-9      | e E      | +       | -       | .        | anything
+    ---------|----------|----------|----------|---------|---------|----------|-----------
+    init     | zero     | any1     | [error]  | [error] | minus   | [error]  | [error]
+    minus    | zero     | any1     | [error]  | [error] | [error] | [error]  | [error]
+    zero     | done     | done     | exponent | done    | done    | decimal1 | done
+    any1     | any1     | any1     | exponent | done    | done    | decimal1 | done
+    decimal1 | decimal2 | [error]  | [error]  | [error] | [error] | [error]  | [error]
+    decimal2 | decimal2 | decimal2 | exponent | done    | done    | done     | done
+    exponent | any2     | any2     | [error]  | sign    | sign    | [error]  | [error]
+    sign     | any2     | any2     | [error]  | [error] | [error] | [error]  | [error]
+    any2     | any2     | any2     | done     | done    | done    | done     | done
+
+    The state machine is realized with one label per state (prefixed with
+    "scan_number_") and `goto` statements between them. The state machine
+    contains cycles, but any cycle can be left when EOF is read. Therefore,
+    the function is guaranteed to terminate.
+
+    During scanning, the read bytes are stored in token_buffer. This string is
+    then converted to a signed integer, an unsigned integer, or a
+    floating-point number.
+
+    @return token_type::value_unsigned, token_type::value_integer, or
+            token_type::value_float if number could be successfully scanned,
+            token_type::parse_error otherwise
+
+    @note The scanner is independent of the current locale. Internally, the
+          locale's decimal point is used instead of `.` to work with the
+          locale-dependent converters.
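+
+    For example, the input `-12.34e-5` passes through the states init, minus,
+    any1, decimal1, decimal2, exponent, sign, any2, and done, and is returned
+    as token_type::value_float.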
+    */
+    token_type scan_number()  // lgtm [cpp/use-of-goto]
+    {
+        // reset token_buffer to store the number's bytes
+        reset();
+
+        // the type of the parsed number; initially set to unsigned; will be
+        // changed if minus sign, decimal point or exponent is read
+        token_type number_type = token_type::value_unsigned;
+
+        // state (init): we just found out we need to scan a number
+        switch (current)
+        {
+            case '-':
+            {
+                add(current);
+                goto scan_number_minus;
+            }
+
+            case '0':
+            {
+                add(current);
+                goto scan_number_zero;
+            }
+
+            case '1':
+            case '2':
+            case '3':
+            case '4':
+            case '5':
+            case '6':
+            case '7':
+            case '8':
+            case '9':
+            {
+                add(current);
+                goto scan_number_any1;
+            }
+
+            // LCOV_EXCL_START
+            default:
+            {
+                // all other characters are rejected outside scan_number()
+                assert(false);
+            }
+                // LCOV_EXCL_STOP
+        }
+
+scan_number_minus:
+        // state: we just parsed a leading minus sign
+        number_type = token_type::value_integer;
+        switch (get())
+        {
+            case '0':
+            {
+                add(current);
+                goto scan_number_zero;
+            }
+
+            case '1':
+            case '2':
+            case '3':
+            case '4':
+            case '5':
+            case '6':
+            case '7':
+            case '8':
+            case '9':
+            {
+                add(current);
+                goto scan_number_any1;
+            }
+
+            default:
+            {
+                error_message = "invalid number; expected digit after '-'";
+                return token_type::parse_error;
+            }
+        }
+
+scan_number_zero:
+        // state: we just parsed a zero (maybe with a leading minus sign)
+        switch (get())
+        {
+            case '.':
+            {
+                add(decimal_point_char);
+                goto scan_number_decimal1;
+            }
+
+            case 'e':
+            case 'E':
+            {
+                add(current);
+                goto scan_number_exponent;
+            }
+
+            default:
+                goto scan_number_done;
+        }
+
+scan_number_any1:
+        // state: we just parsed a digit 0-9 (maybe with a leading minus sign)
+        switch (get())
+        {
+            case '0':
+            case '1':
+            case '2':
+            case '3':
+            case '4':
+            case '5':
+            case '6':
+            case '7':
+            case '8':
+            case '9':
+            {
+                add(current);
+                goto scan_number_any1;
+            }
+
+            case '.':
+            {
+                add(decimal_point_char);
+                goto scan_number_decimal1;
+            }
+
+            case 'e':
+            case 'E':
+            {
+                add(current);
+                goto scan_number_exponent;
+            }
+
+            default:
+                goto scan_number_done;
+        }
+
+scan_number_decimal1:
+        // state: we just parsed a decimal point
+        number_type = token_type::value_float;
+        switch (get())
+        {
+            case '0':
+            case '1':
+            case '2':
+            case '3':
+            case '4':
+            case '5':
+            case '6':
+            case '7':
+            case '8':
+            case '9':
+            {
+                add(current);
+                goto scan_number_decimal2;
+            }
+
+            default:
+            {
+                error_message = "invalid number; expected digit after '.'";
+                return token_type::parse_error;
+            }
+        }
+
+scan_number_decimal2:
+        // we just parsed at least one digit after a decimal point
+        switch (get())
+        {
+            case '0':
+            case '1':
+            case '2':
+            case '3':
+            case '4':
+            case '5':
+            case '6':
+            case '7':
+            case '8':
+            case '9':
+            {
+                add(current);
+                goto scan_number_decimal2;
+            }
+
+            case 'e':
+            case 'E':
+            {
+                add(current);
+                goto scan_number_exponent;
+            }
+
+            default:
+                goto scan_number_done;
+        }
+
+scan_number_exponent:
+        // we just parsed an exponent
+        number_type = token_type::value_float;
+        switch (get())
+        {
+            case '+':
+            case '-':
+            {
+                add(current);
+                goto scan_number_sign;
+            }
+
+            case '0':
+            case '1':
+            case '2':
+            case '3':
+            case '4':
+            case '5':
+            case '6':
+            case '7':
+            case '8':
+            case '9':
+            {
+                add(current);
+                goto scan_number_any2;
+            }
+
+            default:
+            {
+                error_message =
+                    "invalid number; expected '+', '-', or digit after exponent";
+                return token_type::parse_error;
+            }
+        }
+
+scan_number_sign:
+        // we just parsed an exponent sign
+        switch (get())
+        {
+            case '0':
+            case '1':
+            case '2':
+            case '3':
+            case '4':
+            case '5':
+            case '6':
+            case '7':
+            case '8':
+            case '9':
+            {
+                add(current);
+                goto scan_number_any2;
+            }
+
+            default:
+            {
+                error_message = "invalid number; expected digit after exponent sign";
+                return token_type::parse_error;
+            }
+        }
+
+scan_number_any2:
+        // we just parsed a digit after the exponent or exponent sign
+        switch (get())
+        {
+            case '0':
+            case '1':
+            case '2':
+            case '3':
+            case '4':
+            case '5':
+            case '6':
+            case '7':
+            case '8':
+            case '9':
+            {
+                add(current);
+                goto scan_number_any2;
+            }
+
+            default:
+                goto scan_number_done;
+        }
+
+scan_number_done:
+        // unget the character after the number (we only read it to know that
+        // we are done scanning a number)
+        unget();
+
+        char* endptr = nullptr;
+        errno = 0;
+
+        // try to parse integers first and fall back to floats
+        if (number_type == token_type::value_unsigned)
+        {
+            const auto x = std::strtoull(token_buffer.data(), &endptr, 10);
+
+            // we checked the number format before
+            assert(endptr == token_buffer.data() + token_buffer.size());
+
+            if (errno == 0)
+            {
+                value_unsigned = static_cast<number_unsigned_t>(x);
+                if (value_unsigned == x)
+                {
+                    return token_type::value_unsigned;
+                }
+            }
+        }
+        else if (number_type == token_type::value_integer)
+        {
+            const auto x = std::strtoll(token_buffer.data(), &endptr, 10);
+
+            // we checked the number format before
+            assert(endptr == token_buffer.data() + token_buffer.size());
+
+            if (errno == 0)
+            {
+                value_integer = static_cast<number_integer_t>(x);
+                if (value_integer == x)
+                {
+                    return token_type::value_integer;
+                }
+            }
+        }
+
+        // this code is reached if we parse a floating-point number or if an
+        // integer conversion above failed
+        strtof(value_float, token_buffer.data(), &endptr);
+
+        // we checked the number format before
+        assert(endptr == token_buffer.data() + token_buffer.size());
+
+        return token_type::value_float;
+    }
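+
+    // A minimal standalone sketch (not part of the library) of the conversion
+    // strategy used above: try the exact integer types first and fall back to
+    // a floating-point parse when strtoull/strtoll report overflow via errno.
+    // The helper below, its name, and its parameters are illustrative
+    // assumptions; numbers containing '.' or an exponent are parsed directly
+    // as floating point above and would not go through this helper.
+#if 0
+    #include <cerrno>   // errno
+    #include <cstdint>  // std::int64_t, std::uint64_t
+    #include <cstdlib>  // std::strtoll, std::strtoull, std::strtod
+
+    enum class number_kind { unsigned_integer, signed_integer, floating_point };
+
+    // s: a complete, already-validated integer-looking JSON number;
+    // exactly one of u/i/d is set, as indicated by the returned kind
+    inline number_kind integer_first_parse(const char* s, const bool is_negative,
+                                           std::uint64_t& u, std::int64_t& i, double& d)
+    {
+        char* end = nullptr;
+        errno = 0;
+        if (not is_negative)
+        {
+            u = std::strtoull(s, &end, 10);
+            if (errno == 0)
+            {
+                return number_kind::unsigned_integer;
+            }
+        }
+        else
+        {
+            i = std::strtoll(s, &end, 10);
+            if (errno == 0)
+            {
+                return number_kind::signed_integer;
+            }
+        }
+        errno = 0;
+        d = std::strtod(s, &end);  // overflow (ERANGE): fall back to double
+        return number_kind::floating_point;
+    }
+#endif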
+
+    /*!
+    @param[in] literal_text  the literal text to expect
+    @param[in] length        the length of the passed literal text
+    @param[in] return_type   the token type to return on success
+    */
+    token_type scan_literal(const char* literal_text, const std::size_t length,
+                            token_type return_type)
+    {
+        assert(current == literal_text[0]);
+        for (std::size_t i = 1; i < length; ++i)
+        {
+            if (JSON_UNLIKELY(get() != literal_text[i]))
+            {
+                error_message = "invalid literal";
+                return token_type::parse_error;
+            }
+        }
+        return return_type;
+    }
+
+    /////////////////////
+    // input management
+    /////////////////////
+
+    /// reset token_buffer; current character is beginning of token
+    void reset() noexcept
+    {
+        token_buffer.clear();
+        token_string.clear();
+        token_string.push_back(std::char_traits<char>::to_char_type(current));
+    }
+
+    /*!
+    @brief get next character from the input
+
+    This function provides the interface to the used input adapter. It does
+    not throw in case the input reached EOF, but returns a
+    `std::char_traits<char>::eof()` in that case.  Stores the scanned characters
+    for use in error messages.
+
+    @return character read from the input
+    */
+    std::char_traits<char>::int_type get()
+    {
+        ++position.chars_read_total;
+        ++position.chars_read_current_line;
+
+        if (next_unget)
+        {
+            // just reset the next_unget variable and work with current
+            next_unget = false;
+        }
+        else
+        {
+            current = ia->get_character();
+        }
+
+        if (JSON_LIKELY(current != std::char_traits<char>::eof()))
+        {
+            token_string.push_back(std::char_traits<char>::to_char_type(current));
+        }
+
+        if (current == '\n')
+        {
+            ++position.lines_read;
+            position.chars_read_current_line = 0;
+        }
+
+        return current;
+    }
+
+    /*!
+    @brief unget current character (read it again on next get)
+
+    We implement unget by setting variable next_unget to true. The input is not
+    changed - we just simulate ungetting by modifying chars_read_total,
+    chars_read_current_line, and token_string. The next call to get() will
+    behave as if the unget character is read again.
+    */
+    void unget()
+    {
+        next_unget = true;
+
+        --position.chars_read_total;
+
+        // in case we "unget" a newline, we have to also decrement the lines_read
+        if (position.chars_read_current_line == 0)
+        {
+            if (position.lines_read > 0)
+            {
+                --position.lines_read;
+            }
+        }
+        else
+        {
+            --position.chars_read_current_line;
+        }
+
+        if (JSON_LIKELY(current != std::char_traits<char>::eof()))
+        {
+            assert(token_string.size() != 0);
+            token_string.pop_back();
+        }
+    }
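+
+    // A minimal standalone sketch (not part of the library) of the same
+    // single-character pushback technique described above: get() re-delivers
+    // the stored character while a flag is set, and unget() only flips the
+    // flag instead of touching the underlying input. Names are illustrative.
+#if 0
+    #include <istream>
+
+    class pushback_reader
+    {
+      public:
+        explicit pushback_reader(std::istream& is) : input(is) {}
+
+        int get()
+        {
+            if (pushed_back)
+            {
+                pushed_back = false;  // re-deliver the stored character
+                return current;
+            }
+            current = input.get();    // may be EOF
+            return current;
+        }
+
+        void unget()
+        {
+            pushed_back = true;       // the next get() returns current again
+        }
+
+      private:
+        std::istream& input;
+        int current = -1;
+        bool pushed_back = false;
+    };
+#endif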
+
+    /// add a character to token_buffer
+    void add(int c)
+    {
+        token_buffer.push_back(std::char_traits<char>::to_char_type(c));
+    }
+
+  public:
+    /////////////////////
+    // value getters
+    /////////////////////
+
+    /// return integer value
+    constexpr number_integer_t get_number_integer() const noexcept
+    {
+        return value_integer;
+    }
+
+    /// return unsigned integer value
+    constexpr number_unsigned_t get_number_unsigned() const noexcept
+    {
+        return value_unsigned;
+    }
+
+    /// return floating-point value
+    constexpr number_float_t get_number_float() const noexcept
+    {
+        return value_float;
+    }
+
+    /// return current string value (implicitly resets the token; useful only once)
+    string_t& get_string()
+    {
+        return token_buffer;
+    }
+
+    /////////////////////
+    // diagnostics
+    /////////////////////
+
+    /// return position of last read token
+    constexpr position_t get_position() const noexcept
+    {
+        return position;
+    }
+
+    /// return the last read token (for error messages only). The result never
+    /// contains EOF, because EOF is an arbitrary int_type value that is not a
+    /// valid char (often -1), whereas the byte 0xFF may legitimately occur in
+    /// the input. It may contain NUL, which is escaped like any other control
+    /// character.
+    std::string get_token_string() const
+    {
+        // escape control characters
+        std::string result;
+        for (const auto c : token_string)
+        {
+            if ('\x00' <= c and c <= '\x1F')
+            {
+                // escape control characters
+                char cs[9];
+                (std::snprintf)(cs, 9, "<U+%.4X>", static_cast<unsigned char>(c));
+                result += cs;
+            }
+            else
+            {
+                // add character as is
+                result.push_back(c);
+            }
+        }
+
+        return result;
+    }
+
+    /// return syntax error message
+    constexpr const char* get_error_message() const noexcept
+    {
+        return error_message;
+    }
+
+    /////////////////////
+    // actual scanner
+    /////////////////////
+
+    /*!
+    @brief skip the UTF-8 byte order mark
+    @return true iff there is no BOM or the correct BOM has been skipped
+    */
+    bool skip_bom()
+    {
+        if (get() == 0xEF)
+        {
+            // check if we completely parse the BOM
+            return get() == 0xBB and get() == 0xBF;
+        }
+
+        // the first character is not the beginning of the BOM; unget it to
+        // process it later
+        unget();
+        return true;
+    }
+
+    token_type scan()
+    {
+        // initially, skip the BOM
+        if (position.chars_read_total == 0 and not skip_bom())
+        {
+            error_message = "invalid BOM; must be 0xEF 0xBB 0xBF if given";
+            return token_type::parse_error;
+        }
+
+        // read next character and ignore whitespace
+        do
+        {
+            get();
+        }
+        while (current == ' ' or current == '\t' or current == '\n' or current == '\r');
+
+        switch (current)
+        {
+            // structural characters
+            case '[':
+                return token_type::begin_array;
+            case ']':
+                return token_type::end_array;
+            case '{':
+                return token_type::begin_object;
+            case '}':
+                return token_type::end_object;
+            case ':':
+                return token_type::name_separator;
+            case ',':
+                return token_type::value_separator;
+
+            // literals
+            case 't':
+                return scan_literal("true", 4, token_type::literal_true);
+            case 'f':
+                return scan_literal("false", 5, token_type::literal_false);
+            case 'n':
+                return scan_literal("null", 4, token_type::literal_null);
+
+            // string
+            case '\"':
+                return scan_string();
+
+            // number
+            case '-':
+            case '0':
+            case '1':
+            case '2':
+            case '3':
+            case '4':
+            case '5':
+            case '6':
+            case '7':
+            case '8':
+            case '9':
+                return scan_number();
+
+            // end of input (the null byte is needed when parsing from
+            // string literals)
+            case '\0':
+            case std::char_traits<char>::eof():
+                return token_type::end_of_input;
+
+            // error
+            default:
+                error_message = "invalid literal";
+                return token_type::parse_error;
+        }
+    }
+
+  private:
+    /// input adapter
+    detail::input_adapter_t ia = nullptr;
+
+    /// the current character
+    std::char_traits<char>::int_type current = std::char_traits<char>::eof();
+
+    /// whether the next get() call should just return current
+    bool next_unget = false;
+
+    /// the start position of the current token
+    position_t position;
+
+    /// raw input token string (for error messages)
+    std::vector<char> token_string {};
+
+    /// buffer for variable-length tokens (numbers, strings)
+    string_t token_buffer {};
+
+    /// a description of occurred lexer errors
+    const char* error_message = "";
+
+    // number values
+    number_integer_t value_integer = 0;
+    number_unsigned_t value_unsigned = 0;
+    number_float_t value_float = 0;
+
+    /// the decimal point
+    const char decimal_point_char = '.';
+};
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/input/parser.hpp>
+
+
+#include <cassert> // assert
+#include <cmath> // isfinite
+#include <cstdint> // uint8_t
+#include <functional> // function
+#include <string> // string
+#include <utility> // move
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/is_sax.hpp>
+
+
+#include <cstdint> // size_t
+#include <utility> // declval
+
+// #include <nlohmann/detail/meta/detected.hpp>
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template <typename T>
+using null_function_t = decltype(std::declval<T&>().null());
+
+template <typename T>
+using boolean_function_t =
+    decltype(std::declval<T&>().boolean(std::declval<bool>()));
+
+template <typename T, typename Integer>
+using number_integer_function_t =
+    decltype(std::declval<T&>().number_integer(std::declval<Integer>()));
+
+template <typename T, typename Unsigned>
+using number_unsigned_function_t =
+    decltype(std::declval<T&>().number_unsigned(std::declval<Unsigned>()));
+
+template <typename T, typename Float, typename String>
+using number_float_function_t = decltype(std::declval<T&>().number_float(
+                                    std::declval<Float>(), std::declval<const String&>()));
+
+template <typename T, typename String>
+using string_function_t =
+    decltype(std::declval<T&>().string(std::declval<String&>()));
+
+template <typename T>
+using start_object_function_t =
+    decltype(std::declval<T&>().start_object(std::declval<std::size_t>()));
+
+template <typename T, typename String>
+using key_function_t =
+    decltype(std::declval<T&>().key(std::declval<String&>()));
+
+template <typename T>
+using end_object_function_t = decltype(std::declval<T&>().end_object());
+
+template <typename T>
+using start_array_function_t =
+    decltype(std::declval<T&>().start_array(std::declval<std::size_t>()));
+
+template <typename T>
+using end_array_function_t = decltype(std::declval<T&>().end_array());
+
+template <typename T, typename Exception>
+using parse_error_function_t = decltype(std::declval<T&>().parse_error(
+        std::declval<std::size_t>(), std::declval<const std::string&>(),
+        std::declval<const Exception&>()));
+
+template <typename SAX, typename BasicJsonType>
+struct is_sax
+{
+  private:
+    static_assert(is_basic_json<BasicJsonType>::value,
+                  "BasicJsonType must be of type basic_json<...>");
+
+    using number_integer_t = typename BasicJsonType::number_integer_t;
+    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+    using number_float_t = typename BasicJsonType::number_float_t;
+    using string_t = typename BasicJsonType::string_t;
+    using exception_t = typename BasicJsonType::exception;
+
+  public:
+    static constexpr bool value =
+        is_detected_exact<bool, null_function_t, SAX>::value &&
+        is_detected_exact<bool, boolean_function_t, SAX>::value &&
+        is_detected_exact<bool, number_integer_function_t, SAX,
+        number_integer_t>::value &&
+        is_detected_exact<bool, number_unsigned_function_t, SAX,
+        number_unsigned_t>::value &&
+        is_detected_exact<bool, number_float_function_t, SAX, number_float_t,
+        string_t>::value &&
+        is_detected_exact<bool, string_function_t, SAX, string_t>::value &&
+        is_detected_exact<bool, start_object_function_t, SAX>::value &&
+        is_detected_exact<bool, key_function_t, SAX, string_t>::value &&
+        is_detected_exact<bool, end_object_function_t, SAX>::value &&
+        is_detected_exact<bool, start_array_function_t, SAX>::value &&
+        is_detected_exact<bool, end_array_function_t, SAX>::value &&
+        is_detected_exact<bool, parse_error_function_t, SAX, exception_t>::value;
+};
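+
+// A short sketch (not part of the library) of how the detector aliases above
+// are consumed via is_detected_exact, as done in is_sax: the check passes only
+// when the member exists *and* has the expected return type. The two toy
+// structs below are illustrative assumptions.
+#if 0
+struct has_null_handler   { bool null(); };
+struct lacks_null_handler { void null(); };
+
+static_assert(is_detected_exact<bool, null_function_t, has_null_handler>::value,
+              "bool null() is detected");
+static_assert(not is_detected_exact<bool, null_function_t, lacks_null_handler>::value,
+              "void null() has the wrong return type and is rejected");
+#endif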
+
+template <typename SAX, typename BasicJsonType>
+struct is_sax_static_asserts
+{
+  private:
+    static_assert(is_basic_json<BasicJsonType>::value,
+                  "BasicJsonType must be of type basic_json<...>");
+
+    using number_integer_t = typename BasicJsonType::number_integer_t;
+    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+    using number_float_t = typename BasicJsonType::number_float_t;
+    using string_t = typename BasicJsonType::string_t;
+    using exception_t = typename BasicJsonType::exception;
+
+  public:
+    static_assert(is_detected_exact<bool, null_function_t, SAX>::value,
+                  "Missing/invalid function: bool null()");
+    static_assert(is_detected_exact<bool, boolean_function_t, SAX>::value,
+                  "Missing/invalid function: bool boolean(bool)");
+    static_assert(
+        is_detected_exact<bool, number_integer_function_t, SAX,
+        number_integer_t>::value,
+        "Missing/invalid function: bool number_integer(number_integer_t)");
+    static_assert(
+        is_detected_exact<bool, number_unsigned_function_t, SAX,
+        number_unsigned_t>::value,
+        "Missing/invalid function: bool number_unsigned(number_unsigned_t)");
+    static_assert(is_detected_exact<bool, number_float_function_t, SAX,
+                  number_float_t, string_t>::value,
+                  "Missing/invalid function: bool number_float(number_float_t, const string_t&)");
+    static_assert(
+        is_detected_exact<bool, string_function_t, SAX, string_t>::value,
+        "Missing/invalid function: bool string(string_t&)");
+    static_assert(is_detected_exact<bool, start_object_function_t, SAX>::value,
+                  "Missing/invalid function: bool start_object(std::size_t)");
+    static_assert(is_detected_exact<bool, key_function_t, SAX, string_t>::value,
+                  "Missing/invalid function: bool key(string_t&)");
+    static_assert(is_detected_exact<bool, end_object_function_t, SAX>::value,
+                  "Missing/invalid function: bool end_object()");
+    static_assert(is_detected_exact<bool, start_array_function_t, SAX>::value,
+                  "Missing/invalid function: bool start_array(std::size_t)");
+    static_assert(is_detected_exact<bool, end_array_function_t, SAX>::value,
+                  "Missing/invalid function: bool end_array()");
+    static_assert(
+        is_detected_exact<bool, parse_error_function_t, SAX, exception_t>::value,
+        "Missing/invalid function: bool parse_error(std::size_t, const "
+        "std::string&, const exception&)");
+};
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/input/input_adapters.hpp>
+
+// #include <nlohmann/detail/input/json_sax.hpp>
+
+
+#include <cstddef>
+#include <string>
+#include <vector>
+
+// #include <nlohmann/detail/input/parser.hpp>
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+
+namespace nlohmann
+{
+
+/*!
+@brief SAX interface
+
+This class describes the SAX interface used by @ref nlohmann::json::sax_parse.
+Each function is called in different situations while the input is parsed. The
+boolean return value informs the parser whether to continue processing the
+input.
+*/
+template<typename BasicJsonType>
+struct json_sax
+{
+    /// type for (signed) integers
+    using number_integer_t = typename BasicJsonType::number_integer_t;
+    /// type for unsigned integers
+    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+    /// type for floating-point numbers
+    using number_float_t = typename BasicJsonType::number_float_t;
+    /// type for strings
+    using string_t = typename BasicJsonType::string_t;
+
+    /*!
+    @brief a null value was read
+    @return whether parsing should proceed
+    */
+    virtual bool null() = 0;
+
+    /*!
+    @brief a boolean value was read
+    @param[in] val  boolean value
+    @return whether parsing should proceed
+    */
+    virtual bool boolean(bool val) = 0;
+
+    /*!
+    @brief an integer number was read
+    @param[in] val  integer value
+    @return whether parsing should proceed
+    */
+    virtual bool number_integer(number_integer_t val) = 0;
+
+    /*!
+    @brief an unsigned integer number was read
+    @param[in] val  unsigned integer value
+    @return whether parsing should proceed
+    */
+    virtual bool number_unsigned(number_unsigned_t val) = 0;
+
+    /*!
+    @brief a floating-point number was read
+    @param[in] val  floating-point value
+    @param[in] s    raw token value
+    @return whether parsing should proceed
+    */
+    virtual bool number_float(number_float_t val, const string_t& s) = 0;
+
+    /*!
+    @brief a string was read
+    @param[in] val  string value
+    @return whether parsing should proceed
+    @note It is safe to move the passed string.
+    */
+    virtual bool string(string_t& val) = 0;
+
+    /*!
+    @brief the beginning of an object was read
+    @param[in] elements  number of object elements or -1 if unknown
+    @return whether parsing should proceed
+    @note binary formats may report the number of elements
+    */
+    virtual bool start_object(std::size_t elements) = 0;
+
+    /*!
+    @brief an object key was read
+    @param[in] val  object key
+    @return whether parsing should proceed
+    @note It is safe to move the passed string.
+    */
+    virtual bool key(string_t& val) = 0;
+
+    /*!
+    @brief the end of an object was read
+    @return whether parsing should proceed
+    */
+    virtual bool end_object() = 0;
+
+    /*!
+    @brief the beginning of an array was read
+    @param[in] elements  number of array elements or -1 if unknown
+    @return whether parsing should proceed
+    @note binary formats may report the number of elements
+    */
+    virtual bool start_array(std::size_t elements) = 0;
+
+    /*!
+    @brief the end of an array was read
+    @return whether parsing should proceed
+    */
+    virtual bool end_array() = 0;
+
+    /*!
+    @brief a parse error occurred
+    @param[in] position    the position in the input where the error occurs
+    @param[in] last_token  the last read token
+    @param[in] ex          an exception object describing the error
+    @return whether parsing should proceed (must return false)
+    */
+    virtual bool parse_error(std::size_t position,
+                             const std::string& last_token,
+                             const detail::exception& ex) = 0;
+
+    virtual ~json_sax() = default;
+};
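+
+// A minimal usage sketch (not part of the library) of the SAX interface above:
+// a consumer that merely counts events and never aborts parsing. The struct
+// and main() below are illustrative assumptions; nlohmann::json is the
+// basic_json specialization defined later in this header, and
+// nlohmann::json::sax_parse is the entry point mentioned in the @brief above.
+#if 0
+#include <cstddef>
+#include <iostream>
+#include <string>
+
+struct event_counter : nlohmann::json_sax<nlohmann::json>
+{
+    std::size_t events = 0;
+
+    bool null() override { ++events; return true; }
+    bool boolean(bool) override { ++events; return true; }
+    bool number_integer(number_integer_t) override { ++events; return true; }
+    bool number_unsigned(number_unsigned_t) override { ++events; return true; }
+    bool number_float(number_float_t, const string_t&) override { ++events; return true; }
+    bool string(string_t&) override { ++events; return true; }
+    bool start_object(std::size_t) override { ++events; return true; }
+    bool key(string_t&) override { ++events; return true; }
+    bool end_object() override { ++events; return true; }
+    bool start_array(std::size_t) override { ++events; return true; }
+    bool end_array() override { ++events; return true; }
+    bool parse_error(std::size_t, const std::string&, const nlohmann::detail::exception&) override
+    {
+        return false;  // a parse error always stops parsing
+    }
+};
+
+int main()
+{
+    event_counter counter;
+    nlohmann::json::sax_parse(R"([1, "two", {"three": 3.0}])", &counter);
+    std::cout << counter.events << " events\n";
+}
+#endif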
+
+
+namespace detail
+{
+/*!
+@brief SAX implementation to create a JSON value from SAX events
+
+This class implements the @ref json_sax interface and processes the SAX events
+to create a JSON value which makes it basically a DOM parser. The structure or
+hierarchy of the JSON value is managed by the stack `ref_stack` which contains
+a pointer to the respective array or object for each recursion depth.
+
+After successful parsing, the value that is passed by reference to the
+constructor contains the parsed value.
+
+@tparam BasicJsonType  the JSON type
+*/
+template<typename BasicJsonType>
+class json_sax_dom_parser
+{
+  public:
+    using number_integer_t = typename BasicJsonType::number_integer_t;
+    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+    using number_float_t = typename BasicJsonType::number_float_t;
+    using string_t = typename BasicJsonType::string_t;
+
+    /*!
+    @param[in, out] r  reference to a JSON value that is manipulated while
+                       parsing
+    @param[in] allow_exceptions_  whether parse errors yield exceptions
+    */
+    explicit json_sax_dom_parser(BasicJsonType& r, const bool allow_exceptions_ = true)
+        : root(r), allow_exceptions(allow_exceptions_)
+    {}
+
+    bool null()
+    {
+        handle_value(nullptr);
+        return true;
+    }
+
+    bool boolean(bool val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool number_integer(number_integer_t val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool number_unsigned(number_unsigned_t val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool number_float(number_float_t val, const string_t& /*unused*/)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool string(string_t& val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool start_object(std::size_t len)
+    {
+        ref_stack.push_back(handle_value(BasicJsonType::value_t::object));
+
+        if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size()))
+        {
+            JSON_THROW(out_of_range::create(408,
+                                            "excessive object size: " + std::to_string(len)));
+        }
+
+        return true;
+    }
+
+    bool key(string_t& val)
+    {
+        // add null at given key and store the reference for later
+        object_element = &(ref_stack.back()->m_value.object->operator[](val));
+        return true;
+    }
+
+    bool end_object()
+    {
+        ref_stack.pop_back();
+        return true;
+    }
+
+    bool start_array(std::size_t len)
+    {
+        ref_stack.push_back(handle_value(BasicJsonType::value_t::array));
+
+        if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size()))
+        {
+            JSON_THROW(out_of_range::create(408,
+                                            "excessive array size: " + std::to_string(len)));
+        }
+
+        return true;
+    }
+
+    bool end_array()
+    {
+        ref_stack.pop_back();
+        return true;
+    }
+
+    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
+                     const detail::exception& ex)
+    {
+        errored = true;
+        if (allow_exceptions)
+        {
+            // determine the proper exception type from the id
+            switch ((ex.id / 100) % 100)
+            {
+                case 1:
+                    JSON_THROW(*reinterpret_cast<const detail::parse_error*>(&ex));
+                case 4:
+                    JSON_THROW(*reinterpret_cast<const detail::out_of_range*>(&ex));
+                // LCOV_EXCL_START
+                case 2:
+                    JSON_THROW(*reinterpret_cast<const detail::invalid_iterator*>(&ex));
+                case 3:
+                    JSON_THROW(*reinterpret_cast<const detail::type_error*>(&ex));
+                case 5:
+                    JSON_THROW(*reinterpret_cast<const detail::other_error*>(&ex));
+                default:
+                    assert(false);
+                    // LCOV_EXCL_STOP
+            }
+        }
+        return false;
+    }
+
+    constexpr bool is_errored() const
+    {
+        return errored;
+    }
+
+  private:
+    /*!
+    @invariant If the ref stack is empty, then the passed value will be the new
+               root.
+    @invariant If the ref stack contains a value, then it is an array or an
+               object to which we can add elements
+    */
+    template<typename Value>
+    BasicJsonType* handle_value(Value&& v)
+    {
+        if (ref_stack.empty())
+        {
+            root = BasicJsonType(std::forward<Value>(v));
+            return &root;
+        }
+
+        assert(ref_stack.back()->is_array() or ref_stack.back()->is_object());
+
+        if (ref_stack.back()->is_array())
+        {
+            ref_stack.back()->m_value.array->emplace_back(std::forward<Value>(v));
+            return &(ref_stack.back()->m_value.array->back());
+        }
+        else
+        {
+            assert(object_element);
+            *object_element = BasicJsonType(std::forward<Value>(v));
+            return object_element;
+        }
+    }
+
+    /// the parsed JSON value
+    BasicJsonType& root;
+    /// stack to model hierarchy of values
+    std::vector<BasicJsonType*> ref_stack;
+    /// helper to hold the reference for the next object element
+    BasicJsonType* object_element = nullptr;
+    /// whether a syntax error occurred
+    bool errored = false;
+    /// whether to throw exceptions in case of errors
+    const bool allow_exceptions = true;
+};
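+
+// A brief usage sketch (not part of the library): json_sax_dom_parser is what
+// parser::parse() uses internally, but it can also be driven directly through
+// the public sax_parse entry point. nlohmann::json and the sample input are
+// assumptions for illustration.
+#if 0
+inline nlohmann::json parse_via_dom_builder()
+{
+    nlohmann::json result;
+    nlohmann::detail::json_sax_dom_parser<nlohmann::json> builder(result, /*allow_exceptions=*/true);
+    nlohmann::json::sax_parse(R"({"a": [1, 2]})", &builder);
+    return result;  // {"a":[1,2]}; a parse error throws because allow_exceptions is true
+}
+#endif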
+
+template<typename BasicJsonType>
+class json_sax_dom_callback_parser
+{
+  public:
+    using number_integer_t = typename BasicJsonType::number_integer_t;
+    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+    using number_float_t = typename BasicJsonType::number_float_t;
+    using string_t = typename BasicJsonType::string_t;
+    using parser_callback_t = typename BasicJsonType::parser_callback_t;
+    using parse_event_t = typename BasicJsonType::parse_event_t;
+
+    json_sax_dom_callback_parser(BasicJsonType& r,
+                                 const parser_callback_t cb,
+                                 const bool allow_exceptions_ = true)
+        : root(r), callback(cb), allow_exceptions(allow_exceptions_)
+    {
+        keep_stack.push_back(true);
+    }
+
+    bool null()
+    {
+        handle_value(nullptr);
+        return true;
+    }
+
+    bool boolean(bool val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool number_integer(number_integer_t val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool number_unsigned(number_unsigned_t val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool number_float(number_float_t val, const string_t& /*unused*/)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool string(string_t& val)
+    {
+        handle_value(val);
+        return true;
+    }
+
+    bool start_object(std::size_t len)
+    {
+        // check callback for object start
+        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::object_start, discarded);
+        keep_stack.push_back(keep);
+
+        auto val = handle_value(BasicJsonType::value_t::object, true);
+        ref_stack.push_back(val.second);
+
+        // check object limit
+        if (ref_stack.back())
+        {
+            if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size()))
+            {
+                JSON_THROW(out_of_range::create(408,
+                                                "excessive object size: " + std::to_string(len)));
+            }
+        }
+
+        return true;
+    }
+
+    bool key(string_t& val)
+    {
+        BasicJsonType k = BasicJsonType(val);
+
+        // check callback for key
+        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::key, k);
+        key_keep_stack.push_back(keep);
+
+        // add discarded value at given key and store the reference for later
+        if (keep and ref_stack.back())
+        {
+            object_element = &(ref_stack.back()->m_value.object->operator[](val) = discarded);
+        }
+
+        return true;
+    }
+
+    bool end_object()
+    {
+        if (ref_stack.back())
+        {
+            if (not callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::object_end, *ref_stack.back()))
+            {
+                // discard object
+                *ref_stack.back() = discarded;
+            }
+        }
+
+        assert(not ref_stack.empty());
+        assert(not keep_stack.empty());
+        ref_stack.pop_back();
+        keep_stack.pop_back();
+
+        if (not ref_stack.empty() and ref_stack.back())
+        {
+            // remove discarded value
+            if (ref_stack.back()->is_object())
+            {
+                for (auto it = ref_stack.back()->begin(); it != ref_stack.back()->end(); ++it)
+                {
+                    if (it->is_discarded())
+                    {
+                        ref_stack.back()->erase(it);
+                        break;
+                    }
+                }
+            }
+        }
+
+        return true;
+    }
+
+    bool start_array(std::size_t len)
+    {
+        const bool keep = callback(static_cast<int>(ref_stack.size()), parse_event_t::array_start, discarded);
+        keep_stack.push_back(keep);
+
+        auto val = handle_value(BasicJsonType::value_t::array, true);
+        ref_stack.push_back(val.second);
+
+        // check array limit
+        if (ref_stack.back())
+        {
+            if (JSON_UNLIKELY(len != std::size_t(-1) and len > ref_stack.back()->max_size()))
+            {
+                JSON_THROW(out_of_range::create(408,
+                                                "excessive array size: " + std::to_string(len)));
+            }
+        }
+
+        return true;
+    }
+
+    bool end_array()
+    {
+        bool keep = true;
+
+        if (ref_stack.back())
+        {
+            keep = callback(static_cast<int>(ref_stack.size()) - 1, parse_event_t::array_end, *ref_stack.back());
+            if (not keep)
+            {
+                // discard array
+                *ref_stack.back() = discarded;
+            }
+        }
+
+        assert(not ref_stack.empty());
+        assert(not keep_stack.empty());
+        ref_stack.pop_back();
+        keep_stack.pop_back();
+
+        // remove discarded value
+        if (not keep and not ref_stack.empty())
+        {
+            if (ref_stack.back()->is_array())
+            {
+                ref_stack.back()->m_value.array->pop_back();
+            }
+        }
+
+        return true;
+    }
+
+    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/,
+                     const detail::exception& ex)
+    {
+        errored = true;
+        if (allow_exceptions)
+        {
+            // determine the proper exception type from the id
+            switch ((ex.id / 100) % 100)
+            {
+                case 1:
+                    JSON_THROW(*reinterpret_cast<const detail::parse_error*>(&ex));
+                case 4:
+                    JSON_THROW(*reinterpret_cast<const detail::out_of_range*>(&ex));
+                // LCOV_EXCL_START
+                case 2:
+                    JSON_THROW(*reinterpret_cast<const detail::invalid_iterator*>(&ex));
+                case 3:
+                    JSON_THROW(*reinterpret_cast<const detail::type_error*>(&ex));
+                case 5:
+                    JSON_THROW(*reinterpret_cast<const detail::other_error*>(&ex));
+                default:
+                    assert(false);
+                    // LCOV_EXCL_STOP
+            }
+        }
+        return false;
+    }
+
+    constexpr bool is_errored() const
+    {
+        return errored;
+    }
+
+  private:
+    /*!
+    @param[in] v  value to add to the JSON value we build during parsing
+    @param[in] skip_callback  whether we should skip calling the callback
+               function; this is required after start_array() and
+               start_object() SAX events, because otherwise we would call the
+               callback function with an empty array or object, respectively.
+
+    @invariant If the ref stack is empty, then the passed value will be the new
+               root.
+    @invariant If the ref stack contains a value, then it is an array or an
+               object to which we can add elements
+
+    @return pair of boolean (whether value should be kept) and pointer (to the
+            passed value in the ref_stack hierarchy; nullptr if not kept)
+    */
+    template<typename Value>
+    std::pair<bool, BasicJsonType*> handle_value(Value&& v, const bool skip_callback = false)
+    {
+        assert(not keep_stack.empty());
+
+        // do not handle this value if we know it would be added to a discarded
+        // container
+        if (not keep_stack.back())
+        {
+            return {false, nullptr};
+        }
+
+        // create value
+        auto value = BasicJsonType(std::forward<Value>(v));
+
+        // check callback
+        const bool keep = skip_callback or callback(static_cast<int>(ref_stack.size()), parse_event_t::value, value);
+
+        // do not handle this value if we just learnt it shall be discarded
+        if (not keep)
+        {
+            return {false, nullptr};
+        }
+
+        if (ref_stack.empty())
+        {
+            root = std::move(value);
+            return {true, &root};
+        }
+
+        // skip this value if we already decided to skip the parent
+        // (https://github.com/nlohmann/json/issues/971#issuecomment-413678360)
+        if (not ref_stack.back())
+        {
+            return {false, nullptr};
+        }
+
+        // we now only expect arrays and objects
+        assert(ref_stack.back()->is_array() or ref_stack.back()->is_object());
+
+        if (ref_stack.back()->is_array())
+        {
+            ref_stack.back()->m_value.array->push_back(std::move(value));
+            return {true, &(ref_stack.back()->m_value.array->back())};
+        }
+        else
+        {
+            // check if we should store an element for the current key
+            assert(not key_keep_stack.empty());
+            const bool store_element = key_keep_stack.back();
+            key_keep_stack.pop_back();
+
+            if (not store_element)
+            {
+                return {false, nullptr};
+            }
+
+            assert(object_element);
+            *object_element = std::move(value);
+            return {true, object_element};
+        }
+    }
+
+    /// the parsed JSON value
+    BasicJsonType& root;
+    /// stack to model hierarchy of values
+    std::vector<BasicJsonType*> ref_stack;
+    /// stack to manage which values to keep
+    std::vector<bool> keep_stack;
+    /// stack to manage which object keys to keep
+    std::vector<bool> key_keep_stack;
+    /// helper to hold the reference for the next object element
+    BasicJsonType* object_element = nullptr;
+    /// whether a syntax error occurred
+    bool errored = false;
+    /// callback function
+    const parser_callback_t callback = nullptr;
+    /// whether to throw exceptions in case of errors
+    const bool allow_exceptions = true;
+    /// a discarded value for the callback
+    BasicJsonType discarded = BasicJsonType::value_t::discarded;
+};
+
+template<typename BasicJsonType>
+class json_sax_acceptor
+{
+  public:
+    using number_integer_t = typename BasicJsonType::number_integer_t;
+    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+    using number_float_t = typename BasicJsonType::number_float_t;
+    using string_t = typename BasicJsonType::string_t;
+
+    bool null()
+    {
+        return true;
+    }
+
+    bool boolean(bool /*unused*/)
+    {
+        return true;
+    }
+
+    bool number_integer(number_integer_t /*unused*/)
+    {
+        return true;
+    }
+
+    bool number_unsigned(number_unsigned_t /*unused*/)
+    {
+        return true;
+    }
+
+    bool number_float(number_float_t /*unused*/, const string_t& /*unused*/)
+    {
+        return true;
+    }
+
+    bool string(string_t& /*unused*/)
+    {
+        return true;
+    }
+
+    bool start_object(std::size_t  /*unused*/ = std::size_t(-1))
+    {
+        return true;
+    }
+
+    bool key(string_t& /*unused*/)
+    {
+        return true;
+    }
+
+    bool end_object()
+    {
+        return true;
+    }
+
+    bool start_array(std::size_t  /*unused*/ = std::size_t(-1))
+    {
+        return true;
+    }
+
+    bool end_array()
+    {
+        return true;
+    }
+
+    bool parse_error(std::size_t /*unused*/, const std::string& /*unused*/, const detail::exception& /*unused*/)
+    {
+        return false;
+    }
+};
+}  // namespace detail
+
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/input/lexer.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+////////////
+// parser //
+////////////
+
+/*!
+@brief syntax analysis
+
+This class implements a recursive descent parser.
+*/
+template<typename BasicJsonType>
+class parser
+{
+    using number_integer_t = typename BasicJsonType::number_integer_t;
+    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+    using number_float_t = typename BasicJsonType::number_float_t;
+    using string_t = typename BasicJsonType::string_t;
+    using lexer_t = lexer<BasicJsonType>;
+    using token_type = typename lexer_t::token_type;
+
+  public:
+    enum class parse_event_t : uint8_t
+    {
+        /// the parser read `{` and started to process a JSON object
+        object_start,
+        /// the parser read `}` and finished processing a JSON object
+        object_end,
+        /// the parser read `[` and started to process a JSON array
+        array_start,
+        /// the parser read `]` and finished processing a JSON array
+        array_end,
+        /// the parser read a key of a value in an object
+        key,
+        /// the parser finished reading a JSON value
+        value
+    };
+
+    using parser_callback_t =
+        std::function<bool(int depth, parse_event_t event, BasicJsonType& parsed)>;
+
+    /// a parser reading from an input adapter
+    explicit parser(detail::input_adapter_t&& adapter,
+                    const parser_callback_t cb = nullptr,
+                    const bool allow_exceptions_ = true)
+        : callback(cb), m_lexer(std::move(adapter)), allow_exceptions(allow_exceptions_)
+    {
+        // read first token
+        get_token();
+    }
+
+    /*!
+    @brief public parser interface
+
+    @param[in] strict      whether to expect the last token to be EOF
+    @param[in,out] result  parsed JSON value
+
+    @throw parse_error.101 in case of an unexpected token
+    @throw parse_error.102 if to_unicode fails or a surrogate error occurs
+    @throw parse_error.103 if to_unicode fails
+    */
+    void parse(const bool strict, BasicJsonType& result)
+    {
+        if (callback)
+        {
+            json_sax_dom_callback_parser<BasicJsonType> sdp(result, callback, allow_exceptions);
+            sax_parse_internal(&sdp);
+            result.assert_invariant();
+
+            // in strict mode, input must be completely read
+            if (strict and (get_token() != token_type::end_of_input))
+            {
+                sdp.parse_error(m_lexer.get_position(),
+                                m_lexer.get_token_string(),
+                                parse_error::create(101, m_lexer.get_position(),
+                                                    exception_message(token_type::end_of_input, "value")));
+            }
+
+            // in case of an error, return discarded value
+            if (sdp.is_errored())
+            {
+                result = value_t::discarded;
+                return;
+            }
+
+            // set top-level value to null if it was discarded by the callback
+            // function
+            if (result.is_discarded())
+            {
+                result = nullptr;
+            }
+        }
+        else
+        {
+            json_sax_dom_parser<BasicJsonType> sdp(result, allow_exceptions);
+            sax_parse_internal(&sdp);
+            result.assert_invariant();
+
+            // in strict mode, input must be completely read
+            if (strict and (get_token() != token_type::end_of_input))
+            {
+                sdp.parse_error(m_lexer.get_position(),
+                                m_lexer.get_token_string(),
+                                parse_error::create(101, m_lexer.get_position(),
+                                                    exception_message(token_type::end_of_input, "value")));
+            }
+
+            // in case of an error, return discarded value
+            if (sdp.is_errored())
+            {
+                result = value_t::discarded;
+                return;
+            }
+        }
+    }
+
+    /*!
+    @brief public accept interface
+
+    @param[in] strict  whether to expect the last token to be EOF
+    @return whether the input is a proper JSON text
+    */
+    bool accept(const bool strict = true)
+    {
+        json_sax_acceptor<BasicJsonType> sax_acceptor;
+        return sax_parse(&sax_acceptor, strict);
+    }
+
+    template <typename SAX>
+    bool sax_parse(SAX* sax, const bool strict = true)
+    {
+        (void)detail::is_sax_static_asserts<SAX, BasicJsonType> {};
+        const bool result = sax_parse_internal(sax);
+
+        // strict mode: next byte must be EOF
+        if (result and strict and (get_token() != token_type::end_of_input))
+        {
+            return sax->parse_error(m_lexer.get_position(),
+                                    m_lexer.get_token_string(),
+                                    parse_error::create(101, m_lexer.get_position(),
+                                            exception_message(token_type::end_of_input, "value")));
+        }
+
+        return result;
+    }
+
+  private:
+    template <typename SAX>
+    bool sax_parse_internal(SAX* sax)
+    {
+        // stack to remember the hierarchy of structured values we are parsing
+        // true = array; false = object
+        std::vector<bool> states;
+        // value to avoid a goto (see comment where set to true)
+        bool skip_to_state_evaluation = false;
+
+        while (true)
+        {
+            if (not skip_to_state_evaluation)
+            {
+                // invariant: get_token() was called before each iteration
+                switch (last_token)
+                {
+                    case token_type::begin_object:
+                    {
+                        if (JSON_UNLIKELY(not sax->start_object(std::size_t(-1))))
+                        {
+                            return false;
+                        }
+
+                        // closing } -> we are done
+                        if (get_token() == token_type::end_object)
+                        {
+                            if (JSON_UNLIKELY(not sax->end_object()))
+                            {
+                                return false;
+                            }
+                            break;
+                        }
+
+                        // parse key
+                        if (JSON_UNLIKELY(last_token != token_type::value_string))
+                        {
+                            return sax->parse_error(m_lexer.get_position(),
+                                                    m_lexer.get_token_string(),
+                                                    parse_error::create(101, m_lexer.get_position(),
+                                                            exception_message(token_type::value_string, "object key")));
+                        }
+                        if (JSON_UNLIKELY(not sax->key(m_lexer.get_string())))
+                        {
+                            return false;
+                        }
+
+                        // parse separator (:)
+                        if (JSON_UNLIKELY(get_token() != token_type::name_separator))
+                        {
+                            return sax->parse_error(m_lexer.get_position(),
+                                                    m_lexer.get_token_string(),
+                                                    parse_error::create(101, m_lexer.get_position(),
+                                                            exception_message(token_type::name_separator, "object separator")));
+                        }
+
+                        // remember we are now inside an object
+                        states.push_back(false);
+
+                        // parse values
+                        get_token();
+                        continue;
+                    }
+
+                    case token_type::begin_array:
+                    {
+                        if (JSON_UNLIKELY(not sax->start_array(std::size_t(-1))))
+                        {
+                            return false;
+                        }
+
+                        // closing ] -> we are done
+                        if (get_token() == token_type::end_array)
+                        {
+                            if (JSON_UNLIKELY(not sax->end_array()))
+                            {
+                                return false;
+                            }
+                            break;
+                        }
+
+                        // remember we are now inside an array
+                        states.push_back(true);
+
+                        // parse values (no need to call get_token)
+                        continue;
+                    }
+
+                    case token_type::value_float:
+                    {
+                        const auto res = m_lexer.get_number_float();
+
+                        if (JSON_UNLIKELY(not std::isfinite(res)))
+                        {
+                            return sax->parse_error(m_lexer.get_position(),
+                                                    m_lexer.get_token_string(),
+                                                    out_of_range::create(406, "number overflow parsing '" + m_lexer.get_token_string() + "'"));
+                        }
+                        else
+                        {
+                            if (JSON_UNLIKELY(not sax->number_float(res, m_lexer.get_string())))
+                            {
+                                return false;
+                            }
+                            break;
+                        }
+                    }
+
+                    case token_type::literal_false:
+                    {
+                        if (JSON_UNLIKELY(not sax->boolean(false)))
+                        {
+                            return false;
+                        }
+                        break;
+                    }
+
+                    case token_type::literal_null:
+                    {
+                        if (JSON_UNLIKELY(not sax->null()))
+                        {
+                            return false;
+                        }
+                        break;
+                    }
+
+                    case token_type::literal_true:
+                    {
+                        if (JSON_UNLIKELY(not sax->boolean(true)))
+                        {
+                            return false;
+                        }
+                        break;
+                    }
+
+                    case token_type::value_integer:
+                    {
+                        if (JSON_UNLIKELY(not sax->number_integer(m_lexer.get_number_integer())))
+                        {
+                            return false;
+                        }
+                        break;
+                    }
+
+                    case token_type::value_string:
+                    {
+                        if (JSON_UNLIKELY(not sax->string(m_lexer.get_string())))
+                        {
+                            return false;
+                        }
+                        break;
+                    }
+
+                    case token_type::value_unsigned:
+                    {
+                        if (JSON_UNLIKELY(not sax->number_unsigned(m_lexer.get_number_unsigned())))
+                        {
+                            return false;
+                        }
+                        break;
+                    }
+
+                    case token_type::parse_error:
+                    {
+                        // using "uninitialized" to avoid "expected" message
+                        return sax->parse_error(m_lexer.get_position(),
+                                                m_lexer.get_token_string(),
+                                                parse_error::create(101, m_lexer.get_position(),
+                                                        exception_message(token_type::uninitialized, "value")));
+                    }
+
+                    default: // the last token was unexpected
+                    {
+                        return sax->parse_error(m_lexer.get_position(),
+                                                m_lexer.get_token_string(),
+                                                parse_error::create(101, m_lexer.get_position(),
+                                                        exception_message(token_type::literal_or_value, "value")));
+                    }
+                }
+            }
+            else
+            {
+                skip_to_state_evaluation = false;
+            }
+
+            // we reached this line after we successfully parsed a value
+            if (states.empty())
+            {
+                // empty stack: we reached the end of the hierarchy: done
+                return true;
+            }
+            else
+            {
+                if (states.back())  // array
+                {
+                    // comma -> next value
+                    if (get_token() == token_type::value_separator)
+                    {
+                        // parse a new value
+                        get_token();
+                        continue;
+                    }
+
+                    // closing ]
+                    if (JSON_LIKELY(last_token == token_type::end_array))
+                    {
+                        if (JSON_UNLIKELY(not sax->end_array()))
+                        {
+                            return false;
+                        }
+
+                        // We are done with this array. Before we can parse a
+                        // new value, we need to evaluate the new state first.
+                        // By setting skip_to_state_evaluation to true, the
+                        // next loop iteration skips the token switch and goes
+                        // straight to this state evaluation.
+                        assert(not states.empty());
+                        states.pop_back();
+                        skip_to_state_evaluation = true;
+                        continue;
+                    }
+                    else
+                    {
+                        return sax->parse_error(m_lexer.get_position(),
+                                                m_lexer.get_token_string(),
+                                                parse_error::create(101, m_lexer.get_position(),
+                                                        exception_message(token_type::end_array, "array")));
+                    }
+                }
+                else  // object
+                {
+                    // comma -> next value
+                    if (get_token() == token_type::value_separator)
+                    {
+                        // parse key
+                        if (JSON_UNLIKELY(get_token() != token_type::value_string))
+                        {
+                            return sax->parse_error(m_lexer.get_position(),
+                                                    m_lexer.get_token_string(),
+                                                    parse_error::create(101, m_lexer.get_position(),
+                                                            exception_message(token_type::value_string, "object key")));
+                        }
+                        else
+                        {
+                            if (JSON_UNLIKELY(not sax->key(m_lexer.get_string())))
+                            {
+                                return false;
+                            }
+                        }
+
+                        // parse separator (:)
+                        if (JSON_UNLIKELY(get_token() != token_type::name_separator))
+                        {
+                            return sax->parse_error(m_lexer.get_position(),
+                                                    m_lexer.get_token_string(),
+                                                    parse_error::create(101, m_lexer.get_position(),
+                                                            exception_message(token_type::name_separator, "object separator")));
+                        }
+
+                        // parse values
+                        get_token();
+                        continue;
+                    }
+
+                    // closing }
+                    if (JSON_LIKELY(last_token == token_type::end_object))
+                    {
+                        if (JSON_UNLIKELY(not sax->end_object()))
+                        {
+                            return false;
+                        }
+
+                        // We are done with this object. Before we can parse a
+                        // new value, we need to evaluate the new state first.
+                        // By setting skip_to_state_evaluation to true, we skip
+                        // reading a new value on the next iteration and jump
+                        // straight to the state evaluation below.
+                        assert(not states.empty());
+                        states.pop_back();
+                        skip_to_state_evaluation = true;
+                        continue;
+                    }
+                    else
+                    {
+                        return sax->parse_error(m_lexer.get_position(),
+                                                m_lexer.get_token_string(),
+                                                parse_error::create(101, m_lexer.get_position(),
+                                                        exception_message(token_type::end_object, "object")));
+                    }
+                }
+            }
+        }
+    }
+
+    /// get next token from lexer
+    token_type get_token()
+    {
+        return (last_token = m_lexer.scan());
+    }
+
+    std::string exception_message(const token_type expected, const std::string& context)
+    {
+        std::string error_msg = "syntax error ";
+
+        if (not context.empty())
+        {
+            error_msg += "while parsing " + context + " ";
+        }
+
+        error_msg += "- ";
+
+        if (last_token == token_type::parse_error)
+        {
+            error_msg += std::string(m_lexer.get_error_message()) + "; last read: '" +
+                         m_lexer.get_token_string() + "'";
+        }
+        else
+        {
+            error_msg += "unexpected " + std::string(lexer_t::token_type_name(last_token));
+        }
+
+        if (expected != token_type::uninitialized)
+        {
+            error_msg += "; expected " + std::string(lexer_t::token_type_name(expected));
+        }
+
+        return error_msg;
+    }
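+
+    // Example (illustrative): with context == "value", last_token ==
+    // token_type::end_of_input and expected == token_type::literal_or_value,
+    // this yields
+    //   "syntax error while parsing value - unexpected end of input; expected '[', '{', or a literal"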
+
+  private:
+    /// callback function
+    const parser_callback_t callback = nullptr;
+    /// the type of the last read token
+    token_type last_token = token_type::uninitialized;
+    /// the lexer
+    lexer_t m_lexer;
+    /// whether to throw exceptions in case of errors
+    const bool allow_exceptions = true;
+};
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/iterators/primitive_iterator.hpp>
+
+
+#include <cstddef> // ptrdiff_t
+#include <limits>  // numeric_limits
+
+namespace nlohmann
+{
+namespace detail
+{
+/*
+@brief an iterator for primitive JSON types
+
+This class models an iterator for primitive JSON types (boolean, number,
+string). Its only purpose is to allow the iterator/const_iterator classes
+to "iterate" over primitive values. Internally, the iterator is modeled by
+a `difference_type` variable: begin_value (`0`) models the beginning,
+end_value (`1`) models past-the-end. (A short usage sketch follows the
+class definition.)
+*/
+class primitive_iterator_t
+{
+  private:
+    using difference_type = std::ptrdiff_t;
+    static constexpr difference_type begin_value = 0;
+    static constexpr difference_type end_value = begin_value + 1;
+
+    /// iterator as signed integer type
+    difference_type m_it = (std::numeric_limits<std::ptrdiff_t>::min)();
+
+  public:
+    constexpr difference_type get_value() const noexcept
+    {
+        return m_it;
+    }
+
+    /// set iterator to a defined beginning
+    void set_begin() noexcept
+    {
+        m_it = begin_value;
+    }
+
+    /// set iterator to a defined past the end
+    void set_end() noexcept
+    {
+        m_it = end_value;
+    }
+
+    /// return whether the iterator can be dereferenced
+    constexpr bool is_begin() const noexcept
+    {
+        return m_it == begin_value;
+    }
+
+    /// return whether the iterator is at end
+    constexpr bool is_end() const noexcept
+    {
+        return m_it == end_value;
+    }
+
+    friend constexpr bool operator==(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
+    {
+        return lhs.m_it == rhs.m_it;
+    }
+
+    friend constexpr bool operator<(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
+    {
+        return lhs.m_it < rhs.m_it;
+    }
+
+    primitive_iterator_t operator+(difference_type n) noexcept
+    {
+        auto result = *this;
+        result += n;
+        return result;
+    }
+
+    friend constexpr difference_type operator-(primitive_iterator_t lhs, primitive_iterator_t rhs) noexcept
+    {
+        return lhs.m_it - rhs.m_it;
+    }
+
+    primitive_iterator_t& operator++() noexcept
+    {
+        ++m_it;
+        return *this;
+    }
+
+    primitive_iterator_t const operator++(int) noexcept
+    {
+        auto result = *this;
+        ++m_it;
+        return result;
+    }
+
+    primitive_iterator_t& operator--() noexcept
+    {
+        --m_it;
+        return *this;
+    }
+
+    primitive_iterator_t const operator--(int) noexcept
+    {
+        auto result = *this;
+        --m_it;
+        return result;
+    }
+
+    primitive_iterator_t& operator+=(difference_type n) noexcept
+    {
+        m_it += n;
+        return *this;
+    }
+
+    primitive_iterator_t& operator-=(difference_type n) noexcept
+    {
+        m_it -= n;
+        return *this;
+    }
+};
+}  // namespace detail
+}  // namespace nlohmann
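+
+// Illustrative usage sketch (comment only, not part of upstream nlohmann/json):
+// primitive_iterator_t models "iteration" over a single primitive value.
+//
+//   nlohmann::detail::primitive_iterator_t it;
+//   it.set_begin();   // is_begin() == true: the value may be dereferenced
+//   ++it;             // is_end() == true: one step past the single value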
+
+// #include <nlohmann/detail/iterators/internal_iterator.hpp>
+
+
+// #include <nlohmann/detail/iterators/primitive_iterator.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+/*!
+@brief an iterator value
+
+@note This structure could easily be a union, but MSVC currently does not allow
+union members with complex constructors; see https://github.com/nlohmann/json/pull/105.
+*/
+template<typename BasicJsonType> struct internal_iterator
+{
+    /// iterator for JSON objects
+    typename BasicJsonType::object_t::iterator object_iterator {};
+    /// iterator for JSON arrays
+    typename BasicJsonType::array_t::iterator array_iterator {};
+    /// generic iterator for all other types
+    primitive_iterator_t primitive_iterator {};
+};
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/iterators/iter_impl.hpp>
+
+
+#include <ciso646> // not
+#include <iterator> // iterator, random_access_iterator_tag, bidirectional_iterator_tag, advance, next
+#include <type_traits> // conditional, is_const, remove_const
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/iterators/internal_iterator.hpp>
+
+// #include <nlohmann/detail/iterators/primitive_iterator.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+// forward declare, to be able to friend it later on
+template<typename IteratorType> class iteration_proxy;
+template<typename IteratorType> class iteration_proxy_value;
+
+/*!
+@brief a template for a bidirectional iterator for the @ref basic_json class
+This class implements both iterators (iterator and const_iterator) for the
+@ref basic_json class. (A short usage sketch follows the class definition.)
+@note An iterator is called *initialized* when a pointer to a JSON value has
+      been set (e.g., by a constructor or a copy assignment). If the iterator is
+      default-constructed, it is *uninitialized* and most methods are undefined.
+      **The library uses assertions to detect calls on uninitialized iterators.**
+@requirement The class satisfies the following concept requirements:
+-
+[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator):
+  The iterator can be moved in both directions (i.e. incremented and
+  decremented).
+@since version 1.0.0, simplified in version 2.0.9, change to bidirectional
+       iterators in version 3.0.0 (see https://github.com/nlohmann/json/issues/593)
+*/
+template<typename BasicJsonType>
+class iter_impl
+{
+    /// allow basic_json to access private members
+    friend iter_impl<typename std::conditional<std::is_const<BasicJsonType>::value, typename std::remove_const<BasicJsonType>::type, const BasicJsonType>::type>;
+    friend BasicJsonType;
+    friend iteration_proxy<iter_impl>;
+    friend iteration_proxy_value<iter_impl>;
+
+    using object_t = typename BasicJsonType::object_t;
+    using array_t = typename BasicJsonType::array_t;
+    // make sure BasicJsonType is basic_json or const basic_json
+    static_assert(is_basic_json<typename std::remove_const<BasicJsonType>::type>::value,
+                  "iter_impl only accepts (const) basic_json");
+
+  public:
+
+    /// The std::iterator class template (used as a base class to provide typedefs) is deprecated in C++17.
+    /// The C++ Standard has never required user-defined iterators to derive from std::iterator.
+    /// A user-defined iterator should provide publicly accessible typedefs named
+    /// iterator_category, value_type, difference_type, pointer, and reference.
+    /// Note that value_type is required to be non-const, even for constant iterators.
+    using iterator_category = std::bidirectional_iterator_tag;
+
+    /// the type of the values when the iterator is dereferenced
+    using value_type = typename BasicJsonType::value_type;
+    /// a type to represent differences between iterators
+    using difference_type = typename BasicJsonType::difference_type;
+    /// defines a pointer to the type iterated over (value_type)
+    using pointer = typename std::conditional<std::is_const<BasicJsonType>::value,
+          typename BasicJsonType::const_pointer,
+          typename BasicJsonType::pointer>::type;
+    /// defines a reference to the type iterated over (value_type)
+    using reference =
+        typename std::conditional<std::is_const<BasicJsonType>::value,
+        typename BasicJsonType::const_reference,
+        typename BasicJsonType::reference>::type;
+
+    /// default constructor
+    iter_impl() = default;
+
+    /*!
+    @brief constructor for a given JSON instance
+    @param[in] object  pointer to a JSON object for this iterator
+    @pre object != nullptr
+    @post The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    explicit iter_impl(pointer object) noexcept : m_object(object)
+    {
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+            {
+                m_it.object_iterator = typename object_t::iterator();
+                break;
+            }
+
+            case value_t::array:
+            {
+                m_it.array_iterator = typename array_t::iterator();
+                break;
+            }
+
+            default:
+            {
+                m_it.primitive_iterator = primitive_iterator_t();
+                break;
+            }
+        }
+    }
+
+    /*!
+    @note The conventional copy constructor and copy assignment are implicitly
+          defined. Combined with the following converting constructor and
+          assignment, they support: (1) copy from iterator to iterator, (2)
+          copy from const iterator to const iterator, and (3) conversion from
+          iterator to const iterator. However conversion from const iterator
+          to iterator is not defined.
+    */
+
+    /*!
+    @brief converting constructor
+    @param[in] other  non-const iterator to copy from
+    @note It is not checked whether @a other is initialized.
+    */
+    iter_impl(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept
+        : m_object(other.m_object), m_it(other.m_it) {}
+
+    /*!
+    @brief converting assignment
+    @param[in,out] other  non-const iterator to copy from
+    @return const/non-const iterator
+    @note It is not checked whether @a other is initialized.
+    */
+    iter_impl& operator=(const iter_impl<typename std::remove_const<BasicJsonType>::type>& other) noexcept
+    {
+        m_object = other.m_object;
+        m_it = other.m_it;
+        return *this;
+    }
+
+  private:
+    /*!
+    @brief set the iterator to the first value
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    void set_begin() noexcept
+    {
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+            {
+                m_it.object_iterator = m_object->m_value.object->begin();
+                break;
+            }
+
+            case value_t::array:
+            {
+                m_it.array_iterator = m_object->m_value.array->begin();
+                break;
+            }
+
+            case value_t::null:
+            {
+                // set to end so begin()==end() is true: null is empty
+                m_it.primitive_iterator.set_end();
+                break;
+            }
+
+            default:
+            {
+                m_it.primitive_iterator.set_begin();
+                break;
+            }
+        }
+    }
+
+    /*!
+    @brief set the iterator past the last value
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    void set_end() noexcept
+    {
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+            {
+                m_it.object_iterator = m_object->m_value.object->end();
+                break;
+            }
+
+            case value_t::array:
+            {
+                m_it.array_iterator = m_object->m_value.array->end();
+                break;
+            }
+
+            default:
+            {
+                m_it.primitive_iterator.set_end();
+                break;
+            }
+        }
+    }
+
+  public:
+    /*!
+    @brief return a reference to the value pointed to by the iterator
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    reference operator*() const
+    {
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+            {
+                assert(m_it.object_iterator != m_object->m_value.object->end());
+                return m_it.object_iterator->second;
+            }
+
+            case value_t::array:
+            {
+                assert(m_it.array_iterator != m_object->m_value.array->end());
+                return *m_it.array_iterator;
+            }
+
+            case value_t::null:
+                JSON_THROW(invalid_iterator::create(214, "cannot get value"));
+
+            default:
+            {
+                if (JSON_LIKELY(m_it.primitive_iterator.is_begin()))
+                {
+                    return *m_object;
+                }
+
+                JSON_THROW(invalid_iterator::create(214, "cannot get value"));
+            }
+        }
+    }
+
+    /*!
+    @brief dereference the iterator
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    pointer operator->() const
+    {
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+            {
+                assert(m_it.object_iterator != m_object->m_value.object->end());
+                return &(m_it.object_iterator->second);
+            }
+
+            case value_t::array:
+            {
+                assert(m_it.array_iterator != m_object->m_value.array->end());
+                return &*m_it.array_iterator;
+            }
+
+            default:
+            {
+                if (JSON_LIKELY(m_it.primitive_iterator.is_begin()))
+                {
+                    return m_object;
+                }
+
+                JSON_THROW(invalid_iterator::create(214, "cannot get value"));
+            }
+        }
+    }
+
+    /*!
+    @brief post-increment (it++)
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    iter_impl const operator++(int)
+    {
+        auto result = *this;
+        ++(*this);
+        return result;
+    }
+
+    /*!
+    @brief pre-increment (++it)
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    iter_impl& operator++()
+    {
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+            {
+                std::advance(m_it.object_iterator, 1);
+                break;
+            }
+
+            case value_t::array:
+            {
+                std::advance(m_it.array_iterator, 1);
+                break;
+            }
+
+            default:
+            {
+                ++m_it.primitive_iterator;
+                break;
+            }
+        }
+
+        return *this;
+    }
+
+    /*!
+    @brief post-decrement (it--)
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    iter_impl const operator--(int)
+    {
+        auto result = *this;
+        --(*this);
+        return result;
+    }
+
+    /*!
+    @brief pre-decrement (--it)
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    iter_impl& operator--()
+    {
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+            {
+                std::advance(m_it.object_iterator, -1);
+                break;
+            }
+
+            case value_t::array:
+            {
+                std::advance(m_it.array_iterator, -1);
+                break;
+            }
+
+            default:
+            {
+                --m_it.primitive_iterator;
+                break;
+            }
+        }
+
+        return *this;
+    }
+
+    /*!
+    @brief  comparison: equal
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    bool operator==(const iter_impl& other) const
+    {
+        // if objects are not the same, the comparison is undefined
+        if (JSON_UNLIKELY(m_object != other.m_object))
+        {
+            JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers"));
+        }
+
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+                return (m_it.object_iterator == other.m_it.object_iterator);
+
+            case value_t::array:
+                return (m_it.array_iterator == other.m_it.array_iterator);
+
+            default:
+                return (m_it.primitive_iterator == other.m_it.primitive_iterator);
+        }
+    }
+
+    /*!
+    @brief  comparison: not equal
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    bool operator!=(const iter_impl& other) const
+    {
+        return not operator==(other);
+    }
+
+    /*!
+    @brief  comparison: smaller
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    bool operator<(const iter_impl& other) const
+    {
+        // if objects are not the same, the comparison is undefined
+        if (JSON_UNLIKELY(m_object != other.m_object))
+        {
+            JSON_THROW(invalid_iterator::create(212, "cannot compare iterators of different containers"));
+        }
+
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+                JSON_THROW(invalid_iterator::create(213, "cannot compare order of object iterators"));
+
+            case value_t::array:
+                return (m_it.array_iterator < other.m_it.array_iterator);
+
+            default:
+                return (m_it.primitive_iterator < other.m_it.primitive_iterator);
+        }
+    }
+
+    /*!
+    @brief  comparison: less than or equal
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    bool operator<=(const iter_impl& other) const
+    {
+        return not other.operator < (*this);
+    }
+
+    /*!
+    @brief  comparison: greater than
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    bool operator>(const iter_impl& other) const
+    {
+        return not operator<=(other);
+    }
+
+    /*!
+    @brief  comparison: greater than or equal
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    bool operator>=(const iter_impl& other) const
+    {
+        return not operator<(other);
+    }
+
+    /*!
+    @brief  add to iterator
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    iter_impl& operator+=(difference_type i)
+    {
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+                JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators"));
+
+            case value_t::array:
+            {
+                std::advance(m_it.array_iterator, i);
+                break;
+            }
+
+            default:
+            {
+                m_it.primitive_iterator += i;
+                break;
+            }
+        }
+
+        return *this;
+    }
+
+    /*!
+    @brief  subtract from iterator
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    iter_impl& operator-=(difference_type i)
+    {
+        return operator+=(-i);
+    }
+
+    /*!
+    @brief  add to iterator
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    iter_impl operator+(difference_type i) const
+    {
+        auto result = *this;
+        result += i;
+        return result;
+    }
+
+    /*!
+    @brief  addition of distance and iterator
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    friend iter_impl operator+(difference_type i, const iter_impl& it)
+    {
+        auto result = it;
+        result += i;
+        return result;
+    }
+
+    /*!
+    @brief  subtract from iterator
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    iter_impl operator-(difference_type i) const
+    {
+        auto result = *this;
+        result -= i;
+        return result;
+    }
+
+    /*!
+    @brief  return difference
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    difference_type operator-(const iter_impl& other) const
+    {
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+                JSON_THROW(invalid_iterator::create(209, "cannot use offsets with object iterators"));
+
+            case value_t::array:
+                return m_it.array_iterator - other.m_it.array_iterator;
+
+            default:
+                return m_it.primitive_iterator - other.m_it.primitive_iterator;
+        }
+    }
+
+    /*!
+    @brief  access to successor
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    reference operator[](difference_type n) const
+    {
+        assert(m_object != nullptr);
+
+        switch (m_object->m_type)
+        {
+            case value_t::object:
+                JSON_THROW(invalid_iterator::create(208, "cannot use operator[] for object iterators"));
+
+            case value_t::array:
+                return *std::next(m_it.array_iterator, n);
+
+            case value_t::null:
+                JSON_THROW(invalid_iterator::create(214, "cannot get value"));
+
+            default:
+            {
+                if (JSON_LIKELY(m_it.primitive_iterator.get_value() == -n))
+                {
+                    return *m_object;
+                }
+
+                JSON_THROW(invalid_iterator::create(214, "cannot get value"));
+            }
+        }
+    }
+
+    /*!
+    @brief  return the key of an object iterator
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    const typename object_t::key_type& key() const
+    {
+        assert(m_object != nullptr);
+
+        if (JSON_LIKELY(m_object->is_object()))
+        {
+            return m_it.object_iterator->first;
+        }
+
+        JSON_THROW(invalid_iterator::create(207, "cannot use key() for non-object iterators"));
+    }
+
+    /*!
+    @brief  return the value of an iterator
+    @pre The iterator is initialized; i.e. `m_object != nullptr`.
+    */
+    reference value() const
+    {
+        return operator*();
+    }
+
+  private:
+    /// associated JSON instance
+    pointer m_object = nullptr;
+    /// the actual iterator of the associated instance
+    internal_iterator<typename std::remove_const<BasicJsonType>::type> m_it;
+};
+}  // namespace detail
+} // namespace nlohmann
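+
+// Illustrative usage sketch (comment only, not part of upstream nlohmann/json):
+// iter_impl is what basic_json exposes as json::iterator / json::const_iterator.
+//
+//   nlohmann::json j = {{"pi", 3.141}, {"happy", true}};
+//   for (auto it = j.begin(); it != j.end(); ++it)
+//   {
+//       std::cout << it.key() << " : " << it.value() << '\n';
+//   }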
+// #include <nlohmann/detail/iterators/iteration_proxy.hpp>
+
+// #include <nlohmann/detail/iterators/json_reverse_iterator.hpp>
+
+
+#include <cstddef> // ptrdiff_t
+#include <iterator> // reverse_iterator
+#include <utility> // declval
+
+namespace nlohmann
+{
+namespace detail
+{
+//////////////////////
+// reverse_iterator //
+//////////////////////
+
+/*!
+@brief a template for a reverse iterator class
+
+@tparam Base the base iterator type to reverse. Valid types are @ref
+iterator (to create @ref reverse_iterator) and @ref const_iterator (to
+create @ref const_reverse_iterator).
+
+@requirement The class satisfies the following concept requirements:
+-
+[BidirectionalIterator](https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator):
+  The iterator can be moved in both directions (i.e. incremented and
+  decremented).
+- [OutputIterator](https://en.cppreference.com/w/cpp/named_req/OutputIterator):
+  It is possible to write to the pointed-to element (only if @a Base is
+  @ref iterator).
+
+@since version 1.0.0
+*/
+template<typename Base>
+class json_reverse_iterator : public std::reverse_iterator<Base>
+{
+  public:
+    using difference_type = std::ptrdiff_t;
+    /// shortcut to the reverse iterator adapter
+    using base_iterator = std::reverse_iterator<Base>;
+    /// the reference type for the pointed-to element
+    using reference = typename Base::reference;
+
+    /// create reverse iterator from iterator
+    explicit json_reverse_iterator(const typename base_iterator::iterator_type& it) noexcept
+        : base_iterator(it) {}
+
+    /// create reverse iterator from base class
+    explicit json_reverse_iterator(const base_iterator& it) noexcept : base_iterator(it) {}
+
+    /// post-increment (it++)
+    json_reverse_iterator const operator++(int)
+    {
+        return static_cast<json_reverse_iterator>(base_iterator::operator++(1));
+    }
+
+    /// pre-increment (++it)
+    json_reverse_iterator& operator++()
+    {
+        return static_cast<json_reverse_iterator&>(base_iterator::operator++());
+    }
+
+    /// post-decrement (it--)
+    json_reverse_iterator const operator--(int)
+    {
+        return static_cast<json_reverse_iterator>(base_iterator::operator--(1));
+    }
+
+    /// pre-decrement (--it)
+    json_reverse_iterator& operator--()
+    {
+        return static_cast<json_reverse_iterator&>(base_iterator::operator--());
+    }
+
+    /// add to iterator
+    json_reverse_iterator& operator+=(difference_type i)
+    {
+        return static_cast<json_reverse_iterator&>(base_iterator::operator+=(i));
+    }
+
+    /// add to iterator
+    json_reverse_iterator operator+(difference_type i) const
+    {
+        return static_cast<json_reverse_iterator>(base_iterator::operator+(i));
+    }
+
+    /// subtract from iterator
+    json_reverse_iterator operator-(difference_type i) const
+    {
+        return static_cast<json_reverse_iterator>(base_iterator::operator-(i));
+    }
+
+    /// return difference
+    difference_type operator-(const json_reverse_iterator& other) const
+    {
+        return base_iterator(*this) - base_iterator(other);
+    }
+
+    /// access to successor
+    reference operator[](difference_type n) const
+    {
+        return *(this->operator+(n));
+    }
+
+    /// return the key of an object iterator
+    auto key() const -> decltype(std::declval<Base>().key())
+    {
+        auto it = --this->base();
+        return it.key();
+    }
+
+    /// return the value of an iterator
+    reference value() const
+    {
+        auto it = --this->base();
+        return it.operator * ();
+    }
+};
+}  // namespace detail
+}  // namespace nlohmann
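+
+// Illustrative usage sketch (comment only, not part of upstream nlohmann/json):
+// json_reverse_iterator backs json::reverse_iterator and json::const_reverse_iterator.
+//
+//   nlohmann::json j = {1, 2, 3};
+//   for (auto it = j.rbegin(); it != j.rend(); ++it)
+//   {
+//       std::cout << *it << '\n';   // prints 3, 2, 1 (one value per line)
+//   }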
+
+// #include <nlohmann/detail/output/output_adapters.hpp>
+
+
+#include <algorithm> // copy
+#include <cstddef> // size_t
+#include <ios> // streamsize
+#include <iterator> // back_inserter
+#include <memory> // shared_ptr, make_shared
+#include <ostream> // basic_ostream
+#include <string> // basic_string
+#include <vector> // vector
+
+namespace nlohmann
+{
+namespace detail
+{
+/// abstract output adapter interface
+template<typename CharType> struct output_adapter_protocol
+{
+    virtual void write_character(CharType c) = 0;
+    virtual void write_characters(const CharType* s, std::size_t length) = 0;
+    virtual ~output_adapter_protocol() = default;
+};
+
+/// a type to simplify interfaces
+template<typename CharType>
+using output_adapter_t = std::shared_ptr<output_adapter_protocol<CharType>>;
+
+/// output adapter for byte vectors
+template<typename CharType>
+class output_vector_adapter : public output_adapter_protocol<CharType>
+{
+  public:
+    explicit output_vector_adapter(std::vector<CharType>& vec) noexcept
+        : v(vec)
+    {}
+
+    void write_character(CharType c) override
+    {
+        v.push_back(c);
+    }
+
+    void write_characters(const CharType* s, std::size_t length) override
+    {
+        std::copy(s, s + length, std::back_inserter(v));
+    }
+
+  private:
+    std::vector<CharType>& v;
+};
+
+/// output adapter for output streams
+template<typename CharType>
+class output_stream_adapter : public output_adapter_protocol<CharType>
+{
+  public:
+    explicit output_stream_adapter(std::basic_ostream<CharType>& s) noexcept
+        : stream(s)
+    {}
+
+    void write_character(CharType c) override
+    {
+        stream.put(c);
+    }
+
+    void write_characters(const CharType* s, std::size_t length) override
+    {
+        stream.write(s, static_cast<std::streamsize>(length));
+    }
+
+  private:
+    std::basic_ostream<CharType>& stream;
+};
+
+/// output adapter for basic_string
+template<typename CharType, typename StringType = std::basic_string<CharType>>
+class output_string_adapter : public output_adapter_protocol<CharType>
+{
+  public:
+    explicit output_string_adapter(StringType& s) noexcept
+        : str(s)
+    {}
+
+    void write_character(CharType c) override
+    {
+        str.push_back(c);
+    }
+
+    void write_characters(const CharType* s, std::size_t length) override
+    {
+        str.append(s, length);
+    }
+
+  private:
+    StringType& str;
+};
+
+template<typename CharType, typename StringType = std::basic_string<CharType>>
+class output_adapter
+{
+  public:
+    output_adapter(std::vector<CharType>& vec)
+        : oa(std::make_shared<output_vector_adapter<CharType>>(vec)) {}
+
+    output_adapter(std::basic_ostream<CharType>& s)
+        : oa(std::make_shared<output_stream_adapter<CharType>>(s)) {}
+
+    output_adapter(StringType& s)
+        : oa(std::make_shared<output_string_adapter<CharType, StringType>>(s)) {}
+
+    operator output_adapter_t<CharType>()
+    {
+        return oa;
+    }
+
+  private:
+    output_adapter_t<CharType> oa = nullptr;
+};
+}  // namespace detail
+}  // namespace nlohmann
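+
+// Illustrative usage sketch (comment only, not part of upstream nlohmann/json):
+// output_adapter type-erases the concrete sink behind output_adapter_t, so
+// serializers only see write_character/write_characters.
+//
+//   std::vector<std::uint8_t> bytes;
+//   nlohmann::detail::output_adapter_t<std::uint8_t> oa =
+//       nlohmann::detail::output_adapter<std::uint8_t>(bytes);
+//   oa->write_character(0x42);                   // bytes == {0x42}
+//   const std::uint8_t more[] = {0x43, 0x44};
+//   oa->write_characters(more, 2);               // bytes == {0x42, 0x43, 0x44}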
+
+// #include <nlohmann/detail/input/binary_reader.hpp>
+
+
+#include <algorithm> // generate_n
+#include <array> // array
+#include <cassert> // assert
+#include <cmath> // ldexp
+#include <cstddef> // size_t
+#include <cstdint> // uint8_t, uint16_t, uint32_t, uint64_t
+#include <cstdio> // snprintf
+#include <cstring> // memcpy
+#include <iterator> // back_inserter
+#include <limits> // numeric_limits
+#include <string> // char_traits, string
+#include <utility> // make_pair, move
+
+// #include <nlohmann/detail/input/input_adapters.hpp>
+
+// #include <nlohmann/detail/input/json_sax.hpp>
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/is_sax.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+///////////////////
+// binary reader //
+///////////////////
+
+/*!
+@brief deserialization of BSON, CBOR, MessagePack, and UBJSON values
+*/
+template<typename BasicJsonType, typename SAX = json_sax_dom_parser<BasicJsonType>>
+class binary_reader
+{
+    using number_integer_t = typename BasicJsonType::number_integer_t;
+    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+    using number_float_t = typename BasicJsonType::number_float_t;
+    using string_t = typename BasicJsonType::string_t;
+    using json_sax_t = SAX;
+
+  public:
+    /*!
+    @brief create a binary reader
+
+    @param[in] adapter  input adapter to read from
+    */
+    explicit binary_reader(input_adapter_t adapter) : ia(std::move(adapter))
+    {
+        (void)detail::is_sax_static_asserts<SAX, BasicJsonType> {};
+        assert(ia);
+    }
+
+    /*!
+    @param[in] format  the binary format to parse
+    @param[in] sax_    a SAX event processor
+    @param[in] strict  whether to expect the input to be consumed completely
+
+    @return whether parsing was successful
+    */
+    bool sax_parse(const input_format_t format,
+                   json_sax_t* sax_,
+                   const bool strict = true)
+    {
+        sax = sax_;
+        bool result = false;
+
+        switch (format)
+        {
+            case input_format_t::bson:
+                result = parse_bson_internal();
+                break;
+
+            case input_format_t::cbor:
+                result = parse_cbor_internal();
+                break;
+
+            case input_format_t::msgpack:
+                result = parse_msgpack_internal();
+                break;
+
+            case input_format_t::ubjson:
+                result = parse_ubjson_internal();
+                break;
+
+            // LCOV_EXCL_START
+            default:
+                assert(false);
+                // LCOV_EXCL_STOP
+        }
+
+        // strict mode: next byte must be EOF
+        if (result and strict)
+        {
+            if (format == input_format_t::ubjson)
+            {
+                get_ignore_noop();
+            }
+            else
+            {
+                get();
+            }
+
+            if (JSON_UNLIKELY(current != std::char_traits<char>::eof()))
+            {
+                return sax->parse_error(chars_read, get_token_string(),
+                                        parse_error::create(110, chars_read, exception_message(format, "expected end of input; last byte: 0x" + get_token_string(), "value")));
+            }
+        }
+
+        return result;
+    }
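+
+    // Illustrative usage sketch (comment only, not part of upstream
+    // nlohmann/json): this reader is normally driven through the public
+    // helpers such as json::from_cbor, e.g.
+    //
+    //   std::vector<std::uint8_t> v = {0x83, 0x01, 0x02, 0x03};  // CBOR for [1,2,3]
+    //   nlohmann::json j = nlohmann::json::from_cbor(v);         // j == [1,2,3]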
+
+    /*!
+    @brief determine system byte order
+
+    @return true if and only if system's byte order is little endian
+
+    @note from http://stackoverflow.com/a/1001328/266378
+    */
+    static constexpr bool little_endianess(int num = 1) noexcept
+    {
+        return (*reinterpret_cast<char*>(&num) == 1);
+    }
+
+  private:
+    //////////
+    // BSON //
+    //////////
+
+    /*!
+    @brief Reads in a BSON-object and passes it to the SAX-parser.
+    @return whether a valid BSON-value was passed to the SAX parser
+    */
+    bool parse_bson_internal()
+    {
+        std::int32_t document_size;
+        get_number<std::int32_t, true>(input_format_t::bson, document_size);
+
+        if (JSON_UNLIKELY(not sax->start_object(std::size_t(-1))))
+        {
+            return false;
+        }
+
+        if (JSON_UNLIKELY(not parse_bson_element_list(/*is_array*/false)))
+        {
+            return false;
+        }
+
+        return sax->end_object();
+    }
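+
+    // Illustrative sketch (comment only, not part of upstream nlohmann/json):
+    // byte layout of the BSON document {"a": 1} consumed by
+    // parse_bson_internal (multi-byte integers are little-endian):
+    //
+    //   0C 00 00 00   int32 document length (12 bytes, including this field)
+    //   10            element type 0x10 = int32
+    //   61 00         element name "a" as a C string
+    //   01 00 00 00   int32 value 1
+    //   00            terminating \x00 of the element list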
+
+    /*!
+    @brief Parses a C-style string from the BSON input.
+    @param[in, out] result  A reference to the string variable where the read
+                            string is to be stored.
+    @return `true` if the \x00-byte indicating the end of the string was
+             encountered before EOF; `false` indicates an unexpected EOF.
+    */
+    bool get_bson_cstr(string_t& result)
+    {
+        auto out = std::back_inserter(result);
+        while (true)
+        {
+            get();
+            if (JSON_UNLIKELY(not unexpect_eof(input_format_t::bson, "cstring")))
+            {
+                return false;
+            }
+            if (current == 0x00)
+            {
+                return true;
+            }
+            *out++ = static_cast<char>(current);
+        }
+
+        return true;
+    }
+
+    /*!
+    @brief Parses a zero-terminated string of length @a len from the BSON
+           input.
+    @param[in] len  The length (including the zero-byte at the end) of the
+                    string to be read.
+    @param[in, out] result  A reference to the string variable where the read
+                            string is to be stored.
+    @tparam NumberType The type of the length @a len
+    @pre len >= 1
+    @return `true` if the string was successfully parsed
+    */
+    template<typename NumberType>
+    bool get_bson_string(const NumberType len, string_t& result)
+    {
+        if (JSON_UNLIKELY(len < 1))
+        {
+            auto last_token = get_token_string();
+            return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::bson, "string length must be at least 1, is " + std::to_string(len), "string")));
+        }
+
+        return get_string(input_format_t::bson, len - static_cast<NumberType>(1), result) and get() != std::char_traits<char>::eof();
+    }
+
+    /*!
+    @brief Read a BSON document element of the given @a element_type.
+    @param[in] element_type The BSON element type, c.f. http://bsonspec.org/spec.html
+    @param[in] element_type_parse_position The position in the input stream,
+               where the `element_type` was read.
+    @warning Not all BSON element types are supported yet. An unsupported
+             @a element_type will give rise to a parse_error.114:
+             Unsupported BSON record type 0x...
+    @return whether a valid BSON-object/array was passed to the SAX parser
+    */
+    bool parse_bson_element_internal(const int element_type,
+                                     const std::size_t element_type_parse_position)
+    {
+        switch (element_type)
+        {
+            case 0x01: // double
+            {
+                double number;
+                return get_number<double, true>(input_format_t::bson, number) and sax->number_float(static_cast<number_float_t>(number), "");
+            }
+
+            case 0x02: // string
+            {
+                std::int32_t len;
+                string_t value;
+                return get_number<std::int32_t, true>(input_format_t::bson, len) and get_bson_string(len, value) and sax->string(value);
+            }
+
+            case 0x03: // object
+            {
+                return parse_bson_internal();
+            }
+
+            case 0x04: // array
+            {
+                return parse_bson_array();
+            }
+
+            case 0x08: // boolean
+            {
+                return sax->boolean(get() != 0);
+            }
+
+            case 0x0A: // null
+            {
+                return sax->null();
+            }
+
+            case 0x10: // int32
+            {
+                std::int32_t value;
+                return get_number<std::int32_t, true>(input_format_t::bson, value) and sax->number_integer(value);
+            }
+
+            case 0x12: // int64
+            {
+                std::int64_t value;
+                return get_number<std::int64_t, true>(input_format_t::bson, value) and sax->number_integer(value);
+            }
+
+            default: // anything else not supported (yet)
+            {
+                char cr[3];
+                (std::snprintf)(cr, sizeof(cr), "%.2hhX", static_cast<unsigned char>(element_type));
+                return sax->parse_error(element_type_parse_position, std::string(cr), parse_error::create(114, element_type_parse_position, "Unsupported BSON record type 0x" + std::string(cr)));
+            }
+        }
+    }
+
+    /*!
+    @brief Read a BSON element list (as specified in the BSON-spec)
+
+    The same binary layout is used for objects and arrays, hence it must be
+    indicated with the argument @a is_array which one is expected
+    (true --> array, false --> object).
+
+    @param[in] is_array Determines if the element list being read is to be
+                        treated as an object (@a is_array == false), or as an
+                        array (@a is_array == true).
+    @return whether a valid BSON-object/array was passed to the SAX parser
+    */
+    bool parse_bson_element_list(const bool is_array)
+    {
+        string_t key;
+        while (int element_type = get())
+        {
+            if (JSON_UNLIKELY(not unexpect_eof(input_format_t::bson, "element list")))
+            {
+                return false;
+            }
+
+            const std::size_t element_type_parse_position = chars_read;
+            if (JSON_UNLIKELY(not get_bson_cstr(key)))
+            {
+                return false;
+            }
+
+            if (not is_array)
+            {
+                if (not sax->key(key))
+                {
+                    return false;
+                }
+            }
+
+            if (JSON_UNLIKELY(not parse_bson_element_internal(element_type, element_type_parse_position)))
+            {
+                return false;
+            }
+
+            // get_bson_cstr only appends
+            key.clear();
+        }
+
+        return true;
+    }
+
+    /*!
+    @brief Reads an array from the BSON input and passes it to the SAX-parser.
+    @return whether a valid BSON-array was passed to the SAX parser
+    */
+    bool parse_bson_array()
+    {
+        std::int32_t document_size;
+        get_number<std::int32_t, true>(input_format_t::bson, document_size);
+
+        if (JSON_UNLIKELY(not sax->start_array(std::size_t(-1))))
+        {
+            return false;
+        }
+
+        if (JSON_UNLIKELY(not parse_bson_element_list(/*is_array*/true)))
+        {
+            return false;
+        }
+
+        return sax->end_array();
+    }
+
+    //////////
+    // CBOR //
+    //////////
+
+    /*!
+    @param[in] get_char  whether a new character should be retrieved from the
+                         input (true, default) or whether the last read
+                         character should be considered instead
+
+    @return whether a valid CBOR value was passed to the SAX parser
+    */
+    bool parse_cbor_internal(const bool get_char = true)
+    {
+        switch (get_char ? get() : current)
+        {
+            // EOF
+            case std::char_traits<char>::eof():
+                return unexpect_eof(input_format_t::cbor, "value");
+
+            // Integer 0x00..0x17 (0..23)
+            case 0x00:
+            case 0x01:
+            case 0x02:
+            case 0x03:
+            case 0x04:
+            case 0x05:
+            case 0x06:
+            case 0x07:
+            case 0x08:
+            case 0x09:
+            case 0x0A:
+            case 0x0B:
+            case 0x0C:
+            case 0x0D:
+            case 0x0E:
+            case 0x0F:
+            case 0x10:
+            case 0x11:
+            case 0x12:
+            case 0x13:
+            case 0x14:
+            case 0x15:
+            case 0x16:
+            case 0x17:
+                return sax->number_unsigned(static_cast<number_unsigned_t>(current));
+
+            case 0x18: // Unsigned integer (one-byte uint8_t follows)
+            {
+                uint8_t number;
+                return get_number(input_format_t::cbor, number) and sax->number_unsigned(number);
+            }
+
+            case 0x19: // Unsigned integer (two-byte uint16_t follows)
+            {
+                uint16_t number;
+                return get_number(input_format_t::cbor, number) and sax->number_unsigned(number);
+            }
+
+            case 0x1A: // Unsigned integer (four-byte uint32_t follows)
+            {
+                uint32_t number;
+                return get_number(input_format_t::cbor, number) and sax->number_unsigned(number);
+            }
+
+            case 0x1B: // Unsigned integer (eight-byte uint64_t follows)
+            {
+                uint64_t number;
+                return get_number(input_format_t::cbor, number) and sax->number_unsigned(number);
+            }
+
+            // Negative integer -1-0x00..-1-0x17 (-1..-24)
+            case 0x20:
+            case 0x21:
+            case 0x22:
+            case 0x23:
+            case 0x24:
+            case 0x25:
+            case 0x26:
+            case 0x27:
+            case 0x28:
+            case 0x29:
+            case 0x2A:
+            case 0x2B:
+            case 0x2C:
+            case 0x2D:
+            case 0x2E:
+            case 0x2F:
+            case 0x30:
+            case 0x31:
+            case 0x32:
+            case 0x33:
+            case 0x34:
+            case 0x35:
+            case 0x36:
+            case 0x37:
+                return sax->number_integer(static_cast<int8_t>(0x20 - 1 - current));
+
+            case 0x38: // Negative integer (one-byte uint8_t follows)
+            {
+                uint8_t number;
+                return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast<number_integer_t>(-1) - number);
+            }
+
+            case 0x39: // Negative integer -1-n (two-byte uint16_t follows)
+            {
+                uint16_t number;
+                return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast<number_integer_t>(-1) - number);
+            }
+
+            case 0x3A: // Negative integer -1-n (four-byte uint32_t follows)
+            {
+                uint32_t number;
+                return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast<number_integer_t>(-1) - number);
+            }
+
+            case 0x3B: // Negative integer -1-n (eight-byte uint64_t follows)
+            {
+                uint64_t number;
+                return get_number(input_format_t::cbor, number) and sax->number_integer(static_cast<number_integer_t>(-1)
+                        - static_cast<number_integer_t>(number));
+            }
+
+            // UTF-8 string (0x00..0x17 bytes follow)
+            case 0x60:
+            case 0x61:
+            case 0x62:
+            case 0x63:
+            case 0x64:
+            case 0x65:
+            case 0x66:
+            case 0x67:
+            case 0x68:
+            case 0x69:
+            case 0x6A:
+            case 0x6B:
+            case 0x6C:
+            case 0x6D:
+            case 0x6E:
+            case 0x6F:
+            case 0x70:
+            case 0x71:
+            case 0x72:
+            case 0x73:
+            case 0x74:
+            case 0x75:
+            case 0x76:
+            case 0x77:
+            case 0x78: // UTF-8 string (one-byte uint8_t for n follows)
+            case 0x79: // UTF-8 string (two-byte uint16_t for n follow)
+            case 0x7A: // UTF-8 string (four-byte uint32_t for n follow)
+            case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow)
+            case 0x7F: // UTF-8 string (indefinite length)
+            {
+                string_t s;
+                return get_cbor_string(s) and sax->string(s);
+            }
+
+            // array (0x00..0x17 data items follow)
+            case 0x80:
+            case 0x81:
+            case 0x82:
+            case 0x83:
+            case 0x84:
+            case 0x85:
+            case 0x86:
+            case 0x87:
+            case 0x88:
+            case 0x89:
+            case 0x8A:
+            case 0x8B:
+            case 0x8C:
+            case 0x8D:
+            case 0x8E:
+            case 0x8F:
+            case 0x90:
+            case 0x91:
+            case 0x92:
+            case 0x93:
+            case 0x94:
+            case 0x95:
+            case 0x96:
+            case 0x97:
+                return get_cbor_array(static_cast<std::size_t>(current & 0x1F));
+
+            case 0x98: // array (one-byte uint8_t for n follows)
+            {
+                uint8_t len;
+                return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast<std::size_t>(len));
+            }
+
+            case 0x99: // array (two-byte uint16_t for n follow)
+            {
+                uint16_t len;
+                return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast<std::size_t>(len));
+            }
+
+            case 0x9A: // array (four-byte uint32_t for n follow)
+            {
+                uint32_t len;
+                return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast<std::size_t>(len));
+            }
+
+            case 0x9B: // array (eight-byte uint64_t for n follow)
+            {
+                uint64_t len;
+                return get_number(input_format_t::cbor, len) and get_cbor_array(static_cast<std::size_t>(len));
+            }
+
+            case 0x9F: // array (indefinite length)
+                return get_cbor_array(std::size_t(-1));
+
+            // map (0x00..0x17 pairs of data items follow)
+            case 0xA0:
+            case 0xA1:
+            case 0xA2:
+            case 0xA3:
+            case 0xA4:
+            case 0xA5:
+            case 0xA6:
+            case 0xA7:
+            case 0xA8:
+            case 0xA9:
+            case 0xAA:
+            case 0xAB:
+            case 0xAC:
+            case 0xAD:
+            case 0xAE:
+            case 0xAF:
+            case 0xB0:
+            case 0xB1:
+            case 0xB2:
+            case 0xB3:
+            case 0xB4:
+            case 0xB5:
+            case 0xB6:
+            case 0xB7:
+                return get_cbor_object(static_cast<std::size_t>(current & 0x1F));
+
+            case 0xB8: // map (one-byte uint8_t for n follows)
+            {
+                uint8_t len;
+                return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast<std::size_t>(len));
+            }
+
+            case 0xB9: // map (two-byte uint16_t for n follow)
+            {
+                uint16_t len;
+                return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast<std::size_t>(len));
+            }
+
+            case 0xBA: // map (four-byte uint32_t for n follow)
+            {
+                uint32_t len;
+                return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast<std::size_t>(len));
+            }
+
+            case 0xBB: // map (eight-byte uint64_t for n follow)
+            {
+                uint64_t len;
+                return get_number(input_format_t::cbor, len) and get_cbor_object(static_cast<std::size_t>(len));
+            }
+
+            case 0xBF: // map (indefinite length)
+                return get_cbor_object(std::size_t(-1));
+
+            case 0xF4: // false
+                return sax->boolean(false);
+
+            case 0xF5: // true
+                return sax->boolean(true);
+
+            case 0xF6: // null
+                return sax->null();
+
+            case 0xF9: // Half-Precision Float (two-byte IEEE 754)
+            {
+                const int byte1_raw = get();
+                if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "number")))
+                {
+                    return false;
+                }
+                const int byte2_raw = get();
+                if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "number")))
+                {
+                    return false;
+                }
+
+                const auto byte1 = static_cast<unsigned char>(byte1_raw);
+                const auto byte2 = static_cast<unsigned char>(byte2_raw);
+
+                // code from RFC 7049, Appendix D, Figure 3:
+                // As half-precision floating-point numbers were only added
+                // to IEEE 754 in 2008, today's programming platforms often
+                // still only have limited support for them. It is very
+                // easy to include at least decoding support for them even
+                // without such support. An example of a small decoder for
+                // half-precision floating-point numbers in the C language
+                // is shown in Fig. 3.
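+                // Examples (illustrative): 0x3C00 decodes to 1.0, 0x7C00 to
+                // +infinity, and 0xC000 to -2.0 (the sign bit is applied
+                // after the lambda below).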
+                const int half = (byte1 << 8) + byte2;
+                const double val = [&half]
+                {
+                    const int exp = (half >> 10) & 0x1F;
+                    const int mant = half & 0x3FF;
+                    assert(0 <= exp and exp <= 32);
+                    assert(0 <= mant and mant <= 1024);
+                    switch (exp)
+                    {
+                        case 0:
+                            return std::ldexp(mant, -24);
+                        case 31:
+                            return (mant == 0)
+                            ? std::numeric_limits<double>::infinity()
+                            : std::numeric_limits<double>::quiet_NaN();
+                        default:
+                            return std::ldexp(mant + 1024, exp - 25);
+                    }
+                }();
+                return sax->number_float((half & 0x8000) != 0
+                                         ? static_cast<number_float_t>(-val)
+                                         : static_cast<number_float_t>(val), "");
+            }
+
+            case 0xFA: // Single-Precision Float (four-byte IEEE 754)
+            {
+                float number;
+                return get_number(input_format_t::cbor, number) and sax->number_float(static_cast<number_float_t>(number), "");
+            }
+
+            case 0xFB: // Double-Precision Float (eight-byte IEEE 754)
+            {
+                double number;
+                return get_number(input_format_t::cbor, number) and sax->number_float(static_cast<number_float_t>(number), "");
+            }
+
+            default: // anything else (0xFF is handled inside the other types)
+            {
+                auto last_token = get_token_string();
+                return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::cbor, "invalid byte: 0x" + last_token, "value")));
+            }
+        }
+    }
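+
+    // Illustrative sketch (comment only, not part of upstream nlohmann/json):
+    // a few CBOR encodings accepted by parse_cbor_internal (see RFC 7049):
+    //
+    //   01           -> 1          (unsigned integer)
+    //   20           -> -1         (negative integer)
+    //   61 61        -> "a"        (text string of length 1)
+    //   83 01 02 03  -> [1, 2, 3]  (array of three data items)
+    //   A1 61 61 01  -> {"a": 1}   (map with one key/value pair)
+    //   F9 3C 00     -> 1.0        (half-precision float)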
+
+    /*!
+    @brief reads a CBOR string
+
+    This function first reads starting bytes to determine the expected
+    string length and then copies this number of bytes into a string.
+    Additionally, CBOR strings with indefinite lengths are supported.
+
+    @param[out] result  created string
+
+    @return whether string creation completed
+    */
+    bool get_cbor_string(string_t& result)
+    {
+        if (JSON_UNLIKELY(not unexpect_eof(input_format_t::cbor, "string")))
+        {
+            return false;
+        }
+
+        switch (current)
+        {
+            // UTF-8 string (0x00..0x17 bytes follow)
+            case 0x60:
+            case 0x61:
+            case 0x62:
+            case 0x63:
+            case 0x64:
+            case 0x65:
+            case 0x66:
+            case 0x67:
+            case 0x68:
+            case 0x69:
+            case 0x6A:
+            case 0x6B:
+            case 0x6C:
+            case 0x6D:
+            case 0x6E:
+            case 0x6F:
+            case 0x70:
+            case 0x71:
+            case 0x72:
+            case 0x73:
+            case 0x74:
+            case 0x75:
+            case 0x76:
+            case 0x77:
+            {
+                return get_string(input_format_t::cbor, current & 0x1F, result);
+            }
+
+            case 0x78: // UTF-8 string (one-byte uint8_t for n follows)
+            {
+                uint8_t len;
+                return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result);
+            }
+
+            case 0x79: // UTF-8 string (two-byte uint16_t for n follow)
+            {
+                uint16_t len;
+                return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result);
+            }
+
+            case 0x7A: // UTF-8 string (four-byte uint32_t for n follow)
+            {
+                uint32_t len;
+                return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result);
+            }
+
+            case 0x7B: // UTF-8 string (eight-byte uint64_t for n follow)
+            {
+                uint64_t len;
+                return get_number(input_format_t::cbor, len) and get_string(input_format_t::cbor, len, result);
+            }
+
+            case 0x7F: // UTF-8 string (indefinite length)
+            {
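+                // Illustrative example (added note): the byte sequence
+                // 0x7F 0x62 'a' 'b' 0x61 'c' 0xFF is read below as the
+                // definite-length chunks "ab" and "c" and concatenated to
+                // "abc"; 0xFF is the "break" marker ending the string.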
+                while (get() != 0xFF)
+                {
+                    string_t chunk;
+                    if (not get_cbor_string(chunk))
+                    {
+                        return false;
+                    }
+                    result.append(chunk);
+                }
+                return true;
+            }
+
+            default:
+            {
+                auto last_token = get_token_string();
+                return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::cbor, "expected length specification (0x60-0x7B) or indefinite string type (0x7F); last byte: 0x" + last_token, "string")));
+            }
+        }
+    }
+
+    /*!
+    @param[in] len  the length of the array or std::size_t(-1) for an
+                    array of indefinite size
+    @return whether array creation completed
+    */
+    bool get_cbor_array(const std::size_t len)
+    {
+        if (JSON_UNLIKELY(not sax->start_array(len)))
+        {
+            return false;
+        }
+
+        if (len != std::size_t(-1))
+        {
+            for (std::size_t i = 0; i < len; ++i)
+            {
+                if (JSON_UNLIKELY(not parse_cbor_internal()))
+                {
+                    return false;
+                }
+            }
+        }
+        else
+        {
+            while (get() != 0xFF)
+            {
+                if (JSON_UNLIKELY(not parse_cbor_internal(false)))
+                {
+                    return false;
+                }
+            }
+        }
+
+        return sax->end_array();
+    }
+
+    /*!
+    @param[in] len  the length of the object or std::size_t(-1) for an
+                    object of indefinite size
+    @return whether object creation completed
+    */
+    bool get_cbor_object(const std::size_t len)
+    {
+        if (JSON_UNLIKELY(not sax->start_object(len)))
+        {
+            return false;
+        }
+
+        string_t key;
+        if (len != std::size_t(-1))
+        {
+            for (std::size_t i = 0; i < len; ++i)
+            {
+                get();
+                if (JSON_UNLIKELY(not get_cbor_string(key) or not sax->key(key)))
+                {
+                    return false;
+                }
+
+                if (JSON_UNLIKELY(not parse_cbor_internal()))
+                {
+                    return false;
+                }
+                key.clear();
+            }
+        }
+        else
+        {
+            while (get() != 0xFF)
+            {
+                if (JSON_UNLIKELY(not get_cbor_string(key) or not sax->key(key)))
+                {
+                    return false;
+                }
+
+                if (JSON_UNLIKELY(not parse_cbor_internal()))
+                {
+                    return false;
+                }
+                key.clear();
+            }
+        }
+
+        return sax->end_object();
+    }
+
+    /////////////
+    // MsgPack //
+    /////////////
+
+    /*!
+    @return whether a valid MessagePack value was passed to the SAX parser
+    */
+    bool parse_msgpack_internal()
+    {
+        switch (get())
+        {
+            // EOF
+            case std::char_traits<char>::eof():
+                return unexpect_eof(input_format_t::msgpack, "value");
+
+            // positive fixint
+            case 0x00:
+            case 0x01:
+            case 0x02:
+            case 0x03:
+            case 0x04:
+            case 0x05:
+            case 0x06:
+            case 0x07:
+            case 0x08:
+            case 0x09:
+            case 0x0A:
+            case 0x0B:
+            case 0x0C:
+            case 0x0D:
+            case 0x0E:
+            case 0x0F:
+            case 0x10:
+            case 0x11:
+            case 0x12:
+            case 0x13:
+            case 0x14:
+            case 0x15:
+            case 0x16:
+            case 0x17:
+            case 0x18:
+            case 0x19:
+            case 0x1A:
+            case 0x1B:
+            case 0x1C:
+            case 0x1D:
+            case 0x1E:
+            case 0x1F:
+            case 0x20:
+            case 0x21:
+            case 0x22:
+            case 0x23:
+            case 0x24:
+            case 0x25:
+            case 0x26:
+            case 0x27:
+            case 0x28:
+            case 0x29:
+            case 0x2A:
+            case 0x2B:
+            case 0x2C:
+            case 0x2D:
+            case 0x2E:
+            case 0x2F:
+            case 0x30:
+            case 0x31:
+            case 0x32:
+            case 0x33:
+            case 0x34:
+            case 0x35:
+            case 0x36:
+            case 0x37:
+            case 0x38:
+            case 0x39:
+            case 0x3A:
+            case 0x3B:
+            case 0x3C:
+            case 0x3D:
+            case 0x3E:
+            case 0x3F:
+            case 0x40:
+            case 0x41:
+            case 0x42:
+            case 0x43:
+            case 0x44:
+            case 0x45:
+            case 0x46:
+            case 0x47:
+            case 0x48:
+            case 0x49:
+            case 0x4A:
+            case 0x4B:
+            case 0x4C:
+            case 0x4D:
+            case 0x4E:
+            case 0x4F:
+            case 0x50:
+            case 0x51:
+            case 0x52:
+            case 0x53:
+            case 0x54:
+            case 0x55:
+            case 0x56:
+            case 0x57:
+            case 0x58:
+            case 0x59:
+            case 0x5A:
+            case 0x5B:
+            case 0x5C:
+            case 0x5D:
+            case 0x5E:
+            case 0x5F:
+            case 0x60:
+            case 0x61:
+            case 0x62:
+            case 0x63:
+            case 0x64:
+            case 0x65:
+            case 0x66:
+            case 0x67:
+            case 0x68:
+            case 0x69:
+            case 0x6A:
+            case 0x6B:
+            case 0x6C:
+            case 0x6D:
+            case 0x6E:
+            case 0x6F:
+            case 0x70:
+            case 0x71:
+            case 0x72:
+            case 0x73:
+            case 0x74:
+            case 0x75:
+            case 0x76:
+            case 0x77:
+            case 0x78:
+            case 0x79:
+            case 0x7A:
+            case 0x7B:
+            case 0x7C:
+            case 0x7D:
+            case 0x7E:
+            case 0x7F:
+                return sax->number_unsigned(static_cast<number_unsigned_t>(current));
+
+            // fixmap
+            case 0x80:
+            case 0x81:
+            case 0x82:
+            case 0x83:
+            case 0x84:
+            case 0x85:
+            case 0x86:
+            case 0x87:
+            case 0x88:
+            case 0x89:
+            case 0x8A:
+            case 0x8B:
+            case 0x8C:
+            case 0x8D:
+            case 0x8E:
+            case 0x8F:
+                return get_msgpack_object(static_cast<std::size_t>(current & 0x0F));
+
+            // fixarray
+            case 0x90:
+            case 0x91:
+            case 0x92:
+            case 0x93:
+            case 0x94:
+            case 0x95:
+            case 0x96:
+            case 0x97:
+            case 0x98:
+            case 0x99:
+            case 0x9A:
+            case 0x9B:
+            case 0x9C:
+            case 0x9D:
+            case 0x9E:
+            case 0x9F:
+                return get_msgpack_array(static_cast<std::size_t>(current & 0x0F));
+
+            // fixstr
+            case 0xA0:
+            case 0xA1:
+            case 0xA2:
+            case 0xA3:
+            case 0xA4:
+            case 0xA5:
+            case 0xA6:
+            case 0xA7:
+            case 0xA8:
+            case 0xA9:
+            case 0xAA:
+            case 0xAB:
+            case 0xAC:
+            case 0xAD:
+            case 0xAE:
+            case 0xAF:
+            case 0xB0:
+            case 0xB1:
+            case 0xB2:
+            case 0xB3:
+            case 0xB4:
+            case 0xB5:
+            case 0xB6:
+            case 0xB7:
+            case 0xB8:
+            case 0xB9:
+            case 0xBA:
+            case 0xBB:
+            case 0xBC:
+            case 0xBD:
+            case 0xBE:
+            case 0xBF:
+            {
+                string_t s;
+                return get_msgpack_string(s) and sax->string(s);
+            }
+
+            case 0xC0: // nil
+                return sax->null();
+
+            case 0xC2: // false
+                return sax->boolean(false);
+
+            case 0xC3: // true
+                return sax->boolean(true);
+
+            case 0xCA: // float 32
+            {
+                float number;
+                return get_number(input_format_t::msgpack, number) and sax->number_float(static_cast<number_float_t>(number), "");
+            }
+
+            case 0xCB: // float 64
+            {
+                double number;
+                return get_number(input_format_t::msgpack, number) and sax->number_float(static_cast<number_float_t>(number), "");
+            }
+
+            case 0xCC: // uint 8
+            {
+                uint8_t number;
+                return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number);
+            }
+
+            case 0xCD: // uint 16
+            {
+                uint16_t number;
+                return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number);
+            }
+
+            case 0xCE: // uint 32
+            {
+                uint32_t number;
+                return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number);
+            }
+
+            case 0xCF: // uint 64
+            {
+                uint64_t number;
+                return get_number(input_format_t::msgpack, number) and sax->number_unsigned(number);
+            }
+
+            case 0xD0: // int 8
+            {
+                int8_t number;
+                return get_number(input_format_t::msgpack, number) and sax->number_integer(number);
+            }
+
+            case 0xD1: // int 16
+            {
+                int16_t number;
+                return get_number(input_format_t::msgpack, number) and sax->number_integer(number);
+            }
+
+            case 0xD2: // int 32
+            {
+                int32_t number;
+                return get_number(input_format_t::msgpack, number) and sax->number_integer(number);
+            }
+
+            case 0xD3: // int 64
+            {
+                int64_t number;
+                return get_number(input_format_t::msgpack, number) and sax->number_integer(number);
+            }
+
+            case 0xD9: // str 8
+            case 0xDA: // str 16
+            case 0xDB: // str 32
+            {
+                string_t s;
+                return get_msgpack_string(s) and sax->string(s);
+            }
+
+            case 0xDC: // array 16
+            {
+                uint16_t len;
+                return get_number(input_format_t::msgpack, len) and get_msgpack_array(static_cast<std::size_t>(len));
+            }
+
+            case 0xDD: // array 32
+            {
+                uint32_t len;
+                return get_number(input_format_t::msgpack, len) and get_msgpack_array(static_cast<std::size_t>(len));
+            }
+
+            case 0xDE: // map 16
+            {
+                uint16_t len;
+                return get_number(input_format_t::msgpack, len) and get_msgpack_object(static_cast<std::size_t>(len));
+            }
+
+            case 0xDF: // map 32
+            {
+                uint32_t len;
+                return get_number(input_format_t::msgpack, len) and get_msgpack_object(static_cast<std::size_t>(len));
+            }
+
+            // negative fixint
+            case 0xE0:
+            case 0xE1:
+            case 0xE2:
+            case 0xE3:
+            case 0xE4:
+            case 0xE5:
+            case 0xE6:
+            case 0xE7:
+            case 0xE8:
+            case 0xE9:
+            case 0xEA:
+            case 0xEB:
+            case 0xEC:
+            case 0xED:
+            case 0xEE:
+            case 0xEF:
+            case 0xF0:
+            case 0xF1:
+            case 0xF2:
+            case 0xF3:
+            case 0xF4:
+            case 0xF5:
+            case 0xF6:
+            case 0xF7:
+            case 0xF8:
+            case 0xF9:
+            case 0xFA:
+            case 0xFB:
+            case 0xFC:
+            case 0xFD:
+            case 0xFE:
+            case 0xFF:
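+                // the cast below reinterprets 0xE0..0xFF as -32..-1, e.g.
+                // 0xFE becomes -2 (comment added for illustration)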
+                return sax->number_integer(static_cast<int8_t>(current));
+
+            default: // anything else
+            {
+                auto last_token = get_token_string();
+                return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::msgpack, "invalid byte: 0x" + last_token, "value")));
+            }
+        }
+    }
+
+    /*!
+    @brief reads a MessagePack string
+
+    This function first reads starting bytes to determine the expected
+    string length and then copies this number of bytes into a string.
+
+    @param[out] result  created string
+
+    @return whether string creation completed
+    */
+    bool get_msgpack_string(string_t& result)
+    {
+        if (JSON_UNLIKELY(not unexpect_eof(input_format_t::msgpack, "string")))
+        {
+            return false;
+        }
+
+        switch (current)
+        {
+            // fixstr
+            case 0xA0:
+            case 0xA1:
+            case 0xA2:
+            case 0xA3:
+            case 0xA4:
+            case 0xA5:
+            case 0xA6:
+            case 0xA7:
+            case 0xA8:
+            case 0xA9:
+            case 0xAA:
+            case 0xAB:
+            case 0xAC:
+            case 0xAD:
+            case 0xAE:
+            case 0xAF:
+            case 0xB0:
+            case 0xB1:
+            case 0xB2:
+            case 0xB3:
+            case 0xB4:
+            case 0xB5:
+            case 0xB6:
+            case 0xB7:
+            case 0xB8:
+            case 0xB9:
+            case 0xBA:
+            case 0xBB:
+            case 0xBC:
+            case 0xBD:
+            case 0xBE:
+            case 0xBF:
+            {
+                return get_string(input_format_t::msgpack, current & 0x1F, result);
+            }
+
+            case 0xD9: // str 8
+            {
+                uint8_t len;
+                return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result);
+            }
+
+            case 0xDA: // str 16
+            {
+                uint16_t len;
+                return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result);
+            }
+
+            case 0xDB: // str 32
+            {
+                uint32_t len;
+                return get_number(input_format_t::msgpack, len) and get_string(input_format_t::msgpack, len, result);
+            }
+
+            default:
+            {
+                auto last_token = get_token_string();
+                return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::msgpack, "expected length specification (0xA0-0xBF, 0xD9-0xDB); last byte: 0x" + last_token, "string")));
+            }
+        }
+    }
+
+    /*!
+    @param[in] len  the length of the array
+    @return whether array creation completed
+    */
+    bool get_msgpack_array(const std::size_t len)
+    {
+        if (JSON_UNLIKELY(not sax->start_array(len)))
+        {
+            return false;
+        }
+
+        for (std::size_t i = 0; i < len; ++i)
+        {
+            if (JSON_UNLIKELY(not parse_msgpack_internal()))
+            {
+                return false;
+            }
+        }
+
+        return sax->end_array();
+    }
+
+    /*!
+    @param[in] len  the length of the object
+    @return whether object creation completed
+    */
+    bool get_msgpack_object(const std::size_t len)
+    {
+        if (JSON_UNLIKELY(not sax->start_object(len)))
+        {
+            return false;
+        }
+
+        string_t key;
+        for (std::size_t i = 0; i < len; ++i)
+        {
+            get();
+            if (JSON_UNLIKELY(not get_msgpack_string(key) or not sax->key(key)))
+            {
+                return false;
+            }
+
+            if (JSON_UNLIKELY(not parse_msgpack_internal()))
+            {
+                return false;
+            }
+            key.clear();
+        }
+
+        return sax->end_object();
+    }
+
+    ////////////
+    // UBJSON //
+    ////////////
+
+    /*!
+    @param[in] get_char  whether a new character should be retrieved from the
+                         input (true, default) or whether the last read
+                         character should be considered instead
+
+    @return whether a valid UBJSON value was passed to the SAX parser
+    */
+    bool parse_ubjson_internal(const bool get_char = true)
+    {
+        return get_ubjson_value(get_char ? get_ignore_noop() : current);
+    }
+
+    /*!
+    @brief reads a UBJSON string
+
+    This function is either called after reading the 'S' byte explicitly
+    indicating a string, or in case of an object key where the 'S' byte can be
+    left out.
+
+    @param[out] result   created string
+    @param[in] get_char  whether a new character should be retrieved from the
+                         input (true, default) or whether the last read
+                         character should be considered instead
+
+    @return whether string creation completed
+    */
+    bool get_ubjson_string(string_t& result, const bool get_char = true)
+    {
+        if (get_char)
+        {
+            get();  // TODO: may we ignore N here?
+        }
+
+        if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "value")))
+        {
+            return false;
+        }
+
+        switch (current)
+        {
+            case 'U':
+            {
+                uint8_t len;
+                return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result);
+            }
+
+            case 'i':
+            {
+                int8_t len;
+                return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result);
+            }
+
+            case 'I':
+            {
+                int16_t len;
+                return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result);
+            }
+
+            case 'l':
+            {
+                int32_t len;
+                return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result);
+            }
+
+            case 'L':
+            {
+                int64_t len;
+                return get_number(input_format_t::ubjson, len) and get_string(input_format_t::ubjson, len, result);
+            }
+
+            default:
+                auto last_token = get_token_string();
+                return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L); last byte: 0x" + last_token, "string")));
+        }
+    }
+
+    /*!
+    @param[out] result  determined size
+    @return whether size determination completed
+    */
+    bool get_ubjson_size_value(std::size_t& result)
+    {
+        switch (get_ignore_noop())
+        {
+            case 'U':
+            {
+                uint8_t number;
+                if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number)))
+                {
+                    return false;
+                }
+                result = static_cast<std::size_t>(number);
+                return true;
+            }
+
+            case 'i':
+            {
+                int8_t number;
+                if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number)))
+                {
+                    return false;
+                }
+                result = static_cast<std::size_t>(number);
+                return true;
+            }
+
+            case 'I':
+            {
+                int16_t number;
+                if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number)))
+                {
+                    return false;
+                }
+                result = static_cast<std::size_t>(number);
+                return true;
+            }
+
+            case 'l':
+            {
+                int32_t number;
+                if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number)))
+                {
+                    return false;
+                }
+                result = static_cast<std::size_t>(number);
+                return true;
+            }
+
+            case 'L':
+            {
+                int64_t number;
+                if (JSON_UNLIKELY(not get_number(input_format_t::ubjson, number)))
+                {
+                    return false;
+                }
+                result = static_cast<std::size_t>(number);
+                return true;
+            }
+
+            default:
+            {
+                auto last_token = get_token_string();
+                return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "expected length type specification (U, i, I, l, L) after '#'; last byte: 0x" + last_token, "size")));
+            }
+        }
+    }
+
+    /*!
+    @brief determine the type and size for a container
+
+    In the optimized UBJSON format, a type and a size can be provided to allow
+    for a more compact representation.
+
+    @param[out] result  pair of the size and the type
+
+    @return whether pair creation completed
+    */
+    bool get_ubjson_size_type(std::pair<std::size_t, int>& result)
+    {
+        result.first = string_t::npos; // size
+        result.second = 0; // type
+
+        get_ignore_noop();
+
+        if (current == '$')
+        {
+            result.second = get();  // must not ignore 'N', because 'N' may be the type
+            if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "type")))
+            {
+                return false;
+            }
+
+            get_ignore_noop();
+            if (JSON_UNLIKELY(current != '#'))
+            {
+                if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "value")))
+                {
+                    return false;
+                }
+                auto last_token = get_token_string();
+                return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "expected '#' after type information; last byte: 0x" + last_token, "size")));
+            }
+
+            return get_ubjson_size_value(result.first);
+        }
+        else if (current == '#')
+        {
+            return get_ubjson_size_value(result.first);
+        }
+        return true;
+    }
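+
+    // Illustrative example (added note): for the optimized UBJSON array
+    // '[' '$' 'i' '#' 'U' 0x03 0x01 0x02 0x03, get_ubjson_size_type()
+    // returns the pair (3, 'i'), and get_ubjson_array() then reads three
+    // 'i' (int8) payload bytes without per-element type markers.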
+
+    /*!
+    @param prefix  the previously read or set type prefix
+    @return whether value creation completed
+    */
+    bool get_ubjson_value(const int prefix)
+    {
+        switch (prefix)
+        {
+            case std::char_traits<char>::eof():  // EOF
+                return unexpect_eof(input_format_t::ubjson, "value");
+
+            case 'T':  // true
+                return sax->boolean(true);
+            case 'F':  // false
+                return sax->boolean(false);
+
+            case 'Z':  // null
+                return sax->null();
+
+            case 'U':
+            {
+                uint8_t number;
+                return get_number(input_format_t::ubjson, number) and sax->number_unsigned(number);
+            }
+
+            case 'i':
+            {
+                int8_t number;
+                return get_number(input_format_t::ubjson, number) and sax->number_integer(number);
+            }
+
+            case 'I':
+            {
+                int16_t number;
+                return get_number(input_format_t::ubjson, number) and sax->number_integer(number);
+            }
+
+            case 'l':
+            {
+                int32_t number;
+                return get_number(input_format_t::ubjson, number) and sax->number_integer(number);
+            }
+
+            case 'L':
+            {
+                int64_t number;
+                return get_number(input_format_t::ubjson, number) and sax->number_integer(number);
+            }
+
+            case 'd':
+            {
+                float number;
+                return get_number(input_format_t::ubjson, number) and sax->number_float(static_cast<number_float_t>(number), "");
+            }
+
+            case 'D':
+            {
+                double number;
+                return get_number(input_format_t::ubjson, number) and sax->number_float(static_cast<number_float_t>(number), "");
+            }
+
+            case 'C':  // char
+            {
+                get();
+                if (JSON_UNLIKELY(not unexpect_eof(input_format_t::ubjson, "char")))
+                {
+                    return false;
+                }
+                if (JSON_UNLIKELY(current > 127))
+                {
+                    auto last_token = get_token_string();
+                    return sax->parse_error(chars_read, last_token, parse_error::create(113, chars_read, exception_message(input_format_t::ubjson, "byte after 'C' must be in range 0x00..0x7F; last byte: 0x" + last_token, "char")));
+                }
+                string_t s(1, static_cast<char>(current));
+                return sax->string(s);
+            }
+
+            case 'S':  // string
+            {
+                string_t s;
+                return get_ubjson_string(s) and sax->string(s);
+            }
+
+            case '[':  // array
+                return get_ubjson_array();
+
+            case '{':  // object
+                return get_ubjson_object();
+
+            default: // anything else
+            {
+                auto last_token = get_token_string();
+                return sax->parse_error(chars_read, last_token, parse_error::create(112, chars_read, exception_message(input_format_t::ubjson, "invalid byte: 0x" + last_token, "value")));
+            }
+        }
+    }
+
+    /*!
+    @return whether array creation completed
+    */
+    bool get_ubjson_array()
+    {
+        std::pair<std::size_t, int> size_and_type;
+        if (JSON_UNLIKELY(not get_ubjson_size_type(size_and_type)))
+        {
+            return false;
+        }
+
+        if (size_and_type.first != string_t::npos)
+        {
+            if (JSON_UNLIKELY(not sax->start_array(size_and_type.first)))
+            {
+                return false;
+            }
+
+            if (size_and_type.second != 0)
+            {
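+                // a '$'-typed container whose announced type is the no-op
+                // marker 'N' carries no payload for its elements, so the
+                // loop below is skipped in that case (comment added for clarity)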
+                if (size_and_type.second != 'N')
+                {
+                    for (std::size_t i = 0; i < size_and_type.first; ++i)
+                    {
+                        if (JSON_UNLIKELY(not get_ubjson_value(size_and_type.second)))
+                        {
+                            return false;
+                        }
+                    }
+                }
+            }
+            else
+            {
+                for (std::size_t i = 0; i < size_and_type.first; ++i)
+                {
+                    if (JSON_UNLIKELY(not parse_ubjson_internal()))
+                    {
+                        return false;
+                    }
+                }
+            }
+        }
+        else
+        {
+            if (JSON_UNLIKELY(not sax->start_array(std::size_t(-1))))
+            {
+                return false;
+            }
+
+            while (current != ']')
+            {
+                if (JSON_UNLIKELY(not parse_ubjson_internal(false)))
+                {
+                    return false;
+                }
+                get_ignore_noop();
+            }
+        }
+
+        return sax->end_array();
+    }
+
+    /*!
+    @return whether object creation completed
+    */
+    bool get_ubjson_object()
+    {
+        std::pair<std::size_t, int> size_and_type;
+        if (JSON_UNLIKELY(not get_ubjson_size_type(size_and_type)))
+        {
+            return false;
+        }
+
+        string_t key;
+        if (size_and_type.first != string_t::npos)
+        {
+            if (JSON_UNLIKELY(not sax->start_object(size_and_type.first)))
+            {
+                return false;
+            }
+
+            if (size_and_type.second != 0)
+            {
+                for (std::size_t i = 0; i < size_and_type.first; ++i)
+                {
+                    if (JSON_UNLIKELY(not get_ubjson_string(key) or not sax->key(key)))
+                    {
+                        return false;
+                    }
+                    if (JSON_UNLIKELY(not get_ubjson_value(size_and_type.second)))
+                    {
+                        return false;
+                    }
+                    key.clear();
+                }
+            }
+            else
+            {
+                for (std::size_t i = 0; i < size_and_type.first; ++i)
+                {
+                    if (JSON_UNLIKELY(not get_ubjson_string(key) or not sax->key(key)))
+                    {
+                        return false;
+                    }
+                    if (JSON_UNLIKELY(not parse_ubjson_internal()))
+                    {
+                        return false;
+                    }
+                    key.clear();
+                }
+            }
+        }
+        else
+        {
+            if (JSON_UNLIKELY(not sax->start_object(std::size_t(-1))))
+            {
+                return false;
+            }
+
+            while (current != '}')
+            {
+                if (JSON_UNLIKELY(not get_ubjson_string(key, false) or not sax->key(key)))
+                {
+                    return false;
+                }
+                if (JSON_UNLIKELY(not parse_ubjson_internal()))
+                {
+                    return false;
+                }
+                get_ignore_noop();
+                key.clear();
+            }
+        }
+
+        return sax->end_object();
+    }
+
+    ///////////////////////
+    // Utility functions //
+    ///////////////////////
+
+    /*!
+    @brief get next character from the input
+
+    This function provides the interface to the input adapter in use. It does
+    not throw in case the input reaches EOF, but returns a negative
+    `std::char_traits<char>::eof()` value in that case.
+
+    @return character read from the input
+    */
+    int get()
+    {
+        ++chars_read;
+        return (current = ia->get_character());
+    }
+
+    /*!
+    @return character read from the input after ignoring all 'N' entries
+    */
+    int get_ignore_noop()
+    {
+        do
+        {
+            get();
+        }
+        while (current == 'N');
+
+        return current;
+    }
+
+    /*!
+    @brief read a number from the input
+
+    @tparam NumberType the type of the number
+    @param[in] format   the current format (for diagnostics)
+    @param[out] result  number of type @a NumberType
+
+    @return whether conversion completed
+
+    @note This function needs to respect the system's endianness, because
+          bytes in CBOR, MessagePack, and UBJSON are stored in network order
+          (big endian) and therefore need reordering on little endian systems.
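+
+    @note Worked example (illustrative): the CBOR input 0x19 0x12 0x34
+          encodes a uint16_t whose payload bytes 0x12 0x34 arrive in network
+          order; on a little-endian machine they are stored reversed as
+          {0x34, 0x12} before the memcpy, so @a result becomes 0x1234.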
+    */
+    template<typename NumberType, bool InputIsLittleEndian = false>
+    bool get_number(const input_format_t format, NumberType& result)
+    {
+        // step 1: read input into array with system's byte order
+        std::array<uint8_t, sizeof(NumberType)> vec;
+        for (std::size_t i = 0; i < sizeof(NumberType); ++i)
+        {
+            get();
+            if (JSON_UNLIKELY(not unexpect_eof(format, "number")))
+            {
+                return false;
+            }
+
+            // reverse byte order prior to conversion if necessary
+            if (is_little_endian and not InputIsLittleEndian)
+            {
+                vec[sizeof(NumberType) - i - 1] = static_cast<uint8_t>(current);
+            }
+            else
+            {
+                vec[i] = static_cast<uint8_t>(current); // LCOV_EXCL_LINE
+            }
+        }
+
+        // step 2: convert array into number of type T and return
+        std::memcpy(&result, vec.data(), sizeof(NumberType));
+        return true;
+    }
+
+    /*!
+    @brief create a string by reading characters from the input
+
+    @tparam NumberType the type of the number
+    @param[in] format the current format (for diagnostics)
+    @param[in] len number of characters to read
+    @param[out] result string created by reading @a len bytes
+
+    @return whether string creation completed
+
+    @note We cannot reserve @a len bytes for the result, because @a len
+          may be too large. Usually, @ref unexpect_eof() detects the end of
+          the input before we run out of string memory.
+    */
+    template<typename NumberType>
+    bool get_string(const input_format_t format,
+                    const NumberType len,
+                    string_t& result)
+    {
+        bool success = true;
+        std::generate_n(std::back_inserter(result), len, [this, &success, &format]()
+        {
+            get();
+            if (JSON_UNLIKELY(not unexpect_eof(format, "string")))
+            {
+                success = false;
+            }
+            return static_cast<char>(current);
+        });
+        return success;
+    }
+
+    /*!
+    @param[in] format   the current format (for diagnostics)
+    @param[in] context  further context information (for diagnostics)
+    @return whether the last read character is not EOF
+    */
+    bool unexpect_eof(const input_format_t format, const char* context) const
+    {
+        if (JSON_UNLIKELY(current == std::char_traits<char>::eof()))
+        {
+            return sax->parse_error(chars_read, "<end of file>",
+                                    parse_error::create(110, chars_read, exception_message(format, "unexpected end of input", context)));
+        }
+        return true;
+    }
+
+    /*!
+    @return a string representation of the last read byte
+    */
+    std::string get_token_string() const
+    {
+        char cr[3];
+        (std::snprintf)(cr, 3, "%.2hhX", static_cast<unsigned char>(current));
+        return std::string{cr};
+    }
+
+    /*!
+    @param[in] format   the current format
+    @param[in] detail   a detailed error message
+    @param[in] context  further context information
+    @return a message string to use in the parse_error exceptions
+    */
+    std::string exception_message(const input_format_t format,
+                                  const std::string& detail,
+                                  const std::string& context) const
+    {
+        std::string error_msg = "syntax error while parsing ";
+
+        switch (format)
+        {
+            case input_format_t::cbor:
+                error_msg += "CBOR";
+                break;
+
+            case input_format_t::msgpack:
+                error_msg += "MessagePack";
+                break;
+
+            case input_format_t::ubjson:
+                error_msg += "UBJSON";
+                break;
+
+            case input_format_t::bson:
+                error_msg += "BSON";
+                break;
+
+            // LCOV_EXCL_START
+            default:
+                assert(false);
+                // LCOV_EXCL_STOP
+        }
+
+        return error_msg + " " + context + ": " + detail;
+    }
+
+  private:
+    /// input adapter
+    input_adapter_t ia = nullptr;
+
+    /// the current character
+    int current = std::char_traits<char>::eof();
+
+    /// the number of characters read
+    std::size_t chars_read = 0;
+
+    /// whether we can assume little endianness
+    const bool is_little_endian = little_endianess();
+
+    /// the SAX parser
+    json_sax_t* sax = nullptr;
+};
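+
+// Note (added for orientation, not original library text): this reader is
+// normally driven through the public helpers defined later in this header,
+// e.g. nlohmann::json::from_cbor(...), from_msgpack(...) and from_ubjson(...),
+// which construct a binary_reader and feed it a SAX DOM builder.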
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/output/binary_writer.hpp>
+
+
+#include <algorithm> // reverse
+#include <array> // array
+#include <cstdint> // uint8_t, uint16_t, uint32_t, uint64_t
+#include <cstring> // memcpy
+#include <limits> // numeric_limits
+
+// #include <nlohmann/detail/input/binary_reader.hpp>
+
+// #include <nlohmann/detail/output/output_adapters.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+///////////////////
+// binary writer //
+///////////////////
+
+/*!
+@brief serialization to CBOR and MessagePack values
+*/
+template<typename BasicJsonType, typename CharType>
+class binary_writer
+{
+    using string_t = typename BasicJsonType::string_t;
+
+  public:
+    /*!
+    @brief create a binary writer
+
+    @param[in] adapter  output adapter to write to
+    */
+    explicit binary_writer(output_adapter_t<CharType> adapter) : oa(adapter)
+    {
+        assert(oa);
+    }
+
+    /*!
+    @param[in] j  JSON value to serialize
+    @pre       j.type() == value_t::object
+    */
+    void write_bson(const BasicJsonType& j)
+    {
+        switch (j.type())
+        {
+            case value_t::object:
+            {
+                write_bson_object(*j.m_value.object);
+                break;
+            }
+
+            default:
+            {
+                JSON_THROW(type_error::create(317, "to serialize to BSON, top-level type must be object, but is " + std::string(j.type_name())));
+            }
+        }
+    }
+
+    /*!
+    @param[in] j  JSON value to serialize
+    */
+    void write_cbor(const BasicJsonType& j)
+    {
+        switch (j.type())
+        {
+            case value_t::null:
+            {
+                oa->write_character(to_char_type(0xF6));
+                break;
+            }
+
+            case value_t::boolean:
+            {
+                oa->write_character(j.m_value.boolean
+                                    ? to_char_type(0xF5)
+                                    : to_char_type(0xF4));
+                break;
+            }
+
+            case value_t::number_integer:
+            {
+                if (j.m_value.number_integer >= 0)
+                {
+                    // CBOR does not differentiate between positive signed
+                    // integers and unsigned integers. Therefore, we use the
+                    // code from the value_t::number_unsigned case here.
+                    if (j.m_value.number_integer <= 0x17)
+                    {
+                        write_number(static_cast<uint8_t>(j.m_value.number_integer));
+                    }
+                    else if (j.m_value.number_integer <= (std::numeric_limits<uint8_t>::max)())
+                    {
+                        oa->write_character(to_char_type(0x18));
+                        write_number(static_cast<uint8_t>(j.m_value.number_integer));
+                    }
+                    else if (j.m_value.number_integer <= (std::numeric_limits<uint16_t>::max)())
+                    {
+                        oa->write_character(to_char_type(0x19));
+                        write_number(static_cast<uint16_t>(j.m_value.number_integer));
+                    }
+                    else if (j.m_value.number_integer <= (std::numeric_limits<uint32_t>::max)())
+                    {
+                        oa->write_character(to_char_type(0x1A));
+                        write_number(static_cast<uint32_t>(j.m_value.number_integer));
+                    }
+                    else
+                    {
+                        oa->write_character(to_char_type(0x1B));
+                        write_number(static_cast<uint64_t>(j.m_value.number_integer));
+                    }
+                }
+                else
+                {
+                    // The conversions below encode the sign in the first
+                    // byte, and the value is converted to a positive number.
+                    const auto positive_number = -1 - j.m_value.number_integer;
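+                    // For example (illustration only): -500 maps to
+                    // positive_number = 499 and is emitted below as 0x39
+                    // (two-byte uint follows) plus the bytes 0x01 0xF3.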
+                    if (j.m_value.number_integer >= -24)
+                    {
+                        write_number(static_cast<uint8_t>(0x20 + positive_number));
+                    }
+                    else if (positive_number <= (std::numeric_limits<uint8_t>::max)())
+                    {
+                        oa->write_character(to_char_type(0x38));
+                        write_number(static_cast<uint8_t>(positive_number));
+                    }
+                    else if (positive_number <= (std::numeric_limits<uint16_t>::max)())
+                    {
+                        oa->write_character(to_char_type(0x39));
+                        write_number(static_cast<uint16_t>(positive_number));
+                    }
+                    else if (positive_number <= (std::numeric_limits<uint32_t>::max)())
+                    {
+                        oa->write_character(to_char_type(0x3A));
+                        write_number(static_cast<uint32_t>(positive_number));
+                    }
+                    else
+                    {
+                        oa->write_character(to_char_type(0x3B));
+                        write_number(static_cast<uint64_t>(positive_number));
+                    }
+                }
+                break;
+            }
+
+            case value_t::number_unsigned:
+            {
+                if (j.m_value.number_unsigned <= 0x17)
+                {
+                    write_number(static_cast<uint8_t>(j.m_value.number_unsigned));
+                }
+                else if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)())
+                {
+                    oa->write_character(to_char_type(0x18));
+                    write_number(static_cast<uint8_t>(j.m_value.number_unsigned));
+                }
+                else if (j.m_value.number_unsigned <= (std::numeric_limits<uint16_t>::max)())
+                {
+                    oa->write_character(to_char_type(0x19));
+                    write_number(static_cast<uint16_t>(j.m_value.number_unsigned));
+                }
+                else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)())
+                {
+                    oa->write_character(to_char_type(0x1A));
+                    write_number(static_cast<uint32_t>(j.m_value.number_unsigned));
+                }
+                else
+                {
+                    oa->write_character(to_char_type(0x1B));
+                    write_number(static_cast<uint64_t>(j.m_value.number_unsigned));
+                }
+                break;
+            }
+
+            case value_t::number_float:
+            {
+                oa->write_character(get_cbor_float_prefix(j.m_value.number_float));
+                write_number(j.m_value.number_float);
+                break;
+            }
+
+            case value_t::string:
+            {
+                // step 1: write control byte and the string length
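+                // (illustrative note: a 3-byte string such as "foo" gets the
+                //  control byte 0x63 followed by its UTF-8 bytes)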
+                const auto N = j.m_value.string->size();
+                if (N <= 0x17)
+                {
+                    write_number(static_cast<uint8_t>(0x60 + N));
+                }
+                else if (N <= (std::numeric_limits<uint8_t>::max)())
+                {
+                    oa->write_character(to_char_type(0x78));
+                    write_number(static_cast<uint8_t>(N));
+                }
+                else if (N <= (std::numeric_limits<uint16_t>::max)())
+                {
+                    oa->write_character(to_char_type(0x79));
+                    write_number(static_cast<uint16_t>(N));
+                }
+                else if (N <= (std::numeric_limits<uint32_t>::max)())
+                {
+                    oa->write_character(to_char_type(0x7A));
+                    write_number(static_cast<uint32_t>(N));
+                }
+                // LCOV_EXCL_START
+                else if (N <= (std::numeric_limits<uint64_t>::max)())
+                {
+                    oa->write_character(to_char_type(0x7B));
+                    write_number(static_cast<uint64_t>(N));
+                }
+                // LCOV_EXCL_STOP
+
+                // step 2: write the string
+                oa->write_characters(
+                    reinterpret_cast<const CharType*>(j.m_value.string->c_str()),
+                    j.m_value.string->size());
+                break;
+            }
+
+            case value_t::array:
+            {
+                // step 1: write control byte and the array size
+                const auto N = j.m_value.array->size();
+                if (N <= 0x17)
+                {
+                    write_number(static_cast<uint8_t>(0x80 + N));
+                }
+                else if (N <= (std::numeric_limits<uint8_t>::max)())
+                {
+                    oa->write_character(to_char_type(0x98));
+                    write_number(static_cast<uint8_t>(N));
+                }
+                else if (N <= (std::numeric_limits<uint16_t>::max)())
+                {
+                    oa->write_character(to_char_type(0x99));
+                    write_number(static_cast<uint16_t>(N));
+                }
+                else if (N <= (std::numeric_limits<uint32_t>::max)())
+                {
+                    oa->write_character(to_char_type(0x9A));
+                    write_number(static_cast<uint32_t>(N));
+                }
+                // LCOV_EXCL_START
+                else if (N <= (std::numeric_limits<uint64_t>::max)())
+                {
+                    oa->write_character(to_char_type(0x9B));
+                    write_number(static_cast<uint64_t>(N));
+                }
+                // LCOV_EXCL_STOP
+
+                // step 2: write each element
+                for (const auto& el : *j.m_value.array)
+                {
+                    write_cbor(el);
+                }
+                break;
+            }
+
+            case value_t::object:
+            {
+                // step 1: write control byte and the object size
+                const auto N = j.m_value.object->size();
+                if (N <= 0x17)
+                {
+                    write_number(static_cast<uint8_t>(0xA0 + N));
+                }
+                else if (N <= (std::numeric_limits<uint8_t>::max)())
+                {
+                    oa->write_character(to_char_type(0xB8));
+                    write_number(static_cast<uint8_t>(N));
+                }
+                else if (N <= (std::numeric_limits<uint16_t>::max)())
+                {
+                    oa->write_character(to_char_type(0xB9));
+                    write_number(static_cast<uint16_t>(N));
+                }
+                else if (N <= (std::numeric_limits<uint32_t>::max)())
+                {
+                    oa->write_character(to_char_type(0xBA));
+                    write_number(static_cast<uint32_t>(N));
+                }
+                // LCOV_EXCL_START
+                else if (N <= (std::numeric_limits<uint64_t>::max)())
+                {
+                    oa->write_character(to_char_type(0xBB));
+                    write_number(static_cast<uint64_t>(N));
+                }
+                // LCOV_EXCL_STOP
+
+                // step 2: write each element
+                for (const auto& el : *j.m_value.object)
+                {
+                    write_cbor(el.first);
+                    write_cbor(el.second);
+                }
+                break;
+            }
+
+            default:
+                break;
+        }
+    }
+
+    /*!
+    @param[in] j  JSON value to serialize
+    */
+    void write_msgpack(const BasicJsonType& j)
+    {
+        switch (j.type())
+        {
+            case value_t::null: // nil
+            {
+                oa->write_character(to_char_type(0xC0));
+                break;
+            }
+
+            case value_t::boolean: // true and false
+            {
+                oa->write_character(j.m_value.boolean
+                                    ? to_char_type(0xC3)
+                                    : to_char_type(0xC2));
+                break;
+            }
+
+            case value_t::number_integer:
+            {
+                if (j.m_value.number_integer >= 0)
+                {
+                    // MessagePack does not differentiate between positive
+                    // signed integers and unsigned integers. Therefore, we use
+                    // the code from the value_t::number_unsigned case here.
+                    if (j.m_value.number_unsigned < 128)
+                    {
+                        // positive fixnum
+                        write_number(static_cast<uint8_t>(j.m_value.number_integer));
+                    }
+                    else if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)())
+                    {
+                        // uint 8
+                        oa->write_character(to_char_type(0xCC));
+                        write_number(static_cast<uint8_t>(j.m_value.number_integer));
+                    }
+                    else if (j.m_value.number_unsigned <= (std::numeric_limits<uint16_t>::max)())
+                    {
+                        // uint 16
+                        oa->write_character(to_char_type(0xCD));
+                        write_number(static_cast<uint16_t>(j.m_value.number_integer));
+                    }
+                    else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)())
+                    {
+                        // uint 32
+                        oa->write_character(to_char_type(0xCE));
+                        write_number(static_cast<uint32_t>(j.m_value.number_integer));
+                    }
+                    else if (j.m_value.number_unsigned <= (std::numeric_limits<uint64_t>::max)())
+                    {
+                        // uint 64
+                        oa->write_character(to_char_type(0xCF));
+                        write_number(static_cast<uint64_t>(j.m_value.number_integer));
+                    }
+                }
+                else
+                {
+                    if (j.m_value.number_integer >= -32)
+                    {
+                        // negative fixnum
+                        write_number(static_cast<int8_t>(j.m_value.number_integer));
+                    }
+                    else if (j.m_value.number_integer >= (std::numeric_limits<int8_t>::min)() and
+                             j.m_value.number_integer <= (std::numeric_limits<int8_t>::max)())
+                    {
+                        // int 8
+                        oa->write_character(to_char_type(0xD0));
+                        write_number(static_cast<int8_t>(j.m_value.number_integer));
+                    }
+                    else if (j.m_value.number_integer >= (std::numeric_limits<int16_t>::min)() and
+                             j.m_value.number_integer <= (std::numeric_limits<int16_t>::max)())
+                    {
+                        // int 16
+                        oa->write_character(to_char_type(0xD1));
+                        write_number(static_cast<int16_t>(j.m_value.number_integer));
+                    }
+                    else if (j.m_value.number_integer >= (std::numeric_limits<int32_t>::min)() and
+                             j.m_value.number_integer <= (std::numeric_limits<int32_t>::max)())
+                    {
+                        // int 32
+                        oa->write_character(to_char_type(0xD2));
+                        write_number(static_cast<int32_t>(j.m_value.number_integer));
+                    }
+                    else if (j.m_value.number_integer >= (std::numeric_limits<int64_t>::min)() and
+                             j.m_value.number_integer <= (std::numeric_limits<int64_t>::max)())
+                    {
+                        // int 64
+                        oa->write_character(to_char_type(0xD3));
+                        write_number(static_cast<int64_t>(j.m_value.number_integer));
+                    }
+                }
+                break;
+            }
+
+            case value_t::number_unsigned:
+            {
+                if (j.m_value.number_unsigned < 128)
+                {
+                    // positive fixnum
+                    write_number(static_cast<uint8_t>(j.m_value.number_integer));
+                }
+                else if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)())
+                {
+                    // uint 8
+                    oa->write_character(to_char_type(0xCC));
+                    write_number(static_cast<uint8_t>(j.m_value.number_integer));
+                }
+                else if (j.m_value.number_unsigned <= (std::numeric_limits<uint16_t>::max)())
+                {
+                    // uint 16
+                    oa->write_character(to_char_type(0xCD));
+                    write_number(static_cast<uint16_t>(j.m_value.number_integer));
+                }
+                else if (j.m_value.number_unsigned <= (std::numeric_limits<uint32_t>::max)())
+                {
+                    // uint 32
+                    oa->write_character(to_char_type(0xCE));
+                    write_number(static_cast<uint32_t>(j.m_value.number_integer));
+                }
+                else if (j.m_value.number_unsigned <= (std::numeric_limits<uint64_t>::max)())
+                {
+                    // uint 64
+                    oa->write_character(to_char_type(0xCF));
+                    write_number(static_cast<uint64_t>(j.m_value.number_integer));
+                }
+                break;
+            }
+
+            case value_t::number_float:
+            {
+                oa->write_character(get_msgpack_float_prefix(j.m_value.number_float));
+                write_number(j.m_value.number_float);
+                break;
+            }
+
+            case value_t::string:
+            {
+                // step 1: write control byte and the string length
+                const auto N = j.m_value.string->size();
+                if (N <= 31)
+                {
+                    // fixstr
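+                    // (e.g. a 5-byte string yields the control byte 0xA5;
+                    //  note added for illustration)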
+                    write_number(static_cast<uint8_t>(0xA0 | N));
+                }
+                else if (N <= (std::numeric_limits<uint8_t>::max)())
+                {
+                    // str 8
+                    oa->write_character(to_char_type(0xD9));
+                    write_number(static_cast<uint8_t>(N));
+                }
+                else if (N <= (std::numeric_limits<uint16_t>::max)())
+                {
+                    // str 16
+                    oa->write_character(to_char_type(0xDA));
+                    write_number(static_cast<uint16_t>(N));
+                }
+                else if (N <= (std::numeric_limits<uint32_t>::max)())
+                {
+                    // str 32
+                    oa->write_character(to_char_type(0xDB));
+                    write_number(static_cast<uint32_t>(N));
+                }
+
+                // step 2: write the string
+                oa->write_characters(
+                    reinterpret_cast<const CharType*>(j.m_value.string->c_str()),
+                    j.m_value.string->size());
+                break;
+            }
+
+            case value_t::array:
+            {
+                // step 1: write control byte and the array size
+                const auto N = j.m_value.array->size();
+                if (N <= 15)
+                {
+                    // fixarray
+                    write_number(static_cast<uint8_t>(0x90 | N));
+                }
+                else if (N <= (std::numeric_limits<uint16_t>::max)())
+                {
+                    // array 16
+                    oa->write_character(to_char_type(0xDC));
+                    write_number(static_cast<uint16_t>(N));
+                }
+                else if (N <= (std::numeric_limits<uint32_t>::max)())
+                {
+                    // array 32
+                    oa->write_character(to_char_type(0xDD));
+                    write_number(static_cast<uint32_t>(N));
+                }
+
+                // step 2: write each element
+                for (const auto& el : *j.m_value.array)
+                {
+                    write_msgpack(el);
+                }
+                break;
+            }
+
+            case value_t::object:
+            {
+                // step 1: write control byte and the object size
+                const auto N = j.m_value.object->size();
+                if (N <= 15)
+                {
+                    // fixmap
+                    write_number(static_cast<uint8_t>(0x80 | (N & 0xF)));
+                }
+                else if (N <= (std::numeric_limits<uint16_t>::max)())
+                {
+                    // map 16
+                    oa->write_character(to_char_type(0xDE));
+                    write_number(static_cast<uint16_t>(N));
+                }
+                else if (N <= (std::numeric_limits<uint32_t>::max)())
+                {
+                    // map 32
+                    oa->write_character(to_char_type(0xDF));
+                    write_number(static_cast<uint32_t>(N));
+                }
+
+                // step 2: write each element
+                for (const auto& el : *j.m_value.object)
+                {
+                    write_msgpack(el.first);
+                    write_msgpack(el.second);
+                }
+                break;
+            }
+
+            default:
+                break;
+        }
+    }
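+
+    // For illustration, the value {"a": [1, 2]} (e.g. via json::to_msgpack)
+    // serializes to the six bytes
+    //
+    //     0x81            fixmap with 1 entry
+    //     0xA1 0x61       fixstr "a"
+    //     0x92            fixarray with 2 elements
+    //     0x01 0x02       positive fixnums 1 and 2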
+
+    /*!
+    @param[in] j  JSON value to serialize
+    @param[in] use_count   whether to use '#' prefixes (optimized format)
+    @param[in] use_type    whether to use '$' prefixes (optimized format)
+    @param[in] add_prefix  whether prefixes need to be used for this value
+    */
+    void write_ubjson(const BasicJsonType& j, const bool use_count,
+                      const bool use_type, const bool add_prefix = true)
+    {
+        switch (j.type())
+        {
+            case value_t::null:
+            {
+                if (add_prefix)
+                {
+                    oa->write_character(to_char_type('Z'));
+                }
+                break;
+            }
+
+            case value_t::boolean:
+            {
+                if (add_prefix)
+                {
+                    oa->write_character(j.m_value.boolean
+                                        ? to_char_type('T')
+                                        : to_char_type('F'));
+                }
+                break;
+            }
+
+            case value_t::number_integer:
+            {
+                write_number_with_ubjson_prefix(j.m_value.number_integer, add_prefix);
+                break;
+            }
+
+            case value_t::number_unsigned:
+            {
+                write_number_with_ubjson_prefix(j.m_value.number_unsigned, add_prefix);
+                break;
+            }
+
+            case value_t::number_float:
+            {
+                write_number_with_ubjson_prefix(j.m_value.number_float, add_prefix);
+                break;
+            }
+
+            case value_t::string:
+            {
+                if (add_prefix)
+                {
+                    oa->write_character(to_char_type('S'));
+                }
+                write_number_with_ubjson_prefix(j.m_value.string->size(), true);
+                oa->write_characters(
+                    reinterpret_cast<const CharType*>(j.m_value.string->c_str()),
+                    j.m_value.string->size());
+                break;
+            }
+
+            case value_t::array:
+            {
+                if (add_prefix)
+                {
+                    oa->write_character(to_char_type('['));
+                }
+
+                bool prefix_required = true;
+                if (use_type and not j.m_value.array->empty())
+                {
+                    assert(use_count);
+                    const CharType first_prefix = ubjson_prefix(j.front());
+                    const bool same_prefix = std::all_of(j.begin() + 1, j.end(),
+                                                         [this, first_prefix](const BasicJsonType & v)
+                    {
+                        return ubjson_prefix(v) == first_prefix;
+                    });
+
+                    if (same_prefix)
+                    {
+                        prefix_required = false;
+                        oa->write_character(to_char_type('$'));
+                        oa->write_character(first_prefix);
+                    }
+                }
+
+                if (use_count)
+                {
+                    oa->write_character(to_char_type('#'));
+                    write_number_with_ubjson_prefix(j.m_value.array->size(), true);
+                }
+
+                for (const auto& el : *j.m_value.array)
+                {
+                    write_ubjson(el, use_count, use_type, prefix_required);
+                }
+
+                if (not use_count)
+                {
+                    oa->write_character(to_char_type(']'));
+                }
+
+                break;
+            }
+
+            case value_t::object:
+            {
+                if (add_prefix)
+                {
+                    oa->write_character(to_char_type('{'));
+                }
+
+                bool prefix_required = true;
+                if (use_type and not j.m_value.object->empty())
+                {
+                    assert(use_count);
+                    const CharType first_prefix = ubjson_prefix(j.front());
+                    const bool same_prefix = std::all_of(j.begin(), j.end(),
+                                                         [this, first_prefix](const BasicJsonType & v)
+                    {
+                        return ubjson_prefix(v) == first_prefix;
+                    });
+
+                    if (same_prefix)
+                    {
+                        prefix_required = false;
+                        oa->write_character(to_char_type('$'));
+                        oa->write_character(first_prefix);
+                    }
+                }
+
+                if (use_count)
+                {
+                    oa->write_character(to_char_type('#'));
+                    write_number_with_ubjson_prefix(j.m_value.object->size(), true);
+                }
+
+                for (const auto& el : *j.m_value.object)
+                {
+                    write_number_with_ubjson_prefix(el.first.size(), true);
+                    oa->write_characters(
+                        reinterpret_cast<const CharType*>(el.first.c_str()),
+                        el.first.size());
+                    write_ubjson(el.second, use_count, use_type, prefix_required);
+                }
+
+                if (not use_count)
+                {
+                    oa->write_character(to_char_type('}'));
+                }
+
+                break;
+            }
+
+            default:
+                break;
+        }
+    }
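+
+    // For illustration, the array [1, 2, 3] serializes to
+    //
+    //     '[' 'i' 0x01 'i' 0x02 'i' 0x03 ']'
+    //
+    // in the plain format, and with use_count and use_type enabled to
+    //
+    //     '[' '$' 'i' '#' 'i' 0x03 0x01 0x02 0x03
+    //
+    // where the shared type prefix and the element count replace the
+    // per-element prefixes and the closing ']'.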
+
+  private:
+    //////////
+    // BSON //
+    //////////
+
+    /*!
+    @return The size of a BSON document entry header, including the id marker
+            and the entry name size (and its null-terminator).
+    */
+    static std::size_t calc_bson_entry_header_size(const string_t& name)
+    {
+        const auto it = name.find(static_cast<typename string_t::value_type>(0));
+        if (JSON_UNLIKELY(it != BasicJsonType::string_t::npos))
+        {
+            JSON_THROW(out_of_range::create(409,
+                                            "BSON key cannot contain code point U+0000 (at byte " + std::to_string(it) + ")"));
+        }
+
+        return /*id*/ 1ul + name.size() + /*zero-terminator*/1u;
+    }
+
+    /*!
+    @brief Writes the given @a element_type and @a name to the output adapter
+    */
+    void write_bson_entry_header(const string_t& name,
+                                 const std::uint8_t element_type)
+    {
+        oa->write_character(to_char_type(element_type)); // element type
+        oa->write_characters(
+            reinterpret_cast<const CharType*>(name.c_str()),
+            name.size() + 1u);
+    }
+
+    /*!
+    @brief Writes a BSON element with key @a name and boolean value @a value
+    */
+    void write_bson_boolean(const string_t& name,
+                            const bool value)
+    {
+        write_bson_entry_header(name, 0x08);
+        oa->write_character(value ? to_char_type(0x01) : to_char_type(0x00));
+    }
+
+    /*!
+    @brief Writes a BSON element with key @a name and double value @a value
+    */
+    void write_bson_double(const string_t& name,
+                           const double value)
+    {
+        write_bson_entry_header(name, 0x01);
+        write_number<double, true>(value);
+    }
+
+    /*!
+    @return The size of the BSON-encoded string in @a value
+    */
+    static std::size_t calc_bson_string_size(const string_t& value)
+    {
+        return sizeof(std::int32_t) + value.size() + 1ul;
+    }
+
+    /*!
+    @brief Writes a BSON element with key @a name and string value @a value
+    */
+    void write_bson_string(const string_t& name,
+                           const string_t& value)
+    {
+        write_bson_entry_header(name, 0x02);
+
+        write_number<std::int32_t, true>(static_cast<std::int32_t>(value.size() + 1ul));
+        oa->write_characters(
+            reinterpret_cast<const CharType*>(value.c_str()),
+            value.size() + 1);
+    }
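+
+    // For illustration, write_bson_string("x", "hi") emits
+    //
+    //     0x02 'x' 0x00          entry header (string, key "x")
+    //     0x03 0x00 0x00 0x00    int32 length 3 (little endian, includes terminator)
+    //     'h' 'i' 0x00           string payload with null terminator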
+
+    /*!
+    @brief Writes a BSON element with key @a name and null value
+    */
+    void write_bson_null(const string_t& name)
+    {
+        write_bson_entry_header(name, 0x0A);
+    }
+
+    /*!
+    @return The size of the BSON-encoded integer @a value
+    */
+    static std::size_t calc_bson_integer_size(const std::int64_t value)
+    {
+        if ((std::numeric_limits<std::int32_t>::min)() <= value and value <= (std::numeric_limits<std::int32_t>::max)())
+        {
+            return sizeof(std::int32_t);
+        }
+        else
+        {
+            return sizeof(std::int64_t);
+        }
+    }
+
+    /*!
+    @brief Writes a BSON element with key @a name and integer @a value
+    */
+    void write_bson_integer(const string_t& name,
+                            const std::int64_t value)
+    {
+        if ((std::numeric_limits<std::int32_t>::min)() <= value and value <= (std::numeric_limits<std::int32_t>::max)())
+        {
+            write_bson_entry_header(name, 0x10); // int32
+            write_number<std::int32_t, true>(static_cast<std::int32_t>(value));
+        }
+        else
+        {
+            write_bson_entry_header(name, 0x12); // int64
+            write_number<std::int64_t, true>(static_cast<std::int64_t>(value));
+        }
+    }
+
+    /*!
+    @return The size of the BSON-encoded unsigned integer @a value
+    */
+    static constexpr std::size_t calc_bson_unsigned_size(const std::uint64_t value) noexcept
+    {
+        return (value <= static_cast<std::uint64_t>((std::numeric_limits<std::int32_t>::max)()))
+               ? sizeof(std::int32_t)
+               : sizeof(std::int64_t);
+    }
+
+    /*!
+    @brief Writes a BSON element with key @a name and unsigned @a value
+    */
+    void write_bson_unsigned(const string_t& name,
+                             const std::uint64_t value)
+    {
+        if (value <= static_cast<std::uint64_t>((std::numeric_limits<std::int32_t>::max)()))
+        {
+            write_bson_entry_header(name, 0x10 /* int32 */);
+            write_number<std::int32_t, true>(static_cast<std::int32_t>(value));
+        }
+        else if (value <= static_cast<std::uint64_t>((std::numeric_limits<std::int64_t>::max)()))
+        {
+            write_bson_entry_header(name, 0x12 /* int64 */);
+            write_number<std::int64_t, true>(static_cast<std::int64_t>(value));
+        }
+        else
+        {
+            JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(value) + " cannot be represented by BSON as it does not fit int64"));
+        }
+    }
+
+    /*!
+    @brief Writes a BSON element with key @a name and object @a value
+    */
+    void write_bson_object_entry(const string_t& name,
+                                 const typename BasicJsonType::object_t& value)
+    {
+        write_bson_entry_header(name, 0x03); // object
+        write_bson_object(value);
+    }
+
+    /*!
+    @return The size of the BSON-encoded array @a value
+    */
+    static std::size_t calc_bson_array_size(const typename BasicJsonType::array_t& value)
+    {
+        std::size_t embedded_document_size = 0ul;
+        std::size_t array_index = 0ul;
+
+        for (const auto& el : value)
+        {
+            embedded_document_size += calc_bson_element_size(std::to_string(array_index++), el);
+        }
+
+        return sizeof(std::int32_t) + embedded_document_size + 1ul;
+    }
+
+    /*!
+    @brief Writes a BSON element with key @a name and array @a value
+    */
+    void write_bson_array(const string_t& name,
+                          const typename BasicJsonType::array_t& value)
+    {
+        write_bson_entry_header(name, 0x04); // array
+        write_number<std::int32_t, true>(static_cast<std::int32_t>(calc_bson_array_size(value)));
+
+        std::size_t array_index = 0ul;
+
+        for (const auto& el : value)
+        {
+            write_bson_element(std::to_string(array_index++), el);
+        }
+
+        oa->write_character(to_char_type(0x00));
+    }
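+
+    // BSON has no dedicated array encoding: an array is an embedded document
+    // whose keys are the decimal indices "0", "1", ... For illustration, the
+    // array value [true, false] becomes the 13-byte embedded document
+    //
+    //     0x0D 0x00 0x00 0x00    int32 document size
+    //     0x08 '0' 0x00 0x01     element "0": true
+    //     0x08 '1' 0x00 0x00     element "1": false
+    //     0x00                   terminator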
+
+    /*!
+    @brief Calculates the size necessary to serialize the JSON value @a j with its @a name
+    @return The calculated size for the BSON document entry for @a j with the given @a name.
+    */
+    static std::size_t calc_bson_element_size(const string_t& name,
+            const BasicJsonType& j)
+    {
+        const auto header_size = calc_bson_entry_header_size(name);
+        switch (j.type())
+        {
+            case value_t::object:
+                return header_size + calc_bson_object_size(*j.m_value.object);
+
+            case value_t::array:
+                return header_size + calc_bson_array_size(*j.m_value.array);
+
+            case value_t::boolean:
+                return header_size + 1ul;
+
+            case value_t::number_float:
+                return header_size + 8ul;
+
+            case value_t::number_integer:
+                return header_size + calc_bson_integer_size(j.m_value.number_integer);
+
+            case value_t::number_unsigned:
+                return header_size + calc_bson_unsigned_size(j.m_value.number_unsigned);
+
+            case value_t::string:
+                return header_size + calc_bson_string_size(*j.m_value.string);
+
+            case value_t::null:
+                return header_size + 0ul;
+
+            // LCOV_EXCL_START
+            default:
+                assert(false);
+                return 0ul;
+                // LCOV_EXCL_STOP
+        }
+    }
+
+    /*!
+    @brief Serializes the JSON value @a j to BSON and associates it with the
+           key @a name.
+    @param name The name to associate with the JSON entity @a j within the
+                current BSON document
+    @param j    The JSON value to serialize into the BSON entry
+    */
+    void write_bson_element(const string_t& name,
+                            const BasicJsonType& j)
+    {
+        switch (j.type())
+        {
+            case value_t::object:
+                return write_bson_object_entry(name, *j.m_value.object);
+
+            case value_t::array:
+                return write_bson_array(name, *j.m_value.array);
+
+            case value_t::boolean:
+                return write_bson_boolean(name, j.m_value.boolean);
+
+            case value_t::number_float:
+                return write_bson_double(name, j.m_value.number_float);
+
+            case value_t::number_integer:
+                return write_bson_integer(name, j.m_value.number_integer);
+
+            case value_t::number_unsigned:
+                return write_bson_unsigned(name, j.m_value.number_unsigned);
+
+            case value_t::string:
+                return write_bson_string(name, *j.m_value.string);
+
+            case value_t::null:
+                return write_bson_null(name);
+
+            // LCOV_EXCL_START
+            default:
+                assert(false);
+                return;
+                // LCOV_EXCL_STOP
+        }
+    }
+
+    /*!
+    @brief Calculates the size of the BSON serialization of the given
+           JSON object @a value.
+    @param[in] value  JSON object whose BSON document size is calculated
+    */
+    static std::size_t calc_bson_object_size(const typename BasicJsonType::object_t& value)
+    {
+        std::size_t document_size = std::accumulate(value.begin(), value.end(), 0ul,
+                                    [](size_t result, const typename BasicJsonType::object_t::value_type & el)
+        {
+            return result += calc_bson_element_size(el.first, el.second);
+        });
+
+        return sizeof(std::int32_t) + document_size + 1ul;
+    }
+
+    /*!
+    @brief Serializes the JSON object @a value as a BSON document
+    @param[in] value  JSON object to serialize
+    */
+    void write_bson_object(const typename BasicJsonType::object_t& value)
+    {
+        write_number<std::int32_t, true>(static_cast<std::int32_t>(calc_bson_object_size(value)));
+
+        for (const auto& el : value)
+        {
+            write_bson_element(el.first, el.second);
+        }
+
+        oa->write_character(to_char_type(0x00));
+    }
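+
+    // For illustration, the object {"a": 1} serializes to the 12-byte document
+    //
+    //     0x0C 0x00 0x00 0x00    int32 document size
+    //     0x10 'a' 0x00          entry header (int32, key "a")
+    //     0x01 0x00 0x00 0x00    int32 value 1 (little endian)
+    //     0x00                   terminator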
+
+    //////////
+    // CBOR //
+    //////////
+
+    static constexpr CharType get_cbor_float_prefix(float /*unused*/)
+    {
+        return to_char_type(0xFA);  // Single-Precision Float
+    }
+
+    static constexpr CharType get_cbor_float_prefix(double /*unused*/)
+    {
+        return to_char_type(0xFB);  // Double-Precision Float
+    }
+
+    /////////////
+    // MsgPack //
+    /////////////
+
+    static constexpr CharType get_msgpack_float_prefix(float /*unused*/)
+    {
+        return to_char_type(0xCA);  // float 32
+    }
+
+    static constexpr CharType get_msgpack_float_prefix(double /*unused*/)
+    {
+        return to_char_type(0xCB);  // float 64
+    }
+
+    ////////////
+    // UBJSON //
+    ////////////
+
+    // UBJSON: write number (floating point)
+    template<typename NumberType, typename std::enable_if<
+                 std::is_floating_point<NumberType>::value, int>::type = 0>
+    void write_number_with_ubjson_prefix(const NumberType n,
+                                         const bool add_prefix)
+    {
+        if (add_prefix)
+        {
+            oa->write_character(get_ubjson_float_prefix(n));
+        }
+        write_number(n);
+    }
+
+    // UBJSON: write number (unsigned integer)
+    template<typename NumberType, typename std::enable_if<
+                 std::is_unsigned<NumberType>::value, int>::type = 0>
+    void write_number_with_ubjson_prefix(const NumberType n,
+                                         const bool add_prefix)
+    {
+        if (n <= static_cast<uint64_t>((std::numeric_limits<int8_t>::max)()))
+        {
+            if (add_prefix)
+            {
+                oa->write_character(to_char_type('i'));  // int8
+            }
+            write_number(static_cast<uint8_t>(n));
+        }
+        else if (n <= (std::numeric_limits<uint8_t>::max)())
+        {
+            if (add_prefix)
+            {
+                oa->write_character(to_char_type('U'));  // uint8
+            }
+            write_number(static_cast<uint8_t>(n));
+        }
+        else if (n <= static_cast<uint64_t>((std::numeric_limits<int16_t>::max)()))
+        {
+            if (add_prefix)
+            {
+                oa->write_character(to_char_type('I'));  // int16
+            }
+            write_number(static_cast<int16_t>(n));
+        }
+        else if (n <= static_cast<uint64_t>((std::numeric_limits<int32_t>::max)()))
+        {
+            if (add_prefix)
+            {
+                oa->write_character(to_char_type('l'));  // int32
+            }
+            write_number(static_cast<int32_t>(n));
+        }
+        else if (n <= static_cast<uint64_t>((std::numeric_limits<int64_t>::max)()))
+        {
+            if (add_prefix)
+            {
+                oa->write_character(to_char_type('L'));  // int64
+            }
+            write_number(static_cast<int64_t>(n));
+        }
+        else
+        {
+            JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(n) + " cannot be represented by UBJSON as it does not fit int64"));
+        }
+    }
+
+    // UBJSON: write number (signed integer)
+    template<typename NumberType, typename std::enable_if<
+                 std::is_signed<NumberType>::value and
+                 not std::is_floating_point<NumberType>::value, int>::type = 0>
+    void write_number_with_ubjson_prefix(const NumberType n,
+                                         const bool add_prefix)
+    {
+        if ((std::numeric_limits<int8_t>::min)() <= n and n <= (std::numeric_limits<int8_t>::max)())
+        {
+            if (add_prefix)
+            {
+                oa->write_character(to_char_type('i'));  // int8
+            }
+            write_number(static_cast<int8_t>(n));
+        }
+        else if (static_cast<int64_t>((std::numeric_limits<uint8_t>::min)()) <= n and n <= static_cast<int64_t>((std::numeric_limits<uint8_t>::max)()))
+        {
+            if (add_prefix)
+            {
+                oa->write_character(to_char_type('U'));  // uint8
+            }
+            write_number(static_cast<uint8_t>(n));
+        }
+        else if ((std::numeric_limits<int16_t>::min)() <= n and n <= (std::numeric_limits<int16_t>::max)())
+        {
+            if (add_prefix)
+            {
+                oa->write_character(to_char_type('I'));  // int16
+            }
+            write_number(static_cast<int16_t>(n));
+        }
+        else if ((std::numeric_limits<int32_t>::min)() <= n and n <= (std::numeric_limits<int32_t>::max)())
+        {
+            if (add_prefix)
+            {
+                oa->write_character(to_char_type('l'));  // int32
+            }
+            write_number(static_cast<int32_t>(n));
+        }
+        else if ((std::numeric_limits<int64_t>::min)() <= n and n <= (std::numeric_limits<int64_t>::max)())
+        {
+            if (add_prefix)
+            {
+                oa->write_character(to_char_type('L'));  // int64
+            }
+            write_number(static_cast<int64_t>(n));
+        }
+        // LCOV_EXCL_START
+        else
+        {
+            JSON_THROW(out_of_range::create(407, "integer number " + std::to_string(n) + " cannot be represented by UBJSON as it does not fit int64"));
+        }
+        // LCOV_EXCL_STOP
+    }
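+
+    // For illustration, the value 1000 does not fit int8 or uint8 but fits
+    // int16, so with add_prefix == true it is written as 'I' 0x03 0xE8;
+    // with add_prefix == false (inside a type-optimized container) only the
+    // two big-endian payload bytes 0x03 0xE8 are written.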
+
+    /*!
+    @brief determine the type prefix of container values
+
+    @note This function does not need to be 100% accurate when it comes to
+          integer limits. In case a number exceeds the limits of int64_t,
+          this will be detected by a later call to function
+          write_number_with_ubjson_prefix. Therefore, we return 'L' for any
+          value that does not fit the previous limits.
+    */
+    CharType ubjson_prefix(const BasicJsonType& j) const noexcept
+    {
+        switch (j.type())
+        {
+            case value_t::null:
+                return 'Z';
+
+            case value_t::boolean:
+                return j.m_value.boolean ? 'T' : 'F';
+
+            case value_t::number_integer:
+            {
+                if ((std::numeric_limits<int8_t>::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits<int8_t>::max)())
+                {
+                    return 'i';
+                }
+                if ((std::numeric_limits<uint8_t>::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits<uint8_t>::max)())
+                {
+                    return 'U';
+                }
+                if ((std::numeric_limits<int16_t>::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits<int16_t>::max)())
+                {
+                    return 'I';
+                }
+                if ((std::numeric_limits<int32_t>::min)() <= j.m_value.number_integer and j.m_value.number_integer <= (std::numeric_limits<int32_t>::max)())
+                {
+                    return 'l';
+                }
+                // no check and assume int64_t (see note above)
+                return 'L';
+            }
+
+            case value_t::number_unsigned:
+            {
+                if (j.m_value.number_unsigned <= (std::numeric_limits<int8_t>::max)())
+                {
+                    return 'i';
+                }
+                if (j.m_value.number_unsigned <= (std::numeric_limits<uint8_t>::max)())
+                {
+                    return 'U';
+                }
+                if (j.m_value.number_unsigned <= (std::numeric_limits<int16_t>::max)())
+                {
+                    return 'I';
+                }
+                if (j.m_value.number_unsigned <= (std::numeric_limits<int32_t>::max)())
+                {
+                    return 'l';
+                }
+                // no check and assume int64_t (see note above)
+                return 'L';
+            }
+
+            case value_t::number_float:
+                return get_ubjson_float_prefix(j.m_value.number_float);
+
+            case value_t::string:
+                return 'S';
+
+            case value_t::array:
+                return '[';
+
+            case value_t::object:
+                return '{';
+
+            default:  // discarded values
+                return 'N';
+        }
+    }
+
+    static constexpr CharType get_ubjson_float_prefix(float /*unused*/)
+    {
+        return 'd';  // float 32
+    }
+
+    static constexpr CharType get_ubjson_float_prefix(double /*unused*/)
+    {
+        return 'D';  // float 64
+    }
+
+    ///////////////////////
+    // Utility functions //
+    ///////////////////////
+
+    /*!
+    @brief write a number to the output in the required byte order
+    @param[in] n number of type @a NumberType
+    @tparam NumberType the type of the number
+    @tparam OutputIsLittleEndian Set to true if output data is
+                                 required to be little endian
+
+    @note This function needs to respect the system's endianness, because bytes
+          in CBOR, MessagePack, and UBJSON are stored in network order (big
+          endian) and therefore need reordering on little endian systems.
+    */
+    template<typename NumberType, bool OutputIsLittleEndian = false>
+    void write_number(const NumberType n)
+    {
+        // step 1: write number to array of length NumberType
+        std::array<CharType, sizeof(NumberType)> vec;
+        std::memcpy(vec.data(), &n, sizeof(NumberType));
+
+        // step 2: write array to output (with possible reordering)
+        if (is_little_endian and not OutputIsLittleEndian)
+        {
+            // reverse byte order prior to conversion if necessary
+            std::reverse(vec.begin(), vec.end());
+        }
+
+        oa->write_characters(vec.data(), sizeof(NumberType));
+    }
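+
+    // For illustration, on a little-endian machine write_number writes
+    // std::uint16_t(0x1234) as 0x12 0x34 (network order for CBOR, MessagePack,
+    // and UBJSON), while the BSON code paths call write_number<std::int32_t, true>
+    // to keep the little-endian byte order that BSON requires.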
+
+  public:
+    // The following to_char_type functions implement the conversion
+    // between uint8_t and CharType. In case CharType is not unsigned,
+    // such a conversion is required to allow values greater than 127.
+    // See <https://github.com/nlohmann/json/issues/1286> for a discussion.
+    template < typename C = CharType,
+               enable_if_t < std::is_signed<C>::value and std::is_signed<char>::value > * = nullptr >
+    static constexpr CharType to_char_type(std::uint8_t x) noexcept
+    {
+        return *reinterpret_cast<char*>(&x);
+    }
+
+    template < typename C = CharType,
+               enable_if_t < std::is_signed<C>::value and std::is_unsigned<char>::value > * = nullptr >
+    static CharType to_char_type(std::uint8_t x) noexcept
+    {
+        static_assert(sizeof(std::uint8_t) == sizeof(CharType), "size of CharType must be equal to std::uint8_t");
+        static_assert(std::is_pod<CharType>::value, "CharType must be POD");
+        CharType result;
+        std::memcpy(&result, &x, sizeof(x));
+        return result;
+    }
+
+    template<typename C = CharType,
+             enable_if_t<std::is_unsigned<C>::value>* = nullptr>
+    static constexpr CharType to_char_type(std::uint8_t x) noexcept
+    {
+        return x;
+    }
+
+    template < typename InputCharType, typename C = CharType,
+               enable_if_t <
+                   std::is_signed<C>::value and
+                   std::is_signed<char>::value and
+                   std::is_same<char, typename std::remove_cv<InputCharType>::type>::value
+                   > * = nullptr >
+    static constexpr CharType to_char_type(InputCharType x) noexcept
+    {
+        return x;
+    }
+
+  private:
+    /// whether we can assume little endianness
+    const bool is_little_endian = binary_reader<BasicJsonType>::little_endianess();
+
+    /// the output
+    output_adapter_t<CharType> oa = nullptr;
+};
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/output/serializer.hpp>
+
+
+#include <algorithm> // reverse, remove, fill, find, none_of
+#include <array> // array
+#include <cassert> // assert
+#include <ciso646> // and, or
+#include <clocale> // localeconv, lconv
+#include <cmath> // labs, isfinite, isnan, signbit
+#include <cstddef> // size_t, ptrdiff_t
+#include <cstdint> // uint8_t
+#include <cstdio> // snprintf
+#include <limits> // numeric_limits
+#include <string> // string
+#include <type_traits> // is_same
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/conversions/to_chars.hpp>
+
+
+#include <cassert> // assert
+#include <ciso646> // or, and, not
+#include <cmath>   // signbit, isfinite
+#include <cstdint> // intN_t, uintN_t
+#include <cstring> // memcpy, memmove
+
+namespace nlohmann
+{
+namespace detail
+{
+
+/*!
+@brief implements the Grisu2 algorithm for binary to decimal floating-point
+conversion.
+
+This implementation is a slightly modified version of the reference
+implementation which may be obtained from
+http://florian.loitsch.com/publications (bench.tar.gz).
+
+The code is distributed under the MIT license, Copyright (c) 2009 Florian Loitsch.
+
+For a detailed description of the algorithm see:
+
+[1] Loitsch, "Printing Floating-Point Numbers Quickly and Accurately with
+    Integers", Proceedings of the ACM SIGPLAN 2010 Conference on Programming
+    Language Design and Implementation, PLDI 2010
+[2] Burger, Dybvig, "Printing Floating-Point Numbers Quickly and Accurately",
+    Proceedings of the ACM SIGPLAN 1996 Conference on Programming Language
+    Design and Implementation, PLDI 1996
+*/
+namespace dtoa_impl
+{
+
+template <typename Target, typename Source>
+Target reinterpret_bits(const Source source)
+{
+    static_assert(sizeof(Target) == sizeof(Source), "size mismatch");
+
+    Target target;
+    std::memcpy(&target, &source, sizeof(Source));
+    return target;
+}
+
+struct diyfp // f * 2^e
+{
+    static constexpr int kPrecision = 64; // = q
+
+    uint64_t f = 0;
+    int e = 0;
+
+    constexpr diyfp(uint64_t f_, int e_) noexcept : f(f_), e(e_) {}
+
+    /*!
+    @brief returns x - y
+    @pre x.e == y.e and x.f >= y.f
+    */
+    static diyfp sub(const diyfp& x, const diyfp& y) noexcept
+    {
+        assert(x.e == y.e);
+        assert(x.f >= y.f);
+
+        return {x.f - y.f, x.e};
+    }
+
+    /*!
+    @brief returns x * y
+    @note The result is rounded. (Only the upper q bits are returned.)
+    */
+    static diyfp mul(const diyfp& x, const diyfp& y) noexcept
+    {
+        static_assert(kPrecision == 64, "internal error");
+
+        // Computes:
+        //  f = round((x.f * y.f) / 2^q)
+        //  e = x.e + y.e + q
+
+        // Emulate the 64-bit * 64-bit multiplication:
+        //
+        // p = u * v
+        //   = (u_lo + 2^32 u_hi) (v_lo + 2^32 v_hi)
+        //   = (u_lo v_lo         ) + 2^32 ((u_lo v_hi         ) + (u_hi v_lo         )) + 2^64 (u_hi v_hi         )
+        //   = (p0                ) + 2^32 ((p1                ) + (p2                )) + 2^64 (p3                )
+        //   = (p0_lo + 2^32 p0_hi) + 2^32 ((p1_lo + 2^32 p1_hi) + (p2_lo + 2^32 p2_hi)) + 2^64 (p3                )
+        //   = (p0_lo             ) + 2^32 (p0_hi + p1_lo + p2_lo                      ) + 2^64 (p1_hi + p2_hi + p3)
+        //   = (p0_lo             ) + 2^32 (Q                                          ) + 2^64 (H                 )
+        //   = (p0_lo             ) + 2^32 (Q_lo + 2^32 Q_hi                           ) + 2^64 (H                 )
+        //
+        // (Since Q might be larger than 2^32 - 1)
+        //
+        //   = (p0_lo + 2^32 Q_lo) + 2^64 (Q_hi + H)
+        //
+        // (Q_hi + H does not overflow a 64-bit int)
+        //
+        //   = p_lo + 2^64 p_hi
+
+        const uint64_t u_lo = x.f & 0xFFFFFFFF;
+        const uint64_t u_hi = x.f >> 32;
+        const uint64_t v_lo = y.f & 0xFFFFFFFF;
+        const uint64_t v_hi = y.f >> 32;
+
+        const uint64_t p0 = u_lo * v_lo;
+        const uint64_t p1 = u_lo * v_hi;
+        const uint64_t p2 = u_hi * v_lo;
+        const uint64_t p3 = u_hi * v_hi;
+
+        const uint64_t p0_hi = p0 >> 32;
+        const uint64_t p1_lo = p1 & 0xFFFFFFFF;
+        const uint64_t p1_hi = p1 >> 32;
+        const uint64_t p2_lo = p2 & 0xFFFFFFFF;
+        const uint64_t p2_hi = p2 >> 32;
+
+        uint64_t Q = p0_hi + p1_lo + p2_lo;
+
+        // The full product might now be computed as
+        //
+        // p_hi = p3 + p2_hi + p1_hi + (Q >> 32)
+        // p_lo = p0_lo + (Q << 32)
+        //
+        // But in this particular case here, the full p_lo is not required.
+        // Effectively we only need to add the highest bit in p_lo to p_hi (and
+        // Q_hi + 1 does not overflow).
+
+        Q += uint64_t{1} << (64 - 32 - 1); // round, ties up
+
+        const uint64_t h = p3 + p2_hi + p1_hi + (Q >> 32);
+
+        return {h, x.e + y.e + 64};
+    }
+
+    /*!
+    @brief normalize x such that the significand is >= 2^(q-1)
+    @pre x.f != 0
+    */
+    static diyfp normalize(diyfp x) noexcept
+    {
+        assert(x.f != 0);
+
+        while ((x.f >> 63) == 0)
+        {
+            x.f <<= 1;
+            x.e--;
+        }
+
+        return x;
+    }
+
+    /*!
+    @brief normalize x such that the result has the exponent target_exponent
+    @pre target_exponent >= x.e and the upper target_exponent - x.e bits of x.f must be zero.
+    */
+    static diyfp normalize_to(const diyfp& x, const int target_exponent) noexcept
+    {
+        const int delta = x.e - target_exponent;
+
+        assert(delta >= 0);
+        assert(((x.f << delta) >> delta) == x.f);
+
+        return {x.f << delta, target_exponent};
+    }
+};
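+
+// For illustration, the double 1.0 has F = 0 and E = 1023 and is therefore
+// represented by the diyfp (2^52, -52); diyfp::normalize shifts the
+// significand into the topmost bit and yields (2^63, -63).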
+
+struct boundaries
+{
+    diyfp w;
+    diyfp minus;
+    diyfp plus;
+};
+
+/*!
+Compute the (normalized) diyfp representing the input number 'value' and its
+boundaries.
+
+@pre value must be finite and positive
+*/
+template <typename FloatType>
+boundaries compute_boundaries(FloatType value)
+{
+    assert(std::isfinite(value));
+    assert(value > 0);
+
+    // Convert the IEEE representation into a diyfp.
+    //
+    // If v is denormal:
+    //      value = 0.F * 2^(1 - bias) = (          F) * 2^(1 - bias - (p-1))
+    // If v is normalized:
+    //      value = 1.F * 2^(E - bias) = (2^(p-1) + F) * 2^(E - bias - (p-1))
+
+    static_assert(std::numeric_limits<FloatType>::is_iec559,
+                  "internal error: dtoa_short requires an IEEE-754 floating-point implementation");
+
+    constexpr int      kPrecision = std::numeric_limits<FloatType>::digits; // = p (includes the hidden bit)
+    constexpr int      kBias      = std::numeric_limits<FloatType>::max_exponent - 1 + (kPrecision - 1);
+    constexpr int      kMinExp    = 1 - kBias;
+    constexpr uint64_t kHiddenBit = uint64_t{1} << (kPrecision - 1); // = 2^(p-1)
+
+    using bits_type = typename std::conditional< kPrecision == 24, uint32_t, uint64_t >::type;
+
+    const uint64_t bits = reinterpret_bits<bits_type>(value);
+    const uint64_t E = bits >> (kPrecision - 1);
+    const uint64_t F = bits & (kHiddenBit - 1);
+
+    const bool is_denormal = (E == 0);
+    const diyfp v = is_denormal
+                    ? diyfp(F, kMinExp)
+                    : diyfp(F + kHiddenBit, static_cast<int>(E) - kBias);
+
+    // Compute the boundaries m- and m+ of the floating-point value
+    // v = f * 2^e.
+    //
+    // Determine v- and v+, the floating-point predecessor and successor of v,
+    // respectively.
+    //
+    //      v- = v - 2^e        if f != 2^(p-1) or e == e_min                (A)
+    //         = v - 2^(e-1)    if f == 2^(p-1) and e > e_min                (B)
+    //
+    //      v+ = v + 2^e
+    //
+    // Let m- = (v- + v) / 2 and m+ = (v + v+) / 2. All real numbers _strictly_
+    // between m- and m+ round to v, regardless of how the input rounding
+    // algorithm breaks ties.
+    //
+    //      ---+-------------+-------------+-------------+-------------+---  (A)
+    //         v-            m-            v             m+            v+
+    //
+    //      -----------------+------+------+-------------+-------------+---  (B)
+    //                       v-     m-     v             m+            v+
+
+    const bool lower_boundary_is_closer = (F == 0 and E > 1);
+    const diyfp m_plus = diyfp(2 * v.f + 1, v.e - 1);
+    const diyfp m_minus = lower_boundary_is_closer
+                          ? diyfp(4 * v.f - 1, v.e - 2)  // (B)
+                          : diyfp(2 * v.f - 1, v.e - 1); // (A)
+
+    // Determine the normalized w+ = m+.
+    const diyfp w_plus = diyfp::normalize(m_plus);
+
+    // Determine w- = m- such that e_(w-) = e_(w+).
+    const diyfp w_minus = diyfp::normalize_to(m_minus, w_plus.e);
+
+    return {diyfp::normalize(v), w_minus, w_plus};
+}
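+
+// For illustration, for value = 1.0 the lower boundary is the closer one
+// (F == 0 and E > 1), so the boundaries are m- = 1 - 2^-54 and
+// m+ = 1 + 2^-53: every real number strictly between them rounds to 1.0.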
+
+// Given normalized diyfp w, Grisu needs to find a (normalized) cached
+// power-of-ten c, such that the exponent of the product c * w = f * 2^e lies
+// within a certain range [alpha, gamma] (Definition 3.2 from [1])
+//
+//      alpha <= e = e_c + e_w + q <= gamma
+//
+// or
+//
+//      f_c * f_w * 2^alpha <= f_c 2^(e_c) * f_w 2^(e_w) * 2^q
+//                          <= f_c * f_w * 2^gamma
+//
+// Since c and w are normalized, i.e. 2^(q-1) <= f < 2^q, this implies
+//
+//      2^(q-1) * 2^(q-1) * 2^alpha <= c * w * 2^q < 2^q * 2^q * 2^gamma
+//
+// or
+//
+//      2^(q - 2 + alpha) <= c * w < 2^(q + gamma)
+//
+// The choice of (alpha,gamma) determines the size of the table and the form of
+// the digit generation procedure. Using (alpha,gamma)=(-60,-32) works out well
+// in practice:
+//
+// The idea is to cut the number c * w = f * 2^e into two parts, which can be
+// processed independently: An integral part p1, and a fractional part p2:
+//
+//      f * 2^e = ( (f div 2^-e) * 2^-e + (f mod 2^-e) ) * 2^e
+//              = (f div 2^-e) + (f mod 2^-e) * 2^e
+//              = p1 + p2 * 2^e
+//
+// The conversion of p1 into decimal form requires a series of divisions and
+// modulos by (a power of) 10. These operations are faster for 32-bit than for
+// 64-bit integers, so p1 should ideally fit into a 32-bit integer. This can be
+// achieved by choosing
+//
+//      -e >= 32   or   e <= -32 := gamma
+//
+// In order to convert the fractional part
+//
+//      p2 * 2^e = p2 / 2^-e = d[-1] / 10^1 + d[-2] / 10^2 + ...
+//
+// into decimal form, the fraction is repeatedly multiplied by 10 and the digits
+// d[-i] are extracted in order:
+//
+//      (10 * p2) div 2^-e = d[-1]
+//      (10 * p2) mod 2^-e = d[-2] / 10^1 + ...
+//
+// The multiplication by 10 must not overflow. It is sufficient to choose
+//
+//      10 * p2 < 16 * p2 = 2^4 * p2 <= 2^64.
+//
+// Since p2 = f mod 2^-e < 2^-e,
+//
+//      -e <= 60   or   e >= -60 := alpha
+
+constexpr int kAlpha = -60;
+constexpr int kGamma = -32;
+
+struct cached_power // c = f * 2^e ~= 10^k
+{
+    uint64_t f;
+    int e;
+    int k;
+};
+
+/*!
+For a normalized diyfp w = f * 2^e, this function returns a (normalized) cached
+power-of-ten c = f_c * 2^e_c, such that the exponent of the product w * c
+satisfies (Definition 3.2 from [1])
+
+     alpha <= e_c + e + q <= gamma.
+*/
+inline cached_power get_cached_power_for_binary_exponent(int e)
+{
+    // Now
+    //
+    //      alpha <= e_c + e + q <= gamma                                    (1)
+    //      ==> f_c * 2^alpha <= c * 2^e * 2^q
+    //
+    // and since the c's are normalized, 2^(q-1) <= f_c,
+    //
+    //      ==> 2^(q - 1 + alpha) <= c * 2^(e + q)
+    //      ==> 2^(alpha - e - 1) <= c
+    //
+    // If c were an exact power of ten, i.e. c = 10^k, one may determine k as
+    //
+    //      k = ceil( log_10( 2^(alpha - e - 1) ) )
+    //        = ceil( (alpha - e - 1) * log_10(2) )
+    //
+    // From the paper:
+    // "In theory the result of the procedure could be wrong since c is rounded,
+    //  and the computation itself is approximated [...]. In practice, however,
+    //  this simple function is sufficient."
+    //
+    // For IEEE double precision floating-point numbers converted into
+    // normalized diyfp's w = f * 2^e, with q = 64,
+    //
+    //      e >= -1022      (min IEEE exponent)
+    //           -52        (p - 1)
+    //           -52        (p - 1, possibly normalize denormal IEEE numbers)
+    //           -11        (normalize the diyfp)
+    //         = -1137
+    //
+    // and
+    //
+    //      e <= +1023      (max IEEE exponent)
+    //           -52        (p - 1)
+    //           -11        (normalize the diyfp)
+    //         = 960
+    //
+    // This binary exponent range [-1137,960] results in a decimal exponent
+    // range [-307,324]. One does not need to store a cached power for each
+    // k in this range. For each such k it suffices to find a cached power
+    // such that the exponent of the product lies in [alpha,gamma].
+    // This implies that the difference of the decimal exponents of adjacent
+    // table entries must be less than or equal to
+    //
+    //      floor( (gamma - alpha) * log_10(2) ) = 8.
+    //
+    // (A smaller distance gamma-alpha would require a larger table.)
+
+    // NB:
+    // Actually this function returns c, such that -60 <= e_c + e + 64 <= -34.
+
+    constexpr int kCachedPowersSize = 79;
+    constexpr int kCachedPowersMinDecExp = -300;
+    constexpr int kCachedPowersDecStep = 8;
+
+    static constexpr cached_power kCachedPowers[] =
+    {
+        { 0xAB70FE17C79AC6CA, -1060, -300 },
+        { 0xFF77B1FCBEBCDC4F, -1034, -292 },
+        { 0xBE5691EF416BD60C, -1007, -284 },
+        { 0x8DD01FAD907FFC3C,  -980, -276 },
+        { 0xD3515C2831559A83,  -954, -268 },
+        { 0x9D71AC8FADA6C9B5,  -927, -260 },
+        { 0xEA9C227723EE8BCB,  -901, -252 },
+        { 0xAECC49914078536D,  -874, -244 },
+        { 0x823C12795DB6CE57,  -847, -236 },
+        { 0xC21094364DFB5637,  -821, -228 },
+        { 0x9096EA6F3848984F,  -794, -220 },
+        { 0xD77485CB25823AC7,  -768, -212 },
+        { 0xA086CFCD97BF97F4,  -741, -204 },
+        { 0xEF340A98172AACE5,  -715, -196 },
+        { 0xB23867FB2A35B28E,  -688, -188 },
+        { 0x84C8D4DFD2C63F3B,  -661, -180 },
+        { 0xC5DD44271AD3CDBA,  -635, -172 },
+        { 0x936B9FCEBB25C996,  -608, -164 },
+        { 0xDBAC6C247D62A584,  -582, -156 },
+        { 0xA3AB66580D5FDAF6,  -555, -148 },
+        { 0xF3E2F893DEC3F126,  -529, -140 },
+        { 0xB5B5ADA8AAFF80B8,  -502, -132 },
+        { 0x87625F056C7C4A8B,  -475, -124 },
+        { 0xC9BCFF6034C13053,  -449, -116 },
+        { 0x964E858C91BA2655,  -422, -108 },
+        { 0xDFF9772470297EBD,  -396, -100 },
+        { 0xA6DFBD9FB8E5B88F,  -369,  -92 },
+        { 0xF8A95FCF88747D94,  -343,  -84 },
+        { 0xB94470938FA89BCF,  -316,  -76 },
+        { 0x8A08F0F8BF0F156B,  -289,  -68 },
+        { 0xCDB02555653131B6,  -263,  -60 },
+        { 0x993FE2C6D07B7FAC,  -236,  -52 },
+        { 0xE45C10C42A2B3B06,  -210,  -44 },
+        { 0xAA242499697392D3,  -183,  -36 },
+        { 0xFD87B5F28300CA0E,  -157,  -28 },
+        { 0xBCE5086492111AEB,  -130,  -20 },
+        { 0x8CBCCC096F5088CC,  -103,  -12 },
+        { 0xD1B71758E219652C,   -77,   -4 },
+        { 0x9C40000000000000,   -50,    4 },
+        { 0xE8D4A51000000000,   -24,   12 },
+        { 0xAD78EBC5AC620000,     3,   20 },
+        { 0x813F3978F8940984,    30,   28 },
+        { 0xC097CE7BC90715B3,    56,   36 },
+        { 0x8F7E32CE7BEA5C70,    83,   44 },
+        { 0xD5D238A4ABE98068,   109,   52 },
+        { 0x9F4F2726179A2245,   136,   60 },
+        { 0xED63A231D4C4FB27,   162,   68 },
+        { 0xB0DE65388CC8ADA8,   189,   76 },
+        { 0x83C7088E1AAB65DB,   216,   84 },
+        { 0xC45D1DF942711D9A,   242,   92 },
+        { 0x924D692CA61BE758,   269,  100 },
+        { 0xDA01EE641A708DEA,   295,  108 },
+        { 0xA26DA3999AEF774A,   322,  116 },
+        { 0xF209787BB47D6B85,   348,  124 },
+        { 0xB454E4A179DD1877,   375,  132 },
+        { 0x865B86925B9BC5C2,   402,  140 },
+        { 0xC83553C5C8965D3D,   428,  148 },
+        { 0x952AB45CFA97A0B3,   455,  156 },
+        { 0xDE469FBD99A05FE3,   481,  164 },
+        { 0xA59BC234DB398C25,   508,  172 },
+        { 0xF6C69A72A3989F5C,   534,  180 },
+        { 0xB7DCBF5354E9BECE,   561,  188 },
+        { 0x88FCF317F22241E2,   588,  196 },
+        { 0xCC20CE9BD35C78A5,   614,  204 },
+        { 0x98165AF37B2153DF,   641,  212 },
+        { 0xE2A0B5DC971F303A,   667,  220 },
+        { 0xA8D9D1535CE3B396,   694,  228 },
+        { 0xFB9B7CD9A4A7443C,   720,  236 },
+        { 0xBB764C4CA7A44410,   747,  244 },
+        { 0x8BAB8EEFB6409C1A,   774,  252 },
+        { 0xD01FEF10A657842C,   800,  260 },
+        { 0x9B10A4E5E9913129,   827,  268 },
+        { 0xE7109BFBA19C0C9D,   853,  276 },
+        { 0xAC2820D9623BF429,   880,  284 },
+        { 0x80444B5E7AA7CF85,   907,  292 },
+        { 0xBF21E44003ACDD2D,   933,  300 },
+        { 0x8E679C2F5E44FF8F,   960,  308 },
+        { 0xD433179D9C8CB841,   986,  316 },
+        { 0x9E19DB92B4E31BA9,  1013,  324 },
+    };
+
+    // This computation gives exactly the same results for k as
+    //      k = ceil((kAlpha - e - 1) * 0.30102999566398114)
+    // for |e| <= 1500, but doesn't require floating-point operations.
+    // NB: log_10(2) ~= 78913 / 2^18
+    assert(e >= -1500);
+    assert(e <=  1500);
+    const int f = kAlpha - e - 1;
+    const int k = (f * 78913) / (1 << 18) + static_cast<int>(f > 0);
+
+    const int index = (-kCachedPowersMinDecExp + k + (kCachedPowersDecStep - 1)) / kCachedPowersDecStep;
+    assert(index >= 0);
+    assert(index < kCachedPowersSize);
+    static_cast<void>(kCachedPowersSize); // Fix warning.
+
+    const cached_power cached = kCachedPowers[index];
+    assert(kAlpha <= cached.e + e + 64);
+    assert(kGamma >= cached.e + e + 64);
+
+    return cached;
+}
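+
+// For illustration, the normalized boundaries of 1.0 have e = -63, so
+// kAlpha - e - 1 = 2, k = 1 and index = 38, which selects the cached power
+// { 0x9C40000000000000, -50, 4 } = 10^4; indeed -60 <= -50 + (-63) + 64 = -49 <= -32.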
+
+/*!
+For n != 0, returns k, such that pow10 := 10^(k-1) <= n < 10^k.
+For n == 0, returns 1 and sets pow10 := 1.
+*/
+inline int find_largest_pow10(const uint32_t n, uint32_t& pow10)
+{
+    // LCOV_EXCL_START
+    if (n >= 1000000000)
+    {
+        pow10 = 1000000000;
+        return 10;
+    }
+    // LCOV_EXCL_STOP
+    else if (n >= 100000000)
+    {
+        pow10 = 100000000;
+        return  9;
+    }
+    else if (n >= 10000000)
+    {
+        pow10 = 10000000;
+        return  8;
+    }
+    else if (n >= 1000000)
+    {
+        pow10 = 1000000;
+        return  7;
+    }
+    else if (n >= 100000)
+    {
+        pow10 = 100000;
+        return  6;
+    }
+    else if (n >= 10000)
+    {
+        pow10 = 10000;
+        return  5;
+    }
+    else if (n >= 1000)
+    {
+        pow10 = 1000;
+        return  4;
+    }
+    else if (n >= 100)
+    {
+        pow10 = 100;
+        return  3;
+    }
+    else if (n >= 10)
+    {
+        pow10 = 10;
+        return  2;
+    }
+    else
+    {
+        pow10 = 1;
+        return 1;
+    }
+}
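+
+// For illustration, find_largest_pow10(4711, pow10) sets pow10 = 1000 and
+// returns 4, since 10^3 <= 4711 < 10^4.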
+
+inline void grisu2_round(char* buf, int len, uint64_t dist, uint64_t delta,
+                         uint64_t rest, uint64_t ten_k)
+{
+    assert(len >= 1);
+    assert(dist <= delta);
+    assert(rest <= delta);
+    assert(ten_k > 0);
+
+    //               <--------------------------- delta ---->
+    //                                  <---- dist --------->
+    // --------------[------------------+-------------------]--------------
+    //               M-                 w                   M+
+    //
+    //                                  ten_k
+    //                                <------>
+    //                                       <---- rest ---->
+    // --------------[------------------+----+--------------]--------------
+    //                                  w    V
+    //                                       = buf * 10^k
+    //
+    // ten_k represents a unit-in-the-last-place in the decimal representation
+    // stored in buf.
+    // Decrement buf by ten_k while this takes buf closer to w.
+
+    // The tests are written in this order to avoid overflow in unsigned
+    // integer arithmetic.
+
+    while (rest < dist
+            and delta - rest >= ten_k
+            and (rest + ten_k < dist or dist - rest > rest + ten_k - dist))
+    {
+        assert(buf[len - 1] != '0');
+        buf[len - 1]--;
+        rest += ten_k;
+    }
+}
+
+/*!
+Generates V = buffer * 10^decimal_exponent, such that M- <= V <= M+.
+M- and M+ must be normalized and share the same exponent -60 <= e <= -32.
+*/
+inline void grisu2_digit_gen(char* buffer, int& length, int& decimal_exponent,
+                             diyfp M_minus, diyfp w, diyfp M_plus)
+{
+    static_assert(kAlpha >= -60, "internal error");
+    static_assert(kGamma <= -32, "internal error");
+
+    // Generates the digits (and the exponent) of a decimal floating-point
+    // number V = buffer * 10^decimal_exponent in the range [M-, M+]. The diyfp's
+    // w, M- and M+ share the same exponent e, which satisfies alpha <= e <= gamma.
+    //
+    //               <--------------------------- delta ---->
+    //                                  <---- dist --------->
+    // --------------[------------------+-------------------]--------------
+    //               M-                 w                   M+
+    //
+    // Grisu2 generates the digits of M+ from left to right and stops as soon as
+    // V is in [M-,M+].
+
+    assert(M_plus.e >= kAlpha);
+    assert(M_plus.e <= kGamma);
+
+    uint64_t delta = diyfp::sub(M_plus, M_minus).f; // (significand of (M+ - M-), implicit exponent is e)
+    uint64_t dist  = diyfp::sub(M_plus, w      ).f; // (significand of (M+ - w ), implicit exponent is e)
+
+    // Split M+ = f * 2^e into two parts p1 and p2 (note: e < 0):
+    //
+    //      M+ = f * 2^e
+    //         = ((f div 2^-e) * 2^-e + (f mod 2^-e)) * 2^e
+    //         = ((p1        ) * 2^-e + (p2        )) * 2^e
+    //         = p1 + p2 * 2^e
+
+    const diyfp one(uint64_t{1} << -M_plus.e, M_plus.e);
+
+    auto p1 = static_cast<uint32_t>(M_plus.f >> -one.e); // p1 = f div 2^-e (Since -e >= 32, p1 fits into a 32-bit int.)
+    uint64_t p2 = M_plus.f & (one.f - 1);                    // p2 = f mod 2^-e
+
+    // 1)
+    //
+    // Generate the digits of the integral part p1 = d[n-1]...d[1]d[0]
+
+    assert(p1 > 0);
+
+    uint32_t pow10;
+    const int k = find_largest_pow10(p1, pow10);
+
+    //      10^(k-1) <= p1 < 10^k, pow10 = 10^(k-1)
+    //
+    //      p1 = (p1 div 10^(k-1)) * 10^(k-1) + (p1 mod 10^(k-1))
+    //         = (d[k-1]         ) * 10^(k-1) + (p1 mod 10^(k-1))
+    //
+    //      M+ = p1                                             + p2 * 2^e
+    //         = d[k-1] * 10^(k-1) + (p1 mod 10^(k-1))          + p2 * 2^e
+    //         = d[k-1] * 10^(k-1) + ((p1 mod 10^(k-1)) * 2^-e + p2) * 2^e
+    //         = d[k-1] * 10^(k-1) + (                         rest) * 2^e
+    //
+    // Now generate the digits d[n] of p1 from left to right (n = k-1,...,0)
+    //
+    //      p1 = d[k-1]...d[n] * 10^n + d[n-1]...d[0]
+    //
+    // but stop as soon as
+    //
+    //      rest * 2^e = (d[n-1]...d[0] * 2^-e + p2) * 2^e <= delta * 2^e
+
+    int n = k;
+    while (n > 0)
+    {
+        // Invariants:
+        //      M+ = buffer * 10^n + (p1 + p2 * 2^e)    (buffer = 0 for n = k)
+        //      pow10 = 10^(n-1) <= p1 < 10^n
+        //
+        const uint32_t d = p1 / pow10;  // d = p1 div 10^(n-1)
+        const uint32_t r = p1 % pow10;  // r = p1 mod 10^(n-1)
+        //
+        //      M+ = buffer * 10^n + (d * 10^(n-1) + r) + p2 * 2^e
+        //         = (buffer * 10 + d) * 10^(n-1) + (r + p2 * 2^e)
+        //
+        assert(d <= 9);
+        buffer[length++] = static_cast<char>('0' + d); // buffer := buffer * 10 + d
+        //
+        //      M+ = buffer * 10^(n-1) + (r + p2 * 2^e)
+        //
+        p1 = r;
+        n--;
+        //
+        //      M+ = buffer * 10^n + (p1 + p2 * 2^e)
+        //      pow10 = 10^n
+        //
+
+        // Now check if enough digits have been generated.
+        // Compute
+        //
+        //      p1 + p2 * 2^e = (p1 * 2^-e + p2) * 2^e = rest * 2^e
+        //
+        // Note:
+        // Since rest and delta share the same exponent e, it suffices to
+        // compare the significands.
+        const uint64_t rest = (uint64_t{p1} << -one.e) + p2;
+        if (rest <= delta)
+        {
+            // V = buffer * 10^n, with M- <= V <= M+.
+
+            decimal_exponent += n;
+
+            // We may now just stop. But instead look if the buffer could be
+            // decremented to bring V closer to w.
+            //
+            // pow10 = 10^n is now 1 ulp in the decimal representation V.
+            // The rounding procedure works with diyfp's with an implicit
+            // exponent of e.
+            //
+            //      10^n = (10^n * 2^-e) * 2^e = ulp * 2^e
+            //
+            const uint64_t ten_n = uint64_t{pow10} << -one.e;
+            grisu2_round(buffer, length, dist, delta, rest, ten_n);
+
+            return;
+        }
+
+        pow10 /= 10;
+        //
+        //      pow10 = 10^(n-1) <= p1 < 10^n
+        // Invariants restored.
+    }
+
+    // 2)
+    //
+    // The digits of the integral part have been generated:
+    //
+    //      M+ = d[k-1]...d[1]d[0] + p2 * 2^e
+    //         = buffer            + p2 * 2^e
+    //
+    // Now generate the digits of the fractional part p2 * 2^e.
+    //
+    // Note:
+    // No decimal point is generated: the exponent is adjusted instead.
+    //
+    // p2 actually represents the fraction
+    //
+    //      p2 * 2^e
+    //          = p2 / 2^-e
+    //          = d[-1] / 10^1 + d[-2] / 10^2 + ...
+    //
+    // Now generate the digits d[-m] of p2 from left to right (m = 1,2,...)
+    //
+    //      p2 * 2^e = d[-1]d[-2]...d[-m] * 10^-m
+    //                      + 10^-m * (d[-m-1] / 10^1 + d[-m-2] / 10^2 + ...)
+    //
+    // using
+    //
+    //      10^m * p2 = ((10^m * p2) div 2^-e) * 2^-e + ((10^m * p2) mod 2^-e)
+    //                = (                   d) * 2^-e + (                   r)
+    //
+    // or
+    //      10^m * p2 * 2^e = d + r * 2^e
+    //
+    // i.e.
+    //
+    //      M+ = buffer + p2 * 2^e
+    //         = buffer + 10^-m * (d + r * 2^e)
+    //         = (buffer * 10^m + d) * 10^-m + 10^-m * r * 2^e
+    //
+    // and stop as soon as 10^-m * r * 2^e <= delta * 2^e
+
+    assert(p2 > delta);
+
+    int m = 0;
+    for (;;)
+    {
+        // Invariant:
+        //      M+ = buffer * 10^-m + 10^-m * (d[-m-1] / 10 + d[-m-2] / 10^2 + ...) * 2^e
+        //         = buffer * 10^-m + 10^-m * (p2                                 ) * 2^e
+        //         = buffer * 10^-m + 10^-m * (1/10 * (10 * p2)                   ) * 2^e
+        //         = buffer * 10^-m + 10^-m * (1/10 * ((10*p2 div 2^-e) * 2^-e + (10*p2 mod 2^-e))) * 2^e
+        //
+        assert(p2 <= UINT64_MAX / 10);
+        p2 *= 10;
+        const uint64_t d = p2 >> -one.e;     // d = (10 * p2) div 2^-e
+        const uint64_t r = p2 & (one.f - 1); // r = (10 * p2) mod 2^-e
+        //
+        //      M+ = buffer * 10^-m + 10^-m * (1/10 * (d * 2^-e + r)) * 2^e
+        //         = buffer * 10^-m + 10^-m * (1/10 * (d + r * 2^e))
+        //         = (buffer * 10 + d) * 10^(-m-1) + 10^(-m-1) * r * 2^e
+        //
+        assert(d <= 9);
+        buffer[length++] = static_cast<char>('0' + d); // buffer := buffer * 10 + d
+        //
+        //      M+ = buffer * 10^(-m-1) + 10^(-m-1) * r * 2^e
+        //
+        p2 = r;
+        m++;
+        //
+        //      M+ = buffer * 10^-m + 10^-m * p2 * 2^e
+        // Invariant restored.
+
+        // Check if enough digits have been generated.
+        //
+        //      10^-m * p2 * 2^e <= delta * 2^e
+        //              p2 * 2^e <= 10^m * delta * 2^e
+        //                    p2 <= 10^m * delta
+        delta *= 10;
+        dist  *= 10;
+        if (p2 <= delta)
+        {
+            break;
+        }
+    }
+
+    // V = buffer * 10^-m, with M- <= V <= M+.
+
+    decimal_exponent -= m;
+
+    // 1 ulp in the decimal representation is now 10^-m.
+    // Since delta and dist are now scaled by 10^m, we need to do the
+    // same with ulp in order to keep the units in sync.
+    //
+    //      10^m * 10^-m = 1 = 2^-e * 2^e = ten_m * 2^e
+    //
+    const uint64_t ten_m = one.f;
+    grisu2_round(buffer, length, dist, delta, p2, ten_m);
+
+    // By construction this algorithm generates the shortest possible decimal
+    // number (Loitsch, Theorem 6.2) which rounds back to w.
+    // For an input number of precision p, at least
+    //
+    //      N = 1 + ceil(p * log_10(2))
+    //
+    // decimal digits are sufficient to identify all binary floating-point
+    // numbers (Matula, "In-and-Out conversions").
+    // This implies that the algorithm does not produce more than N decimal
+    // digits.
+    //
+    //      N = 17 for p = 53 (IEEE double precision)
+    //      N = 9  for p = 24 (IEEE single precision)
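+    //
+    // As a quick check of the formula (illustrative arithmetic only):
+    //
+    //      1 + ceil(53 * log_10(2)) = 1 + ceil(15.95...) = 17
+    //      1 + ceil(24 * log_10(2)) = 1 + ceil( 7.22...) =  9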
+}
+
+/*!
+v = buf * 10^decimal_exponent
+len is the length of the buffer (number of decimal digits)
+The buffer must be large enough, i.e. >= max_digits10.
+*/
+inline void grisu2(char* buf, int& len, int& decimal_exponent,
+                   diyfp m_minus, diyfp v, diyfp m_plus)
+{
+    assert(m_plus.e == m_minus.e);
+    assert(m_plus.e == v.e);
+
+    //  --------(-----------------------+-----------------------)--------    (A)
+    //          m-                      v                       m+
+    //
+    //  --------------------(-----------+-----------------------)--------    (B)
+    //                      m-          v                       m+
+    //
+    // First scale v (and m- and m+) such that the exponent is in the range
+    // [alpha, gamma].
+
+    const cached_power cached = get_cached_power_for_binary_exponent(m_plus.e);
+
+    const diyfp c_minus_k(cached.f, cached.e); // = c ~= 10^-k
+
+    // The exponent of the products is v.e + c_minus_k.e + q and is in the range [alpha, gamma]
+    const diyfp w       = diyfp::mul(v,       c_minus_k);
+    const diyfp w_minus = diyfp::mul(m_minus, c_minus_k);
+    const diyfp w_plus  = diyfp::mul(m_plus,  c_minus_k);
+
+    //  ----(---+---)---------------(---+---)---------------(---+---)----
+    //          w-                      w                       w+
+    //          = c*m-                  = c*v                   = c*m+
+    //
+    // diyfp::mul rounds its result and c_minus_k is approximated too. w, w- and
+    // w+ are now off by a small amount.
+    // In fact:
+    //
+    //      w - v * 10^k < 1 ulp
+    //
+    // To account for this inaccuracy, add resp. subtract 1 ulp.
+    //
+    //  --------+---[---------------(---+---)---------------]---+--------
+    //          w-  M-                  w                   M+  w+
+    //
+    // Now any number in [M-, M+] (bounds included) will round to w when input,
+    // regardless of how the input rounding algorithm breaks ties.
+    //
+    // And digit_gen generates the shortest possible such number in [M-, M+].
+    // Note that this does not mean that Grisu2 always generates the shortest
+    // possible number in the interval (m-, m+).
+    const diyfp M_minus(w_minus.f + 1, w_minus.e);
+    const diyfp M_plus (w_plus.f  - 1, w_plus.e );
+
+    decimal_exponent = -cached.k; // = -(-k) = k
+
+    grisu2_digit_gen(buf, len, decimal_exponent, M_minus, w, M_plus);
+}
+
+/*!
+v = buf * 10^decimal_exponent
+len is the length of the buffer (number of decimal digits)
+The buffer must be large enough, i.e. >= max_digits10.
+*/
+template <typename FloatType>
+void grisu2(char* buf, int& len, int& decimal_exponent, FloatType value)
+{
+    static_assert(diyfp::kPrecision >= std::numeric_limits<FloatType>::digits + 3,
+                  "internal error: not enough precision");
+
+    assert(std::isfinite(value));
+    assert(value > 0);
+
+    // If the neighbors (and boundaries) of 'value' are always computed for double-precision
+    // numbers, all floats can be recovered using strtod (and strtof). However, the resulting
+    // decimal representations are not exactly "short".
+    //
+    // The documentation for 'std::to_chars' (https://en.cppreference.com/w/cpp/utility/to_chars)
+    // says "value is converted to a string as if by std::sprintf in the default ("C") locale",
+    // and since sprintf promotes floats to doubles, this is presumably what 'std::to_chars'
+    // does.
+    // On the other hand, the documentation for 'std::to_chars' requires that "parsing the
+    // representation using the corresponding std::from_chars function recovers value exactly". That
+    // indicates that single precision floating-point numbers should be recovered using
+    // 'std::strtof'.
+    //
+    // NB: If the neighbors are computed for single-precision numbers, there is a single float
+    //     (7.0385307e-26f) which can't be recovered using strtod. The resulting double precision
+    //     value is off by 1 ulp.
+#if 0
+    const boundaries w = compute_boundaries(static_cast<double>(value));
+#else
+    const boundaries w = compute_boundaries(value);
+#endif
+
+    grisu2(buf, len, decimal_exponent, w.minus, w.w, w.plus);
+}
+
+/*!
+@brief appends a decimal representation of e to buf
+@return a pointer to the element following the exponent.
+@pre -1000 < e < 1000
+*/
+inline char* append_exponent(char* buf, int e)
+{
+    assert(e > -1000);
+    assert(e <  1000);
+
+    if (e < 0)
+    {
+        e = -e;
+        *buf++ = '-';
+    }
+    else
+    {
+        *buf++ = '+';
+    }
+
+    auto k = static_cast<uint32_t>(e);
+    if (k < 10)
+    {
+        // Always print at least two digits in the exponent.
+        // This is for compatibility with printf("%g").
+        *buf++ = '0';
+        *buf++ = static_cast<char>('0' + k);
+    }
+    else if (k < 100)
+    {
+        *buf++ = static_cast<char>('0' + k / 10);
+        k %= 10;
+        *buf++ = static_cast<char>('0' + k);
+    }
+    else
+    {
+        *buf++ = static_cast<char>('0' + k / 100);
+        k %= 100;
+        *buf++ = static_cast<char>('0' + k / 10);
+        k %= 10;
+        *buf++ = static_cast<char>('0' + k);
+    }
+
+    return buf;
+}
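+
+// Example (for exposition): append_exponent always writes a sign and at least
+// two digits, so
+//
+//      char buf[8];
+//      char* end = append_exponent(buf, 5);    // buf now holds "+05",  end == buf + 3
+//      end       = append_exponent(buf, -123); // buf now holds "-123", end == buf + 4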
+
+/*!
+@brief prettify v = buf * 10^decimal_exponent
+
+If v is in the range [10^min_exp, 10^max_exp) it will be printed in fixed-point
+notation. Otherwise it will be printed in exponential notation.
+
+@pre min_exp < 0
+@pre max_exp > 0
+*/
+inline char* format_buffer(char* buf, int len, int decimal_exponent,
+                           int min_exp, int max_exp)
+{
+    assert(min_exp < 0);
+    assert(max_exp > 0);
+
+    const int k = len;
+    const int n = len + decimal_exponent;
+
+    // v = buf * 10^(n-k)
+    // k is the length of the buffer (number of decimal digits)
+    // n is the position of the decimal point relative to the start of the buffer.
+
+    if (k <= n and n <= max_exp)
+    {
+        // digits[000]
+        // len <= max_exp + 2
+
+        std::memset(buf + k, '0', static_cast<size_t>(n - k));
+        // Make it look like a floating-point number (#362, #378)
+        buf[n + 0] = '.';
+        buf[n + 1] = '0';
+        return buf + (n + 2);
+    }
+
+    if (0 < n and n <= max_exp)
+    {
+        // dig.its
+        // len <= max_digits10 + 1
+
+        assert(k > n);
+
+        std::memmove(buf + (n + 1), buf + n, static_cast<size_t>(k - n));
+        buf[n] = '.';
+        return buf + (k + 1);
+    }
+
+    if (min_exp < n and n <= 0)
+    {
+        // 0.[000]digits
+        // len <= 2 + (-min_exp - 1) + max_digits10
+
+        std::memmove(buf + (2 + -n), buf, static_cast<size_t>(k));
+        buf[0] = '0';
+        buf[1] = '.';
+        std::memset(buf + 2, '0', static_cast<size_t>(-n));
+        return buf + (2 + (-n) + k);
+    }
+
+    if (k == 1)
+    {
+        // dE+123
+        // len <= 1 + 5
+
+        buf += 1;
+    }
+    else
+    {
+        // d.igitsE+123
+        // len <= max_digits10 + 1 + 5
+
+        std::memmove(buf + 2, buf + 1, static_cast<size_t>(k - 1));
+        buf[1] = '.';
+        buf += 1 + k;
+    }
+
+    *buf++ = 'e';
+    return append_exponent(buf, n - 1);
+}
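+
+// Worked examples (for exposition), using min_exp = -4 and max_exp = 15 (the
+// values to_chars below passes for double precision):
+//
+//      buf = "123", decimal_exponent =  2  ->  n =  5  ->  "12300.0"
+//      buf = "123", decimal_exponent = -1  ->  n =  2  ->  "12.3"
+//      buf = "123", decimal_exponent = -5  ->  n = -2  ->  "0.00123"
+//      buf = "123", decimal_exponent = 20  ->  n = 23  ->  "1.23e+22"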
+
+} // namespace dtoa_impl
+
+/*!
+@brief generates a decimal representation of the floating-point number value in [first, last).
+
+The format of the resulting decimal representation is similar to printf's %g
+format. Returns an iterator pointing past-the-end of the decimal representation.
+
+@note The input number must be finite, i.e. NaN's and Inf's are not supported.
+@note The buffer must be large enough.
+@note The result is NOT null-terminated.
+*/
+template <typename FloatType>
+char* to_chars(char* first, const char* last, FloatType value)
+{
+    static_cast<void>(last); // maybe unused - fix warning
+    assert(std::isfinite(value));
+
+    // Use signbit(value) instead of (value < 0) since signbit works for -0.
+    if (std::signbit(value))
+    {
+        value = -value;
+        *first++ = '-';
+    }
+
+    if (value == 0) // +-0
+    {
+        *first++ = '0';
+        // Make it look like a floating-point number (#362, #378)
+        *first++ = '.';
+        *first++ = '0';
+        return first;
+    }
+
+    assert(last - first >= std::numeric_limits<FloatType>::max_digits10);
+
+    // Compute v = buffer * 10^decimal_exponent.
+    // The decimal digits are stored in the buffer, which needs to be interpreted
+    // as an unsigned decimal integer.
+    // len is the length of the buffer, i.e. the number of decimal digits.
+    int len = 0;
+    int decimal_exponent = 0;
+    dtoa_impl::grisu2(first, len, decimal_exponent, value);
+
+    assert(len <= std::numeric_limits<FloatType>::max_digits10);
+
+    // Format the buffer like printf("%.*g", prec, value)
+    constexpr int kMinExp = -4;
+    // Use digits10 here to increase compatibility with version 2.
+    constexpr int kMaxExp = std::numeric_limits<FloatType>::digits10;
+
+    assert(last - first >= kMaxExp + 2);
+    assert(last - first >= 2 + (-kMinExp - 1) + std::numeric_limits<FloatType>::max_digits10);
+    assert(last - first >= std::numeric_limits<FloatType>::max_digits10 + 6);
+
+    return dtoa_impl::format_buffer(first, len, decimal_exponent, kMinExp, kMaxExp);
+}
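+
+// Example usage (for exposition; the buffer size is chosen generously):
+//
+//      std::array<char, 64> buf{};
+//      char* end = to_chars(buf.data(), buf.data() + buf.size(), 3.14);
+//      std::string s(buf.data(), end);   // s == "3.14"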
+
+} // namespace detail
+} // namespace nlohmann
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/meta/cpp_future.hpp>
+
+// #include <nlohmann/detail/output/binary_writer.hpp>
+
+// #include <nlohmann/detail/output/output_adapters.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+///////////////////
+// serialization //
+///////////////////
+
+/// how to treat decoding errors
+enum class error_handler_t
+{
+    strict,  ///< throw a type_error exception in case of invalid UTF-8
+    replace, ///< replace invalid UTF-8 sequences with U+FFFD
+    ignore   ///< ignore invalid UTF-8 sequences
+};
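+
+// Example (for exposition): when dump_escaped() below processes the incomplete
+// UTF-8 string "ab\xC3", error_handler_t::strict throws type_error.316,
+// error_handler_t::ignore writes "ab", and error_handler_t::replace writes "ab"
+// followed by U+FFFD ("\ufffd" if ensure_ascii is set).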
+
+template<typename BasicJsonType>
+class serializer
+{
+    using string_t = typename BasicJsonType::string_t;
+    using number_float_t = typename BasicJsonType::number_float_t;
+    using number_integer_t = typename BasicJsonType::number_integer_t;
+    using number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+    static constexpr uint8_t UTF8_ACCEPT = 0;
+    static constexpr uint8_t UTF8_REJECT = 1;
+
+  public:
+    /*!
+    @param[in] s  output stream to serialize to
+    @param[in] ichar  indentation character to use
+    @param[in] error_handler_  how to react to decoding errors
+    */
+    serializer(output_adapter_t<char> s, const char ichar,
+               error_handler_t error_handler_ = error_handler_t::strict)
+        : o(std::move(s))
+        , loc(std::localeconv())
+        , thousands_sep(loc->thousands_sep == nullptr ? '\0' : * (loc->thousands_sep))
+        , decimal_point(loc->decimal_point == nullptr ? '\0' : * (loc->decimal_point))
+        , indent_char(ichar)
+        , indent_string(512, indent_char)
+        , error_handler(error_handler_)
+    {}
+
+    // delete because of pointer members
+    serializer(const serializer&) = delete;
+    serializer& operator=(const serializer&) = delete;
+    serializer(serializer&&) = delete;
+    serializer& operator=(serializer&&) = delete;
+    ~serializer() = default;
+
+    /*!
+    @brief internal implementation of the serialization function
+
+    This function is called by the public member function dump and organizes
+    the serialization internally. The indentation level is propagated as
+    additional parameter. In case of arrays and objects, the function is
+    called recursively.
+
+    - strings and object keys are escaped using `escape_string()`
+    - integer numbers are converted implicitly via `operator<<`
+    - floating-point numbers are converted to a string formatted like `"%g"`
+      (via Grisu2 for IEEE single and double precision, via `snprintf` otherwise)
+
+    @param[in] val             value to serialize
+    @param[in] pretty_print    whether the output shall be pretty-printed
+    @param[in] ensure_ascii    whether non-ASCII characters shall be escaped
+                               with \uXXXX sequences
+    @param[in] indent_step     the indentation step width
+    @param[in] current_indent  the current indent level (only used internally)
+    */
+    void dump(const BasicJsonType& val, const bool pretty_print,
+              const bool ensure_ascii,
+              const unsigned int indent_step,
+              const unsigned int current_indent = 0)
+    {
+        switch (val.m_type)
+        {
+            case value_t::object:
+            {
+                if (val.m_value.object->empty())
+                {
+                    o->write_characters("{}", 2);
+                    return;
+                }
+
+                if (pretty_print)
+                {
+                    o->write_characters("{\n", 2);
+
+                    // variable to hold indentation for recursive calls
+                    const auto new_indent = current_indent + indent_step;
+                    if (JSON_UNLIKELY(indent_string.size() < new_indent))
+                    {
+                        // double the size, but make sure very large indent steps are covered too
+                        indent_string.resize(std::max<std::size_t>(indent_string.size() * 2, new_indent), ' ');
+                    }
+
+                    // first n-1 elements
+                    auto i = val.m_value.object->cbegin();
+                    for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i)
+                    {
+                        o->write_characters(indent_string.c_str(), new_indent);
+                        o->write_character('\"');
+                        dump_escaped(i->first, ensure_ascii);
+                        o->write_characters("\": ", 3);
+                        dump(i->second, true, ensure_ascii, indent_step, new_indent);
+                        o->write_characters(",\n", 2);
+                    }
+
+                    // last element
+                    assert(i != val.m_value.object->cend());
+                    assert(std::next(i) == val.m_value.object->cend());
+                    o->write_characters(indent_string.c_str(), new_indent);
+                    o->write_character('\"');
+                    dump_escaped(i->first, ensure_ascii);
+                    o->write_characters("\": ", 3);
+                    dump(i->second, true, ensure_ascii, indent_step, new_indent);
+
+                    o->write_character('\n');
+                    o->write_characters(indent_string.c_str(), current_indent);
+                    o->write_character('}');
+                }
+                else
+                {
+                    o->write_character('{');
+
+                    // first n-1 elements
+                    auto i = val.m_value.object->cbegin();
+                    for (std::size_t cnt = 0; cnt < val.m_value.object->size() - 1; ++cnt, ++i)
+                    {
+                        o->write_character('\"');
+                        dump_escaped(i->first, ensure_ascii);
+                        o->write_characters("\":", 2);
+                        dump(i->second, false, ensure_ascii, indent_step, current_indent);
+                        o->write_character(',');
+                    }
+
+                    // last element
+                    assert(i != val.m_value.object->cend());
+                    assert(std::next(i) == val.m_value.object->cend());
+                    o->write_character('\"');
+                    dump_escaped(i->first, ensure_ascii);
+                    o->write_characters("\":", 2);
+                    dump(i->second, false, ensure_ascii, indent_step, current_indent);
+
+                    o->write_character('}');
+                }
+
+                return;
+            }
+
+            case value_t::array:
+            {
+                if (val.m_value.array->empty())
+                {
+                    o->write_characters("[]", 2);
+                    return;
+                }
+
+                if (pretty_print)
+                {
+                    o->write_characters("[\n", 2);
+
+                    // variable to hold indentation for recursive calls
+                    const auto new_indent = current_indent + indent_step;
+                    if (JSON_UNLIKELY(indent_string.size() < new_indent))
+                    {
+                        // double the size, but make sure very large indent steps are covered too
+                        indent_string.resize(std::max<std::size_t>(indent_string.size() * 2, new_indent), ' ');
+                    }
+
+                    // first n-1 elements
+                    for (auto i = val.m_value.array->cbegin();
+                            i != val.m_value.array->cend() - 1; ++i)
+                    {
+                        o->write_characters(indent_string.c_str(), new_indent);
+                        dump(*i, true, ensure_ascii, indent_step, new_indent);
+                        o->write_characters(",\n", 2);
+                    }
+
+                    // last element
+                    assert(not val.m_value.array->empty());
+                    o->write_characters(indent_string.c_str(), new_indent);
+                    dump(val.m_value.array->back(), true, ensure_ascii, indent_step, new_indent);
+
+                    o->write_character('\n');
+                    o->write_characters(indent_string.c_str(), current_indent);
+                    o->write_character(']');
+                }
+                else
+                {
+                    o->write_character('[');
+
+                    // first n-1 elements
+                    for (auto i = val.m_value.array->cbegin();
+                            i != val.m_value.array->cend() - 1; ++i)
+                    {
+                        dump(*i, false, ensure_ascii, indent_step, current_indent);
+                        o->write_character(',');
+                    }
+
+                    // last element
+                    assert(not val.m_value.array->empty());
+                    dump(val.m_value.array->back(), false, ensure_ascii, indent_step, current_indent);
+
+                    o->write_character(']');
+                }
+
+                return;
+            }
+
+            case value_t::string:
+            {
+                o->write_character('\"');
+                dump_escaped(*val.m_value.string, ensure_ascii);
+                o->write_character('\"');
+                return;
+            }
+
+            case value_t::boolean:
+            {
+                if (val.m_value.boolean)
+                {
+                    o->write_characters("true", 4);
+                }
+                else
+                {
+                    o->write_characters("false", 5);
+                }
+                return;
+            }
+
+            case value_t::number_integer:
+            {
+                dump_integer(val.m_value.number_integer);
+                return;
+            }
+
+            case value_t::number_unsigned:
+            {
+                dump_integer(val.m_value.number_unsigned);
+                return;
+            }
+
+            case value_t::number_float:
+            {
+                dump_float(val.m_value.number_float);
+                return;
+            }
+
+            case value_t::discarded:
+            {
+                o->write_characters("<discarded>", 11);
+                return;
+            }
+
+            case value_t::null:
+            {
+                o->write_characters("null", 4);
+                return;
+            }
+        }
+    }
+
+  private:
+    /*!
+    @brief dump escaped string
+
+    Escape a string by replacing certain special characters with a sequence of
+    an escape character (backslash) and another character, and by replacing
+    other control characters with a sequence of "\u" followed by a four-digit
+    hex representation. The escaped string is written to output stream @a o.
+
+    @param[in] s  the string to escape
+    @param[in] ensure_ascii  whether to escape non-ASCII characters with
+                             \uXXXX sequences
+
+    @complexity Linear in the length of string @a s.
+    */
+    void dump_escaped(const string_t& s, const bool ensure_ascii)
+    {
+        uint32_t codepoint;
+        uint8_t state = UTF8_ACCEPT;
+        std::size_t bytes = 0;  // number of bytes written to string_buffer
+
+        // number of bytes written at the point of the last valid byte
+        std::size_t bytes_after_last_accept = 0;
+        std::size_t undumped_chars = 0;
+
+        for (std::size_t i = 0; i < s.size(); ++i)
+        {
+            const auto byte = static_cast<uint8_t>(s[i]);
+
+            switch (decode(state, codepoint, byte))
+            {
+                case UTF8_ACCEPT:  // decode found a new code point
+                {
+                    switch (codepoint)
+                    {
+                        case 0x08: // backspace
+                        {
+                            string_buffer[bytes++] = '\\';
+                            string_buffer[bytes++] = 'b';
+                            break;
+                        }
+
+                        case 0x09: // horizontal tab
+                        {
+                            string_buffer[bytes++] = '\\';
+                            string_buffer[bytes++] = 't';
+                            break;
+                        }
+
+                        case 0x0A: // newline
+                        {
+                            string_buffer[bytes++] = '\\';
+                            string_buffer[bytes++] = 'n';
+                            break;
+                        }
+
+                        case 0x0C: // formfeed
+                        {
+                            string_buffer[bytes++] = '\\';
+                            string_buffer[bytes++] = 'f';
+                            break;
+                        }
+
+                        case 0x0D: // carriage return
+                        {
+                            string_buffer[bytes++] = '\\';
+                            string_buffer[bytes++] = 'r';
+                            break;
+                        }
+
+                        case 0x22: // quotation mark
+                        {
+                            string_buffer[bytes++] = '\\';
+                            string_buffer[bytes++] = '\"';
+                            break;
+                        }
+
+                        case 0x5C: // reverse solidus
+                        {
+                            string_buffer[bytes++] = '\\';
+                            string_buffer[bytes++] = '\\';
+                            break;
+                        }
+
+                        default:
+                        {
+                            // escape control characters (0x00..0x1F) or, if
+                            // ensure_ascii parameter is used, non-ASCII characters
+                            if ((codepoint <= 0x1F) or (ensure_ascii and (codepoint >= 0x7F)))
+                            {
+                                if (codepoint <= 0xFFFF)
+                                {
+                                    (std::snprintf)(string_buffer.data() + bytes, 7, "\\u%04x",
+                                                    static_cast<uint16_t>(codepoint));
+                                    bytes += 6;
+                                }
+                                else
+                                {
+                                    (std::snprintf)(string_buffer.data() + bytes, 13, "\\u%04x\\u%04x",
+                                                    static_cast<uint16_t>(0xD7C0 + (codepoint >> 10)),
+                                                    static_cast<uint16_t>(0xDC00 + (codepoint & 0x3FF)));
+                                    bytes += 12;
+                                }
+                            }
+                            else
+                            {
+                                // copy byte to buffer (all previous bytes of this
+                                // code point have already been copied in the
+                                // default case of the outer switch)
+                                string_buffer[bytes++] = s[i];
+                            }
+                            break;
+                        }
+                    }
+
+                    // write buffer and reset index; there must be 13 bytes
+                    // left, as this is the maximal number of bytes to be
+                    // written ("\uxxxx\uxxxx\0") for one code point
+                    if (string_buffer.size() - bytes < 13)
+                    {
+                        o->write_characters(string_buffer.data(), bytes);
+                        bytes = 0;
+                    }
+
+                    // remember the byte position of this accept
+                    bytes_after_last_accept = bytes;
+                    undumped_chars = 0;
+                    break;
+                }
+
+                case UTF8_REJECT:  // decode found invalid UTF-8 byte
+                {
+                    switch (error_handler)
+                    {
+                        case error_handler_t::strict:
+                        {
+                            std::string sn(3, '\0');
+                            (std::snprintf)(&sn[0], sn.size(), "%.2X", byte);
+                            JSON_THROW(type_error::create(316, "invalid UTF-8 byte at index " + std::to_string(i) + ": 0x" + sn));
+                        }
+
+                        case error_handler_t::ignore:
+                        case error_handler_t::replace:
+                        {
+                            // in case we saw this character for the first time, we
+                            // would like to read it again, because the byte
+                            // may be OK by itself, just not as part of the
+                            // previous sequence
+                            if (undumped_chars > 0)
+                            {
+                                --i;
+                            }
+
+                            // reset length buffer to the last accepted index;
+                            // thus removing/ignoring the invalid characters
+                            bytes = bytes_after_last_accept;
+
+                            if (error_handler == error_handler_t::replace)
+                            {
+                                // add a replacement character
+                                if (ensure_ascii)
+                                {
+                                    string_buffer[bytes++] = '\\';
+                                    string_buffer[bytes++] = 'u';
+                                    string_buffer[bytes++] = 'f';
+                                    string_buffer[bytes++] = 'f';
+                                    string_buffer[bytes++] = 'f';
+                                    string_buffer[bytes++] = 'd';
+                                }
+                                else
+                                {
+                                    string_buffer[bytes++] = detail::binary_writer<BasicJsonType, char>::to_char_type('\xEF');
+                                    string_buffer[bytes++] = detail::binary_writer<BasicJsonType, char>::to_char_type('\xBF');
+                                    string_buffer[bytes++] = detail::binary_writer<BasicJsonType, char>::to_char_type('\xBD');
+                                }
+                                bytes_after_last_accept = bytes;
+                            }
+
+                            undumped_chars = 0;
+
+                            // continue processing the string
+                            state = UTF8_ACCEPT;
+                            break;
+                        }
+                    }
+                    break;
+                }
+
+                default:  // decode found yet incomplete multi-byte code point
+                {
+                    if (not ensure_ascii)
+                    {
+                        // code point will not be escaped - copy byte to buffer
+                        string_buffer[bytes++] = s[i];
+                    }
+                    ++undumped_chars;
+                    break;
+                }
+            }
+        }
+
+        // we finished processing the string
+        if (JSON_LIKELY(state == UTF8_ACCEPT))
+        {
+            // write buffer
+            if (bytes > 0)
+            {
+                o->write_characters(string_buffer.data(), bytes);
+            }
+        }
+        else
+        {
+            // we finish reading, but do not accept: string was incomplete
+            switch (error_handler)
+            {
+                case error_handler_t::strict:
+                {
+                    std::string sn(3, '\0');
+                    (std::snprintf)(&sn[0], sn.size(), "%.2X", static_cast<uint8_t>(s.back()));
+                    JSON_THROW(type_error::create(316, "incomplete UTF-8 string; last byte: 0x" + sn));
+                }
+
+                case error_handler_t::ignore:
+                {
+                    // write all accepted bytes
+                    o->write_characters(string_buffer.data(), bytes_after_last_accept);
+                    break;
+                }
+
+                case error_handler_t::replace:
+                {
+                    // write all accepted bytes
+                    o->write_characters(string_buffer.data(), bytes_after_last_accept);
+                    // add a replacement character
+                    if (ensure_ascii)
+                    {
+                        o->write_characters("\\ufffd", 6);
+                    }
+                    else
+                    {
+                        o->write_characters("\xEF\xBF\xBD", 3);
+                    }
+                    break;
+                }
+            }
+        }
+    }
+
+    /*!
+    @brief dump an integer
+
+    Dump a given integer to output stream @a o. Works internally with
+    @a number_buffer.
+
+    @param[in] x  integer number (signed or unsigned) to dump
+    @tparam NumberType either @a number_integer_t or @a number_unsigned_t
+    */
+    template<typename NumberType, detail::enable_if_t<
+                 std::is_same<NumberType, number_unsigned_t>::value or
+                 std::is_same<NumberType, number_integer_t>::value,
+                 int> = 0>
+    void dump_integer(NumberType x)
+    {
+        // special case for "0"
+        if (x == 0)
+        {
+            o->write_character('0');
+            return;
+        }
+
+        const bool is_negative = std::is_same<NumberType, number_integer_t>::value and not (x >= 0);  // see issue #755
+        std::size_t i = 0;
+
+        while (x != 0)
+        {
+            // spare 1 byte for '\0'
+            assert(i < number_buffer.size() - 1);
+
+            const auto digit = std::labs(static_cast<long>(x % 10));
+            number_buffer[i++] = static_cast<char>('0' + digit);
+            x /= 10;
+        }
+
+        if (is_negative)
+        {
+            // make sure there is capacity for the '-'
+            assert(i < number_buffer.size() - 2);
+            number_buffer[i++] = '-';
+        }
+
+        std::reverse(number_buffer.begin(), number_buffer.begin() + i);
+        o->write_characters(number_buffer.data(), i);
+    }
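+
+    // Example (for exposition): dump_integer(-42) collects the digits in reverse
+    // order via x % 10 ('2', then '4'), appends '-', reverses the buffer and
+    // writes "-42"; dump_integer(0) is handled by the special case and writes "0".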
+
+    /*!
+    @brief dump a floating-point number
+
+    Dump a given floating-point number to output stream @a o. Works internally
+    with @a number_buffer.
+
+    @param[in] x  floating-point number to dump
+    */
+    void dump_float(number_float_t x)
+    {
+        // NaN / inf
+        if (not std::isfinite(x))
+        {
+            o->write_characters("null", 4);
+            return;
+        }
+
+        // If number_float_t is an IEEE-754 single or double precision number,
+        // use the Grisu2 algorithm to produce short numbers which are
+        // guaranteed to round-trip, using strtof and strtod, resp.
+        //
+        // NB: The test below works if <long double> == <double>.
+        static constexpr bool is_ieee_single_or_double
+            = (std::numeric_limits<number_float_t>::is_iec559 and std::numeric_limits<number_float_t>::digits == 24 and std::numeric_limits<number_float_t>::max_exponent == 128) or
+              (std::numeric_limits<number_float_t>::is_iec559 and std::numeric_limits<number_float_t>::digits == 53 and std::numeric_limits<number_float_t>::max_exponent == 1024);
+
+        dump_float(x, std::integral_constant<bool, is_ieee_single_or_double>());
+    }
+
+    void dump_float(number_float_t x, std::true_type /*is_ieee_single_or_double*/)
+    {
+        char* begin = number_buffer.data();
+        char* end = ::nlohmann::detail::to_chars(begin, begin + number_buffer.size(), x);
+
+        o->write_characters(begin, static_cast<size_t>(end - begin));
+    }
+
+    void dump_float(number_float_t x, std::false_type /*is_ieee_single_or_double*/)
+    {
+        // get number of digits for a float -> text -> float round-trip
+        static constexpr auto d = std::numeric_limits<number_float_t>::max_digits10;
+
+        // the actual conversion
+        std::ptrdiff_t len = (std::snprintf)(number_buffer.data(), number_buffer.size(), "%.*g", d, x);
+
+        // negative value indicates an error
+        assert(len > 0);
+        // check if buffer was large enough
+        assert(static_cast<std::size_t>(len) < number_buffer.size());
+
+        // erase thousands separator
+        if (thousands_sep != '\0')
+        {
+            const auto end = std::remove(number_buffer.begin(),
+                                         number_buffer.begin() + len, thousands_sep);
+            std::fill(end, number_buffer.end(), '\0');
+            assert((end - number_buffer.begin()) <= len);
+            len = (end - number_buffer.begin());
+        }
+
+        // convert decimal point to '.'
+        if (decimal_point != '\0' and decimal_point != '.')
+        {
+            const auto dec_pos = std::find(number_buffer.begin(), number_buffer.end(), decimal_point);
+            if (dec_pos != number_buffer.end())
+            {
+                *dec_pos = '.';
+            }
+        }
+
+        o->write_characters(number_buffer.data(), static_cast<std::size_t>(len));
+
+        // determine whether we need to append ".0"
+        const bool value_is_int_like =
+            std::none_of(number_buffer.begin(), number_buffer.begin() + len + 1,
+                         [](char c)
+        {
+            return (c == '.' or c == 'e');
+        });
+
+        if (value_is_int_like)
+        {
+            o->write_characters(".0", 2);
+        }
+    }
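+
+    // Example (for exposition): in a locale whose decimal point is ',' (e.g. a
+    // German locale), snprintf may produce "3,14"; the conversion above rewrites
+    // the ',' to '.' so the emitted JSON stays locale-independent.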
+
+    /*!
+    @brief check whether a string is UTF-8 encoded
+
+    The function checks each byte of a string for valid UTF-8 encoding. The
+    result of the check is stored in the @a state parameter. The function must
+    be called initially with state 0 (accept). State 1 means the string must
+    be rejected, because the current byte is not allowed. If the string is
+    completely processed, but the state is non-zero, the string ended
+    prematurely; that is, the last byte indicated more bytes should have
+    followed.
+
+    @param[in,out] state  the state of the decoding
+    @param[in,out] codep  codepoint (valid only if resulting state is UTF8_ACCEPT)
+    @param[in] byte       next byte to decode
+    @return               new state
+
+    @note The function has been edited: a std::array is used.
+
+    @copyright Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de>
+    @sa http://bjoern.hoehrmann.de/utf-8/decoder/dfa/
+    */
+    static uint8_t decode(uint8_t& state, uint32_t& codep, const uint8_t byte) noexcept
+    {
+        static const std::array<uint8_t, 400> utf8d =
+        {
+            {
+                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1F
+                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3F
+                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5F
+                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7F
+                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9F
+                7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // A0..BF
+                8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // C0..DF
+                0xA, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // E0..EF
+                0xB, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // F0..FF
+                0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0
+                1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2
+                1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4
+                1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6
+                1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 // s7..s8
+            }
+        };
+
+        const uint8_t type = utf8d[byte];
+
+        codep = (state != UTF8_ACCEPT)
+                ? (byte & 0x3fu) | (codep << 6)
+                : static_cast<uint32_t>(0xff >> type) & (byte);
+
+        state = utf8d[256u + state * 16u + type];
+        return state;
+    }
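+
+    // Example (for exposition): decoding the two-byte sequence 0xC3 0xA9 (U+00E9):
+    //
+    //      uint8_t  state = UTF8_ACCEPT;
+    //      uint32_t codep = 0;
+    //      decode(state, codep, 0xC3);  // state != UTF8_ACCEPT: sequence incomplete
+    //      decode(state, codep, 0xA9);  // state == UTF8_ACCEPT, codep == 0x00E9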
+
+  private:
+    /// the output of the serializer
+    output_adapter_t<char> o = nullptr;
+
+    /// a (hopefully) large enough character buffer
+    std::array<char, 64> number_buffer{{}};
+
+    /// the locale
+    const std::lconv* loc = nullptr;
+    /// the locale's thousand separator character
+    const char thousands_sep = '\0';
+    /// the locale's decimal point character
+    const char decimal_point = '\0';
+
+    /// string buffer
+    std::array<char, 512> string_buffer{{}};
+
+    /// the indentation character
+    const char indent_char;
+    /// the indentation string
+    string_t indent_string;
+
+    /// how to react to decoding errors
+    const error_handler_t error_handler;
+};
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/json_ref.hpp>
+
+
+#include <initializer_list>
+#include <utility>
+
+// #include <nlohmann/detail/meta/type_traits.hpp>
+
+
+namespace nlohmann
+{
+namespace detail
+{
+template<typename BasicJsonType>
+class json_ref
+{
+  public:
+    using value_type = BasicJsonType;
+
+    json_ref(value_type&& value)
+        : owned_value(std::move(value)), value_ref(&owned_value), is_rvalue(true)
+    {}
+
+    json_ref(const value_type& value)
+        : value_ref(const_cast<value_type*>(&value)), is_rvalue(false)
+    {}
+
+    json_ref(std::initializer_list<json_ref> init)
+        : owned_value(init), value_ref(&owned_value), is_rvalue(true)
+    {}
+
+    template <
+        class... Args,
+        enable_if_t<std::is_constructible<value_type, Args...>::value, int> = 0 >
+    json_ref(Args && ... args)
+        : owned_value(std::forward<Args>(args)...), value_ref(&owned_value),
+          is_rvalue(true) {}
+
+    // class should be movable only
+    json_ref(json_ref&&) = default;
+    json_ref(const json_ref&) = delete;
+    json_ref& operator=(const json_ref&) = delete;
+    json_ref& operator=(json_ref&&) = delete;
+    ~json_ref() = default;
+
+    value_type moved_or_copied() const
+    {
+        if (is_rvalue)
+        {
+            return std::move(*value_ref);
+        }
+        return *value_ref;
+    }
+
+    value_type const& operator*() const
+    {
+        return *static_cast<value_type const*>(value_ref);
+    }
+
+    value_type const* operator->() const
+    {
+        return static_cast<value_type const*>(value_ref);
+    }
+
+  private:
+    mutable value_type owned_value = nullptr;
+    value_type* value_ref = nullptr;
+    const bool is_rvalue;
+};
+}  // namespace detail
+}  // namespace nlohmann
+
+// #include <nlohmann/detail/json_pointer.hpp>
+
+
+#include <cassert> // assert
+#include <numeric> // accumulate
+#include <string> // string
+#include <vector> // vector
+
+// #include <nlohmann/detail/macro_scope.hpp>
+
+// #include <nlohmann/detail/exceptions.hpp>
+
+// #include <nlohmann/detail/value_t.hpp>
+
+
+namespace nlohmann
+{
+template<typename BasicJsonType>
+class json_pointer
+{
+    // allow basic_json to access private members
+    NLOHMANN_BASIC_JSON_TPL_DECLARATION
+    friend class basic_json;
+
+  public:
+    /*!
+    @brief create JSON pointer
+
+    Create a JSON pointer according to the syntax described in
+    [Section 3 of RFC6901](https://tools.ietf.org/html/rfc6901#section-3).
+
+    @param[in] s  string representing the JSON pointer; if omitted, the empty
+                  string is assumed which references the whole JSON value
+
+    @throw parse_error.107 if the given JSON pointer @a s is nonempty and does
+                           not begin with a slash (`/`); see example below
+
+    @throw parse_error.108 if a tilde (`~`) in the given JSON pointer @a s is
+    not followed by `0` (representing `~`) or `1` (representing `/`); see
+    example below
+
+    @liveexample{The example shows the construction of several valid JSON pointers
+    as well as the exceptional behavior.,json_pointer}
+
+    @since version 2.0.0
+    */
+    explicit json_pointer(const std::string& s = "")
+        : reference_tokens(split(s))
+    {}
+
+    /*!
+    @brief return a string representation of the JSON pointer
+
+    @invariant For each JSON pointer `ptr`, it holds:
+    @code {.cpp}
+    ptr == json_pointer(ptr.to_string());
+    @endcode
+
+    @return a string representation of the JSON pointer
+
+    @liveexample{The example shows the result of `to_string`.,
+    json_pointer__to_string}
+
+    @since version 2.0.0
+    */
+    std::string to_string() const
+    {
+        return std::accumulate(reference_tokens.begin(), reference_tokens.end(),
+                               std::string{},
+                               [](const std::string & a, const std::string & b)
+        {
+            return a + "/" + escape(b);
+        });
+    }
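+
+    // Example (for exposition): json_pointer("/foo/0").to_string() == "/foo/0";
+    // a reference token containing '/' or '~' is written back in escaped form,
+    // e.g. json_pointer("/a~1b").to_string() == "/a~1b".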
+
+    /// @copydoc to_string()
+    operator std::string() const
+    {
+        return to_string();
+    }
+
+    /*!
+    @param[in] s  reference token to be converted into an array index
+
+    @return integer representation of @a s
+
+    @throw out_of_range.404 if string @a s could not be converted to an integer
+    */
+    static int array_index(const std::string& s)
+    {
+        std::size_t processed_chars = 0;
+        const int res = std::stoi(s, &processed_chars);
+
+        // check if the string was completely read
+        if (JSON_UNLIKELY(processed_chars != s.size()))
+        {
+            JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + s + "'"));
+        }
+
+        return res;
+    }
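+
+    // Example (for exposition): array_index("2") returns 2, while
+    // array_index("2x") throws out_of_range.404 because std::stoi stops at 'x'.
+    // A completely non-numeric token such as "foo" makes std::stoi throw
+    // std::invalid_argument, which callers translate into parse_error.109.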
+
+  private:
+    /*!
+    @brief remove and return the last reference token
+    @throw out_of_range.405 if JSON pointer has no parent
+    */
+    std::string pop_back()
+    {
+        if (JSON_UNLIKELY(is_root()))
+        {
+            JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent"));
+        }
+
+        auto last = reference_tokens.back();
+        reference_tokens.pop_back();
+        return last;
+    }
+
+    /// return whether pointer points to the root document
+    bool is_root() const noexcept
+    {
+        return reference_tokens.empty();
+    }
+
+    json_pointer top() const
+    {
+        if (JSON_UNLIKELY(is_root()))
+        {
+            JSON_THROW(detail::out_of_range::create(405, "JSON pointer has no parent"));
+        }
+
+        json_pointer result = *this;
+        result.reference_tokens = {reference_tokens[0]};
+        return result;
+    }
+
+    /*!
+    @brief create and return a reference to the pointed to value
+
+    @complexity Linear in the number of reference tokens.
+
+    @throw parse_error.109 if array index is not a number
+    @throw type_error.313 if value cannot be unflattened
+    */
+    BasicJsonType& get_and_create(BasicJsonType& j) const
+    {
+        using size_type = typename BasicJsonType::size_type;
+        auto result = &j;
+
+        // in case no reference tokens exist, return a reference to the JSON value
+        // j which will be overwritten by a primitive value
+        for (const auto& reference_token : reference_tokens)
+        {
+            switch (result->m_type)
+            {
+                case detail::value_t::null:
+                {
+                    if (reference_token == "0")
+                    {
+                        // start a new array if reference token is 0
+                        result = &result->operator[](0);
+                    }
+                    else
+                    {
+                        // start a new object otherwise
+                        result = &result->operator[](reference_token);
+                    }
+                    break;
+                }
+
+                case detail::value_t::object:
+                {
+                    // create an entry in the object
+                    result = &result->operator[](reference_token);
+                    break;
+                }
+
+                case detail::value_t::array:
+                {
+                    // create an entry in the array
+                    JSON_TRY
+                    {
+                        result = &result->operator[](static_cast<size_type>(array_index(reference_token)));
+                    }
+                    JSON_CATCH(std::invalid_argument&)
+                    {
+                        JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number"));
+                    }
+                    break;
+                }
+
+                /*
+                The following code is only reached if there exists a reference
+                token _and_ the current value is primitive. In this case, we have
+                an error situation, because primitive values may only occur as
+                a single value; that is, with an empty list of reference tokens.
+                */
+                default:
+                    JSON_THROW(detail::type_error::create(313, "invalid value to unflatten"));
+            }
+        }
+
+        return *result;
+    }
+
+    /*!
+    @brief return a reference to the pointed to value
+
+    @note This version does not throw if a value is not present, but tries to
+          create nested values instead. For instance, calling this function
+          with pointer `"/this/that"` on a null value is equivalent to calling
+          `operator[]("this").operator[]("that")` on that value, effectively
+          changing the null value to an object.
+
+    @param[in] ptr  a JSON value
+
+    @return reference to the JSON value pointed to by the JSON pointer
+
+    @complexity Linear in the length of the JSON pointer.
+
+    @throw parse_error.106   if an array index begins with '0'
+    @throw parse_error.109   if an array index was not a number
+    @throw out_of_range.404  if the JSON pointer can not be resolved
+    */
+    BasicJsonType& get_unchecked(BasicJsonType* ptr) const
+    {
+        using size_type = typename BasicJsonType::size_type;
+        for (const auto& reference_token : reference_tokens)
+        {
+            // convert null values to arrays or objects before continuing
+            if (ptr->m_type == detail::value_t::null)
+            {
+                // check if reference token is a number
+                const bool nums =
+                    std::all_of(reference_token.begin(), reference_token.end(),
+                                [](const char x)
+                {
+                    return (x >= '0' and x <= '9');
+                });
+
+                // change value to array for numbers or "-" or to object otherwise
+                *ptr = (nums or reference_token == "-")
+                       ? detail::value_t::array
+                       : detail::value_t::object;
+            }
+
+            switch (ptr->m_type)
+            {
+                case detail::value_t::object:
+                {
+                    // use unchecked object access
+                    ptr = &ptr->operator[](reference_token);
+                    break;
+                }
+
+                case detail::value_t::array:
+                {
+                    // error condition (cf. RFC 6901, Sect. 4)
+                    if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0'))
+                    {
+                        JSON_THROW(detail::parse_error::create(106, 0,
+                                                               "array index '" + reference_token +
+                                                               "' must not begin with '0'"));
+                    }
+
+                    if (reference_token == "-")
+                    {
+                        // explicitly treat "-" as index beyond the end
+                        ptr = &ptr->operator[](ptr->m_value.array->size());
+                    }
+                    else
+                    {
+                        // convert array index to number; unchecked access
+                        JSON_TRY
+                        {
+                            ptr = &ptr->operator[](
+                                static_cast<size_type>(array_index(reference_token)));
+                        }
+                        JSON_CATCH(std::invalid_argument&)
+                        {
+                            JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number"));
+                        }
+                    }
+                    break;
+                }
+
+                default:
+                    JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
+            }
+        }
+
+        return *ptr;
+    }
+
+    /*!
+    @throw parse_error.106   if an array index begins with '0'
+    @throw parse_error.109   if an array index was not a number
+    @throw out_of_range.402  if the array index '-' is used
+    @throw out_of_range.404  if the JSON pointer can not be resolved
+    */
+    BasicJsonType& get_checked(BasicJsonType* ptr) const
+    {
+        using size_type = typename BasicJsonType::size_type;
+        for (const auto& reference_token : reference_tokens)
+        {
+            switch (ptr->m_type)
+            {
+                case detail::value_t::object:
+                {
+                    // note: at performs range check
+                    ptr = &ptr->at(reference_token);
+                    break;
+                }
+
+                case detail::value_t::array:
+                {
+                    if (JSON_UNLIKELY(reference_token == "-"))
+                    {
+                        // "-" always fails the range check
+                        JSON_THROW(detail::out_of_range::create(402,
+                                                                "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
+                                                                ") is out of range"));
+                    }
+
+                    // error condition (cf. RFC 6901, Sect. 4)
+                    if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0'))
+                    {
+                        JSON_THROW(detail::parse_error::create(106, 0,
+                                                               "array index '" + reference_token +
+                                                               "' must not begin with '0'"));
+                    }
+
+                    // note: at performs range check
+                    JSON_TRY
+                    {
+                        ptr = &ptr->at(static_cast<size_type>(array_index(reference_token)));
+                    }
+                    JSON_CATCH(std::invalid_argument&)
+                    {
+                        JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number"));
+                    }
+                    break;
+                }
+
+                default:
+                    JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
+            }
+        }
+
+        return *ptr;
+    }
+
+    /*!
+    @brief return a const reference to the pointed to value
+
+    @param[in] ptr  a JSON value
+
+    @return const reference to the JSON value pointed to by the JSON
+    pointer
+
+    @throw parse_error.106   if an array index begins with '0'
+    @throw parse_error.109   if an array index was not a number
+    @throw out_of_range.402  if the array index '-' is used
+    @throw out_of_range.404  if the JSON pointer can not be resolved
+    */
+    const BasicJsonType& get_unchecked(const BasicJsonType* ptr) const
+    {
+        using size_type = typename BasicJsonType::size_type;
+        for (const auto& reference_token : reference_tokens)
+        {
+            switch (ptr->m_type)
+            {
+                case detail::value_t::object:
+                {
+                    // use unchecked object access
+                    ptr = &ptr->operator[](reference_token);
+                    break;
+                }
+
+                case detail::value_t::array:
+                {
+                    if (JSON_UNLIKELY(reference_token == "-"))
+                    {
+                        // "-" cannot be used for const access
+                        JSON_THROW(detail::out_of_range::create(402,
+                                                                "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
+                                                                ") is out of range"));
+                    }
+
+                    // error condition (cf. RFC 6901, Sect. 4)
+                    if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0'))
+                    {
+                        JSON_THROW(detail::parse_error::create(106, 0,
+                                                               "array index '" + reference_token +
+                                                               "' must not begin with '0'"));
+                    }
+
+                    // use unchecked array access
+                    JSON_TRY
+                    {
+                        ptr = &ptr->operator[](
+                            static_cast<size_type>(array_index(reference_token)));
+                    }
+                    JSON_CATCH(std::invalid_argument&)
+                    {
+                        JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number"));
+                    }
+                    break;
+                }
+
+                default:
+                    JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
+            }
+        }
+
+        return *ptr;
+    }
+
+    /*!
+    @throw parse_error.106   if an array index begins with '0'
+    @throw parse_error.109   if an array index was not a number
+    @throw out_of_range.402  if the array index '-' is used
+    @throw out_of_range.404  if the JSON pointer can not be resolved
+    */
+    const BasicJsonType& get_checked(const BasicJsonType* ptr) const
+    {
+        using size_type = typename BasicJsonType::size_type;
+        for (const auto& reference_token : reference_tokens)
+        {
+            switch (ptr->m_type)
+            {
+                case detail::value_t::object:
+                {
+                    // note: at performs range check
+                    ptr = &ptr->at(reference_token);
+                    break;
+                }
+
+                case detail::value_t::array:
+                {
+                    if (JSON_UNLIKELY(reference_token == "-"))
+                    {
+                        // "-" always fails the range check
+                        JSON_THROW(detail::out_of_range::create(402,
+                                                                "array index '-' (" + std::to_string(ptr->m_value.array->size()) +
+                                                                ") is out of range"));
+                    }
+
+                    // error condition (cf. RFC 6901, Sect. 4)
+                    if (JSON_UNLIKELY(reference_token.size() > 1 and reference_token[0] == '0'))
+                    {
+                        JSON_THROW(detail::parse_error::create(106, 0,
+                                                               "array index '" + reference_token +
+                                                               "' must not begin with '0'"));
+                    }
+
+                    // note: at performs range check
+                    JSON_TRY
+                    {
+                        ptr = &ptr->at(static_cast<size_type>(array_index(reference_token)));
+                    }
+                    JSON_CATCH(std::invalid_argument&)
+                    {
+                        JSON_THROW(detail::parse_error::create(109, 0, "array index '" + reference_token + "' is not a number"));
+                    }
+                    break;
+                }
+
+                default:
+                    JSON_THROW(detail::out_of_range::create(404, "unresolved reference token '" + reference_token + "'"));
+            }
+        }
+
+        return *ptr;
+    }
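+
+    // Illustration (editor's note, not part of the upstream sources): the
+    // checked accessors above resolve the pointer step by step with at(), so
+    // a missing key or an index past the end throws, e.g.
+    //
+    //   json j = {{"foo", {1, 2}}};
+    //   j.at(json::json_pointer("/foo/1"));  // == 2
+    //   j.at(json::json_pointer("/foo/9"));  // throws out_of_range
+    //
+    // while the unchecked variants use operator[] access instead.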
+
+    /*!
+    @brief split the string input to reference tokens
+
+    @note This function is only called by the json_pointer constructor.
+          All exceptions below are documented there.
+
+    @throw parse_error.107  if the pointer is not empty or begins with '/'
+    @throw parse_error.108  if character '~' is not followed by '0' or '1'
+    */
+    static std::vector<std::string> split(const std::string& reference_string)
+    {
+        std::vector<std::string> result;
+
+        // special case: empty reference string -> no reference tokens
+        if (reference_string.empty())
+        {
+            return result;
+        }
+
+        // check if nonempty reference string begins with slash
+        if (JSON_UNLIKELY(reference_string[0] != '/'))
+        {
+            JSON_THROW(detail::parse_error::create(107, 1,
+                                                   "JSON pointer must be empty or begin with '/' - was: '" +
+                                                   reference_string + "'"));
+        }
+
+        // extract the reference tokens:
+        // - slash: position of the last read slash (or end of string)
+        // - start: position after the previous slash
+        for (
+            // search for the first slash after the first character
+            std::size_t slash = reference_string.find_first_of('/', 1),
+            // set the beginning of the first reference token
+            start = 1;
+            // we can stop if start == 0 (if slash == std::string::npos)
+            start != 0;
+            // set the beginning of the next reference token
+            // (will eventually be 0 if slash == std::string::npos)
+            start = (slash == std::string::npos) ? 0 : slash + 1,
+            // find next slash
+            slash = reference_string.find_first_of('/', start))
+        {
+            // use the text between the beginning of the reference token
+            // (start) and the last slash (slash).
+            auto reference_token = reference_string.substr(start, slash - start);
+
+            // check reference tokens are properly escaped
+            for (std::size_t pos = reference_token.find_first_of('~');
+                    pos != std::string::npos;
+                    pos = reference_token.find_first_of('~', pos + 1))
+            {
+                assert(reference_token[pos] == '~');
+
+                // ~ must be followed by 0 or 1
+                if (JSON_UNLIKELY(pos == reference_token.size() - 1 or
+                                  (reference_token[pos + 1] != '0' and
+                                   reference_token[pos + 1] != '1')))
+                {
+                    JSON_THROW(detail::parse_error::create(108, 0, "escape character '~' must be followed with '0' or '1'"));
+                }
+            }
+
+            // finally, store the reference token
+            unescape(reference_token);
+            result.push_back(reference_token);
+        }
+
+        return result;
+    }
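+
+    // Illustration (editor's note): split("/a~1b/0") yields the tokens
+    // {"a/b", "0"} -- each token is the text between two slashes, passed
+    // through unescape() so that "~1" becomes '/' and "~0" becomes '~'.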
+
+    /*!
+    @brief replace all occurrences of a substring by another string
+
+    @param[in,out] s  the string to manipulate; changed so that all
+                   occurrences of @a f are replaced with @a t
+    @param[in]     f  the substring to replace with @a t
+    @param[in]     t  the string to replace @a f
+
+    @pre The search string @a f must not be empty. **This precondition is
+    enforced with an assertion.**
+
+    @since version 2.0.0
+    */
+    static void replace_substring(std::string& s, const std::string& f,
+                                  const std::string& t)
+    {
+        assert(not f.empty());
+        for (auto pos = s.find(f);                // find first occurrence of f
+                pos != std::string::npos;         // make sure f was found
+                s.replace(pos, f.size(), t),      // replace with t, and
+                pos = s.find(f, pos + t.size()))  // find next occurrence of f
+        {}
+    }
+
+    /// escape "~" to "~0" and "/" to "~1"
+    static std::string escape(std::string s)
+    {
+        replace_substring(s, "~", "~0");
+        replace_substring(s, "/", "~1");
+        return s;
+    }
+
+    /// unescape "~1" to tilde and "~0" to slash (order is important!)
+    static void unescape(std::string& s)
+    {
+        replace_substring(s, "~1", "/");
+        replace_substring(s, "~0", "~");
+    }
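+
+    // Illustration (editor's note): escape("a/b~c") == "a~1b~0c", and
+    // unescape() reverses it. '~' is rewritten before '/' when escaping (and
+    // in the opposite order when unescaping) so the two replacements cannot
+    // interfere with each other.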
+
+    /*!
+    @param[in] reference_string  the reference string to the current value
+    @param[in] value             the value to consider
+    @param[in,out] result        the result object to insert values to
+
+    @note Empty objects or arrays are flattened to `null`.
+    */
+    static void flatten(const std::string& reference_string,
+                        const BasicJsonType& value,
+                        BasicJsonType& result)
+    {
+        switch (value.m_type)
+        {
+            case detail::value_t::array:
+            {
+                if (value.m_value.array->empty())
+                {
+                    // flatten empty array as null
+                    result[reference_string] = nullptr;
+                }
+                else
+                {
+                    // iterate array and use index as reference string
+                    for (std::size_t i = 0; i < value.m_value.array->size(); ++i)
+                    {
+                        flatten(reference_string + "/" + std::to_string(i),
+                                value.m_value.array->operator[](i), result);
+                    }
+                }
+                break;
+            }
+
+            case detail::value_t::object:
+            {
+                if (value.m_value.object->empty())
+                {
+                    // flatten empty object as null
+                    result[reference_string] = nullptr;
+                }
+                else
+                {
+                    // iterate object and use keys as reference string
+                    for (const auto& element : *value.m_value.object)
+                    {
+                        flatten(reference_string + "/" + escape(element.first), element.second, result);
+                    }
+                }
+                break;
+            }
+
+            default:
+            {
+                // add primitive value with its reference string
+                result[reference_string] = value;
+                break;
+            }
+        }
+    }
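+
+    // Illustration (editor's note): flattening {"a": {"b": 1}, "c": [2, 3]}
+    // produces the single-depth object
+    //   {"/a/b": 1, "/c/0": 2, "/c/1": 3}
+    // i.e. every primitive value is keyed by its JSON pointer.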
+
+    /*!
+    @param[in] value  flattened JSON
+
+    @return unflattened JSON
+
+    @throw parse_error.109 if array index is not a number
+    @throw type_error.314  if value is not an object
+    @throw type_error.315  if object values are not primitive
+    @throw type_error.313  if value cannot be unflattened
+    */
+    static BasicJsonType
+    unflatten(const BasicJsonType& value)
+    {
+        if (JSON_UNLIKELY(not value.is_object()))
+        {
+            JSON_THROW(detail::type_error::create(314, "only objects can be unflattened"));
+        }
+
+        BasicJsonType result;
+
+        // iterate the JSON object values
+        for (const auto& element : *value.m_value.object)
+        {
+            if (JSON_UNLIKELY(not element.second.is_primitive()))
+            {
+                JSON_THROW(detail::type_error::create(315, "values in object must be primitive"));
+            }
+
+            // assign value to reference pointed to by JSON pointer; Note that if
+            // the JSON pointer is "" (i.e., points to the whole value), function
+            // get_and_create returns a reference to result itself. An assignment
+            // will then create a primitive value.
+            json_pointer(element.first).get_and_create(result) = element.second;
+        }
+
+        return result;
+    }
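+
+    // Illustration (editor's note): unflatten() inverts flatten(); given
+    // {"/a/b": 1, "/c/0": 2, "/c/1": 3} it rebuilds
+    // {"a": {"b": 1}, "c": [2, 3]}, creating one nested value per pointer.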
+
+    friend bool operator==(json_pointer const& lhs,
+                           json_pointer const& rhs) noexcept
+    {
+        return (lhs.reference_tokens == rhs.reference_tokens);
+    }
+
+    friend bool operator!=(json_pointer const& lhs,
+                           json_pointer const& rhs) noexcept
+    {
+        return not (lhs == rhs);
+    }
+
+    /// the reference tokens
+    std::vector<std::string> reference_tokens;
+};
+}  // namespace nlohmann
+
+// #include <nlohmann/adl_serializer.hpp>
+
+
+#include <utility>
+
+// #include <nlohmann/detail/conversions/from_json.hpp>
+
+// #include <nlohmann/detail/conversions/to_json.hpp>
+
+
+namespace nlohmann
+{
+
+template<typename, typename>
+struct adl_serializer
+{
+    /*!
+    @brief convert a JSON value to any value type
+
+    This function is usually called by the `get()` function of the
+    @ref basic_json class (either explicit or via conversion operators).
+
+    @param[in] j        JSON value to read from
+    @param[in,out] val  value to write to
+    */
+    template<typename BasicJsonType, typename ValueType>
+    static auto from_json(BasicJsonType&& j, ValueType& val) noexcept(
+        noexcept(::nlohmann::from_json(std::forward<BasicJsonType>(j), val)))
+    -> decltype(::nlohmann::from_json(std::forward<BasicJsonType>(j), val), void())
+    {
+        ::nlohmann::from_json(std::forward<BasicJsonType>(j), val);
+    }
+
+    /*!
+    @brief convert any value type to a JSON value
+
+    This function is usually called by the constructors of the @ref basic_json
+    class.
+
+    @param[in,out] j  JSON value to write to
+    @param[in] val    value to read from
+    */
+    template <typename BasicJsonType, typename ValueType>
+    static auto to_json(BasicJsonType& j, ValueType&& val) noexcept(
+        noexcept(::nlohmann::to_json(j, std::forward<ValueType>(val))))
+    -> decltype(::nlohmann::to_json(j, std::forward<ValueType>(val)), void())
+    {
+        ::nlohmann::to_json(j, std::forward<ValueType>(val));
+    }
+};
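+
+// Illustration (editor's note; the type and namespace below are hypothetical):
+// a user-defined type is handled by this default serializer as soon as
+// to_json/from_json overloads are visible via ADL in the type's namespace:
+//
+//   namespace ns {
+//     struct point { double x; double y; };
+//     void to_json(nlohmann::json& j, const point& p)   { j = {{"x", p.x}, {"y", p.y}}; }
+//     void from_json(const nlohmann::json& j, point& p) { j.at("x").get_to(p.x); j.at("y").get_to(p.y); }
+//   }
+//
+//   nlohmann::json j = ns::point{1.0, 2.0};  // uses to_json
+//   auto p = j.get<ns::point>();             // uses from_json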
+
+}  // namespace nlohmann
+
+
+/*!
+@brief namespace for Niels Lohmann
+@see https://github.com/nlohmann
+@since version 1.0.0
+*/
+namespace nlohmann
+{
+
+/*!
+@brief a class to store JSON values
+
+@tparam ObjectType type for JSON objects (`std::map` by default; will be used
+in @ref object_t)
+@tparam ArrayType type for JSON arrays (`std::vector` by default; will be used
+in @ref array_t)
+@tparam StringType type for JSON strings and object keys (`std::string` by
+default; will be used in @ref string_t)
+@tparam BooleanType type for JSON booleans (`bool` by default; will be used
+in @ref boolean_t)
+@tparam NumberIntegerType type for JSON integer numbers (`int64_t` by
+default; will be used in @ref number_integer_t)
+@tparam NumberUnsignedType type for JSON unsigned integer numbers (`uint64_t`
+by default; will be used in @ref number_unsigned_t)
+@tparam NumberFloatType type for JSON floating-point numbers (`double` by
+default; will be used in @ref number_float_t)
+@tparam AllocatorType type of the allocator to use (`std::allocator` by
+default)
+@tparam JSONSerializer the serializer to resolve internal calls to `to_json()`
+and `from_json()` (@ref adl_serializer by default)
+
+@requirement The class satisfies the following concept requirements:
+- Basic
+ - [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible):
+   JSON values can be default constructed. The result will be a JSON null
+   value.
+ - [MoveConstructible](https://en.cppreference.com/w/cpp/named_req/MoveConstructible):
+   A JSON value can be constructed from an rvalue argument.
+ - [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible):
+   A JSON value can be copy-constructed from an lvalue expression.
+ - [MoveAssignable](https://en.cppreference.com/w/cpp/named_req/MoveAssignable):
+   A JSON value can be assigned from an rvalue argument.
+ - [CopyAssignable](https://en.cppreference.com/w/cpp/named_req/CopyAssignable):
+   A JSON value can be copy-assigned from an lvalue expression.
+ - [Destructible](https://en.cppreference.com/w/cpp/named_req/Destructible):
+   JSON values can be destructed.
+- Layout
+ - [StandardLayoutType](https://en.cppreference.com/w/cpp/named_req/StandardLayoutType):
+   JSON values have
+   [standard layout](https://en.cppreference.com/w/cpp/language/data_members#Standard_layout):
+   All non-static data members are private and standard layout types, and the
+   class has no virtual functions or (virtual) base classes.
+- Library-wide
+ - [EqualityComparable](https://en.cppreference.com/w/cpp/named_req/EqualityComparable):
+   JSON values can be compared with `==`, see @ref
+   operator==(const_reference,const_reference).
+ - [LessThanComparable](https://en.cppreference.com/w/cpp/named_req/LessThanComparable):
+   JSON values can be compared with `<`, see @ref
+   operator<(const_reference,const_reference).
+ - [Swappable](https://en.cppreference.com/w/cpp/named_req/Swappable):
+   Any JSON lvalue or rvalue can be swapped with any lvalue or rvalue of
+   other compatible types, using unqualified function call @ref swap().
+ - [NullablePointer](https://en.cppreference.com/w/cpp/named_req/NullablePointer):
+   JSON values can be compared against `std::nullptr_t` objects which are used
+   to model the `null` value.
+- Container
+ - [Container](https://en.cppreference.com/w/cpp/named_req/Container):
+   JSON values can be used like STL containers and provide iterator access.
+ - [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer):
+   JSON values can be used like STL containers and provide reverse iterator
+   access.
+
+@invariant The member variables @a m_value and @a m_type have the following
+relationship:
+- If `m_type == value_t::object`, then `m_value.object != nullptr`.
+- If `m_type == value_t::array`, then `m_value.array != nullptr`.
+- If `m_type == value_t::string`, then `m_value.string != nullptr`.
+The invariants are checked by member function assert_invariant().
+
+@internal
+@note ObjectType trick from http://stackoverflow.com/a/9860911
+@endinternal
+
+@see [RFC 7159: The JavaScript Object Notation (JSON) Data Interchange
+Format](http://rfc7159.net/rfc7159)
+
+@since version 1.0.0
+
+@nosubgrouping
+*/
+NLOHMANN_BASIC_JSON_TPL_DECLARATION
+class basic_json
+{
+  private:
+    template<detail::value_t> friend struct detail::external_constructor;
+    friend ::nlohmann::json_pointer<basic_json>;
+    friend ::nlohmann::detail::parser<basic_json>;
+    friend ::nlohmann::detail::serializer<basic_json>;
+    template<typename BasicJsonType>
+    friend class ::nlohmann::detail::iter_impl;
+    template<typename BasicJsonType, typename CharType>
+    friend class ::nlohmann::detail::binary_writer;
+    template<typename BasicJsonType, typename SAX>
+    friend class ::nlohmann::detail::binary_reader;
+    template<typename BasicJsonType>
+    friend class ::nlohmann::detail::json_sax_dom_parser;
+    template<typename BasicJsonType>
+    friend class ::nlohmann::detail::json_sax_dom_callback_parser;
+
+    /// workaround type for MSVC
+    using basic_json_t = NLOHMANN_BASIC_JSON_TPL;
+
+    // convenience aliases for types residing in namespace detail;
+    using lexer = ::nlohmann::detail::lexer<basic_json>;
+    using parser = ::nlohmann::detail::parser<basic_json>;
+
+    using primitive_iterator_t = ::nlohmann::detail::primitive_iterator_t;
+    template<typename BasicJsonType>
+    using internal_iterator = ::nlohmann::detail::internal_iterator<BasicJsonType>;
+    template<typename BasicJsonType>
+    using iter_impl = ::nlohmann::detail::iter_impl<BasicJsonType>;
+    template<typename Iterator>
+    using iteration_proxy = ::nlohmann::detail::iteration_proxy<Iterator>;
+    template<typename Base> using json_reverse_iterator = ::nlohmann::detail::json_reverse_iterator<Base>;
+
+    template<typename CharType>
+    using output_adapter_t = ::nlohmann::detail::output_adapter_t<CharType>;
+
+    using binary_reader = ::nlohmann::detail::binary_reader<basic_json>;
+    template<typename CharType> using binary_writer = ::nlohmann::detail::binary_writer<basic_json, CharType>;
+
+    using serializer = ::nlohmann::detail::serializer<basic_json>;
+
+  public:
+    using value_t = detail::value_t;
+    /// JSON Pointer, see @ref nlohmann::json_pointer
+    using json_pointer = ::nlohmann::json_pointer<basic_json>;
+    template<typename T, typename SFINAE>
+    using json_serializer = JSONSerializer<T, SFINAE>;
+    /// how to treat decoding errors
+    using error_handler_t = detail::error_handler_t;
+    /// helper type for initializer lists of basic_json values
+    using initializer_list_t = std::initializer_list<detail::json_ref<basic_json>>;
+
+    using input_format_t = detail::input_format_t;
+    /// SAX interface type, see @ref nlohmann::json_sax
+    using json_sax_t = json_sax<basic_json>;
+
+    ////////////////
+    // exceptions //
+    ////////////////
+
+    /// @name exceptions
+    /// Classes to implement user-defined exceptions.
+    /// @{
+
+    /// @copydoc detail::exception
+    using exception = detail::exception;
+    /// @copydoc detail::parse_error
+    using parse_error = detail::parse_error;
+    /// @copydoc detail::invalid_iterator
+    using invalid_iterator = detail::invalid_iterator;
+    /// @copydoc detail::type_error
+    using type_error = detail::type_error;
+    /// @copydoc detail::out_of_range
+    using out_of_range = detail::out_of_range;
+    /// @copydoc detail::other_error
+    using other_error = detail::other_error;
+
+    /// @}
+
+
+    /////////////////////
+    // container types //
+    /////////////////////
+
+    /// @name container types
+    /// The canonical container types to use @ref basic_json like any other STL
+    /// container.
+    /// @{
+
+    /// the type of elements in a basic_json container
+    using value_type = basic_json;
+
+    /// the type of an element reference
+    using reference = value_type&;
+    /// the type of an element const reference
+    using const_reference = const value_type&;
+
+    /// a type to represent differences between iterators
+    using difference_type = std::ptrdiff_t;
+    /// a type to represent container sizes
+    using size_type = std::size_t;
+
+    /// the allocator type
+    using allocator_type = AllocatorType<basic_json>;
+
+    /// the type of an element pointer
+    using pointer = typename std::allocator_traits<allocator_type>::pointer;
+    /// the type of an element const pointer
+    using const_pointer = typename std::allocator_traits<allocator_type>::const_pointer;
+
+    /// an iterator for a basic_json container
+    using iterator = iter_impl<basic_json>;
+    /// a const iterator for a basic_json container
+    using const_iterator = iter_impl<const basic_json>;
+    /// a reverse iterator for a basic_json container
+    using reverse_iterator = json_reverse_iterator<typename basic_json::iterator>;
+    /// a const reverse iterator for a basic_json container
+    using const_reverse_iterator = json_reverse_iterator<typename basic_json::const_iterator>;
+
+    /// @}
+
+
+    /*!
+    @brief returns the allocator associated with the container
+    */
+    static allocator_type get_allocator()
+    {
+        return allocator_type();
+    }
+
+    /*!
+    @brief returns version information on the library
+
+    This function returns a JSON object with information about the library,
+    including the version number and information on the platform and compiler.
+
+    @return JSON object holding version information
+    key         | description
+    ----------- | ---------------
+    `compiler`  | Information on the used compiler. It is an object with the following keys: `c++` (the used C++ standard), `family` (the compiler family; possible values are `clang`, `icc`, `gcc`, `ilecpp`, `msvc`, `pgcpp`, `sunpro`, and `unknown`), and `version` (the compiler version).
+    `copyright` | The copyright line for the library as string.
+    `name`      | The name of the library as string.
+    `platform`  | The used platform as string. Possible values are `win32`, `linux`, `apple`, `unix`, and `unknown`.
+    `url`       | The URL of the project as string.
+    `version`   | The version of the library. It is an object with the following keys: `major`, `minor`, and `patch` as defined by [Semantic Versioning](http://semver.org), and `string` (the version string).
+
+    @liveexample{The following code shows an example output of the `meta()`
+    function.,meta}
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes to any JSON value.
+
+    @complexity Constant.
+
+    @since 2.1.0
+    */
+    static basic_json meta()
+    {
+        basic_json result;
+
+        result["copyright"] = "(C) 2013-2017 Niels Lohmann";
+        result["name"] = "JSON for Modern C++";
+        result["url"] = "https://github.com/nlohmann/json";
+        result["version"]["string"] =
+            std::to_string(NLOHMANN_JSON_VERSION_MAJOR) + "." +
+            std::to_string(NLOHMANN_JSON_VERSION_MINOR) + "." +
+            std::to_string(NLOHMANN_JSON_VERSION_PATCH);
+        result["version"]["major"] = NLOHMANN_JSON_VERSION_MAJOR;
+        result["version"]["minor"] = NLOHMANN_JSON_VERSION_MINOR;
+        result["version"]["patch"] = NLOHMANN_JSON_VERSION_PATCH;
+
+#ifdef _WIN32
+        result["platform"] = "win32";
+#elif defined __linux__
+        result["platform"] = "linux";
+#elif defined __APPLE__
+        result["platform"] = "apple";
+#elif defined __unix__
+        result["platform"] = "unix";
+#else
+        result["platform"] = "unknown";
+#endif
+
+#if defined(__ICC) || defined(__INTEL_COMPILER)
+        result["compiler"] = {{"family", "icc"}, {"version", __INTEL_COMPILER}};
+#elif defined(__clang__)
+        result["compiler"] = {{"family", "clang"}, {"version", __clang_version__}};
+#elif defined(__GNUC__) || defined(__GNUG__)
+        result["compiler"] = {{"family", "gcc"}, {"version", std::to_string(__GNUC__) + "." + std::to_string(__GNUC_MINOR__) + "." + std::to_string(__GNUC_PATCHLEVEL__)}};
+#elif defined(__HP_cc) || defined(__HP_aCC)
+        result["compiler"] = "hp"
+#elif defined(__IBMCPP__)
+        result["compiler"] = {{"family", "ilecpp"}, {"version", __IBMCPP__}};
+#elif defined(_MSC_VER)
+        result["compiler"] = {{"family", "msvc"}, {"version", _MSC_VER}};
+#elif defined(__PGI)
+        result["compiler"] = {{"family", "pgcpp"}, {"version", __PGI}};
+#elif defined(__SUNPRO_CC)
+        result["compiler"] = {{"family", "sunpro"}, {"version", __SUNPRO_CC}};
+#else
+        result["compiler"] = {{"family", "unknown"}, {"version", "unknown"}};
+#endif
+
+#ifdef __cplusplus
+        result["compiler"]["c++"] = std::to_string(__cplusplus);
+#else
+        result["compiler"]["c++"] = "unknown";
+#endif
+        return result;
+    }
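+
+    // Illustration (editor's note): meta()["version"]["string"] is assembled
+    // from the NLOHMANN_JSON_VERSION_* macros defined earlier in this header
+    // (e.g. "3.5.0" for this bundled copy), alongside the detected platform
+    // and compiler.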
+
+
+    ///////////////////////////
+    // JSON value data types //
+    ///////////////////////////
+
+    /// @name JSON value data types
+    /// The data types to store a JSON value. These types are derived from
+    /// the template arguments passed to class @ref basic_json.
+    /// @{
+
+#if defined(JSON_HAS_CPP_14)
+    // Using a transparent comparator where possible, combined with perfect
+    // forwarding on find() and count() calls, prevents unnecessary string
+    // construction.
+    using object_comparator_t = std::less<>;
+#else
+    using object_comparator_t = std::less<StringType>;
+#endif
+
+    /*!
+    @brief a type for an object
+
+    [RFC 7159](http://rfc7159.net/rfc7159) describes JSON objects as follows:
+    > An object is an unordered collection of zero or more name/value pairs,
+    > where a name is a string and a value is a string, number, boolean, null,
+    > object, or array.
+
+    To store objects in C++, a type is defined by the template parameters
+    described below.
+
+    @tparam ObjectType  the container to store objects (e.g., `std::map` or
+    `std::unordered_map`)
+    @tparam StringType the type of the keys or names (e.g., `std::string`).
+    The comparison function `std::less<StringType>` is used to order elements
+    inside the container.
+    @tparam AllocatorType the allocator to use for objects (e.g.,
+    `std::allocator`)
+
+    #### Default type
+
+    With the default values for @a ObjectType (`std::map`), @a StringType
+    (`std::string`), and @a AllocatorType (`std::allocator`), the default
+    value for @a object_t is:
+
+    @code {.cpp}
+    std::map<
+      std::string, // key_type
+      basic_json, // value_type
+      std::less<std::string>, // key_compare
+      std::allocator<std::pair<const std::string, basic_json>> // allocator_type
+    >
+    @endcode
+
+    #### Behavior
+
+    The choice of @a object_t influences the behavior of the JSON class. With
+    the default type, objects have the following behavior:
+
+    - When all names are unique, objects will be interoperable in the sense
+      that all software implementations receiving that object will agree on
+      the name-value mappings.
+    - When the names within an object are not unique, it is unspecified which
+      one of the values for a given key will be chosen. For instance,
+      `{"key": 2, "key": 1}` could be equal to either `{"key": 1}` or
+      `{"key": 2}`.
+    - Internally, name/value pairs are stored in lexicographical order of the
+      names. Objects will also be serialized (see @ref dump) in this order.
+      For instance, `{"b": 1, "a": 2}` and `{"a": 2, "b": 1}` will be stored
+      and serialized as `{"a": 2, "b": 1}`.
+    - When comparing objects, the order of the name/value pairs is irrelevant.
+      This makes objects interoperable in the sense that they will not be
+      affected by these differences. For instance, `{"b": 1, "a": 2}` and
+      `{"a": 2, "b": 1}` will be treated as equal.
+
+    #### Limits
+
+    [RFC 7159](http://rfc7159.net/rfc7159) specifies:
+    > An implementation may set limits on the maximum depth of nesting.
+
+    In this class, the object's limit of nesting is not explicitly constrained.
+    However, a maximum depth of nesting may be introduced by the compiler or
+    runtime environment. A theoretical limit can be queried by calling the
+    @ref max_size function of a JSON object.
+
+    #### Storage
+
+    Objects are stored as pointers in a @ref basic_json type. That is, for any
+    access to object values, a pointer of type `object_t*` must be
+    dereferenced.
+
+    @sa @ref array_t -- type for an array value
+
+    @since version 1.0.0
+
+    @note The order in which name/value pairs are added to the object is *not*
+    preserved by the library. Therefore, iterating an object may return
+    name/value pairs in a different order than they were originally stored. In
+    fact, keys will be traversed in alphabetical order as `std::map` with
+    `std::less` is used by default. Please note this behavior conforms to [RFC
+    7159](http://rfc7159.net/rfc7159), because any order implements the
+    specified "unordered" nature of JSON objects.
+    */
+    using object_t = ObjectType<StringType,
+          basic_json,
+          object_comparator_t,
+          AllocatorType<std::pair<const StringType,
+          basic_json>>>;
+
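+    // Illustration (editor's note): with the default std::map-based object_t,
+    // insertion order is not preserved and keys are serialized sorted, e.g.
+    //
+    //   json j;
+    //   j["b"] = 1;
+    //   j["a"] = 2;
+    //   // j.dump() == R"({"a":2,"b":1})"
+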
+    /*!
+    @brief a type for an array
+
+    [RFC 7159](http://rfc7159.net/rfc7159) describes JSON arrays as follows:
+    > An array is an ordered sequence of zero or more values.
+
+    To store arrays in C++, a type is defined by the template parameters
+    explained below.
+
+    @tparam ArrayType  container type to store arrays (e.g., `std::vector` or
+    `std::list`)
+    @tparam AllocatorType allocator to use for arrays (e.g., `std::allocator`)
+
+    #### Default type
+
+    With the default values for @a ArrayType (`std::vector`) and @a
+    AllocatorType (`std::allocator`), the default value for @a array_t is:
+
+    @code {.cpp}
+    std::vector<
+      basic_json, // value_type
+      std::allocator<basic_json> // allocator_type
+    >
+    @endcode
+
+    #### Limits
+
+    [RFC 7159](http://rfc7159.net/rfc7159) specifies:
+    > An implementation may set limits on the maximum depth of nesting.
+
+    In this class, the array's limit of nesting is not explicitly constrained.
+    However, a maximum depth of nesting may be introduced by the compiler or
+    runtime environment. A theoretical limit can be queried by calling the
+    @ref max_size function of a JSON array.
+
+    #### Storage
+
+    Arrays are stored as pointers in a @ref basic_json type. That is, for any
+    access to array values, a pointer of type `array_t*` must be dereferenced.
+
+    @sa @ref object_t -- type for an object value
+
+    @since version 1.0.0
+    */
+    using array_t = ArrayType<basic_json, AllocatorType<basic_json>>;
+
+    /*!
+    @brief a type for a string
+
+    [RFC 7159](http://rfc7159.net/rfc7159) describes JSON strings as follows:
+    > A string is a sequence of zero or more Unicode characters.
+
+    To store strings in C++, a type is defined by the template parameter
+    described below. Unicode values are split by the JSON class into
+    byte-sized characters during deserialization.
+
+    @tparam StringType  the container to store strings (e.g., `std::string`).
+    Note this container is used for keys/names in objects, see @ref object_t.
+
+    #### Default type
+
+    With the default values for @a StringType (`std::string`), the default
+    value for @a string_t is:
+
+    @code {.cpp}
+    std::string
+    @endcode
+
+    #### Encoding
+
+    Strings are stored in UTF-8 encoding. Therefore, functions like
+    `std::string::size()` or `std::string::length()` return the number of
+    bytes in the string rather than the number of characters or glyphs.
+
+    #### String comparison
+
+    [RFC 7159](http://rfc7159.net/rfc7159) states:
+    > Software implementations are typically required to test names of object
+    > members for equality. Implementations that transform the textual
+    > representation into sequences of Unicode code units and then perform the
+    > comparison numerically, code unit by code unit, are interoperable in the
+    > sense that implementations will agree in all cases on equality or
+    > inequality of two strings. For example, implementations that compare
+    > strings with escaped characters unconverted may incorrectly find that
+    > `"a\\b"` and `"a\u005Cb"` are not equal.
+
+    This implementation is interoperable as it does compare strings code unit
+    by code unit.
+
+    #### Storage
+
+    String values are stored as pointers in a @ref basic_json type. That is,
+    for any access to string values, a pointer of type `string_t*` must be
+    dereferenced.
+
+    @since version 1.0.0
+    */
+    using string_t = StringType;
+
+    /*!
+    @brief a type for a boolean
+
+    [RFC 7159](http://rfc7159.net/rfc7159) implicitly describes a boolean as a
+    type which differentiates the two literals `true` and `false`.
+
+    To store booleans in C++, a type is defined by the template parameter @a
+    BooleanType which chooses the type to use.
+
+    #### Default type
+
+    With the default values for @a BooleanType (`bool`), the default value for
+    @a boolean_t is:
+
+    @code {.cpp}
+    bool
+    @endcode
+
+    #### Storage
+
+    Boolean values are stored directly inside a @ref basic_json type.
+
+    @since version 1.0.0
+    */
+    using boolean_t = BooleanType;
+
+    /*!
+    @brief a type for a number (integer)
+
+    [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows:
+    > The representation of numbers is similar to that used in most
+    > programming languages. A number is represented in base 10 using decimal
+    > digits. It contains an integer component that may be prefixed with an
+    > optional minus sign, which may be followed by a fraction part and/or an
+    > exponent part. Leading zeros are not allowed. (...) Numeric values that
+    > cannot be represented in the grammar below (such as Infinity and NaN)
+    > are not permitted.
+
+    This description includes both integer and floating-point numbers.
+    However, C++ allows more precise storage if it is known whether the number
+    is a signed integer, an unsigned integer or a floating-point number.
+    Therefore, three different types, @ref number_integer_t, @ref
+    number_unsigned_t and @ref number_float_t are used.
+
+    To store integer numbers in C++, a type is defined by the template
+    parameter @a NumberIntegerType which chooses the type to use.
+
+    #### Default type
+
+    With the default values for @a NumberIntegerType (`int64_t`), the default
+    value for @a number_integer_t is:
+
+    @code {.cpp}
+    int64_t
+    @endcode
+
+    #### Default behavior
+
+    - The restriction about leading zeros is not enforced in C++. Instead,
+      leading zeros in integer literals lead to an interpretation as octal
+      number. Internally, the value will be stored as decimal number. For
+      instance, the C++ integer literal `010` will be serialized to `8`.
+      During deserialization, leading zeros yield an error.
+    - Not-a-number (NaN) values will be serialized to `null`.
+
+    #### Limits
+
+    [RFC 7159](http://rfc7159.net/rfc7159) specifies:
+    > An implementation may set limits on the range and precision of numbers.
+
+    When the default type is used, the maximal integer number that can be
+    stored is `9223372036854775807` (INT64_MAX) and the minimal integer number
+    that can be stored is `-9223372036854775808` (INT64_MIN). Integer numbers
+    that are out of range will yield over/underflow when used in a
+    constructor. During deserialization, too large or small integer numbers
+    will automatically be stored as @ref number_unsigned_t or @ref
+    number_float_t.
+
+    [RFC 7159](http://rfc7159.net/rfc7159) further states:
+    > Note that when such software is used, numbers that are integers and are
+    > in the range \f$[-2^{53}+1, 2^{53}-1]\f$ are interoperable in the sense
+    > that implementations will agree exactly on their numeric values.
+
+    As this range is a subrange of the exactly supported range [INT64_MIN,
+    INT64_MAX], this class's integer type is interoperable.
+
+    #### Storage
+
+    Integer number values are stored directly inside a @ref basic_json type.
+
+    @sa @ref number_float_t -- type for number values (floating-point)
+
+    @sa @ref number_unsigned_t -- type for number values (unsigned integer)
+
+    @since version 1.0.0
+    */
+    using number_integer_t = NumberIntegerType;
+
+    /*!
+    @brief a type for a number (unsigned)
+
+    [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows:
+    > The representation of numbers is similar to that used in most
+    > programming languages. A number is represented in base 10 using decimal
+    > digits. It contains an integer component that may be prefixed with an
+    > optional minus sign, which may be followed by a fraction part and/or an
+    > exponent part. Leading zeros are not allowed. (...) Numeric values that
+    > cannot be represented in the grammar below (such as Infinity and NaN)
+    > are not permitted.
+
+    This description includes both integer and floating-point numbers.
+    However, C++ allows more precise storage if it is known whether the number
+    is a signed integer, an unsigned integer or a floating-point number.
+    Therefore, three different types, @ref number_integer_t, @ref
+    number_unsigned_t and @ref number_float_t are used.
+
+    To store unsigned integer numbers in C++, a type is defined by the
+    template parameter @a NumberUnsignedType which chooses the type to use.
+
+    #### Default type
+
+    With the default values for @a NumberUnsignedType (`uint64_t`), the
+    default value for @a number_unsigned_t is:
+
+    @code {.cpp}
+    uint64_t
+    @endcode
+
+    #### Default behavior
+
+    - The restriction about leading zeros is not enforced in C++. Instead,
+      leading zeros in integer literals lead to an interpretation as octal
+      number. Internally, the value will be stored as decimal number. For
+      instance, the C++ integer literal `010` will be serialized to `8`.
+      During deserialization, leading zeros yield an error.
+    - Not-a-number (NaN) values will be serialized to `null`.
+
+    #### Limits
+
+    [RFC 7159](http://rfc7159.net/rfc7159) specifies:
+    > An implementation may set limits on the range and precision of numbers.
+
+    When the default type is used, the maximal integer number that can be
+    stored is `18446744073709551615` (UINT64_MAX) and the minimal integer
+    number that can be stored is `0`. Integer numbers that are out of range
+    will yield over/underflow when used in a constructor. During
+    deserialization, too large or small integer numbers will automatically
+    be stored as @ref number_integer_t or @ref number_float_t.
+
+    [RFC 7159](http://rfc7159.net/rfc7159) further states:
+    > Note that when such software is used, numbers that are integers and are
+    > in the range \f$[-2^{53}+1, 2^{53}-1]\f$ are interoperable in the sense
+    > that implementations will agree exactly on their numeric values.
+
+    As this range is a subrange (when considered in conjunction with the
+    number_integer_t type) of the exactly supported range [0, UINT64_MAX],
+    this class's integer type is interoperable.
+
+    #### Storage
+
+    Integer number values are stored directly inside a @ref basic_json type.
+
+    @sa @ref number_float_t -- type for number values (floating-point)
+    @sa @ref number_integer_t -- type for number values (integer)
+
+    @since version 2.0.0
+    */
+    using number_unsigned_t = NumberUnsignedType;
+
+    /*!
+    @brief a type for a number (floating-point)
+
+    [RFC 7159](http://rfc7159.net/rfc7159) describes numbers as follows:
+    > The representation of numbers is similar to that used in most
+    > programming languages. A number is represented in base 10 using decimal
+    > digits. It contains an integer component that may be prefixed with an
+    > optional minus sign, which may be followed by a fraction part and/or an
+    > exponent part. Leading zeros are not allowed. (...) Numeric values that
+    > cannot be represented in the grammar below (such as Infinity and NaN)
+    > are not permitted.
+
+    This description includes both integer and floating-point numbers.
+    However, C++ allows more precise storage if it is known whether the number
+    is a signed integer, an unsigned integer or a floating-point number.
+    Therefore, three different types, @ref number_integer_t, @ref
+    number_unsigned_t and @ref number_float_t are used.
+
+    To store floating-point numbers in C++, a type is defined by the template
+    parameter @a NumberFloatType which chooses the type to use.
+
+    #### Default type
+
+    With the default values for @a NumberFloatType (`double`), the default
+    value for @a number_float_t is:
+
+    @code {.cpp}
+    double
+    @endcode
+
+    #### Default behavior
+
+    - The restriction about leading zeros is not enforced in C++. Instead,
+      leading zeros in floating-point literals will be ignored. Internally,
+      the value will be stored as decimal number. For instance, the C++
+      floating-point literal `01.2` will be serialized to `1.2`. During
+      deserialization, leading zeros yield an error.
+    - Not-a-number (NaN) values will be serialized to `null`.
+
+    #### Limits
+
+    [RFC 7159](http://rfc7159.net/rfc7159) states:
+    > This specification allows implementations to set limits on the range and
+    > precision of numbers accepted. Since software that implements IEEE
+    > 754-2008 binary64 (double precision) numbers is generally available and
+    > widely used, good interoperability can be achieved by implementations
+    > that expect no more precision or range than these provide, in the sense
+    > that implementations will approximate JSON numbers within the expected
+    > precision.
+
+    This implementation does exactly follow this approach, as it uses double
+    precision floating-point numbers. Note that values smaller than
+    `-1.79769313486232e+308` and values greater than `1.79769313486232e+308`
+    will be stored as NaN internally and be serialized to `null`.
+
+    #### Storage
+
+    Floating-point number values are stored directly inside a @ref basic_json
+    type.
+
+    @sa @ref number_integer_t -- type for number values (integer)
+
+    @sa @ref number_unsigned_t -- type for number values (unsigned integer)
+
+    @since version 1.0.0
+    */
+    using number_float_t = NumberFloatType;
+
+    /// @}
+
+  private:
+
+    /// helper for exception-safe object creation
+    template<typename T, typename... Args>
+    static T* create(Args&& ... args)
+    {
+        AllocatorType<T> alloc;
+        using AllocatorTraits = std::allocator_traits<AllocatorType<T>>;
+
+        auto deleter = [&](T * object)
+        {
+            AllocatorTraits::deallocate(alloc, object, 1);
+        };
+        std::unique_ptr<T, decltype(deleter)> object(AllocatorTraits::allocate(alloc, 1), deleter);
+        AllocatorTraits::construct(alloc, object.get(), std::forward<Args>(args)...);
+        assert(object != nullptr);
+        return object.release();
+    }
+
+    ////////////////////////
+    // JSON value storage //
+    ////////////////////////
+
+    /*!
+    @brief a JSON value
+
+    The actual storage for a JSON value of the @ref basic_json class. This
+    union combines the different storage types for the JSON value types
+    defined in @ref value_t.
+
+    JSON type | value_t type    | used type
+    --------- | --------------- | ------------------------
+    object    | object          | pointer to @ref object_t
+    array     | array           | pointer to @ref array_t
+    string    | string          | pointer to @ref string_t
+    boolean   | boolean         | @ref boolean_t
+    number    | number_integer  | @ref number_integer_t
+    number    | number_unsigned | @ref number_unsigned_t
+    number    | number_float    | @ref number_float_t
+    null      | null            | *no value is stored*
+
+    @note Variable-length types (objects, arrays, and strings) are stored as
+    pointers. The size of the union should not exceed 64 bits if the default
+    value types are used.
+
+    @since version 1.0.0
+    */
+    union json_value
+    {
+        /// object (stored with pointer to save storage)
+        object_t* object;
+        /// array (stored with pointer to save storage)
+        array_t* array;
+        /// string (stored with pointer to save storage)
+        string_t* string;
+        /// boolean
+        boolean_t boolean;
+        /// number (integer)
+        number_integer_t number_integer;
+        /// number (unsigned integer)
+        number_unsigned_t number_unsigned;
+        /// number (floating-point)
+        number_float_t number_float;
+
+        /// default constructor (for null values)
+        json_value() = default;
+        /// constructor for booleans
+        json_value(boolean_t v) noexcept : boolean(v) {}
+        /// constructor for numbers (integer)
+        json_value(number_integer_t v) noexcept : number_integer(v) {}
+        /// constructor for numbers (unsigned)
+        json_value(number_unsigned_t v) noexcept : number_unsigned(v) {}
+        /// constructor for numbers (floating-point)
+        json_value(number_float_t v) noexcept : number_float(v) {}
+        /// constructor for empty values of a given type
+        json_value(value_t t)
+        {
+            switch (t)
+            {
+                case value_t::object:
+                {
+                    object = create<object_t>();
+                    break;
+                }
+
+                case value_t::array:
+                {
+                    array = create<array_t>();
+                    break;
+                }
+
+                case value_t::string:
+                {
+                    string = create<string_t>("");
+                    break;
+                }
+
+                case value_t::boolean:
+                {
+                    boolean = boolean_t(false);
+                    break;
+                }
+
+                case value_t::number_integer:
+                {
+                    number_integer = number_integer_t(0);
+                    break;
+                }
+
+                case value_t::number_unsigned:
+                {
+                    number_unsigned = number_unsigned_t(0);
+                    break;
+                }
+
+                case value_t::number_float:
+                {
+                    number_float = number_float_t(0.0);
+                    break;
+                }
+
+                case value_t::null:
+                {
+                    object = nullptr;  // silence warning, see #821
+                    break;
+                }
+
+                default:
+                {
+                    object = nullptr;  // silence warning, see #821
+                    if (JSON_UNLIKELY(t == value_t::null))
+                    {
+                        JSON_THROW(other_error::create(500, "961c151d2e87f2686a955a9be24d316f1362bf21 3.5.0")); // LCOV_EXCL_LINE
+                    }
+                    break;
+                }
+            }
+        }
+
+        /// constructor for strings
+        json_value(const string_t& value)
+        {
+            string = create<string_t>(value);
+        }
+
+        /// constructor for rvalue strings
+        json_value(string_t&& value)
+        {
+            string = create<string_t>(std::move(value));
+        }
+
+        /// constructor for objects
+        json_value(const object_t& value)
+        {
+            object = create<object_t>(value);
+        }
+
+        /// constructor for rvalue objects
+        json_value(object_t&& value)
+        {
+            object = create<object_t>(std::move(value));
+        }
+
+        /// constructor for arrays
+        json_value(const array_t& value)
+        {
+            array = create<array_t>(value);
+        }
+
+        /// constructor for rvalue arrays
+        json_value(array_t&& value)
+        {
+            array = create<array_t>(std::move(value));
+        }
+
+        void destroy(value_t t) noexcept
+        {
+            switch (t)
+            {
+                case value_t::object:
+                {
+                    AllocatorType<object_t> alloc;
+                    std::allocator_traits<decltype(alloc)>::destroy(alloc, object);
+                    std::allocator_traits<decltype(alloc)>::deallocate(alloc, object, 1);
+                    break;
+                }
+
+                case value_t::array:
+                {
+                    AllocatorType<array_t> alloc;
+                    std::allocator_traits<decltype(alloc)>::destroy(alloc, array);
+                    std::allocator_traits<decltype(alloc)>::deallocate(alloc, array, 1);
+                    break;
+                }
+
+                case value_t::string:
+                {
+                    AllocatorType<string_t> alloc;
+                    std::allocator_traits<decltype(alloc)>::destroy(alloc, string);
+                    std::allocator_traits<decltype(alloc)>::deallocate(alloc, string, 1);
+                    break;
+                }
+
+                default:
+                {
+                    break;
+                }
+            }
+        }
+    };
+
+    /*!
+    @brief checks the class invariants
+
+    This function asserts the class invariants. It needs to be called at the
+    end of every constructor to make sure that created objects respect the
+    invariant. Furthermore, it has to be called each time the type of a JSON
+    value is changed, because the invariant expresses a relationship between
+    @a m_type and @a m_value.
+    */
+    void assert_invariant() const noexcept
+    {
+        assert(m_type != value_t::object or m_value.object != nullptr);
+        assert(m_type != value_t::array or m_value.array != nullptr);
+        assert(m_type != value_t::string or m_value.string != nullptr);
+    }
+
+  public:
+    //////////////////////////
+    // JSON parser callback //
+    //////////////////////////
+
+    /*!
+    @brief parser event types
+
+    The parser callback distinguishes the following events:
+    - `object_start`: the parser read `{` and started to process a JSON object
+    - `key`: the parser read a key of a value in an object
+    - `object_end`: the parser read `}` and finished processing a JSON object
+    - `array_start`: the parser read `[` and started to process a JSON array
+    - `array_end`: the parser read `]` and finished processing a JSON array
+    - `value`: the parser finished reading a JSON value
+
+    @image html callback_events.png "Example when certain parse events are triggered"
+
+    @sa @ref parser_callback_t for more information and examples
+    */
+    using parse_event_t = typename parser::parse_event_t;
+
+    /*!
+    @brief per-element parser callback type
+
+    With a parser callback function, the result of parsing a JSON text can be
+    influenced. When passed to @ref parse, it is called on certain events
+    (passed as @ref parse_event_t via parameter @a event) with a set recursion
+    depth @a depth and context JSON value @a parsed. The return value of the
+    callback function is a boolean indicating whether the element that emitted
+    the callback shall be kept or not.
+
+    We distinguish six scenarios (determined by the event type) in which the
+    callback function can be called. The following table describes the values
+    of the parameters @a depth, @a event, and @a parsed.
+
+    parameter @a event | description | parameter @a depth | parameter @a parsed
+    ------------------ | ----------- | ------------------ | -------------------
+    parse_event_t::object_start | the parser read `{` and started to process a JSON object | depth of the parent of the JSON object | a JSON value with type discarded
+    parse_event_t::key | the parser read a key of a value in an object | depth of the currently parsed JSON object | a JSON string containing the key
+    parse_event_t::object_end | the parser read `}` and finished processing a JSON object | depth of the parent of the JSON object | the parsed JSON object
+    parse_event_t::array_start | the parser read `[` and started to process a JSON array | depth of the parent of the JSON array | a JSON value with type discarded
+    parse_event_t::array_end | the parser read `]` and finished processing a JSON array | depth of the parent of the JSON array | the parsed JSON array
+    parse_event_t::value | the parser finished reading a JSON value | depth of the value | the parsed JSON value
+
+    @image html callback_events.png "Example when certain parse events are triggered"
+
+    Discarding a value (i.e., returning `false`) has different effects
+    depending on the context in which the function was called:
+
+    - Discarded values in structured types are skipped. That is, the parser
+      will behave as if the discarded value was never read.
+    - In case a value outside a structured type is skipped, it is replaced
+      with `null`. This case happens if the top-level element is skipped.
+
+    @param[in] depth  the depth of the recursion during parsing
+
+    @param[in] event  an event of type parse_event_t indicating the context in
+    which the callback function has been called
+
+    @param[in,out] parsed  the current intermediate parse result; note that
+    writing to this value has no effect for parse_event_t::key events
+
+    @return Whether the JSON value which called the function during parsing
+    should be kept (`true`) or not (`false`). In the latter case, it is either
+    skipped completely or replaced by an empty discarded object.
+
+    @sa @ref parse for examples
+
+    @since version 1.0.0
+    */
+    using parser_callback_t = typename parser::parser_callback_t;
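+
+    // Illustration (editor's note; `text` below is a hypothetical input
+    // string): a callback that discards every member whose key is "secret"
+    // and keeps everything else:
+    //
+    //   json::parser_callback_t cb = [](int /*depth*/, json::parse_event_t event, json& parsed)
+    //   {
+    //       return not (event == json::parse_event_t::key and parsed == json("secret"));
+    //   };
+    //   json filtered = json::parse(text, cb);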
+
+    //////////////////
+    // constructors //
+    //////////////////
+
+    /// @name constructors and destructors
+    /// Constructors of class @ref basic_json, copy/move constructor, copy
+    /// assignment, static functions creating objects, and the destructor.
+    /// @{
+
+    /*!
+    @brief create an empty value with a given type
+
+    Create an empty JSON value with a given type. The value will be default
+    initialized with an empty value which depends on the type:
+
+    Value type  | initial value
+    ----------- | -------------
+    null        | `null`
+    boolean     | `false`
+    string      | `""`
+    number      | `0`
+    object      | `{}`
+    array       | `[]`
+
+    @param[in] v  the type of the value to create
+
+    @complexity Constant.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes to any JSON value.
+
+    @liveexample{The following code shows the constructor for different @ref
+    value_t values,basic_json__value_t}
+
+    @sa @ref clear() -- restores the postcondition of this constructor
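+
+    A short sketch of the resulting values (assuming the usual `json` alias):
+
+    @code {.cpp}
+    json j_null(json::value_t::null);              // null
+    json j_object(json::value_t::object);          // {}
+    json j_array(json::value_t::array);            // []
+    json j_number(json::value_t::number_integer);  // 0
+    @endcode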
+
+    @since version 1.0.0
+    */
+    basic_json(const value_t v)
+        : m_type(v), m_value(v)
+    {
+        assert_invariant();
+    }
+
+    /*!
+    @brief create a null object
+
+    Create a `null` JSON value. It either takes a null pointer as parameter
+    (explicitly creating `null`) or no parameter (implicitly creating `null`).
+    The passed null pointer itself is not read -- it is only used to choose
+    the right constructor.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this constructor never throws
+    exceptions.
+
+    @liveexample{The following code shows the constructor with and without a
+    null pointer parameter.,basic_json__nullptr_t}
+
+    @since version 1.0.0
+    */
+    basic_json(std::nullptr_t = nullptr) noexcept
+        : basic_json(value_t::null)
+    {
+        assert_invariant();
+    }
+
+    /*!
+    @brief create a JSON value
+
+    This is a "catch all" constructor for all compatible JSON types; that is,
+    types for which a `to_json()` method exists. The constructor forwards the
+    parameter @a val to that method (to `json_serializer<U>::to_json` method
+    with `U = uncvref_t<CompatibleType>`, to be exact).
+
+    Template type @a CompatibleType includes, but is not limited to, the
+    following types:
+    - **arrays**: @ref array_t and all kinds of compatible containers such as
+      `std::vector`, `std::deque`, `std::list`, `std::forward_list`,
+      `std::array`, `std::valarray`, `std::set`, `std::unordered_set`,
+      `std::multiset`, and `std::unordered_multiset` with a `value_type` from
+      which a @ref basic_json value can be constructed.
+    - **objects**: @ref object_t and all kinds of compatible associative
+      containers such as `std::map`, `std::unordered_map`, `std::multimap`,
+      and `std::unordered_multimap` with a `key_type` compatible to
+      @ref string_t and a `value_type` from which a @ref basic_json value can
+      be constructed.
+    - **strings**: @ref string_t, string literals, and all compatible string
+      containers can be used.
+    - **numbers**: @ref number_integer_t, @ref number_unsigned_t,
+      @ref number_float_t, and all convertible number types such as `int`,
+      `size_t`, `int64_t`, `float` or `double` can be used.
+    - **boolean**: @ref boolean_t / `bool` can be used.
+
+    See the examples below.
+
+    @tparam CompatibleType a type such that:
+    - @a CompatibleType is not derived from `std::istream`,
+    - @a CompatibleType is not @ref basic_json (to avoid hijacking copy/move
+         constructors),
+    - @a CompatibleType is not a different @ref basic_json type (i.e., one
+         with different template arguments),
+    - @a CompatibleType is not a @ref basic_json nested type (e.g.,
+         @ref json_pointer, @ref iterator, etc.), and
+    - @ref json_serializer<U> has a
+         `to_json(basic_json_t&, CompatibleType&&)` method
+
+    @tparam U = `uncvref_t<CompatibleType>`
+
+    @param[in] val the value to be forwarded to the respective constructor
+
+    @complexity Usually linear in the size of the passed @a val, also
+                depending on the implementation of the called `to_json()`
+                method.
+
+    @exceptionsafety Depends on the called constructor. For types directly
+    supported by the library (i.e., all types for which no `to_json()` function
+    was provided), strong guarantee holds: if an exception is thrown, there are
+    no changes to any JSON value.
+
+    @liveexample{The following code shows the constructor with several
+    compatible types.,basic_json__CompatibleType}
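+
+    A brief sketch of conversions this constructor enables (assuming the usual
+    `json` alias; the required standard headers are omitted):
+
+    @code {.cpp}
+    json j_string = "hello";                                  // string literal -> JSON string
+    json j_array  = std::vector<int> {1, 2, 3};               // container -> JSON array
+    json j_object = std::map<std::string, int> {{"one", 1}};  // map -> JSON object
+    json j_number = 3.141;                                    // double -> JSON number
+    @endcode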
+
+    @since version 2.1.0
+    */
+    template <typename CompatibleType,
+              typename U = detail::uncvref_t<CompatibleType>,
+              detail::enable_if_t<
+                  not detail::is_basic_json<U>::value and detail::is_compatible_type<basic_json_t, U>::value, int> = 0>
+    basic_json(CompatibleType && val) noexcept(noexcept(
+                JSONSerializer<U>::to_json(std::declval<basic_json_t&>(),
+                                           std::forward<CompatibleType>(val))))
+    {
+        JSONSerializer<U>::to_json(*this, std::forward<CompatibleType>(val));
+        assert_invariant();
+    }
+
+    /*!
+    @brief create a JSON value from an existing one
+
+    This is a constructor for existing @ref basic_json types.
+    It does not hijack copy/move constructors, since the parameter has different
+    template arguments than the current ones.
+
+    The constructor tries to convert the internal @ref m_value of the parameter.
+
+    @tparam BasicJsonType a type such that:
+    - @a BasicJsonType is a @ref basic_json type.
+    - @a BasicJsonType has different template arguments than @ref basic_json_t.
+
+    @param[in] val the @ref basic_json value to be converted.
+
+    @complexity Usually linear in the size of the passed @a val, also
+                depending on the implementation of the called `to_json()`
+                method.
+
+    @exceptionsafety Depends on the called constructor. For types directly
+    supported by the library (i.e., all types for which no `to_json()` function
+    was provided), strong guarantee holds: if an exception is thrown, there are
+    no changes to any JSON value.
+
+    @since version 3.2.0
+    */
+    template <typename BasicJsonType,
+              detail::enable_if_t<
+                  detail::is_basic_json<BasicJsonType>::value and not std::is_same<basic_json, BasicJsonType>::value, int> = 0>
+    basic_json(const BasicJsonType& val)
+    {
+        using other_boolean_t = typename BasicJsonType::boolean_t;
+        using other_number_float_t = typename BasicJsonType::number_float_t;
+        using other_number_integer_t = typename BasicJsonType::number_integer_t;
+        using other_number_unsigned_t = typename BasicJsonType::number_unsigned_t;
+        using other_string_t = typename BasicJsonType::string_t;
+        using other_object_t = typename BasicJsonType::object_t;
+        using other_array_t = typename BasicJsonType::array_t;
+
+        switch (val.type())
+        {
+            case value_t::boolean:
+                JSONSerializer<other_boolean_t>::to_json(*this, val.template get<other_boolean_t>());
+                break;
+            case value_t::number_float:
+                JSONSerializer<other_number_float_t>::to_json(*this, val.template get<other_number_float_t>());
+                break;
+            case value_t::number_integer:
+                JSONSerializer<other_number_integer_t>::to_json(*this, val.template get<other_number_integer_t>());
+                break;
+            case value_t::number_unsigned:
+                JSONSerializer<other_number_unsigned_t>::to_json(*this, val.template get<other_number_unsigned_t>());
+                break;
+            case value_t::string:
+                JSONSerializer<other_string_t>::to_json(*this, val.template get_ref<const other_string_t&>());
+                break;
+            case value_t::object:
+                JSONSerializer<other_object_t>::to_json(*this, val.template get_ref<const other_object_t&>());
+                break;
+            case value_t::array:
+                JSONSerializer<other_array_t>::to_json(*this, val.template get_ref<const other_array_t&>());
+                break;
+            case value_t::null:
+                *this = nullptr;
+                break;
+            case value_t::discarded:
+                m_type = value_t::discarded;
+                break;
+        }
+        assert_invariant();
+    }
+
+    /*!
+    @brief create a container (array or object) from an initializer list
+
+    Creates a JSON value of type array or object from the passed initializer
+    list @a init. In case @a type_deduction is `true` (default), the type of
+    the JSON value to be created is deduced from the initializer list @a init
+    according to the following rules:
+
+    1. If the list is empty, an empty JSON object value `{}` is created.
+    2. If the list consists of pairs whose first element is a string, a JSON
+       object value is created where the first elements of the pairs are
+       treated as keys and the second elements as values.
+    3. In all other cases, an array is created.
+
+    The rules aim to create the best fit between a C++ initializer list and
+    JSON values. The rationale is as follows:
+
+    1. The empty initializer list is written as `{}` which is exactly an empty
+       JSON object.
+    2. C++ has no way of describing mapped types other than as a list of
+       pairs. As JSON requires keys to be strings, rule 2 is the
+       weakest constraint one can pose on initializer lists to interpret them
+       as an object.
+    3. In all other cases, the initializer list could not be interpreted as
+       JSON object type, so interpreting it as JSON array type is safe.
+
+    With the rules described above, the following JSON values cannot be
+    expressed by an initializer list:
+
+    - the empty array (`[]`): use @ref array(initializer_list_t)
+      with an empty initializer list in this case
+    - arrays whose elements satisfy rule 2: use @ref
+      array(initializer_list_t) with the same initializer list
+      in this case
+
+    @note When used without parentheses around an empty initializer list, @ref
+    basic_json() is called instead of this function, yielding the JSON null
+    value.
+
+    @param[in] init  initializer list with JSON values
+
+    @param[in] type_deduction internal parameter; when set to `true`, the type
+    of the JSON value is deduced from the initializer list @a init; when set
+    to `false`, the type provided via @a manual_type is forced. This mode is
+    used by the functions @ref array(initializer_list_t) and
+    @ref object(initializer_list_t).
+
+    @param[in] manual_type internal parameter; when @a type_deduction is set
+    to `false`, the created JSON value will use the provided type (only @ref
+    value_t::array and @ref value_t::object are valid); when @a type_deduction
+    is set to `true`, this parameter has no effect
+
+    @throw type_error.301 if @a type_deduction is `false`, @a manual_type is
+    `value_t::object`, but @a init contains an element which is not a pair
+    whose first element is a string. In this case, the constructor could not
+    create an object. If @a type_deduction had been `true`, an array
+    would have been created instead. See @ref object(initializer_list_t)
+    for an example.
+
+    @complexity Linear in the size of the initializer list @a init.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes to any JSON value.
+
+    @liveexample{The example below shows how JSON values are created from
+    initializer lists.,basic_json__list_init_t}
+
+    @sa @ref array(initializer_list_t) -- create a JSON array
+    value from an initializer list
+    @sa @ref object(initializer_list_t) -- create a JSON object
+    value from an initializer list
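+
+    A short sketch of the deduction rules (assuming the usual `json` alias):
+
+    @code {.cpp}
+    json j_object = {{"one", 1}, {"two", 2}};  // rule 2: {"one": 1, "two": 2}
+    json j_array  = {1, 2, "three", true};     // rule 3: [1, 2, "three", true]
+    json j_null   = {};                        // note: calls basic_json(), yields null
+    @endcode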
+
+    @since version 1.0.0
+    */
+    basic_json(initializer_list_t init,
+               bool type_deduction = true,
+               value_t manual_type = value_t::array)
+    {
+        // check if each element is an array with two elements whose first
+        // element is a string
+        bool is_an_object = std::all_of(init.begin(), init.end(),
+                                        [](const detail::json_ref<basic_json>& element_ref)
+        {
+            return (element_ref->is_array() and element_ref->size() == 2 and (*element_ref)[0].is_string());
+        });
+
+        // adjust type if type deduction is not wanted
+        if (not type_deduction)
+        {
+            // if an array is wanted, do not create an object even though it would be possible
+            if (manual_type == value_t::array)
+            {
+                is_an_object = false;
+            }
+
+            // if object is wanted but impossible, throw an exception
+            if (JSON_UNLIKELY(manual_type == value_t::object and not is_an_object))
+            {
+                JSON_THROW(type_error::create(301, "cannot create object from initializer list"));
+            }
+        }
+
+        if (is_an_object)
+        {
+            // the initializer list is a list of pairs -> create object
+            m_type = value_t::object;
+            m_value = value_t::object;
+
+            std::for_each(init.begin(), init.end(), [this](const detail::json_ref<basic_json>& element_ref)
+            {
+                auto element = element_ref.moved_or_copied();
+                m_value.object->emplace(
+                    std::move(*((*element.m_value.array)[0].m_value.string)),
+                    std::move((*element.m_value.array)[1]));
+            });
+        }
+        else
+        {
+            // the initializer list describes an array -> create array
+            m_type = value_t::array;
+            m_value.array = create<array_t>(init.begin(), init.end());
+        }
+
+        assert_invariant();
+    }
+
+    /*!
+    @brief explicitly create an array from an initializer list
+
+    Creates a JSON array value from a given initializer list. That is, given a
+    list of values `a, b, c`, creates the JSON value `[a, b, c]`. If the
+    initializer list is empty, the empty array `[]` is created.
+
+    @note This function is only needed to express two edge cases that cannot
+    be realized with the initializer list constructor (@ref
+    basic_json(initializer_list_t, bool, value_t)). These cases
+    are:
+    1. creating an array whose elements are all pairs whose first element is a
+    string -- in this case, the initializer list constructor would create an
+    object, taking the first elements as keys
+    2. creating an empty array -- passing the empty initializer list to the
+    initializer list constructor yields an empty object
+
+    @param[in] init  initializer list with JSON values to create an array from
+    (optional)
+
+    @return JSON array value
+
+    @complexity Linear in the size of @a init.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes to any JSON value.
+
+    @liveexample{The following code shows an example for the `array`
+    function.,array}
+
+    @sa @ref basic_json(initializer_list_t, bool, value_t) --
+    create a JSON value from an initializer list
+    @sa @ref object(initializer_list_t) -- create a JSON object
+    value from an initializer list
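+
+    A short sketch covering both edge cases (assuming the usual `json` alias):
+
+    @code {.cpp}
+    json j_empty = json::array();                          // []
+    json j_pairs = json::array({{"one", 1}, {"two", 2}});  // [["one", 1], ["two", 2]]
+    @endcode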
+
+    @since version 1.0.0
+    */
+    static basic_json array(initializer_list_t init = {})
+    {
+        return basic_json(init, false, value_t::array);
+    }
+
+    /*!
+    @brief explicitly create an object from an initializer list
+
+    Creates a JSON object value from a given initializer list. The initializer
+    list's elements must be pairs, and their first elements must be strings. If
+    the initializer list is empty, the empty object `{}` is created.
+
+    @note This function is only added for symmetry reasons. In contrast to the
+    related function @ref array(initializer_list_t), there are
+    no cases which can only be expressed by this function. That is, any
+    initializer list @a init can also be passed to the initializer list
+    constructor @ref basic_json(initializer_list_t, bool, value_t).
+
+    @param[in] init  initializer list to create an object from (optional)
+
+    @return JSON object value
+
+    @throw type_error.301 if @a init is not a list of pairs whose first
+    elements are strings. In this case, no object can be created. Had such a
+    list been passed to @ref basic_json(initializer_list_t, bool, value_t),
+    an array would have been created from @a init instead.
+    See example below.
+
+    @complexity Linear in the size of @a init.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes to any JSON value.
+
+    @liveexample{The following code shows an example for the `object`
+    function.,object}
+
+    @sa @ref basic_json(initializer_list_t, bool, value_t) --
+    create a JSON value from an initializer list
+    @sa @ref array(initializer_list_t) -- create a JSON array
+    value from an initializer list
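+
+    A short sketch (assuming the usual `json` alias):
+
+    @code {.cpp}
+    json j_empty = json::object();                  // {}
+    json j_pair  = json::object({{"answer", 42}});  // {"answer": 42}
+    @endcode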
+
+    @since version 1.0.0
+    */
+    static basic_json object(initializer_list_t init = {})
+    {
+        return basic_json(init, false, value_t::object);
+    }
+
+    /*!
+    @brief construct an array with count copies of given value
+
+    Constructs a JSON array value by creating @a cnt copies of a passed value.
+    In case @a cnt is `0`, an empty array is created.
+
+    @param[in] cnt  the number of JSON copies of @a val to create
+    @param[in] val  the JSON value to copy
+
+    @post `std::distance(begin(),end()) == cnt` holds.
+
+    @complexity Linear in @a cnt.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes to any JSON value.
+
+    @liveexample{The following code shows examples for the @ref
+    basic_json(size_type\, const basic_json&)
+    constructor.,basic_json__size_type_basic_json}
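+
+    A short sketch (assuming the usual `json` alias):
+
+    @code {.cpp}
+    json j(3, json("x"));  // ["x", "x", "x"]
+    @endcode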
+
+    @since version 1.0.0
+    */
+    basic_json(size_type cnt, const basic_json& val)
+        : m_type(value_t::array)
+    {
+        m_value.array = create<array_t>(cnt, val);
+        assert_invariant();
+    }
+
+    /*!
+    @brief construct a JSON container given an iterator range
+
+    Constructs the JSON value with the contents of the range `[first, last)`.
+    The semantics depends on the different types a JSON value can have:
+    - In case of a null type, invalid_iterator.206 is thrown.
+    - In case of other primitive types (number, boolean, or string), @a first
+      must be `begin()` and @a last must be `end()`. In this case, the value is
+      copied. Otherwise, invalid_iterator.204 is thrown.
+    - In case of structured types (array, object), the constructor behaves
+      like the range constructors of `std::vector` or `std::map`; that is, a
+      JSON array or object is constructed from the values in the range.
+
+    @tparam InputIT an input iterator type (@ref iterator or @ref
+    const_iterator)
+
+    @param[in] first begin of the range to copy from (included)
+    @param[in] last end of the range to copy from (excluded)
+
+    @pre Iterators @a first and @a last must be initialized. **This
+         precondition is enforced with an assertion (see warning).** If
+         assertions are switched off, a violation of this precondition yields
+         undefined behavior.
+
+    @pre Range `[first, last)` is valid. Usually, this precondition cannot be
+         checked efficiently. Only certain edge cases are detected; see the
+         description of the exceptions below. A violation of this precondition
+         yields undefined behavior.
+
+    @warning A precondition is enforced with a runtime assertion that will
+             result in calling `std::abort` if this precondition is not met.
+             Assertions can be disabled by defining `NDEBUG` at compile time.
+             See https://en.cppreference.com/w/cpp/error/assert for more
+             information.
+
+    @throw invalid_iterator.201 if iterators @a first and @a last are not
+    compatible (i.e., do not belong to the same JSON value). In this case,
+    the range `[first, last)` is undefined.
+    @throw invalid_iterator.204 if iterators @a first and @a last belong to a
+    primitive type (number, boolean, or string), but @a first does not point
+    to the first element any more. In this case, the range `[first, last)` is
+    undefined. See example code below.
+    @throw invalid_iterator.206 if iterators @a first and @a last belong to a
+    null value. In this case, the range `[first, last)` is undefined.
+
+    @complexity Linear in distance between @a first and @a last.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes to any JSON value.
+
+    @liveexample{The example below shows several ways to create JSON values by
+    specifying a subrange with iterators.,basic_json__InputIt_InputIt}
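+
+    A short sketch of copying a subrange of an array (assuming the usual
+    `json` alias):
+
+    @code {.cpp}
+    json j_array = {"alpha", "bravo", "charlie", "delta"};
+    json j_sub(j_array.begin() + 1, j_array.begin() + 3);  // ["bravo", "charlie"]
+    @endcode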
+
+    @since version 1.0.0
+    */
+    template<class InputIT, typename std::enable_if<
+                 std::is_same<InputIT, typename basic_json_t::iterator>::value or
+                 std::is_same<InputIT, typename basic_json_t::const_iterator>::value, int>::type = 0>
+    basic_json(InputIT first, InputIT last)
+    {
+        assert(first.m_object != nullptr);
+        assert(last.m_object != nullptr);
+
+        // make sure iterator fits the current value
+        if (JSON_UNLIKELY(first.m_object != last.m_object))
+        {
+            JSON_THROW(invalid_iterator::create(201, "iterators are not compatible"));
+        }
+
+        // copy type from first iterator
+        m_type = first.m_object->m_type;
+
+        // check if iterator range is complete for primitive values
+        switch (m_type)
+        {
+            case value_t::boolean:
+            case value_t::number_float:
+            case value_t::number_integer:
+            case value_t::number_unsigned:
+            case value_t::string:
+            {
+                if (JSON_UNLIKELY(not first.m_it.primitive_iterator.is_begin()
+                                  or not last.m_it.primitive_iterator.is_end()))
+                {
+                    JSON_THROW(invalid_iterator::create(204, "iterators out of range"));
+                }
+                break;
+            }
+
+            default:
+                break;
+        }
+
+        switch (m_type)
+        {
+            case value_t::number_integer:
+            {
+                m_value.number_integer = first.m_object->m_value.number_integer;
+                break;
+            }
+
+            case value_t::number_unsigned:
+            {
+                m_value.number_unsigned = first.m_object->m_value.number_unsigned;
+                break;
+            }
+
+            case value_t::number_float:
+            {
+                m_value.number_float = first.m_object->m_value.number_float;
+                break;
+            }
+
+            case value_t::boolean:
+            {
+                m_value.boolean = first.m_object->m_value.boolean;
+                break;
+            }
+
+            case value_t::string:
+            {
+                m_value = *first.m_object->m_value.string;
+                break;
+            }
+
+            case value_t::object:
+            {
+                m_value.object = create<object_t>(first.m_it.object_iterator,
+                                                  last.m_it.object_iterator);
+                break;
+            }
+
+            case value_t::array:
+            {
+                m_value.array = create<array_t>(first.m_it.array_iterator,
+                                                last.m_it.array_iterator);
+                break;
+            }
+
+            default:
+                JSON_THROW(invalid_iterator::create(206, "cannot construct with iterators from " +
+                                                    std::string(first.m_object->type_name())));
+        }
+
+        assert_invariant();
+    }
+
+
+    ///////////////////////////////////////
+    // other constructors and destructor //
+    ///////////////////////////////////////
+
+    /// @private
+    basic_json(const detail::json_ref<basic_json>& ref)
+        : basic_json(ref.moved_or_copied())
+    {}
+
+    /*!
+    @brief copy constructor
+
+    Creates a copy of a given JSON value.
+
+    @param[in] other  the JSON value to copy
+
+    @post `*this == other`
+
+    @complexity Linear in the size of @a other.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes to any JSON value.
+
+    @requirement This function helps `basic_json` satisfy the
+    [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+    requirements:
+    - The complexity is linear.
+    - As postcondition, it holds: `other == basic_json(other)`.
+
+    @liveexample{The following code shows an example for the copy
+    constructor.,basic_json__basic_json}
+
+    @since version 1.0.0
+    */
+    basic_json(const basic_json& other)
+        : m_type(other.m_type)
+    {
+        // check if passed value is valid
+        other.assert_invariant();
+
+        switch (m_type)
+        {
+            case value_t::object:
+            {
+                m_value = *other.m_value.object;
+                break;
+            }
+
+            case value_t::array:
+            {
+                m_value = *other.m_value.array;
+                break;
+            }
+
+            case value_t::string:
+            {
+                m_value = *other.m_value.string;
+                break;
+            }
+
+            case value_t::boolean:
+            {
+                m_value = other.m_value.boolean;
+                break;
+            }
+
+            case value_t::number_integer:
+            {
+                m_value = other.m_value.number_integer;
+                break;
+            }
+
+            case value_t::number_unsigned:
+            {
+                m_value = other.m_value.number_unsigned;
+                break;
+            }
+
+            case value_t::number_float:
+            {
+                m_value = other.m_value.number_float;
+                break;
+            }
+
+            default:
+                break;
+        }
+
+        assert_invariant();
+    }
+
+    /*!
+    @brief move constructor
+
+    Move constructor. Constructs a JSON value with the contents of the given
+    value @a other using move semantics. It "steals" the resources from @a
+    other and leaves it as a JSON null value.
+
+    @param[in,out] other  value to move to this object
+
+    @post `*this` has the same value as @a other before the call.
+    @post @a other is a JSON null value.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this constructor never throws
+    exceptions.
+
+    @requirement This function helps `basic_json` satisfy the
+    [MoveConstructible](https://en.cppreference.com/w/cpp/named_req/MoveConstructible)
+    requirements.
+
+    @liveexample{The code below shows the move constructor explicitly called
+    via std::move.,basic_json__moveconstructor}
+
+    @since version 1.0.0
+    */
+    basic_json(basic_json&& other) noexcept
+        : m_type(std::move(other.m_type)),
+          m_value(std::move(other.m_value))
+    {
+        // check that passed value is valid
+        other.assert_invariant();
+
+        // invalidate payload
+        other.m_type = value_t::null;
+        other.m_value = {};
+
+        assert_invariant();
+    }
+
+    /*!
+    @brief copy assignment
+
+    Copy assignment operator. Copies a JSON value via the "copy and swap"
+    strategy: It is expressed in terms of the copy constructor, destructor,
+    and the `swap()` member function.
+
+    @param[in] other  value to copy from
+
+    @complexity Linear.
+
+    @requirement This function helps `basic_json` satisfy the
+    [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+    requirements:
+    - The complexity is linear.
+
+    @liveexample{The code below shows an example for the copy assignment. It
+    creates a copy of value `a` which is then swapped with `b`. Finally\, the
+    copy of `a` (which is the null value after the swap) is
+    destroyed.,basic_json__copyassignment}
+
+    @since version 1.0.0
+    */
+    basic_json& operator=(basic_json other) noexcept (
+        std::is_nothrow_move_constructible<value_t>::value and
+        std::is_nothrow_move_assignable<value_t>::value and
+        std::is_nothrow_move_constructible<json_value>::value and
+        std::is_nothrow_move_assignable<json_value>::value
+    )
+    {
+        // check that passed value is valid
+        other.assert_invariant();
+
+        using std::swap;
+        swap(m_type, other.m_type);
+        swap(m_value, other.m_value);
+
+        assert_invariant();
+        return *this;
+    }
+
+    /*!
+    @brief destructor
+
+    Destroys the JSON value and frees all allocated memory.
+
+    @complexity Linear.
+
+    @requirement This function helps `basic_json` satisfy the
+    [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+    requirements:
+    - The complexity is linear.
+    - All stored elements are destroyed and all memory is freed.
+
+    @since version 1.0.0
+    */
+    ~basic_json() noexcept
+    {
+        assert_invariant();
+        m_value.destroy(m_type);
+    }
+
+    /// @}
+
+  public:
+    ///////////////////////
+    // object inspection //
+    ///////////////////////
+
+    /// @name object inspection
+    /// Functions to inspect the type of a JSON value.
+    /// @{
+
+    /*!
+    @brief serialization
+
+    Serialization function for JSON values. The function tries to mimic
+    Python's `json.dumps()` function, and currently supports its @a indent
+    and @a ensure_ascii parameters.
+
+    @param[in] indent If indent is nonnegative, then array elements and object
+    members will be pretty-printed with that indent level. An indent level of
+    `0` will only insert newlines. `-1` (the default) selects the most compact
+    representation.
+    @param[in] indent_char The character to use for indentation if @a indent is
+    greater than `0`. The default is ` ` (space).
+    @param[in] ensure_ascii If @a ensure_ascii is true, all non-ASCII characters
+    in the output are escaped with `\uXXXX` sequences, and the result consists
+    of ASCII characters only.
+    @param[in] error_handler  how to react to decoding errors; there are three
+    possible values: `strict` (throws an exception in case a decoding error
+    occurs; default), `replace` (replace invalid UTF-8 sequences with U+FFFD),
+    and `ignore` (ignore invalid UTF-8 sequences during serialization).
+
+    @return string containing the serialization of the JSON value
+
+    @throw type_error.316 if a string stored inside the JSON value is not
+                          UTF-8 encoded
+
+    @complexity Linear.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @liveexample{The following example shows the effect of different @a indent\,
+    @a indent_char\, and @a ensure_ascii parameters to the result of the
+    serialization.,dump}
+
+    @see https://docs.python.org/2/library/json.html#json.dump
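+
+    A short sketch of the parameters (assuming the usual `json` alias):
+
+    @code {.cpp}
+    json j = {{"happy", true}, {"pi", 3.141}};
+    j.dump();              // {"happy":true,"pi":3.141}
+    j.dump(4);             // pretty-printed with an indent of 4 spaces
+    j.dump(2, ' ', true);  // additionally escapes any non-ASCII characters
+    @endcode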
+
+    @since version 1.0.0; indentation character @a indent_char, option
+           @a ensure_ascii and exceptions added in version 3.0.0; error
+           handlers added in version 3.4.0.
+    */
+    string_t dump(const int indent = -1,
+                  const char indent_char = ' ',
+                  const bool ensure_ascii = false,
+                  const error_handler_t error_handler = error_handler_t::strict) const
+    {
+        string_t result;
+        serializer s(detail::output_adapter<char, string_t>(result), indent_char, error_handler);
+
+        if (indent >= 0)
+        {
+            s.dump(*this, true, ensure_ascii, static_cast<unsigned int>(indent));
+        }
+        else
+        {
+            s.dump(*this, false, ensure_ascii, 0);
+        }
+
+        return result;
+    }
+
+    /*!
+    @brief return the type of the JSON value (explicit)
+
+    Return the type of the JSON value as a value from the @ref value_t
+    enumeration.
+
+    @return the type of the JSON value
+            Value type                | return value
+            ------------------------- | -------------------------
+            null                      | value_t::null
+            boolean                   | value_t::boolean
+            string                    | value_t::string
+            number (integer)          | value_t::number_integer
+            number (unsigned integer) | value_t::number_unsigned
+            number (floating-point)   | value_t::number_float
+            object                    | value_t::object
+            array                     | value_t::array
+            discarded                 | value_t::discarded
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `type()` for all JSON
+    types.,type}
+
+    @sa @ref operator value_t() -- return the type of the JSON value (implicit)
+    @sa @ref type_name() -- return the type as string
+
+    @since version 1.0.0
+    */
+    constexpr value_t type() const noexcept
+    {
+        return m_type;
+    }
+
+    /*!
+    @brief return whether type is primitive
+
+    This function returns true if and only if the JSON type is primitive
+    (string, number, boolean, or null).
+
+    @return `true` if type is primitive (string, number, boolean, or null),
+    `false` otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_primitive()` for all JSON
+    types.,is_primitive}
+
+    @sa @ref is_structured() -- returns whether JSON value is structured
+    @sa @ref is_null() -- returns whether JSON value is `null`
+    @sa @ref is_string() -- returns whether JSON value is a string
+    @sa @ref is_boolean() -- returns whether JSON value is a boolean
+    @sa @ref is_number() -- returns whether JSON value is a number
+
+    @since version 1.0.0
+    */
+    constexpr bool is_primitive() const noexcept
+    {
+        return is_null() or is_string() or is_boolean() or is_number();
+    }
+
+    /*!
+    @brief return whether type is structured
+
+    This function returns true if and only if the JSON type is structured
+    (array or object).
+
+    @return `true` if type is structured (array or object), `false` otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_structured()` for all JSON
+    types.,is_structured}
+
+    @sa @ref is_primitive() -- returns whether value is primitive
+    @sa @ref is_array() -- returns whether value is an array
+    @sa @ref is_object() -- returns whether value is an object
+
+    @since version 1.0.0
+    */
+    constexpr bool is_structured() const noexcept
+    {
+        return is_array() or is_object();
+    }
+
+    /*!
+    @brief return whether value is null
+
+    This function returns true if and only if the JSON value is null.
+
+    @return `true` if type is null, `false` otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_null()` for all JSON
+    types.,is_null}
+
+    @since version 1.0.0
+    */
+    constexpr bool is_null() const noexcept
+    {
+        return (m_type == value_t::null);
+    }
+
+    /*!
+    @brief return whether value is a boolean
+
+    This function returns true if and only if the JSON value is a boolean.
+
+    @return `true` if type is boolean, `false` otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_boolean()` for all JSON
+    types.,is_boolean}
+
+    @since version 1.0.0
+    */
+    constexpr bool is_boolean() const noexcept
+    {
+        return (m_type == value_t::boolean);
+    }
+
+    /*!
+    @brief return whether value is a number
+
+    This function returns true if and only if the JSON value is a number. This
+    includes both integer (signed and unsigned) and floating-point values.
+
+    @return `true` if type is number (regardless of whether it is an integer,
+    unsigned integer, or floating-point number), `false` otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_number()` for all JSON
+    types.,is_number}
+
+    @sa @ref is_number_integer() -- check if value is an integer or unsigned
+    integer number
+    @sa @ref is_number_unsigned() -- check if value is an unsigned integer
+    number
+    @sa @ref is_number_float() -- check if value is a floating-point number
+
+    @since version 1.0.0
+    */
+    constexpr bool is_number() const noexcept
+    {
+        return is_number_integer() or is_number_float();
+    }
+
+    /*!
+    @brief return whether value is an integer number
+
+    This function returns true if and only if the JSON value is a signed or
+    unsigned integer number. This excludes floating-point values.
+
+    @return `true` if type is an integer or unsigned integer number, `false`
+    otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_number_integer()` for all
+    JSON types.,is_number_integer}
+
+    @sa @ref is_number() -- check if value is a number
+    @sa @ref is_number_unsigned() -- check if value is an unsigned integer
+    number
+    @sa @ref is_number_float() -- check if value is a floating-point number
+
+    @since version 1.0.0
+    */
+    constexpr bool is_number_integer() const noexcept
+    {
+        return (m_type == value_t::number_integer or m_type == value_t::number_unsigned);
+    }
+
+    /*!
+    @brief return whether value is an unsigned integer number
+
+    This function returns true if and only if the JSON value is an unsigned
+    integer number. This excludes floating-point and signed integer values.
+
+    @return `true` if type is an unsigned integer number, `false` otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_number_unsigned()` for all
+    JSON types.,is_number_unsigned}
+
+    @sa @ref is_number() -- check if value is a number
+    @sa @ref is_number_integer() -- check if value is an integer or unsigned
+    integer number
+    @sa @ref is_number_float() -- check if value is a floating-point number
+
+    @since version 2.0.0
+    */
+    constexpr bool is_number_unsigned() const noexcept
+    {
+        return (m_type == value_t::number_unsigned);
+    }
+
+    /*!
+    @brief return whether value is a floating-point number
+
+    This function returns true if and only if the JSON value is a
+    floating-point number. This excludes signed and unsigned integer values.
+
+    @return `true` if type is a floating-point number, `false` otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_number_float()` for all
+    JSON types.,is_number_float}
+
+    @sa @ref is_number() -- check if value is number
+    @sa @ref is_number_integer() -- check if value is an integer number
+    @sa @ref is_number_unsigned() -- check if value is an unsigned integer
+    number
+
+    @since version 1.0.0
+    */
+    constexpr bool is_number_float() const noexcept
+    {
+        return (m_type == value_t::number_float);
+    }
+
+    /*!
+    @brief return whether value is an object
+
+    This function returns true if and only if the JSON value is an object.
+
+    @return `true` if type is object, `false` otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_object()` for all JSON
+    types.,is_object}
+
+    @since version 1.0.0
+    */
+    constexpr bool is_object() const noexcept
+    {
+        return (m_type == value_t::object);
+    }
+
+    /*!
+    @brief return whether value is an array
+
+    This function returns true if and only if the JSON value is an array.
+
+    @return `true` if type is array, `false` otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_array()` for all JSON
+    types.,is_array}
+
+    @since version 1.0.0
+    */
+    constexpr bool is_array() const noexcept
+    {
+        return (m_type == value_t::array);
+    }
+
+    /*!
+    @brief return whether value is a string
+
+    This function returns true if and only if the JSON value is a string.
+
+    @return `true` if type is string, `false` otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_string()` for all JSON
+    types.,is_string}
+
+    @since version 1.0.0
+    */
+    constexpr bool is_string() const noexcept
+    {
+        return (m_type == value_t::string);
+    }
+
+    /*!
+    @brief return whether value is discarded
+
+    This function returns true if and only if the JSON value was discarded
+    during parsing with a callback function (see @ref parser_callback_t).
+
+    @note This function will always return `false` for JSON values after parsing.
+    That is, discarded values can only occur during parsing, but will be
+    removed when inside a structured value or replaced by null in other cases.
+
+    @return `true` if type is discarded, `false` otherwise.
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies `is_discarded()` for all JSON
+    types.,is_discarded}
+
+    @since version 1.0.0
+    */
+    constexpr bool is_discarded() const noexcept
+    {
+        return (m_type == value_t::discarded);
+    }
+
+    /*!
+    @brief return the type of the JSON value (implicit)
+
+    Implicitly return the type of the JSON value as a value from the @ref
+    value_t enumeration.
+
+    @return the type of the JSON value
+
+    @complexity Constant.
+
+    @exceptionsafety No-throw guarantee: this member function never throws
+    exceptions.
+
+    @liveexample{The following code exemplifies the @ref value_t operator for
+    all JSON types.,operator__value_t}
+
+    @sa @ref type() -- return the type of the JSON value (explicit)
+    @sa @ref type_name() -- return the type as string
+
+    @since version 1.0.0
+    */
+    constexpr operator value_t() const noexcept
+    {
+        return m_type;
+    }
+
+    /// @}
+
+  private:
+    //////////////////
+    // value access //
+    //////////////////
+
+    /// get a boolean (explicit)
+    boolean_t get_impl(boolean_t* /*unused*/) const
+    {
+        if (JSON_LIKELY(is_boolean()))
+        {
+            return m_value.boolean;
+        }
+
+        JSON_THROW(type_error::create(302, "type must be boolean, but is " + std::string(type_name())));
+    }
+
+    /// get a pointer to the value (object)
+    object_t* get_impl_ptr(object_t* /*unused*/) noexcept
+    {
+        return is_object() ? m_value.object : nullptr;
+    }
+
+    /// get a pointer to the value (object)
+    constexpr const object_t* get_impl_ptr(const object_t* /*unused*/) const noexcept
+    {
+        return is_object() ? m_value.object : nullptr;
+    }
+
+    /// get a pointer to the value (array)
+    array_t* get_impl_ptr(array_t* /*unused*/) noexcept
+    {
+        return is_array() ? m_value.array : nullptr;
+    }
+
+    /// get a pointer to the value (array)
+    constexpr const array_t* get_impl_ptr(const array_t* /*unused*/) const noexcept
+    {
+        return is_array() ? m_value.array : nullptr;
+    }
+
+    /// get a pointer to the value (string)
+    string_t* get_impl_ptr(string_t* /*unused*/) noexcept
+    {
+        return is_string() ? m_value.string : nullptr;
+    }
+
+    /// get a pointer to the value (string)
+    constexpr const string_t* get_impl_ptr(const string_t* /*unused*/) const noexcept
+    {
+        return is_string() ? m_value.string : nullptr;
+    }
+
+    /// get a pointer to the value (boolean)
+    boolean_t* get_impl_ptr(boolean_t* /*unused*/) noexcept
+    {
+        return is_boolean() ? &m_value.boolean : nullptr;
+    }
+
+    /// get a pointer to the value (boolean)
+    constexpr const boolean_t* get_impl_ptr(const boolean_t* /*unused*/) const noexcept
+    {
+        return is_boolean() ? &m_value.boolean : nullptr;
+    }
+
+    /// get a pointer to the value (integer number)
+    number_integer_t* get_impl_ptr(number_integer_t* /*unused*/) noexcept
+    {
+        return is_number_integer() ? &m_value.number_integer : nullptr;
+    }
+
+    /// get a pointer to the value (integer number)
+    constexpr const number_integer_t* get_impl_ptr(const number_integer_t* /*unused*/) const noexcept
+    {
+        return is_number_integer() ? &m_value.number_integer : nullptr;
+    }
+
+    /// get a pointer to the value (unsigned number)
+    number_unsigned_t* get_impl_ptr(number_unsigned_t* /*unused*/) noexcept
+    {
+        return is_number_unsigned() ? &m_value.number_unsigned : nullptr;
+    }
+
+    /// get a pointer to the value (unsigned number)
+    constexpr const number_unsigned_t* get_impl_ptr(const number_unsigned_t* /*unused*/) const noexcept
+    {
+        return is_number_unsigned() ? &m_value.number_unsigned : nullptr;
+    }
+
+    /// get a pointer to the value (floating-point number)
+    number_float_t* get_impl_ptr(number_float_t* /*unused*/) noexcept
+    {
+        return is_number_float() ? &m_value.number_float : nullptr;
+    }
+
+    /// get a pointer to the value (floating-point number)
+    constexpr const number_float_t* get_impl_ptr(const number_float_t* /*unused*/) const noexcept
+    {
+        return is_number_float() ? &m_value.number_float : nullptr;
+    }
+
+    /*!
+    @brief helper function to implement get_ref()
+
+    This function helps to implement get_ref() without code duplication for
+    const and non-const overloads
+
+    @tparam ThisType will be deduced as `basic_json` or `const basic_json`
+
+    @throw type_error.303 if ReferenceType does not match underlying value
+    type of the current JSON
+    */
+    template<typename ReferenceType, typename ThisType>
+    static ReferenceType get_ref_impl(ThisType& obj)
+    {
+        // delegate the call to get_ptr<>()
+        auto ptr = obj.template get_ptr<typename std::add_pointer<ReferenceType>::type>();
+
+        if (JSON_LIKELY(ptr != nullptr))
+        {
+            return *ptr;
+        }
+
+        JSON_THROW(type_error::create(303, "incompatible ReferenceType for get_ref, actual type is " + std::string(obj.type_name())));
+    }
+
+  public:
+    /// @name value access
+    /// Direct access to the stored value of a JSON value.
+    /// @{
+
+    /*!
+    @brief get special-case overload
+
+    This overload avoids a lot of template boilerplate; it can be seen as the
+    identity method.
+
+    @tparam BasicJsonType == @ref basic_json
+
+    @return a copy of *this
+
+    @complexity Constant.
+
+    @since version 2.1.0
+    */
+    template<typename BasicJsonType, detail::enable_if_t<
+                 std::is_same<typename std::remove_const<BasicJsonType>::type, basic_json_t>::value,
+                 int> = 0>
+    basic_json get() const
+    {
+        return *this;
+    }
+
+    /*!
+    @brief get special-case overload
+
+    This overload converts the current @ref basic_json into a different
+    @ref basic_json type.
+
+    @tparam BasicJsonType == @ref basic_json
+
+    @return a copy of *this, converted into @tparam BasicJsonType
+
+    @complexity Depending on the implementation of the called `from_json()`
+                method.
+
+    @since version 3.2.0
+    */
+    template<typename BasicJsonType, detail::enable_if_t<
+                 not std::is_same<BasicJsonType, basic_json>::value and
+                 detail::is_basic_json<BasicJsonType>::value, int> = 0>
+    BasicJsonType get() const
+    {
+        return *this;
+    }
+
+    /*!
+    @brief get a value (explicit)
+
+    Explicit type conversion between the JSON value and a compatible value
+    which is [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible)
+    and [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible).
+    The value is converted by calling the @ref json_serializer<ValueType>
+    `from_json()` method.
+
+    The function is equivalent to executing
+    @code {.cpp}
+    ValueType ret;
+    JSONSerializer<ValueType>::from_json(*this, ret);
+    return ret;
+    @endcode
+
+    This overload is chosen if:
+    - @a ValueType is not @ref basic_json,
+    - @ref json_serializer<ValueType> has a `from_json()` method of the form
+      `void from_json(const basic_json&, ValueType&)`, and
+    - @ref json_serializer<ValueType> does not have a `from_json()` method of
+      the form `ValueType from_json(const basic_json&)`
+
+    @tparam ValueTypeCV the provided value type
+    @tparam ValueType the returned value type
+
+    @return copy of the JSON value, converted to @a ValueType
+
+    @throw what @ref json_serializer<ValueType> `from_json()` method throws
+
+    @liveexample{The example below shows several conversions from JSON values
+    to other types. There are a few things to note: (1) Floating-point numbers can
+    be converted to integers\, (2) A JSON array can be converted to a standard
+    `std::vector<short>`\, (3) A JSON object can be converted to C++
+    associative containers such as `std::unordered_map<std::string\,
+    json>`.,get__ValueType_const}
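+
+    A short sketch (assuming the usual `json` alias and `<vector>` included):
+
+    @code {.cpp}
+    json j = {1, 2, 3};
+    auto v = j.get<std::vector<int>>();  // v == {1, 2, 3}
+    @endcode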
+
+    @since version 2.1.0
+    */
+    template<typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>,
+             detail::enable_if_t <
+                 not detail::is_basic_json<ValueType>::value and
+                 detail::has_from_json<basic_json_t, ValueType>::value and
+                 not detail::has_non_default_from_json<basic_json_t, ValueType>::value,
+                 int> = 0>
+    ValueType get() const noexcept(noexcept(
+                                       JSONSerializer<ValueType>::from_json(std::declval<const basic_json_t&>(), std::declval<ValueType&>())))
+    {
+        // we cannot static_assert on ValueTypeCV being non-const, because
+        // there is support for get<const basic_json_t>(), which is why we
+        // still need the uncvref
+        static_assert(not std::is_reference<ValueTypeCV>::value,
+                      "get() cannot be used with reference types, you might want to use get_ref()");
+        static_assert(std::is_default_constructible<ValueType>::value,
+                      "types must be DefaultConstructible when used with get()");
+
+        ValueType ret;
+        JSONSerializer<ValueType>::from_json(*this, ret);
+        return ret;
+    }
+
+    /*!
+    @brief get a value (explicit); special case
+
+    Explicit type conversion between the JSON value and a compatible value
+    which is **not** [CopyConstructible](https://en.cppreference.com/w/cpp/named_req/CopyConstructible)
+    and **not** [DefaultConstructible](https://en.cppreference.com/w/cpp/named_req/DefaultConstructible).
+    The value is converted by calling the @ref json_serializer<ValueType>
+    `from_json()` method.
+
+    The function is equivalent to executing
+    @code {.cpp}
+    return JSONSerializer<ValueTypeCV>::from_json(*this);
+    @endcode
+
+    This overload is chosen if:
+    - @a ValueType is not @ref basic_json and
+    - @ref json_serializer<ValueType> has a `from_json()` method of the form
+      `ValueType from_json(const basic_json&)`
+
+    @note If @ref json_serializer<ValueType> has both overloads of
+    `from_json()`, this one is chosen.
+
+    @tparam ValueTypeCV the provided value type
+    @tparam ValueType the returned value type
+
+    @return copy of the JSON value, converted to @a ValueType
+
+    @throw what @ref json_serializer<ValueType> `from_json()` method throws
+
+    @since version 2.1.0
+    */
+    template<typename ValueTypeCV, typename ValueType = detail::uncvref_t<ValueTypeCV>,
+             detail::enable_if_t<not std::is_same<basic_json_t, ValueType>::value and
+                                 detail::has_non_default_from_json<basic_json_t, ValueType>::value,
+                                 int> = 0>
+    ValueType get() const noexcept(noexcept(
+                                       JSONSerializer<ValueTypeCV>::from_json(std::declval<const basic_json_t&>())))
+    {
+        static_assert(not std::is_reference<ValueTypeCV>::value,
+                      "get() cannot be used with reference types, you might want to use get_ref()");
+        return JSONSerializer<ValueTypeCV>::from_json(*this);
+    }
+
+    /*!
+    @brief get a value (explicit)
+
+    Explicit type conversion between the JSON value and a compatible value.
+    The value is filled into the input parameter by calling the @ref json_serializer<ValueType>
+    `from_json()` method.
+
+    The function is equivalent to executing
+    @code {.cpp}
+    ValueType v;
+    JSONSerializer<ValueType>::from_json(*this, v);
+    @endcode
+
+    This overload is chosen if:
+    - @a ValueType is not @ref basic_json, and
+    - @ref json_serializer<ValueType> has a `from_json()` method of the form
+      `void from_json(const basic_json&, ValueType&)`.
+
+    @tparam ValueType the input parameter type.
+
+    @return the input parameter, allowing calls to be chained.
+
+    @throw what @ref json_serializer<ValueType> `from_json()` method throws
+
+    @liveexample{The example below shows several conversions from JSON values
+    to other types. There are a few things to note: (1) Floating-point numbers can
+    be converted to integers\, (2) A JSON array can be converted to a standard
+    `std::vector<short>`\, (3) A JSON object can be converted to C++
+    associative containers such as `std::unordered_map<std::string\,
+    json>`.,get_to}
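+
+    A short sketch (assuming the usual `json` alias and the required standard
+    headers):
+
+    @code {.cpp}
+    json j = {{"x", 1}, {"y", 2}};
+    std::map<std::string, int> m;
+    j.get_to(m);  // m == {{"x", 1}, {"y", 2}}
+    @endcode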
+
+    @since version 3.3.0
+    */
+    template<typename ValueType,
+             detail::enable_if_t <
+                 not detail::is_basic_json<ValueType>::value and
+                 detail::has_from_json<basic_json_t, ValueType>::value,
+                 int> = 0>
+    ValueType & get_to(ValueType& v) const noexcept(noexcept(
+                JSONSerializer<ValueType>::from_json(std::declval<const basic_json_t&>(), v)))
+    {
+        JSONSerializer<ValueType>::from_json(*this, v);
+        return v;
+    }
+
+
+    /*!
+    @brief get a pointer value (implicit)
+
+    Implicit pointer access to the internally stored JSON value. No copies are
+    made.
+
+    @warning Writing data to the pointee of the result yields an undefined
+    state.
+
+    @tparam PointerType pointer type; must be a pointer to @ref array_t, @ref
+    object_t, @ref string_t, @ref boolean_t, @ref number_integer_t,
+    @ref number_unsigned_t, or @ref number_float_t. Enforced by a static
+    assertion.
+
+    @return pointer to the internally stored JSON value if the requested
+    pointer type @a PointerType fits to the JSON value; `nullptr` otherwise
+
+    @complexity Constant.
+
+    @liveexample{The example below shows how pointers to internal values of a
+    JSON value can be requested. Note that no type conversions are made and a
+    `nullptr` is returned if the value and the requested pointer type do not
+    match.,get_ptr}
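+
+    A short sketch (assuming the usual `json` alias):
+
+    @code {.cpp}
+    json j = 17;
+    auto* p = j.get_ptr<json::number_integer_t*>();  // pointer to the stored integer
+    auto* s = j.get_ptr<json::string_t*>();          // nullptr: type does not match
+    @endcode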
+
+    @since version 1.0.0
+    */
+    template<typename PointerType, typename std::enable_if<
+                 std::is_pointer<PointerType>::value, int>::type = 0>
+    auto get_ptr() noexcept -> decltype(std::declval<basic_json_t&>().get_impl_ptr(std::declval<PointerType>()))
+    {
+        // delegate the call to get_impl_ptr<>()
+        return get_impl_ptr(static_cast<PointerType>(nullptr));
+    }
+
+    /*!
+    @brief get a pointer value (implicit)
+    @copydoc get_ptr()
+    */
+    template<typename PointerType, typename std::enable_if<
+                 std::is_pointer<PointerType>::value and
+                 std::is_const<typename std::remove_pointer<PointerType>::type>::value, int>::type = 0>
+    constexpr auto get_ptr() const noexcept -> decltype(std::declval<const basic_json_t&>().get_impl_ptr(std::declval<PointerType>()))
+    {
+        // delegate the call to get_impl_ptr<>() const
+        return get_impl_ptr(static_cast<PointerType>(nullptr));
+    }
+
+    /*!
+    @brief get a pointer value (explicit)
+
+    Explicit pointer access to the internally stored JSON value. No copies are
+    made.
+
+    @warning The pointer becomes invalid if the underlying JSON object
+    changes.
+
+    @tparam PointerType pointer type; must be a pointer to @ref array_t, @ref
+    object_t, @ref string_t, @ref boolean_t, @ref number_integer_t,
+    @ref number_unsigned_t, or @ref number_float_t.
+
+    @return pointer to the internally stored JSON value if the requested
+    pointer type @a PointerType matches the JSON value; `nullptr` otherwise
+
+    @complexity Constant.
+
+    @liveexample{The example below shows how pointers to internal values of a
+    JSON value can be requested. Note that no type conversions are made and a
+    `nullptr` is returned if the value and the requested pointer type do not
+    match.,get__PointerType}
+
+    @sa @ref get_ptr() for explicit pointer-member access
+
+    @since version 1.0.0
+    */
+    template<typename PointerType, typename std::enable_if<
+                 std::is_pointer<PointerType>::value, int>::type = 0>
+    auto get() noexcept -> decltype(std::declval<basic_json_t&>().template get_ptr<PointerType>())
+    {
+        // delegate the call to get_ptr
+        return get_ptr<PointerType>();
+    }
+
+    /*!
+    @brief get a pointer value (explicit)
+    @copydoc get()
+    */
+    template<typename PointerType, typename std::enable_if<
+                 std::is_pointer<PointerType>::value, int>::type = 0>
+    constexpr auto get() const noexcept -> decltype(std::declval<const basic_json_t&>().template get_ptr<PointerType>())
+    {
+        // delegate the call to get_ptr
+        return get_ptr<PointerType>();
+    }
+
+    /*!
+    @brief get a reference value (implicit)
+
+    Implicit reference access to the internally stored JSON value. No copies
+    are made.
+
+    @warning Writing data to the referenced value yields an undefined
+    state.
+
+    @tparam ReferenceType reference type; must be a reference to @ref array_t,
+    @ref object_t, @ref string_t, @ref boolean_t, @ref number_integer_t, or
+    @ref number_float_t. Enforced by static assertion.
+
+    @return reference to the internally stored JSON value if the requested
+    reference type @a ReferenceType matches the JSON value; throws
+    type_error.303 otherwise
+
+    @throw type_error.303 in case passed type @a ReferenceType is incompatible
+    with the stored JSON value; see example below
+
+    @complexity Constant.
+
+    @liveexample{The example shows several calls to `get_ref()`.,get_ref}
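+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j = "hello";
+    auto& s = j.get_ref<json::string_t&>();   // reference to the stored string
+    // j.get_ref<json::array_t&>();           // would throw type_error.303
+    @endcode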
+
+    @since version 1.1.0
+    */
+    template<typename ReferenceType, typename std::enable_if<
+                 std::is_reference<ReferenceType>::value, int>::type = 0>
+    ReferenceType get_ref()
+    {
+        // delegate call to get_ref_impl
+        return get_ref_impl<ReferenceType>(*this);
+    }
+
+    /*!
+    @brief get a reference value (implicit)
+    @copydoc get_ref()
+    */
+    template<typename ReferenceType, typename std::enable_if<
+                 std::is_reference<ReferenceType>::value and
+                 std::is_const<typename std::remove_reference<ReferenceType>::type>::value, int>::type = 0>
+    ReferenceType get_ref() const
+    {
+        // delegate call to get_ref_impl
+        return get_ref_impl<ReferenceType>(*this);
+    }
+
+    /*!
+    @brief get a value (implicit)
+
+    Implicit type conversion between the JSON value and a compatible value.
+    The call is realized by calling @ref get() const.
+
+    @tparam ValueType non-pointer type compatible to the JSON value, for
+    instance `int` for JSON integer numbers, `bool` for JSON booleans, or
+    `std::vector` types for JSON arrays. The character type of @ref string_t
+    as well as an initializer list of this type is excluded to avoid
+    ambiguities as these types implicitly convert to `std::string`.
+
+    @return copy of the JSON value, converted to type @a ValueType
+
+    @throw type_error.302 in case passed type @a ValueType is incompatible
+    to the JSON value type (e.g., the JSON value is of type boolean, but a
+    string is requested); see example below
+
+    @complexity Linear in the size of the JSON value.
+
+    @liveexample{The example below shows several conversions from JSON values
+    to other types. There are a few things to note: (1) Floating-point numbers can
+    be converted to integers\, (2) A JSON array can be converted to a standard
+    `std::vector<short>`\, (3) A JSON object can be converted to C++
+    associative containers such as `std::unordered_map<std::string\,
+    json>`.,operator__ValueType}
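+
+    A minimal usage sketch (hypothetical values; assumes `json` is the
+    `nlohmann::json` alias and `<map>`/`<string>` are included):
+    @code {.cpp}
+    json j = {{"one", 1}, {"two", 2}};
+    std::map<std::string, int> m = j;   // implicit conversion of the whole object
+    int one = j["one"];                 // implicit conversion of a single element
+    @endcode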
+
+    @since version 1.0.0
+    */
+    template < typename ValueType, typename std::enable_if <
+                   not std::is_pointer<ValueType>::value and
+                   not std::is_same<ValueType, detail::json_ref<basic_json>>::value and
+                   not std::is_same<ValueType, typename string_t::value_type>::value and
+                   not detail::is_basic_json<ValueType>::value
+
+#ifndef _MSC_VER  // fix for issue #167 operator<< ambiguity under VS2015
+                   and not std::is_same<ValueType, std::initializer_list<typename string_t::value_type>>::value
+#if defined(JSON_HAS_CPP_17) && defined(_MSC_VER) and _MSC_VER <= 1914
+                   and not std::is_same<ValueType, typename std::string_view>::value
+#endif
+#endif
+                   and detail::is_detected<detail::get_template_function, const basic_json_t&, ValueType>::value
+                   , int >::type = 0 >
+    operator ValueType() const
+    {
+        // delegate the call to get<>() const
+        return get<ValueType>();
+    }
+
+    /// @}
+
+
+    ////////////////////
+    // element access //
+    ////////////////////
+
+    /// @name element access
+    /// Access to the JSON value.
+    /// @{
+
+    /*!
+    @brief access specified array element with bounds checking
+
+    Returns a reference to the element at specified location @a idx, with
+    bounds checking.
+
+    @param[in] idx  index of the element to access
+
+    @return reference to the element at index @a idx
+
+    @throw type_error.304 if the JSON value is not an array; in this case,
+    calling `at` with an index makes no sense. See example below.
+    @throw out_of_range.401 if the index @a idx is out of range of the array;
+    that is, `idx >= size()`. See example below.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @complexity Constant.
+
+    @since version 1.0.0
+
+    @liveexample{The example below shows how array elements can be read and
+    written using `at()`. It also demonstrates the different exceptions that
+    can be thrown.,at__size_type}
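+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j = {1, 2, 3};
+    j.at(1) = 42;   // j is now [1, 42, 3]
+    // j.at(7);     // would throw out_of_range.401
+    @endcode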
+    */
+    reference at(size_type idx)
+    {
+        // at only works for arrays
+        if (JSON_LIKELY(is_array()))
+        {
+            JSON_TRY
+            {
+                return m_value.array->at(idx);
+            }
+            JSON_CATCH (std::out_of_range&)
+            {
+                // create better exception explanation
+                JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range"));
+            }
+        }
+        else
+        {
+            JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name())));
+        }
+    }
+
+    /*!
+    @brief access specified array element with bounds checking
+
+    Returns a const reference to the element at specified location @a idx,
+    with bounds checking.
+
+    @param[in] idx  index of the element to access
+
+    @return const reference to the element at index @a idx
+
+    @throw type_error.304 if the JSON value is not an array; in this case,
+    calling `at` with an index makes no sense. See example below.
+    @throw out_of_range.401 if the index @a idx is out of range of the array;
+    that is, `idx >= size()`. See example below.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @complexity Constant.
+
+    @since version 1.0.0
+
+    @liveexample{The example below shows how array elements can be read using
+    `at()`. It also demonstrates the different exceptions that can be thrown.,
+    at__size_type_const}
+    */
+    const_reference at(size_type idx) const
+    {
+        // at only works for arrays
+        if (JSON_LIKELY(is_array()))
+        {
+            JSON_TRY
+            {
+                return m_value.array->at(idx);
+            }
+            JSON_CATCH (std::out_of_range&)
+            {
+                // create better exception explanation
+                JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range"));
+            }
+        }
+        else
+        {
+            JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name())));
+        }
+    }
+
+    /*!
+    @brief access specified object element with bounds checking
+
+    Returns a reference to the element with the specified key @a key, with
+    bounds checking.
+
+    @param[in] key  key of the element to access
+
+    @return reference to the element at key @a key
+
+    @throw type_error.304 if the JSON value is not an object; in this case,
+    calling `at` with a key makes no sense. See example below.
+    @throw out_of_range.403 if the key @a key is not stored in the object;
+    that is, `find(key) == end()`. See example below.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @complexity Logarithmic in the size of the container.
+
+    @sa @ref operator[](const typename object_t::key_type&) for unchecked
+    access by reference
+    @sa @ref value() for access by value with a default value
+
+    @since version 1.0.0
+
+    @liveexample{The example below shows how object elements can be read and
+    written using `at()`. It also demonstrates the different exceptions that
+    can be thrown.,at__object_t_key_type}
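+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j = {{"host", "example.org"}, {"port", 443}};
+    j.at("port") = 8443;   // update an existing element
+    // j.at("proxy");      // would throw out_of_range.403
+    @endcode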
+    */
+    reference at(const typename object_t::key_type& key)
+    {
+        // at only works for objects
+        if (JSON_LIKELY(is_object()))
+        {
+            JSON_TRY
+            {
+                return m_value.object->at(key);
+            }
+            JSON_CATCH (std::out_of_range&)
+            {
+                // create better exception explanation
+                JSON_THROW(out_of_range::create(403, "key '" + key + "' not found"));
+            }
+        }
+        else
+        {
+            JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name())));
+        }
+    }
+
+    /*!
+    @brief access specified object element with bounds checking
+
+    Returns a const reference to the element with the specified key @a key,
+    with bounds checking.
+
+    @param[in] key  key of the element to access
+
+    @return const reference to the element at key @a key
+
+    @throw type_error.304 if the JSON value is not an object; in this case,
+    calling `at` with a key makes no sense. See example below.
+    @throw out_of_range.403 if the key @a key is not stored in the object;
+    that is, `find(key) == end()`. See example below.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @complexity Logarithmic in the size of the container.
+
+    @sa @ref operator[](const typename object_t::key_type&) for unchecked
+    access by reference
+    @sa @ref value() for access by value with a default value
+
+    @since version 1.0.0
+
+    @liveexample{The example below shows how object elements can be read using
+    `at()`. It also demonstrates the different exceptions that can be thrown.,
+    at__object_t_key_type_const}
+    */
+    const_reference at(const typename object_t::key_type& key) const
+    {
+        // at only works for objects
+        if (JSON_LIKELY(is_object()))
+        {
+            JSON_TRY
+            {
+                return m_value.object->at(key);
+            }
+            JSON_CATCH (std::out_of_range&)
+            {
+                // create better exception explanation
+                JSON_THROW(out_of_range::create(403, "key '" + key + "' not found"));
+            }
+        }
+        else
+        {
+            JSON_THROW(type_error::create(304, "cannot use at() with " + std::string(type_name())));
+        }
+    }
+
+    /*!
+    @brief access specified array element
+
+    Returns a reference to the element at specified location @a idx.
+
+    @note If @a idx is beyond the range of the array (i.e., `idx >= size()`),
+    then the array is silently filled up with `null` values to make `idx` a
+    valid reference to the last stored element.
+
+    @param[in] idx  index of the element to access
+
+    @return reference to the element at index @a idx
+
+    @throw type_error.305 if the JSON value is neither an array nor null; in
+    that case, using the [] operator with an index makes no sense.
+
+    @complexity Constant if @a idx is in the range of the array. Otherwise
+    linear in `idx - size()`.
+
+    @liveexample{The example below shows how array elements can be read and
+    written using `[]` operator. Note the addition of `null`
+    values.,operatorarray__size_type}
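+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j;     // null
+    j[3] = 7;   // null becomes an array: [null, null, null, 7]
+    j[0] = 1;   // j is now [1, null, null, 7]
+    @endcode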
+
+    @since version 1.0.0
+    */
+    reference operator[](size_type idx)
+    {
+        // implicitly convert null value to an empty array
+        if (is_null())
+        {
+            m_type = value_t::array;
+            m_value.array = create<array_t>();
+            assert_invariant();
+        }
+
+        // operator[] only works for arrays
+        if (JSON_LIKELY(is_array()))
+        {
+            // fill up array with null values if given idx is outside range
+            if (idx >= m_value.array->size())
+            {
+                m_value.array->insert(m_value.array->end(),
+                                      idx - m_value.array->size() + 1,
+                                      basic_json());
+            }
+
+            return m_value.array->operator[](idx);
+        }
+
+        JSON_THROW(type_error::create(305, "cannot use operator[] with a numeric argument with " + std::string(type_name())));
+    }
+
+    /*!
+    @brief access specified array element
+
+    Returns a const reference to the element at specified location @a idx.
+
+    @param[in] idx  index of the element to access
+
+    @return const reference to the element at index @a idx
+
+    @throw type_error.305 if the JSON value is not an array; in that case,
+    using the [] operator with an index makes no sense.
+
+    @complexity Constant.
+
+    @liveexample{The example below shows how array elements can be read using
+    the `[]` operator.,operatorarray__size_type_const}
+
+    @since version 1.0.0
+    */
+    const_reference operator[](size_type idx) const
+    {
+        // const operator[] only works for arrays
+        if (JSON_LIKELY(is_array()))
+        {
+            return m_value.array->operator[](idx);
+        }
+
+        JSON_THROW(type_error::create(305, "cannot use operator[] with a numeric argument with " + std::string(type_name())));
+    }
+
+    /*!
+    @brief access specified object element
+
+    Returns a reference to the element with the specified key @a key.
+
+    @note If @a key is not found in the object, then it is silently added to
+    the object and filled with a `null` value to make `key` a valid reference.
+    In case the value was `null` before, it is converted to an object.
+
+    @param[in] key  key of the element to access
+
+    @return reference to the element at key @a key
+
+    @throw type_error.305 if the JSON value is neither an object nor null; in
+    that case, using the [] operator with a key makes no sense.
+
+    @complexity Logarithmic in the size of the container.
+
+    @liveexample{The example below shows how object elements can be read and
+    written using the `[]` operator.,operatorarray__key_type}
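+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j;                // null
+    j["answer"] = 42;      // null becomes an object: {"answer": 42}
+    int a = j["answer"];   // read access to an existing key
+    @endcode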
+
+    @sa @ref at(const typename object_t::key_type&) for access by reference
+    with range checking
+    @sa @ref value() for access by value with a default value
+
+    @since version 1.0.0
+    */
+    reference operator[](const typename object_t::key_type& key)
+    {
+        // implicitly convert null value to an empty object
+        if (is_null())
+        {
+            m_type = value_t::object;
+            m_value.object = create<object_t>();
+            assert_invariant();
+        }
+
+        // operator[] only works for objects
+        if (JSON_LIKELY(is_object()))
+        {
+            return m_value.object->operator[](key);
+        }
+
+        JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name())));
+    }
+
+    /*!
+    @brief read-only access specified object element
+
+    Returns a const reference to the element with the specified key @a key. No
+    bounds checking is performed.
+
+    @warning If the element with key @a key does not exist, the behavior is
+    undefined.
+
+    @param[in] key  key of the element to access
+
+    @return const reference to the element at key @a key
+
+    @pre The element with key @a key must exist. **This precondition is
+         enforced with an assertion.**
+
+    @throw type_error.305 if the JSON value is not an object; in that case,
+    using the [] operator with a key makes no sense.
+
+    @complexity Logarithmic in the size of the container.
+
+    @liveexample{The example below shows how object elements can be read using
+    the `[]` operator.,operatorarray__key_type_const}
+
+    @sa @ref at(const typename object_t::key_type&) for access by reference
+    with range checking
+    @sa @ref value() for access by value with a default value
+
+    @since version 1.0.0
+    */
+    const_reference operator[](const typename object_t::key_type& key) const
+    {
+        // const operator[] only works for objects
+        if (JSON_LIKELY(is_object()))
+        {
+            assert(m_value.object->find(key) != m_value.object->end());
+            return m_value.object->find(key)->second;
+        }
+
+        JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name())));
+    }
+
+    /*!
+    @brief access specified object element
+
+    Returns a reference to the element with the specified key @a key.
+
+    @note If @a key is not found in the object, then it is silently added to
+    the object and filled with a `null` value to make `key` a valid reference.
+    In case the value was `null` before, it is converted to an object.
+
+    @param[in] key  key of the element to access
+
+    @return reference to the element at key @a key
+
+    @throw type_error.305 if the JSON value is neither an object nor null; in
+    that case, using the [] operator with a key makes no sense.
+
+    @complexity Logarithmic in the size of the container.
+
+    @liveexample{The example below shows how object elements can be read and
+    written using the `[]` operator.,operatorarray__key_type}
+
+    @sa @ref at(const typename object_t::key_type&) for access by reference
+    with range checking
+    @sa @ref value() for access by value with a default value
+
+    @since version 1.1.0
+    */
+    template<typename T>
+    reference operator[](T* key)
+    {
+        // implicitly convert null to object
+        if (is_null())
+        {
+            m_type = value_t::object;
+            m_value = value_t::object;
+            assert_invariant();
+        }
+
+        // operator[] only works for objects
+        if (JSON_LIKELY(is_object()))
+        {
+            return m_value.object->operator[](key);
+        }
+
+        JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name())));
+    }
+
+    /*!
+    @brief read-only access specified object element
+
+    Returns a const reference to the element with the specified key @a key. No
+    bounds checking is performed.
+
+    @warning If the element with key @a key does not exist, the behavior is
+    undefined.
+
+    @param[in] key  key of the element to access
+
+    @return const reference to the element at key @a key
+
+    @pre The element with key @a key must exist. **This precondition is
+         enforced with an assertion.**
+
+    @throw type_error.305 if the JSON value is not an object; in that case,
+    using the [] operator with a key makes no sense.
+
+    @complexity Logarithmic in the size of the container.
+
+    @liveexample{The example below shows how object elements can be read using
+    the `[]` operator.,operatorarray__key_type_const}
+
+    @sa @ref at(const typename object_t::key_type&) for access by reference
+    with range checking
+    @sa @ref value() for access by value with a default value
+
+    @since version 1.1.0
+    */
+    template<typename T>
+    const_reference operator[](T* key) const
+    {
+        // const operator[] only works for objects
+        if (JSON_LIKELY(is_object()))
+        {
+            assert(m_value.object->find(key) != m_value.object->end());
+            return m_value.object->find(key)->second;
+        }
+
+        JSON_THROW(type_error::create(305, "cannot use operator[] with a string argument with " + std::string(type_name())));
+    }
+
+    /*!
+    @brief access specified object element with default value
+
+    Returns either a copy of an object's element at the specified key @a key
+    or a given default value if no element with key @a key exists.
+
+    The function is basically equivalent to executing
+    @code {.cpp}
+    try {
+        return at(key);
+    } catch(out_of_range) {
+        return default_value;
+    }
+    @endcode
+
+    @note Unlike @ref at(const typename object_t::key_type&), this function
+    does not throw if the given key @a key was not found.
+
+    @note Unlike @ref operator[](const typename object_t::key_type& key), this
+    function does not implicitly add an element to the position defined by @a
+    key. This function is furthermore also applicable to const objects.
+
+    @param[in] key  key of the element to access
+    @param[in] default_value  the value to return if @a key is not found
+
+    @tparam ValueType type compatible to JSON values, for instance `int` for
+    JSON integer numbers, `bool` for JSON booleans, or `std::vector` types for
+    JSON arrays. Note the type of the expected value at @a key and the default
+    value @a default_value must be compatible.
+
+    @return copy of the element at key @a key or @a default_value if @a key
+    is not found
+
+    @throw type_error.306 if the JSON value is not an object; in that case,
+    using `value()` with a key makes no sense.
+
+    @complexity Logarithmic in the size of the container.
+
+    @liveexample{The example below shows how object elements can be queried
+    with a default value.,basic_json__value}
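+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j = {{"timeout", 30}};
+    int timeout = j.value("timeout", 60);   // 30 (key present)
+    int retries = j.value("retries", 3);    // 3 (key absent, default returned)
+    @endcode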
+
+    @sa @ref at(const typename object_t::key_type&) for access by reference
+    with range checking
+    @sa @ref operator[](const typename object_t::key_type&) for unchecked
+    access by reference
+
+    @since version 1.0.0
+    */
+    template<class ValueType, typename std::enable_if<
+                 std::is_convertible<basic_json_t, ValueType>::value, int>::type = 0>
+    ValueType value(const typename object_t::key_type& key, const ValueType& default_value) const
+    {
+        // value() only works for objects
+        if (JSON_LIKELY(is_object()))
+        {
+            // if the key is found, return its value; otherwise return the given default value
+            const auto it = find(key);
+            if (it != end())
+            {
+                return *it;
+            }
+
+            return default_value;
+        }
+
+        JSON_THROW(type_error::create(306, "cannot use value() with " + std::string(type_name())));
+    }
+
+    /*!
+    @brief overload for a default value of type const char*
+    @copydoc basic_json::value(const typename object_t::key_type&, const ValueType&) const
+    */
+    string_t value(const typename object_t::key_type& key, const char* default_value) const
+    {
+        return value(key, string_t(default_value));
+    }
+
+    /*!
+    @brief access specified object element via JSON Pointer with default value
+
+    Returns either a copy of an object's element at the specified key @a key
+    or a given default value if no element with key @a key exists.
+
+    The function is basically equivalent to executing
+    @code {.cpp}
+    try {
+        return at(ptr);
+    } catch(out_of_range) {
+        return default_value;
+    }
+    @endcode
+
+    @note Unlike @ref at(const json_pointer&), this function does not throw
+    if @a ptr does not resolve to a value.
+
+    @param[in] ptr  a JSON pointer to the element to access
+    @param[in] default_value  the value to return if @a ptr found no value
+
+    @tparam ValueType type compatible to JSON values, for instance `int` for
+    JSON integer numbers, `bool` for JSON booleans, or `std::vector` types for
+    JSON arrays. Note the type of the expected value at @a ptr and the default
+    value @a default_value must be compatible.
+
+    @return copy of the element indicated by @a ptr, or @a default_value if no
+    such element exists
+
+    @throw type_error.306 if the JSON value is not an object; in that case,
+    using `value()` with a key makes no sense.
+
+    @complexity Logarithmic in the size of the container.
+
+    @liveexample{The example below shows how object elements can be queried
+    with a default value.,basic_json__value_ptr}
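+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j = {{"server", {{"port", 443}}}};
+    int port = j.value(json::json_pointer("/server/port"), 80);   // 443
+    int ttl  = j.value(json::json_pointer("/server/ttl"), 60);    // 60 (no such value)
+    @endcode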
+
+    @sa @ref operator[](const json_pointer&) for unchecked access by reference
+
+    @since version 2.0.2
+    */
+    template<class ValueType, typename std::enable_if<
+                 std::is_convertible<basic_json_t, ValueType>::value, int>::type = 0>
+    ValueType value(const json_pointer& ptr, const ValueType& default_value) const
+    {
+        // value() only works for objects
+        if (JSON_LIKELY(is_object()))
+        {
+            // if the pointer resolves to a value, return it; otherwise return the default value
+            JSON_TRY
+            {
+                return ptr.get_checked(this);
+            }
+            JSON_INTERNAL_CATCH (out_of_range&)
+            {
+                return default_value;
+            }
+        }
+
+        JSON_THROW(type_error::create(306, "cannot use value() with " + std::string(type_name())));
+    }
+
+    /*!
+    @brief overload for a default value of type const char*
+    @copydoc basic_json::value(const json_pointer&, ValueType) const
+    */
+    string_t value(const json_pointer& ptr, const char* default_value) const
+    {
+        return value(ptr, string_t(default_value));
+    }
+
+    /*!
+    @brief access the first element
+
+    Returns a reference to the first element in the container. For a JSON
+    container `c`, the expression `c.front()` is equivalent to `*c.begin()`.
+
+    @return In case of a structured type (array or object), a reference to the
+    first element is returned. In case of number, string, or boolean values, a
+    reference to the value is returned.
+
+    @complexity Constant.
+
+    @pre The JSON value must not be `null` (would throw invalid_iterator.214)
+    or an empty array or object (undefined behavior, **guarded by
+    assertions**).
+    @post The JSON value remains unchanged.
+
+    @throw invalid_iterator.214 when called on `null` value
+
+    @liveexample{The following code shows an example for `front()`.,front}
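+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j = {1, 2, 3};
+    int first = j.front();   // 1
+    int last  = j.back();    // 3
+    @endcode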
+
+    @sa @ref back() -- access the last element
+
+    @since version 1.0.0
+    */
+    reference front()
+    {
+        return *begin();
+    }
+
+    /*!
+    @copydoc basic_json::front()
+    */
+    const_reference front() const
+    {
+        return *cbegin();
+    }
+
+    /*!
+    @brief access the last element
+
+    Returns a reference to the last element in the container. For a JSON
+    container `c`, the expression `c.back()` is equivalent to
+    @code {.cpp}
+    auto tmp = c.end();
+    --tmp;
+    return *tmp;
+    @endcode
+
+    @return In case of a structured type (array or object), a reference to the
+    last element is returned. In case of number, string, or boolean values, a
+    reference to the value is returned.
+
+    @complexity Constant.
+
+    @pre The JSON value must not be `null` (would throw invalid_iterator.214)
+    or an empty array or object (undefined behavior, **guarded by
+    assertions**).
+    @post The JSON value remains unchanged.
+
+    @throw invalid_iterator.214 when called on a `null` value. See example
+    below.
+
+    @liveexample{The following code shows an example for `back()`.,back}
+
+    @sa @ref front() -- access the first element
+
+    @since version 1.0.0
+    */
+    reference back()
+    {
+        auto tmp = end();
+        --tmp;
+        return *tmp;
+    }
+
+    /*!
+    @copydoc basic_json::back()
+    */
+    const_reference back() const
+    {
+        auto tmp = cend();
+        --tmp;
+        return *tmp;
+    }
+
+    /*!
+    @brief remove element given an iterator
+
+    Removes the element specified by iterator @a pos. The iterator @a pos must
+    be valid and dereferenceable. Thus the `end()` iterator (which is valid,
+    but is not dereferenceable) cannot be used as a value for @a pos.
+
+    If called on a primitive type other than `null`, the resulting JSON value
+    will be `null`.
+
+    @param[in] pos iterator to the element to remove
+    @return Iterator following the last removed element. If the iterator @a
+    pos refers to the last element, the `end()` iterator is returned.
+
+    @tparam IteratorType an @ref iterator or @ref const_iterator
+
+    @post Invalidates iterators and references at or after the point of the
+    erase, including the `end()` iterator.
+
+    @throw type_error.307 if called on a `null` value; example: `"cannot use
+    erase() with null"`
+    @throw invalid_iterator.202 if called on an iterator which does not belong
+    to the current JSON value; example: `"iterator does not fit current
+    value"`
+    @throw invalid_iterator.205 if called on a primitive type with invalid
+    iterator (i.e., any iterator which is not `begin()`); example: `"iterator
+    out of range"`
+
+    @complexity The complexity depends on the type:
+    - objects: amortized constant
+    - arrays: linear in distance between @a pos and the end of the container
+    - strings: linear in the length of the string
+    - other types: constant
+
+    @liveexample{The example shows the result of `erase()` for different JSON
+    types.,erase__IteratorType}
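+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j = {"a", "b", "c"};
+    auto it = j.erase(j.begin() + 1);   // j is now ["a", "c"]; it points to "c"
+    @endcode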
+
+    @sa @ref erase(IteratorType, IteratorType) -- removes the elements in
+    the given range
+    @sa @ref erase(const typename object_t::key_type&) -- removes the element
+    from an object at the given key
+    @sa @ref erase(const size_type) -- removes the element from an array at
+    the given index
+
+    @since version 1.0.0
+    */
+    template<class IteratorType, typename std::enable_if<
+                 std::is_same<IteratorType, typename basic_json_t::iterator>::value or
+                 std::is_same<IteratorType, typename basic_json_t::const_iterator>::value, int>::type
+             = 0>
+    IteratorType erase(IteratorType pos)
+    {
+        // make sure iterator fits the current value
+        if (JSON_UNLIKELY(this != pos.m_object))
+        {
+            JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
+        }
+
+        IteratorType result = end();
+
+        switch (m_type)
+        {
+            case value_t::boolean:
+            case value_t::number_float:
+            case value_t::number_integer:
+            case value_t::number_unsigned:
+            case value_t::string:
+            {
+                if (JSON_UNLIKELY(not pos.m_it.primitive_iterator.is_begin()))
+                {
+                    JSON_THROW(invalid_iterator::create(205, "iterator out of range"));
+                }
+
+                if (is_string())
+                {
+                    AllocatorType<string_t> alloc;
+                    std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.string);
+                    std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.string, 1);
+                    m_value.string = nullptr;
+                }
+
+                m_type = value_t::null;
+                assert_invariant();
+                break;
+            }
+
+            case value_t::object:
+            {
+                result.m_it.object_iterator = m_value.object->erase(pos.m_it.object_iterator);
+                break;
+            }
+
+            case value_t::array:
+            {
+                result.m_it.array_iterator = m_value.array->erase(pos.m_it.array_iterator);
+                break;
+            }
+
+            default:
+                JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name())));
+        }
+
+        return result;
+    }
+
+    /*!
+    @brief remove elements given an iterator range
+
+    Removes the elements specified by the range `[first; last)`. The iterator
+    @a first does not need to be dereferenceable if `first == last`: erasing
+    an empty range is a no-op.
+
+    If called on a primitive type other than `null`, the resulting JSON value
+    will be `null`.
+
+    @param[in] first iterator to the beginning of the range to remove
+    @param[in] last iterator past the end of the range to remove
+    @return Iterator following the last removed element. If the iterator @a
+    last refers to the last element, the `end()` iterator is returned.
+
+    @tparam IteratorType an @ref iterator or @ref const_iterator
+
+    @post Invalidates iterators and references at or after the point of the
+    erase, including the `end()` iterator.
+
+    @throw type_error.307 if called on a `null` value; example: `"cannot use
+    erase() with null"`
+    @throw invalid_iterator.203 if called on iterators which do not belong
+    to the current JSON value; example: `"iterators do not fit current value"`
+    @throw invalid_iterator.204 if called on a primitive type with invalid
+    iterators (i.e., if `first != begin()` or `last != end()`); example:
+    `"iterators out of range"`
+
+    @complexity The complexity depends on the type:
+    - objects: `log(size()) + std::distance(first, last)`
+    - arrays: linear in the distance between @a first and @a last, plus linear
+      in the distance between @a last and end of the container
+    - strings: linear in the length of the string
+    - other types: constant
+
+    @liveexample{The example shows the result of `erase()` for different JSON
+    types.,erase__IteratorType_IteratorType}
+
+    @sa @ref erase(IteratorType) -- removes the element at a given position
+    @sa @ref erase(const typename object_t::key_type&) -- removes the element
+    from an object at the given key
+    @sa @ref erase(const size_type) -- removes the element from an array at
+    the given index
+
+    @since version 1.0.0
+    */
+    template<class IteratorType, typename std::enable_if<
+                 std::is_same<IteratorType, typename basic_json_t::iterator>::value or
+                 std::is_same<IteratorType, typename basic_json_t::const_iterator>::value, int>::type
+             = 0>
+    IteratorType erase(IteratorType first, IteratorType last)
+    {
+        // make sure iterator fits the current value
+        if (JSON_UNLIKELY(this != first.m_object or this != last.m_object))
+        {
+            JSON_THROW(invalid_iterator::create(203, "iterators do not fit current value"));
+        }
+
+        IteratorType result = end();
+
+        switch (m_type)
+        {
+            case value_t::boolean:
+            case value_t::number_float:
+            case value_t::number_integer:
+            case value_t::number_unsigned:
+            case value_t::string:
+            {
+                if (JSON_LIKELY(not first.m_it.primitive_iterator.is_begin()
+                                or not last.m_it.primitive_iterator.is_end()))
+                {
+                    JSON_THROW(invalid_iterator::create(204, "iterators out of range"));
+                }
+
+                if (is_string())
+                {
+                    AllocatorType<string_t> alloc;
+                    std::allocator_traits<decltype(alloc)>::destroy(alloc, m_value.string);
+                    std::allocator_traits<decltype(alloc)>::deallocate(alloc, m_value.string, 1);
+                    m_value.string = nullptr;
+                }
+
+                m_type = value_t::null;
+                assert_invariant();
+                break;
+            }
+
+            case value_t::object:
+            {
+                result.m_it.object_iterator = m_value.object->erase(first.m_it.object_iterator,
+                                              last.m_it.object_iterator);
+                break;
+            }
+
+            case value_t::array:
+            {
+                result.m_it.array_iterator = m_value.array->erase(first.m_it.array_iterator,
+                                             last.m_it.array_iterator);
+                break;
+            }
+
+            default:
+                JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name())));
+        }
+
+        return result;
+    }
+
+    /*!
+    @brief remove element from a JSON object given a key
+
+    Removes elements from a JSON object with the key value @a key.
+
+    @param[in] key value of the elements to remove
+
+    @return Number of elements removed. If @a ObjectType is the default
+    `std::map` type, the return value will always be `0` (@a key was not
+    found) or `1` (@a key was found).
+
+    @post References and iterators to the erased elements are invalidated.
+    Other references and iterators are not affected.
+
+    @throw type_error.307 when called on a type other than JSON object;
+    example: `"cannot use erase() with null"`
+
+    @complexity `log(size()) + count(key)`
+
+    @liveexample{The example shows the effect of `erase()`.,erase__key_type}
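+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j = {{"a", 1}, {"b", 2}};
+    auto removed = j.erase("a");   // removed == 1; j is now {"b": 2}
+    auto missing = j.erase("z");   // missing == 0; j is unchanged
+    @endcode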
+
+    @sa @ref erase(IteratorType) -- removes the element at a given position
+    @sa @ref erase(IteratorType, IteratorType) -- removes the elements in
+    the given range
+    @sa @ref erase(const size_type) -- removes the element from an array at
+    the given index
+
+    @since version 1.0.0
+    */
+    size_type erase(const typename object_t::key_type& key)
+    {
+        // this erase only works for objects
+        if (JSON_LIKELY(is_object()))
+        {
+            return m_value.object->erase(key);
+        }
+
+        JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name())));
+    }
+
+    /*!
+    @brief remove element from a JSON array given an index
+
+    Removes element from a JSON array at the index @a idx.
+
+    @param[in] idx index of the element to remove
+
+    @throw type_error.307 when called on a type other than JSON array;
+    example: `"cannot use erase() with null"`
+    @throw out_of_range.401 when `idx >= size()`; example: `"array index 17
+    is out of range"`
+
+    @complexity Linear in distance between @a idx and the end of the container.
+
+    @liveexample{The example shows the effect of `erase()`.,erase__size_type}
+
+    @sa @ref erase(IteratorType) -- removes the element at a given position
+    @sa @ref erase(IteratorType, IteratorType) -- removes the elements in
+    the given range
+    @sa @ref erase(const typename object_t::key_type&) -- removes the element
+    from an object at the given key
+
+    @since version 1.0.0
+    */
+    void erase(const size_type idx)
+    {
+        // this erase only works for arrays
+        if (JSON_LIKELY(is_array()))
+        {
+            if (JSON_UNLIKELY(idx >= size()))
+            {
+                JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range"));
+            }
+
+            m_value.array->erase(m_value.array->begin() + static_cast<difference_type>(idx));
+        }
+        else
+        {
+            JSON_THROW(type_error::create(307, "cannot use erase() with " + std::string(type_name())));
+        }
+    }
+
+    /// @}
+
+
+    ////////////
+    // lookup //
+    ////////////
+
+    /// @name lookup
+    /// @{
+
+    /*!
+    @brief find an element in a JSON object
+
+    Finds an element in a JSON object with key equivalent to @a key. If the
+    element is not found or the JSON value is not an object, end() is
+    returned.
+
+    @note This method always returns @ref end() when executed on a JSON type
+          that is not an object.
+
+    @param[in] key key value of the element to search for.
+
+    @return Iterator to an element with key equivalent to @a key. If no such
+    element is found or the JSON value is not an object, past-the-end (see
+    @ref end()) iterator is returned.
+
+    @complexity Logarithmic in the size of the JSON object.
+
+    @liveexample{The example shows how `find()` is used.,find__key_type}
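+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j = {{"name", "Ada"}};
+    auto it = j.find("name");
+    if (it != j.end())
+    {
+        // key found; *it == "Ada"
+    }
+    @endcode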
+
+    @since version 1.0.0
+    */
+    template<typename KeyT>
+    iterator find(KeyT&& key)
+    {
+        auto result = end();
+
+        if (is_object())
+        {
+            result.m_it.object_iterator = m_value.object->find(std::forward<KeyT>(key));
+        }
+
+        return result;
+    }
+
+    /*!
+    @brief find an element in a JSON object
+    @copydoc find(KeyT&&)
+    */
+    template<typename KeyT>
+    const_iterator find(KeyT&& key) const
+    {
+        auto result = cend();
+
+        if (is_object())
+        {
+            result.m_it.object_iterator = m_value.object->find(std::forward<KeyT>(key));
+        }
+
+        return result;
+    }
+
+    /*!
+    @brief returns the number of occurrences of a key in a JSON object
+
+    Returns the number of elements with key @a key. If ObjectType is the
+    default `std::map` type, the return value will always be `0` (@a key was
+    not found) or `1` (@a key was found).
+
+    @note This method always returns `0` when executed on a JSON type that is
+          not an object.
+
+    @param[in] key key value of the element to count
+
+    @return Number of elements with key @a key. If the JSON value is not an
+    object, the return value will be `0`.
+
+    @complexity Logarithmic in the size of the JSON object.
+
+    @liveexample{The example shows how `count()` is used.,count}
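+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j = {{"a", 1}};
+    bool has_a = (j.count("a") == 1);   // true
+    bool has_z = (j.count("z") == 1);   // false
+    @endcode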
+
+    @since version 1.0.0
+    */
+    template<typename KeyT>
+    size_type count(KeyT&& key) const
+    {
+        // return 0 for all nonobject types
+        return is_object() ? m_value.object->count(std::forward<KeyT>(key)) : 0;
+    }
+
+    /// @}
+
+
+    ///////////////
+    // iterators //
+    ///////////////
+
+    /// @name iterators
+    /// @{
+
+    /*!
+    @brief returns an iterator to the first element
+
+    Returns an iterator to the first element.
+
+    @image html range-begin-end.svg "Illustration from cppreference.com"
+
+    @return iterator to the first element
+
+    @complexity Constant.
+
+    @requirement This function helps `basic_json` satisfying the
+    [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+    requirements:
+    - The complexity is constant.
+
+    @liveexample{The following code shows an example for `begin()`.,begin}
+
+    @sa @ref cbegin() -- returns a const iterator to the beginning
+    @sa @ref end() -- returns an iterator to the end
+    @sa @ref cend() -- returns a const iterator to the end
+
+    @since version 1.0.0
+    */
+    iterator begin() noexcept
+    {
+        iterator result(this);
+        result.set_begin();
+        return result;
+    }
+
+    /*!
+    @copydoc basic_json::cbegin()
+    */
+    const_iterator begin() const noexcept
+    {
+        return cbegin();
+    }
+
+    /*!
+    @brief returns a const iterator to the first element
+
+    Returns a const iterator to the first element.
+
+    @image html range-begin-end.svg "Illustration from cppreference.com"
+
+    @return const iterator to the first element
+
+    @complexity Constant.
+
+    @requirement This function helps `basic_json` satisfying the
+    [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+    requirements:
+    - The complexity is constant.
+    - Has the semantics of `const_cast<const basic_json&>(*this).begin()`.
+
+    @liveexample{The following code shows an example for `cbegin()`.,cbegin}
+
+    @sa @ref begin() -- returns an iterator to the beginning
+    @sa @ref end() -- returns an iterator to the end
+    @sa @ref cend() -- returns a const iterator to the end
+
+    @since version 1.0.0
+    */
+    const_iterator cbegin() const noexcept
+    {
+        const_iterator result(this);
+        result.set_begin();
+        return result;
+    }
+
+    /*!
+    @brief returns an iterator to one past the last element
+
+    Returns an iterator to one past the last element.
+
+    @image html range-begin-end.svg "Illustration from cppreference.com"
+
+    @return iterator one past the last element
+
+    @complexity Constant.
+
+    @requirement This function helps `basic_json` satisfying the
+    [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+    requirements:
+    - The complexity is constant.
+
+    @liveexample{The following code shows an example for `end()`.,end}
+
+    @sa @ref cend() -- returns a const iterator to the end
+    @sa @ref begin() -- returns an iterator to the beginning
+    @sa @ref cbegin() -- returns a const iterator to the beginning
+
+    @since version 1.0.0
+    */
+    iterator end() noexcept
+    {
+        iterator result(this);
+        result.set_end();
+        return result;
+    }
+
+    /*!
+    @copydoc basic_json::cend()
+    */
+    const_iterator end() const noexcept
+    {
+        return cend();
+    }
+
+    /*!
+    @brief returns a const iterator to one past the last element
+
+    Returns a const iterator to one past the last element.
+
+    @image html range-begin-end.svg "Illustration from cppreference.com"
+
+    @return const iterator one past the last element
+
+    @complexity Constant.
+
+    @requirement This function helps `basic_json` satisfying the
+    [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+    requirements:
+    - The complexity is constant.
+    - Has the semantics of `const_cast<const basic_json&>(*this).end()`.
+
+    @liveexample{The following code shows an example for `cend()`.,cend}
+
+    @sa @ref end() -- returns an iterator to the end
+    @sa @ref begin() -- returns an iterator to the beginning
+    @sa @ref cbegin() -- returns a const iterator to the beginning
+
+    @since version 1.0.0
+    */
+    const_iterator cend() const noexcept
+    {
+        const_iterator result(this);
+        result.set_end();
+        return result;
+    }
+
+    /*!
+    @brief returns an iterator to the reverse-beginning
+
+    Returns an iterator to the reverse-beginning; that is, the last element.
+
+    @image html range-rbegin-rend.svg "Illustration from cppreference.com"
+
+    @complexity Constant.
+
+    @requirement This function helps `basic_json` satisfying the
+    [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer)
+    requirements:
+    - The complexity is constant.
+    - Has the semantics of `reverse_iterator(end())`.
+
+    @liveexample{The following code shows an example for `rbegin()`.,rbegin}
+
+    @sa @ref crbegin() -- returns a const reverse iterator to the beginning
+    @sa @ref rend() -- returns a reverse iterator to the end
+    @sa @ref crend() -- returns a const reverse iterator to the end
+
+    @since version 1.0.0
+    */
+    reverse_iterator rbegin() noexcept
+    {
+        return reverse_iterator(end());
+    }
+
+    /*!
+    @copydoc basic_json::crbegin()
+    */
+    const_reverse_iterator rbegin() const noexcept
+    {
+        return crbegin();
+    }
+
+    /*!
+    @brief returns an iterator to the reverse-end
+
+    Returns an iterator to the reverse-end; that is, one before the first
+    element.
+
+    @image html range-rbegin-rend.svg "Illustration from cppreference.com"
+
+    @complexity Constant.
+
+    @requirement This function helps `basic_json` satisfying the
+    [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer)
+    requirements:
+    - The complexity is constant.
+    - Has the semantics of `reverse_iterator(begin())`.
+
+    @liveexample{The following code shows an example for `rend()`.,rend}
+
+    @sa @ref crend() -- returns a const reverse iterator to the end
+    @sa @ref rbegin() -- returns a reverse iterator to the beginning
+    @sa @ref crbegin() -- returns a const reverse iterator to the beginning
+
+    @since version 1.0.0
+    */
+    reverse_iterator rend() noexcept
+    {
+        return reverse_iterator(begin());
+    }
+
+    /*!
+    @copydoc basic_json::crend()
+    */
+    const_reverse_iterator rend() const noexcept
+    {
+        return crend();
+    }
+
+    /*!
+    @brief returns a const reverse iterator to the last element
+
+    Returns a const iterator to the reverse-beginning; that is, the last
+    element.
+
+    @image html range-rbegin-rend.svg "Illustration from cppreference.com"
+
+    @complexity Constant.
+
+    @requirement This function helps `basic_json` satisfying the
+    [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer)
+    requirements:
+    - The complexity is constant.
+    - Has the semantics of `const_cast<const basic_json&>(*this).rbegin()`.
+
+    @liveexample{The following code shows an example for `crbegin()`.,crbegin}
+
+    @sa @ref rbegin() -- returns a reverse iterator to the beginning
+    @sa @ref rend() -- returns a reverse iterator to the end
+    @sa @ref crend() -- returns a const reverse iterator to the end
+
+    @since version 1.0.0
+    */
+    const_reverse_iterator crbegin() const noexcept
+    {
+        return const_reverse_iterator(cend());
+    }
+
+    /*!
+    @brief returns a const reverse iterator to one before the first
+
+    Returns a const reverse iterator to the reverse-end; that is, one before
+    the first element.
+
+    @image html range-rbegin-rend.svg "Illustration from cppreference.com"
+
+    @complexity Constant.
+
+    @requirement This function helps `basic_json` satisfying the
+    [ReversibleContainer](https://en.cppreference.com/w/cpp/named_req/ReversibleContainer)
+    requirements:
+    - The complexity is constant.
+    - Has the semantics of `const_cast<const basic_json&>(*this).rend()`.
+
+    @liveexample{The following code shows an example for `crend()`.,crend}
+
+    @sa @ref rend() -- returns a reverse iterator to the end
+    @sa @ref rbegin() -- returns a reverse iterator to the beginning
+    @sa @ref crbegin() -- returns a const reverse iterator to the beginning
+
+    @since version 1.0.0
+    */
+    const_reverse_iterator crend() const noexcept
+    {
+        return const_reverse_iterator(cbegin());
+    }
+
+  public:
+    /*!
+    @brief wrapper to access iterator member functions in range-based for
+
+    This function allows access to @ref iterator::key() and @ref
+    iterator::value() during range-based for loops. In these loops, a
+    reference to the JSON values is returned, so there is no access to the
+    underlying iterator.
+
+    For loop without iterator_wrapper:
+
+    @code{cpp}
+    for (auto it = j_object.begin(); it != j_object.end(); ++it)
+    {
+        std::cout << "key: " << it.key() << ", value:" << it.value() << '\n';
+    }
+    @endcode
+
+    Range-based for loop without iterator proxy:
+
+    @code{cpp}
+    for (auto it : j_object)
+    {
+        // "it" is of type json::reference and has no key() member
+        std::cout << "value: " << it << '\n';
+    }
+    @endcode
+
+    Range-based for loop with iterator proxy:
+
+    @code{cpp}
+    for (auto it : json::iterator_wrapper(j_object))
+    {
+        std::cout << "key: " << it.key() << ", value:" << it.value() << '\n';
+    }
+    @endcode
+
+    @note When iterating over an array, `key()` will return the index of the
+          element as a string (see example).
+
+    @param[in] ref  reference to a JSON value
+    @return iteration proxy object wrapping @a ref with an interface to use in
+            range-based for loops
+
+    @liveexample{The following code shows how the wrapper is used,iterator_wrapper}
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @complexity Constant.
+
+    @note The name of this function is not yet final and may change in the
+    future.
+
+    @deprecated This function is deprecated and will be removed in version
+                4.0.0 of the library. Please use @ref items() instead;
+                that is, replace `json::iterator_wrapper(j)` with `j.items()`.
+    */
+    JSON_DEPRECATED
+    static iteration_proxy<iterator> iterator_wrapper(reference ref) noexcept
+    {
+        return ref.items();
+    }
+
+    /*!
+    @copydoc iterator_wrapper(reference)
+    */
+    JSON_DEPRECATED
+    static iteration_proxy<const_iterator> iterator_wrapper(const_reference ref) noexcept
+    {
+        return ref.items();
+    }
+
+    /*!
+    @brief helper to access iterator member functions in range-based for
+
+    This function allows access to @ref iterator::key() and @ref
+    iterator::value() during range-based for loops. In these loops, a
+    reference to the JSON values is returned, so there is no access to the
+    underlying iterator.
+
+    For loop without `items()` function:
+
+    @code{cpp}
+    for (auto it = j_object.begin(); it != j_object.end(); ++it)
+    {
+        std::cout << "key: " << it.key() << ", value:" << it.value() << '\n';
+    }
+    @endcode
+
+    Range-based for loop without `items()` function:
+
+    @code{cpp}
+    for (auto it : j_object)
+    {
+        // "it" is of type json::reference and has no key() member
+        std::cout << "value: " << it << '\n';
+    }
+    @endcode
+
+    Range-based for loop with `items()` function:
+
+    @code{cpp}
+    for (auto& el : j_object.items())
+    {
+        std::cout << "key: " << el.key() << ", value:" << el.value() << '\n';
+    }
+    @endcode
+
+    The `items()` function also allows the use of
+    [structured bindings](https://en.cppreference.com/w/cpp/language/structured_binding)
+    (C++17):
+
+    @code{cpp}
+    for (auto& [key, val] : j_object.items())
+    {
+        std::cout << "key: " << key << ", value:" << val << '\n';
+    }
+    @endcode
+
+    @note When iterating over an array, `key()` will return the index of the
+          element as a string (see example). For primitive types (e.g., numbers),
+          `key()` returns an empty string.
+
+    @return iteration proxy object wrapping this JSON value with an interface
+            to use in range-based for loops
+
+    @liveexample{The following code shows how the function is used.,items}
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @complexity Constant.
+
+    @since version 3.1.0, structured bindings support since 3.5.0.
+    */
+    iteration_proxy<iterator> items() noexcept
+    {
+        return iteration_proxy<iterator>(*this);
+    }
+
+    /*!
+    @copydoc items()
+    */
+    iteration_proxy<const_iterator> items() const noexcept
+    {
+        return iteration_proxy<const_iterator>(*this);
+    }
+
+    /// @}
+
+
+    //////////////
+    // capacity //
+    //////////////
+
+    /// @name capacity
+    /// @{
+
+    /*!
+    @brief checks whether the container is empty.
+
+    Checks if a JSON value has no elements (i.e. whether its @ref size is `0`).
+
+    @return The return value depends on the different types and is
+            defined as follows:
+            Value type  | return value
+            ----------- | -------------
+            null        | `true`
+            boolean     | `false`
+            string      | `false`
+            number      | `false`
+            object      | result of function `object_t::empty()`
+            array       | result of function `array_t::empty()`
+
+    @liveexample{The following code uses `empty()` to check if a JSON
+    object contains any elements.,empty}
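+
+    A minimal usage sketch (hypothetical values; `json` denotes the
+    `nlohmann::json` alias):
+    @code {.cpp}
+    json j = json::array();   // []
+    bool e1 = j.empty();      // true
+    j.push_back(1);
+    bool e2 = j.empty();      // false
+    @endcode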
+
+    @complexity Constant, as long as @ref array_t and @ref object_t satisfy
+    the Container concept; that is, their `empty()` functions have constant
+    complexity.
+
+    @iterators No changes.
+
+    @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+    @note This function does not return whether a string stored as JSON value
+    is empty - it returns whether the JSON container itself is empty which is
+    false in the case of a string.
+
+    @requirement This function helps `basic_json` satisfying the
+    [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+    requirements:
+    - The complexity is constant.
+    - Has the semantics of `begin() == end()`.
+
+    @sa @ref size() -- returns the number of elements
+
+    @since version 1.0.0
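+
+    A brief sketch of the table above (assuming the usual `json` alias for the
+    default `basic_json<>` specialization):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json j_null;                          // null
+    json j_array = {1, 2, 3};             // array
+    json j_object = {{"key", "value"}};   // object
+    json j_string = "hello";              // string
+
+    j_null.empty();    // true
+    j_array.empty();   // false
+    j_object.empty();  // false
+    j_string.empty();  // false -- a string counts as one non-empty JSON value
+    @endcode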
+    */
+    bool empty() const noexcept
+    {
+        switch (m_type)
+        {
+            case value_t::null:
+            {
+                // null values are empty
+                return true;
+            }
+
+            case value_t::array:
+            {
+                // delegate call to array_t::empty()
+                return m_value.array->empty();
+            }
+
+            case value_t::object:
+            {
+                // delegate call to object_t::empty()
+                return m_value.object->empty();
+            }
+
+            default:
+            {
+                // all other types are nonempty
+                return false;
+            }
+        }
+    }
+
+    /*!
+    @brief returns the number of elements
+
+    Returns the number of elements in a JSON value.
+
+    @return The return value depends on the different types and is
+            defined as follows:
+            Value type  | return value
+            ----------- | -------------
+            null        | `0`
+            boolean     | `1`
+            string      | `1`
+            number      | `1`
+            object      | result of function object_t::size()
+            array       | result of function array_t::size()
+
+    @liveexample{The following code calls `size()` on the different value
+    types.,size}
+
+    @complexity Constant, as long as @ref array_t and @ref object_t satisfy
+    the Container concept; that is, their size() functions have constant
+    complexity.
+
+    @iterators No changes.
+
+    @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+    @note This function does not return the length of a string stored as JSON
+    value - it returns the number of elements in the JSON value which is 1 in
+    the case of a string.
+
+    @requirement This function helps `basic_json` satisfying the
+    [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+    requirements:
+    - The complexity is constant.
+    - Has the semantics of `std::distance(begin(), end())`.
+
+    @sa @ref empty() -- checks whether the container is empty
+    @sa @ref max_size() -- returns the maximal number of elements
+
+    @since version 1.0.0
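+
+    A brief sketch of the table above (assuming the usual `json` alias for the
+    default `basic_json<>` specialization):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json j_object = {{"one", 1}, {"two", 2}};
+    json j_array = {1, 2, 3};
+
+    j_object.size();       // 2
+    j_array.size();        // 3
+    json(nullptr).size();  // 0
+    json(3.14).size();     // 1
+    @endcode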
+    */
+    size_type size() const noexcept
+    {
+        switch (m_type)
+        {
+            case value_t::null:
+            {
+                // null values are empty
+                return 0;
+            }
+
+            case value_t::array:
+            {
+                // delegate call to array_t::size()
+                return m_value.array->size();
+            }
+
+            case value_t::object:
+            {
+                // delegate call to object_t::size()
+                return m_value.object->size();
+            }
+
+            default:
+            {
+                // all other types have size 1
+                return 1;
+            }
+        }
+    }
+
+    /*!
+    @brief returns the maximum possible number of elements
+
+    Returns the maximum number of elements a JSON value is able to hold due to
+    system or library implementation limitations, i.e. `std::distance(begin(),
+    end())` for the JSON value.
+
+    @return The return value depends on the different types and is
+            defined as follows:
+            Value type  | return value
+            ----------- | -------------
+            null        | `0` (same as `size()`)
+            boolean     | `1` (same as `size()`)
+            string      | `1` (same as `size()`)
+            number      | `1` (same as `size()`)
+            object      | result of function `object_t::max_size()`
+            array       | result of function `array_t::max_size()`
+
+    @liveexample{The following code calls `max_size()` on the different value
+    types. Note the output is implementation specific.,max_size}
+
+    @complexity Constant, as long as @ref array_t and @ref object_t satisfy
+    the Container concept; that is, their `max_size()` functions have constant
+    complexity.
+
+    @iterators No changes.
+
+    @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+    @requirement This function helps `basic_json` satisfying the
+    [Container](https://en.cppreference.com/w/cpp/named_req/Container)
+    requirements:
+    - The complexity is constant.
+    - Has the semantics of returning `b.size()` where `b` is the largest
+      possible JSON value.
+
+    @sa @ref size() -- returns the number of elements
+
+    @since version 1.0.0
+    */
+    size_type max_size() const noexcept
+    {
+        switch (m_type)
+        {
+            case value_t::array:
+            {
+                // delegate call to array_t::max_size()
+                return m_value.array->max_size();
+            }
+
+            case value_t::object:
+            {
+                // delegate call to object_t::max_size()
+                return m_value.object->max_size();
+            }
+
+            default:
+            {
+                // all other types have max_size() == size()
+                return size();
+            }
+        }
+    }
+
+    /// @}
+
+
+    ///////////////
+    // modifiers //
+    ///////////////
+
+    /// @name modifiers
+    /// @{
+
+    /*!
+    @brief clears the contents
+
+    Clears the content of a JSON value and resets it to the default value as
+    if @ref basic_json(value_t) had been called with the current value
+    type from @ref type():
+
+    Value type  | initial value
+    ----------- | -------------
+    null        | `null`
+    boolean     | `false`
+    string      | `""`
+    number      | `0`
+    object      | `{}`
+    array       | `[]`
+
+    @post Has the same effect as calling
+    @code {.cpp}
+    *this = basic_json(type());
+    @endcode
+
+    @liveexample{The example below shows the effect of `clear()` to different
+    JSON types.,clear}
+
+    @complexity Linear in the size of the JSON value.
+
+    @iterators All iterators, pointers and references related to this container
+               are invalidated.
+
+    @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+    @sa @ref basic_json(value_t) -- constructor that creates an object with the
+        same value as calling `clear()`
+
+    @since version 1.0.0
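+
+    A brief sketch of the reset behavior (assuming the usual `json` alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json j_array = {1, 2, 3};
+    json j_string = "text";
+
+    j_array.clear();   // j_array is now []
+    j_string.clear();  // j_string is now ""
+    @endcode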
+    */
+    void clear() noexcept
+    {
+        switch (m_type)
+        {
+            case value_t::number_integer:
+            {
+                m_value.number_integer = 0;
+                break;
+            }
+
+            case value_t::number_unsigned:
+            {
+                m_value.number_unsigned = 0;
+                break;
+            }
+
+            case value_t::number_float:
+            {
+                m_value.number_float = 0.0;
+                break;
+            }
+
+            case value_t::boolean:
+            {
+                m_value.boolean = false;
+                break;
+            }
+
+            case value_t::string:
+            {
+                m_value.string->clear();
+                break;
+            }
+
+            case value_t::array:
+            {
+                m_value.array->clear();
+                break;
+            }
+
+            case value_t::object:
+            {
+                m_value.object->clear();
+                break;
+            }
+
+            default:
+                break;
+        }
+    }
+
+    /*!
+    @brief add an object to an array
+
+    Appends the given element @a val to the end of the JSON value. If the
+    function is called on a JSON null value, an empty array is created before
+    appending @a val.
+
+    @param[in] val the value to add to the JSON array
+
+    @throw type_error.308 when called on a type other than JSON array or
+    null; example: `"cannot use push_back() with number"`
+
+    @complexity Amortized constant.
+
+    @liveexample{The example shows how `push_back()` and `+=` can be used to
+    add elements to a JSON array. Note how the `null` value was silently
+    converted to a JSON array.,push_back}
+
+    @since version 1.0.0
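+
+    A brief sketch of the append behavior (assuming the usual `json` alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json j;              // null
+    j.push_back("one");  // j is now ["one"]
+    j += 2;              // j is now ["one", 2]
+    @endcode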
+    */
+    void push_back(basic_json&& val)
+    {
+        // push_back only works for null objects or arrays
+        if (JSON_UNLIKELY(not(is_null() or is_array())))
+        {
+            JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name())));
+        }
+
+        // transform null object into an array
+        if (is_null())
+        {
+            m_type = value_t::array;
+            m_value = value_t::array;
+            assert_invariant();
+        }
+
+        // add element to array (move semantics)
+        m_value.array->push_back(std::move(val));
+        // invalidate object
+        val.m_type = value_t::null;
+    }
+
+    /*!
+    @brief add an object to an array
+    @copydoc push_back(basic_json&&)
+    */
+    reference operator+=(basic_json&& val)
+    {
+        push_back(std::move(val));
+        return *this;
+    }
+
+    /*!
+    @brief add an object to an array
+    @copydoc push_back(basic_json&&)
+    */
+    void push_back(const basic_json& val)
+    {
+        // push_back only works for null objects or arrays
+        if (JSON_UNLIKELY(not(is_null() or is_array())))
+        {
+            JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name())));
+        }
+
+        // transform null object into an array
+        if (is_null())
+        {
+            m_type = value_t::array;
+            m_value = value_t::array;
+            assert_invariant();
+        }
+
+        // add element to array
+        m_value.array->push_back(val);
+    }
+
+    /*!
+    @brief add an object to an array
+    @copydoc push_back(basic_json&&)
+    */
+    reference operator+=(const basic_json& val)
+    {
+        push_back(val);
+        return *this;
+    }
+
+    /*!
+    @brief add an object to an object
+
+    Inserts the given element @a val to the JSON object. If the function is
+    called on a JSON null value, an empty object is created before inserting
+    @a val.
+
+    @param[in] val the value to add to the JSON object
+
+    @throw type_error.308 when called on a type other than JSON object or
+    null; example: `"cannot use push_back() with number"`
+
+    @complexity Logarithmic in the size of the container, O(log(`size()`)).
+
+    @liveexample{The example shows how `push_back()` and `+=` can be used to
+    add elements to a JSON object. Note how the `null` value was silently
+    converted to a JSON object.,push_back__object_t__value}
+
+    @since version 1.0.0
+    */
+    void push_back(const typename object_t::value_type& val)
+    {
+        // push_back only works for null values or objects
+        if (JSON_UNLIKELY(not(is_null() or is_object())))
+        {
+            JSON_THROW(type_error::create(308, "cannot use push_back() with " + std::string(type_name())));
+        }
+
+        // transform null object into an object
+        if (is_null())
+        {
+            m_type = value_t::object;
+            m_value = value_t::object;
+            assert_invariant();
+        }
+
+        // add element to object
+        m_value.object->insert(val);
+    }
+
+    /*!
+    @brief add an object to an object
+    @copydoc push_back(const typename object_t::value_type&)
+    */
+    reference operator+=(const typename object_t::value_type& val)
+    {
+        push_back(val);
+        return *this;
+    }
+
+    /*!
+    @brief add an object to an object
+
+    This function allows the use of `push_back` with an initializer list. In case
+
+    1. the current value is an object,
+    2. the initializer list @a init contains only two elements, and
+    3. the first element of @a init is a string,
+
+    @a init is converted into an object element and added using
+    @ref push_back(const typename object_t::value_type&). Otherwise, @a init
+    is converted to a JSON value and added using @ref push_back(basic_json&&).
+
+    @param[in] init  an initializer list
+
+    @complexity Linear in the size of the initializer list @a init.
+
+    @note This function is required to resolve an ambiguous overload error,
+          because pairs like `{"key", "value"}` can be both interpreted as
+          `object_t::value_type` or `std::initializer_list<basic_json>`, see
+          https://github.com/nlohmann/json/issues/235 for more information.
+
+    @liveexample{The example shows how initializer lists are treated as
+    objects when possible.,push_back__initializer_list}
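+
+    A brief sketch of the two cases described above (assuming the usual `json`
+    alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json j_object = {{"one", 1}};
+    j_object.push_back({"two", 2});   // treated as key/value pair: {"one":1,"two":2}
+
+    json j_array = {1, 2};
+    j_array.push_back({"three", 3});  // treated as a value: [1,2,["three",3]]
+    @endcode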
+    */
+    void push_back(initializer_list_t init)
+    {
+        if (is_object() and init.size() == 2 and (*init.begin())->is_string())
+        {
+            basic_json&& key = init.begin()->moved_or_copied();
+            push_back(typename object_t::value_type(
+                          std::move(key.get_ref<string_t&>()), (init.begin() + 1)->moved_or_copied()));
+        }
+        else
+        {
+            push_back(basic_json(init));
+        }
+    }
+
+    /*!
+    @brief add an object to an object
+    @copydoc push_back(initializer_list_t)
+    */
+    reference operator+=(initializer_list_t init)
+    {
+        push_back(init);
+        return *this;
+    }
+
+    /*!
+    @brief add an object to an array
+
+    Creates a JSON value from the passed parameters @a args and appends it to
+    the end of the JSON value. If the function is called on a JSON null value,
+    an empty array is created before appending the value created from @a args.
+
+    @param[in] args arguments to forward to a constructor of @ref basic_json
+    @tparam Args compatible types to create a @ref basic_json object
+
+    @throw type_error.311 when called on a type other than JSON array or
+    null; example: `"cannot use emplace_back() with number"`
+
+    @complexity Amortized constant.
+
+    @liveexample{The example shows how `emplace_back()` can be used to add
+    elements to a JSON array. Note how the `null` value was silently converted
+    to a JSON array.,emplace_back}
+
+    @since version 2.0.8
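+
+    A brief sketch (assuming the usual `json` alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json j;                 // null
+    j.emplace_back(1);      // j is now [1]
+    j.emplace_back("two");  // j is now [1, "two"]
+    @endcode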
+    */
+    template<class... Args>
+    void emplace_back(Args&& ... args)
+    {
+        // emplace_back only works for null objects or arrays
+        if (JSON_UNLIKELY(not(is_null() or is_array())))
+        {
+            JSON_THROW(type_error::create(311, "cannot use emplace_back() with " + std::string(type_name())));
+        }
+
+        // transform null object into an array
+        if (is_null())
+        {
+            m_type = value_t::array;
+            m_value = value_t::array;
+            assert_invariant();
+        }
+
+        // add element to array (perfect forwarding)
+        m_value.array->emplace_back(std::forward<Args>(args)...);
+    }
+
+    /*!
+    @brief add an object to an object if key does not exist
+
+    Inserts a new element into a JSON object constructed in-place with the
+    given @a args if there is no element with the key in the container. If the
+    function is called on a JSON null value, an empty object is created before
+    appending the value created from @a args.
+
+    @param[in] args arguments to forward to a constructor of @ref basic_json
+    @tparam Args compatible types to create a @ref basic_json object
+
+    @return a pair consisting of an iterator to the inserted element, or the
+            already-existing element if no insertion happened, and a bool
+            denoting whether the insertion took place.
+
+    @throw type_error.311 when called on a type other than JSON object or
+    null; example: `"cannot use emplace() with number"`
+
+    @complexity Logarithmic in the size of the container, O(log(`size()`)).
+
+    @liveexample{The example shows how `emplace()` can be used to add elements
+    to a JSON object. Note how the `null` value was silently converted to a
+    JSON object. Further note how no value is added if there was already one
+    value stored with the same key.,emplace}
+
+    @since version 2.0.8
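+
+    A brief sketch of the insert-if-absent behavior (assuming the usual `json`
+    alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json j;                         // null
+    auto p1 = j.emplace("key", 1);  // inserted: p1.second == true
+    auto p2 = j.emplace("key", 2);  // key already exists: p2.second == false
+    // j is now {"key": 1}
+    @endcode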
+    */
+    template<class... Args>
+    std::pair<iterator, bool> emplace(Args&& ... args)
+    {
+        // emplace only works for null values or objects
+        if (JSON_UNLIKELY(not(is_null() or is_object())))
+        {
+            JSON_THROW(type_error::create(311, "cannot use emplace() with " + std::string(type_name())));
+        }
+
+        // transform null object into an object
+        if (is_null())
+        {
+            m_type = value_t::object;
+            m_value = value_t::object;
+            assert_invariant();
+        }
+
+        // add element to object (perfect forwarding)
+        auto res = m_value.object->emplace(std::forward<Args>(args)...);
+        // create result iterator and set iterator to the result of emplace
+        auto it = begin();
+        it.m_it.object_iterator = res.first;
+
+        // return pair of iterator and boolean
+        return {it, res.second};
+    }
+
+    /// Helper for insertion of an iterator
+    /// @note: This uses std::distance to support GCC 4.8,
+    ///        see https://github.com/nlohmann/json/pull/1257
+    template<typename... Args>
+    iterator insert_iterator(const_iterator pos, Args&& ... args)
+    {
+        iterator result(this);
+        assert(m_value.array != nullptr);
+
+        auto insert_pos = std::distance(m_value.array->begin(), pos.m_it.array_iterator);
+        m_value.array->insert(pos.m_it.array_iterator, std::forward<Args>(args)...);
+        result.m_it.array_iterator = m_value.array->begin() + insert_pos;
+
+        // This could have been written as:
+        // result.m_it.array_iterator = m_value.array->insert(pos.m_it.array_iterator, cnt, val);
+        // but the return value of insert is missing in GCC 4.8, so it is written this way instead.
+
+        return result;
+    }
+
+    /*!
+    @brief inserts element
+
+    Inserts element @a val before iterator @a pos.
+
+    @param[in] pos iterator before which the content will be inserted; may be
+    the end() iterator
+    @param[in] val element to insert
+    @return iterator pointing to the inserted @a val.
+
+    @throw type_error.309 if called on JSON values other than arrays;
+    example: `"cannot use insert() with string"`
+    @throw invalid_iterator.202 if @a pos is not an iterator of *this;
+    example: `"iterator does not fit current value"`
+
+    @complexity Constant plus linear in the distance between @a pos and end of
+    the container.
+
+    @liveexample{The example shows how `insert()` is used.,insert}
+
+    @since version 1.0.0
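+
+    A brief sketch (assuming the usual `json` alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json j = {1, 2, 4};
+    auto it = j.insert(j.begin() + 2, 3);  // j is now [1, 2, 3, 4]
+    // it points to the inserted element 3
+    @endcode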
+    */
+    iterator insert(const_iterator pos, const basic_json& val)
+    {
+        // insert only works for arrays
+        if (JSON_LIKELY(is_array()))
+        {
+            // check if iterator pos fits to this JSON value
+            if (JSON_UNLIKELY(pos.m_object != this))
+            {
+                JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
+            }
+
+            // insert to array and return iterator
+            return insert_iterator(pos, val);
+        }
+
+        JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
+    }
+
+    /*!
+    @brief inserts element
+    @copydoc insert(const_iterator, const basic_json&)
+    */
+    iterator insert(const_iterator pos, basic_json&& val)
+    {
+        return insert(pos, val);
+    }
+
+    /*!
+    @brief inserts elements
+
+    Inserts @a cnt copies of @a val before iterator @a pos.
+
+    @param[in] pos iterator before which the content will be inserted; may be
+    the end() iterator
+    @param[in] cnt number of copies of @a val to insert
+    @param[in] val element to insert
+    @return iterator pointing to the first element inserted, or @a pos if
+    `cnt==0`
+
+    @throw type_error.309 if called on JSON values other than arrays; example:
+    `"cannot use insert() with string"`
+    @throw invalid_iterator.202 if @a pos is not an iterator of *this;
+    example: `"iterator does not fit current value"`
+
+    @complexity Linear in @a cnt plus linear in the distance between @a pos
+    and end of the container.
+
+    @liveexample{The example shows how `insert()` is used.,insert__count}
+
+    @since version 1.0.0
+    */
+    iterator insert(const_iterator pos, size_type cnt, const basic_json& val)
+    {
+        // insert only works for arrays
+        if (JSON_LIKELY(is_array()))
+        {
+            // check if iterator pos fits to this JSON value
+            if (JSON_UNLIKELY(pos.m_object != this))
+            {
+                JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
+            }
+
+            // insert to array and return iterator
+            return insert_iterator(pos, cnt, val);
+        }
+
+        JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
+    }
+
+    /*!
+    @brief inserts elements
+
+    Inserts elements from range `[first, last)` before iterator @a pos.
+
+    @param[in] pos iterator before which the content will be inserted; may be
+    the end() iterator
+    @param[in] first begin of the range of elements to insert
+    @param[in] last end of the range of elements to insert
+
+    @throw type_error.309 if called on JSON values other than arrays; example:
+    `"cannot use insert() with string"`
+    @throw invalid_iterator.202 if @a pos is not an iterator of *this;
+    example: `"iterator does not fit current value"`
+    @throw invalid_iterator.210 if @a first and @a last do not belong to the
+    same JSON value; example: `"iterators do not fit"`
+    @throw invalid_iterator.211 if @a first or @a last are iterators into
+    container for which insert is called; example: `"passed iterators may not
+    belong to container"`
+
+    @return iterator pointing to the first element inserted, or @a pos if
+    `first==last`
+
+    @complexity Linear in `std::distance(first, last)` plus linear in the
+    distance between @a pos and end of the container.
+
+    @liveexample{The example shows how `insert()` is used.,insert__range}
+
+    @since version 1.0.0
+    */
+    iterator insert(const_iterator pos, const_iterator first, const_iterator last)
+    {
+        // insert only works for arrays
+        if (JSON_UNLIKELY(not is_array()))
+        {
+            JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
+        }
+
+        // check if iterator pos fits to this JSON value
+        if (JSON_UNLIKELY(pos.m_object != this))
+        {
+            JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
+        }
+
+        // check if range iterators belong to the same JSON object
+        if (JSON_UNLIKELY(first.m_object != last.m_object))
+        {
+            JSON_THROW(invalid_iterator::create(210, "iterators do not fit"));
+        }
+
+        if (JSON_UNLIKELY(first.m_object == this))
+        {
+            JSON_THROW(invalid_iterator::create(211, "passed iterators may not belong to container"));
+        }
+
+        // insert to array and return iterator
+        return insert_iterator(pos, first.m_it.array_iterator, last.m_it.array_iterator);
+    }
+
+    /*!
+    @brief inserts elements
+
+    Inserts elements from initializer list @a ilist before iterator @a pos.
+
+    @param[in] pos iterator before which the content will be inserted; may be
+    the end() iterator
+    @param[in] ilist initializer list to insert the values from
+
+    @throw type_error.309 if called on JSON values other than arrays; example:
+    `"cannot use insert() with string"`
+    @throw invalid_iterator.202 if @a pos is not an iterator of *this;
+    example: `"iterator does not fit current value"`
+
+    @return iterator pointing to the first element inserted, or @a pos if
+    `ilist` is empty
+
+    @complexity Linear in `ilist.size()` plus linear in the distance between
+    @a pos and end of the container.
+
+    @liveexample{The example shows how `insert()` is used.,insert__ilist}
+
+    @since version 1.0.0
+    */
+    iterator insert(const_iterator pos, initializer_list_t ilist)
+    {
+        // insert only works for arrays
+        if (JSON_UNLIKELY(not is_array()))
+        {
+            JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
+        }
+
+        // check if iterator pos fits to this JSON value
+        if (JSON_UNLIKELY(pos.m_object != this))
+        {
+            JSON_THROW(invalid_iterator::create(202, "iterator does not fit current value"));
+        }
+
+        // insert to array and return iterator
+        return insert_iterator(pos, ilist.begin(), ilist.end());
+    }
+
+    /*!
+    @brief inserts elements
+
+    Inserts elements from range `[first, last)`.
+
+    @param[in] first begin of the range of elements to insert
+    @param[in] last end of the range of elements to insert
+
+    @throw type_error.309 if called on JSON values other than objects; example:
+    `"cannot use insert() with string"`
+    @throw invalid_iterator.202 if iterator @a first or @a last does not
+    point to an object; example: `"iterators first and last must point to
+    objects"`
+    @throw invalid_iterator.210 if @a first and @a last do not belong to the
+    same JSON value; example: `"iterators do not fit"`
+
+    @complexity `O(N*log(size() + N))`, where `N` is the number
+    of elements to insert.
+
+    @liveexample{The example shows how `insert()` is used.,insert__range_object}
+
+    @since version 3.0.0
+    */
+    void insert(const_iterator first, const_iterator last)
+    {
+        // insert only works for objects
+        if (JSON_UNLIKELY(not is_object()))
+        {
+            JSON_THROW(type_error::create(309, "cannot use insert() with " + std::string(type_name())));
+        }
+
+        // check if range iterators belong to the same JSON object
+        if (JSON_UNLIKELY(first.m_object != last.m_object))
+        {
+            JSON_THROW(invalid_iterator::create(210, "iterators do not fit"));
+        }
+
+        // passed iterators must belong to objects
+        if (JSON_UNLIKELY(not first.m_object->is_object()))
+        {
+            JSON_THROW(invalid_iterator::create(202, "iterators first and last must point to objects"));
+        }
+
+        m_value.object->insert(first.m_it.object_iterator, last.m_it.object_iterator);
+    }
+
+    /*!
+    @brief updates a JSON object from another object, overwriting existing keys
+
+    Inserts all values from JSON object @a j and overwrites existing keys.
+
+    @param[in] j  JSON object to read values from
+
+    @throw type_error.312 if called on JSON values other than objects; example:
+    `"cannot use update() with string"`
+
+    @complexity O(N*log(size() + N)), where N is the number of elements to
+                insert.
+
+    @liveexample{The example shows how `update()` is used.,update}
+
+    @sa https://docs.python.org/3.6/library/stdtypes.html#dict.update
+
+    @since version 3.0.0
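+
+    A brief sketch of the overwrite behavior (assuming the usual `json` alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json j1 = {{"one", 1}, {"two", 2}};
+    json j2 = {{"two", 22}, {"three", 3}};
+    j1.update(j2);  // j1 now holds "one": 1, "two": 22, and "three": 3
+    @endcode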
+    */
+    void update(const_reference j)
+    {
+        // implicitly convert null value to an empty object
+        if (is_null())
+        {
+            m_type = value_t::object;
+            m_value.object = create<object_t>();
+            assert_invariant();
+        }
+
+        if (JSON_UNLIKELY(not is_object()))
+        {
+            JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(type_name())));
+        }
+        if (JSON_UNLIKELY(not j.is_object()))
+        {
+            JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(j.type_name())));
+        }
+
+        for (auto it = j.cbegin(); it != j.cend(); ++it)
+        {
+            m_value.object->operator[](it.key()) = it.value();
+        }
+    }
+
+    /*!
+    @brief updates a JSON object from another object, overwriting existing keys
+
+    Inserts all values from the range `[first, last)` and overwrites existing
+    keys.
+
+    @param[in] first begin of the range of elements to insert
+    @param[in] last end of the range of elements to insert
+
+    @throw type_error.312 if called on JSON values other than objects; example:
+    `"cannot use update() with string"`
+    @throw invalid_iterator.202 if iterator @a first or @a last does not
+    point to an object; example: `"iterators first and last must point to
+    objects"`
+    @throw invalid_iterator.210 if @a first and @a last do not belong to the
+    same JSON value; example: `"iterators do not fit"`
+
+    @complexity O(N*log(size() + N)), where N is the number of elements to
+                insert.
+
+    @liveexample{The example shows how `update()` is used.,update__range}
+
+    @sa https://docs.python.org/3.6/library/stdtypes.html#dict.update
+
+    @since version 3.0.0
+    */
+    void update(const_iterator first, const_iterator last)
+    {
+        // implicitly convert null value to an empty object
+        if (is_null())
+        {
+            m_type = value_t::object;
+            m_value.object = create<object_t>();
+            assert_invariant();
+        }
+
+        if (JSON_UNLIKELY(not is_object()))
+        {
+            JSON_THROW(type_error::create(312, "cannot use update() with " + std::string(type_name())));
+        }
+
+        // check if range iterators belong to the same JSON object
+        if (JSON_UNLIKELY(first.m_object != last.m_object))
+        {
+            JSON_THROW(invalid_iterator::create(210, "iterators do not fit"));
+        }
+
+        // passed iterators must belong to objects
+        if (JSON_UNLIKELY(not first.m_object->is_object()
+                          or not last.m_object->is_object()))
+        {
+            JSON_THROW(invalid_iterator::create(202, "iterators first and last must point to objects"));
+        }
+
+        for (auto it = first; it != last; ++it)
+        {
+            m_value.object->operator[](it.key()) = it.value();
+        }
+    }
+
+    /*!
+    @brief exchanges the values
+
+    Exchanges the contents of the JSON value with those of @a other. Does not
+    invoke any move, copy, or swap operations on individual elements. All
+    iterators and references remain valid. The past-the-end iterator is
+    invalidated.
+
+    @param[in,out] other JSON value to exchange the contents with
+
+    @complexity Constant.
+
+    @liveexample{The example below shows how JSON values can be swapped with
+    `swap()`.,swap__reference}
+
+    @since version 1.0.0
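+
+    A brief sketch (assuming the usual `json` alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json j1 = {1, 2, 3};
+    json j2 = {{"key", "value"}};
+    j1.swap(j2);  // j1 is now the object, j2 the array
+    @endcode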
+    */
+    void swap(reference other) noexcept (
+        std::is_nothrow_move_constructible<value_t>::value and
+        std::is_nothrow_move_assignable<value_t>::value and
+        std::is_nothrow_move_constructible<json_value>::value and
+        std::is_nothrow_move_assignable<json_value>::value
+    )
+    {
+        std::swap(m_type, other.m_type);
+        std::swap(m_value, other.m_value);
+        assert_invariant();
+    }
+
+    /*!
+    @brief exchanges the values
+
+    Exchanges the contents of a JSON array with those of @a other. Does not
+    invoke any move, copy, or swap operations on individual elements. All
+    iterators and references remain valid. The past-the-end iterator is
+    invalidated.
+
+    @param[in,out] other array to exchange the contents with
+
+    @throw type_error.310 when JSON value is not an array; example: `"cannot
+    use swap() with string"`
+
+    @complexity Constant.
+
+    @liveexample{The example below shows how arrays can be swapped with
+    `swap()`.,swap__array_t}
+
+    @since version 1.0.0
+    */
+    void swap(array_t& other)
+    {
+        // swap only works for arrays
+        if (JSON_LIKELY(is_array()))
+        {
+            std::swap(*(m_value.array), other);
+        }
+        else
+        {
+            JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name())));
+        }
+    }
+
+    /*!
+    @brief exchanges the values
+
+    Exchanges the contents of a JSON object with those of @a other. Does not
+    invoke any move, copy, or swap operations on individual elements. All
+    iterators and references remain valid. The past-the-end iterator is
+    invalidated.
+
+    @param[in,out] other object to exchange the contents with
+
+    @throw type_error.310 when JSON value is not an object; example:
+    `"cannot use swap() with string"`
+
+    @complexity Constant.
+
+    @liveexample{The example below shows how objects can be swapped with
+    `swap()`.,swap__object_t}
+
+    @since version 1.0.0
+    */
+    void swap(object_t& other)
+    {
+        // swap only works for objects
+        if (JSON_LIKELY(is_object()))
+        {
+            std::swap(*(m_value.object), other);
+        }
+        else
+        {
+            JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name())));
+        }
+    }
+
+    /*!
+    @brief exchanges the values
+
+    Exchanges the contents of a JSON string with those of @a other. Does not
+    invoke any move, copy, or swap operations on individual elements. All
+    iterators and references remain valid. The past-the-end iterator is
+    invalidated.
+
+    @param[in,out] other string to exchange the contents with
+
+    @throw type_error.310 when JSON value is not a string; example: `"cannot
+    use swap() with boolean"`
+
+    @complexity Constant.
+
+    @liveexample{The example below shows how strings can be swapped with
+    `swap()`.,swap__string_t}
+
+    @since version 1.0.0
+    */
+    void swap(string_t& other)
+    {
+        // swap only works for strings
+        if (JSON_LIKELY(is_string()))
+        {
+            std::swap(*(m_value.string), other);
+        }
+        else
+        {
+            JSON_THROW(type_error::create(310, "cannot use swap() with " + std::string(type_name())));
+        }
+    }
+
+    /// @}
+
+  public:
+    //////////////////////////////////////////
+    // lexicographical comparison operators //
+    //////////////////////////////////////////
+
+    /// @name lexicographical comparison operators
+    /// @{
+
+    /*!
+    @brief comparison: equal
+
+    Compares two JSON values for equality according to the following rules:
+    - Two JSON values are equal if (1) they are of the same type and (2)
+      their stored values are the same according to their respective
+      `operator==`.
+    - Integer and floating-point numbers are automatically converted before
+      comparison. Note that two NaN values are always treated as unequal.
+    - Two JSON null values are equal.
+
+    @note Floating-point numbers inside JSON values are compared with
+    `json::number_float_t::operator==`, which is `double::operator==` by
+    default. To compare floating-point numbers while respecting an epsilon, an alternative
+    [comparison function](https://github.com/mariokonrad/marnav/blob/master/src/marnav/math/floatingpoint.hpp#L34-#L39)
+    could be used, for instance
+    @code {.cpp}
+    template<typename T, typename = typename std::enable_if<std::is_floating_point<T>::value, T>::type>
+    inline bool is_same(T a, T b, T epsilon = std::numeric_limits<T>::epsilon()) noexcept
+    {
+        return std::abs(a - b) <= epsilon;
+    }
+    @endcode
+
+    @note NaN values never compare equal to themselves or to other NaN values.
+
+    @param[in] lhs  first JSON value to consider
+    @param[in] rhs  second JSON value to consider
+    @return whether the values @a lhs and @a rhs are equal
+
+    @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+    @complexity Linear.
+
+    @liveexample{The example demonstrates comparing several JSON
+    types.,operator__equal}
+
+    @since version 1.0.0
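+
+    A brief sketch of the rules above (assuming the usual `json` alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json(1) == json(1.0);            // true: numbers are converted before comparison
+    json("1") == json(1);            // false: different types
+    json(nullptr) == json(nullptr);  // true: two null values are equal
+    @endcode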
+    */
+    friend bool operator==(const_reference lhs, const_reference rhs) noexcept
+    {
+        const auto lhs_type = lhs.type();
+        const auto rhs_type = rhs.type();
+
+        if (lhs_type == rhs_type)
+        {
+            switch (lhs_type)
+            {
+                case value_t::array:
+                    return (*lhs.m_value.array == *rhs.m_value.array);
+
+                case value_t::object:
+                    return (*lhs.m_value.object == *rhs.m_value.object);
+
+                case value_t::null:
+                    return true;
+
+                case value_t::string:
+                    return (*lhs.m_value.string == *rhs.m_value.string);
+
+                case value_t::boolean:
+                    return (lhs.m_value.boolean == rhs.m_value.boolean);
+
+                case value_t::number_integer:
+                    return (lhs.m_value.number_integer == rhs.m_value.number_integer);
+
+                case value_t::number_unsigned:
+                    return (lhs.m_value.number_unsigned == rhs.m_value.number_unsigned);
+
+                case value_t::number_float:
+                    return (lhs.m_value.number_float == rhs.m_value.number_float);
+
+                default:
+                    return false;
+            }
+        }
+        else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_float)
+        {
+            return (static_cast<number_float_t>(lhs.m_value.number_integer) == rhs.m_value.number_float);
+        }
+        else if (lhs_type == value_t::number_float and rhs_type == value_t::number_integer)
+        {
+            return (lhs.m_value.number_float == static_cast<number_float_t>(rhs.m_value.number_integer));
+        }
+        else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_float)
+        {
+            return (static_cast<number_float_t>(lhs.m_value.number_unsigned) == rhs.m_value.number_float);
+        }
+        else if (lhs_type == value_t::number_float and rhs_type == value_t::number_unsigned)
+        {
+            return (lhs.m_value.number_float == static_cast<number_float_t>(rhs.m_value.number_unsigned));
+        }
+        else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_integer)
+        {
+            return (static_cast<number_integer_t>(lhs.m_value.number_unsigned) == rhs.m_value.number_integer);
+        }
+        else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_unsigned)
+        {
+            return (lhs.m_value.number_integer == static_cast<number_integer_t>(rhs.m_value.number_unsigned));
+        }
+
+        return false;
+    }
+
+    /*!
+    @brief comparison: equal
+    @copydoc operator==(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator==(const_reference lhs, const ScalarType rhs) noexcept
+    {
+        return (lhs == basic_json(rhs));
+    }
+
+    /*!
+    @brief comparison: equal
+    @copydoc operator==(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator==(const ScalarType lhs, const_reference rhs) noexcept
+    {
+        return (basic_json(lhs) == rhs);
+    }
+
+    /*!
+    @brief comparison: not equal
+
+    Compares two JSON values for inequality by calculating `not (lhs == rhs)`.
+
+    @param[in] lhs  first JSON value to consider
+    @param[in] rhs  second JSON value to consider
+    @return whether the values @a lhs and @a rhs are not equal
+
+    @complexity Linear.
+
+    @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+    @liveexample{The example demonstrates comparing several JSON
+    types.,operator__notequal}
+
+    @since version 1.0.0
+    */
+    friend bool operator!=(const_reference lhs, const_reference rhs) noexcept
+    {
+        return not (lhs == rhs);
+    }
+
+    /*!
+    @brief comparison: not equal
+    @copydoc operator!=(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator!=(const_reference lhs, const ScalarType rhs) noexcept
+    {
+        return (lhs != basic_json(rhs));
+    }
+
+    /*!
+    @brief comparison: not equal
+    @copydoc operator!=(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator!=(const ScalarType lhs, const_reference rhs) noexcept
+    {
+        return (basic_json(lhs) != rhs);
+    }
+
+    /*!
+    @brief comparison: less than
+
+    Compares whether one JSON value @a lhs is less than another JSON value @a
+    rhs according to the following rules:
+    - If @a lhs and @a rhs have the same type, the values are compared using
+      the default `<` operator.
+    - Integer and floating-point numbers are automatically converted before
+      comparison
+    - In case @a lhs and @a rhs have different types, the values are ignored
+      and the order of the types is considered, see
+      @ref operator<(const value_t, const value_t).
+
+    @param[in] lhs  first JSON value to consider
+    @param[in] rhs  second JSON value to consider
+    @return whether @a lhs is less than @a rhs
+
+    @complexity Linear.
+
+    @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+    @liveexample{The example demonstrates comparing several JSON
+    types.,operator__less}
+
+    @since version 1.0.0
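+
+    A brief sketch of the rules above (assuming the usual `json` alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json(1) < json(2.5);          // true: numbers are converted before comparison
+    json({1, 2}) < json({1, 3});  // true: arrays are compared lexicographically
+    @endcode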
+    */
+    friend bool operator<(const_reference lhs, const_reference rhs) noexcept
+    {
+        const auto lhs_type = lhs.type();
+        const auto rhs_type = rhs.type();
+
+        if (lhs_type == rhs_type)
+        {
+            switch (lhs_type)
+            {
+                case value_t::array:
+                    return (*lhs.m_value.array) < (*rhs.m_value.array);
+
+                case value_t::object:
+                    return *lhs.m_value.object < *rhs.m_value.object;
+
+                case value_t::null:
+                    return false;
+
+                case value_t::string:
+                    return *lhs.m_value.string < *rhs.m_value.string;
+
+                case value_t::boolean:
+                    return lhs.m_value.boolean < rhs.m_value.boolean;
+
+                case value_t::number_integer:
+                    return lhs.m_value.number_integer < rhs.m_value.number_integer;
+
+                case value_t::number_unsigned:
+                    return lhs.m_value.number_unsigned < rhs.m_value.number_unsigned;
+
+                case value_t::number_float:
+                    return lhs.m_value.number_float < rhs.m_value.number_float;
+
+                default:
+                    return false;
+            }
+        }
+        else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_float)
+        {
+            return static_cast<number_float_t>(lhs.m_value.number_integer) < rhs.m_value.number_float;
+        }
+        else if (lhs_type == value_t::number_float and rhs_type == value_t::number_integer)
+        {
+            return lhs.m_value.number_float < static_cast<number_float_t>(rhs.m_value.number_integer);
+        }
+        else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_float)
+        {
+            return static_cast<number_float_t>(lhs.m_value.number_unsigned) < rhs.m_value.number_float;
+        }
+        else if (lhs_type == value_t::number_float and rhs_type == value_t::number_unsigned)
+        {
+            return lhs.m_value.number_float < static_cast<number_float_t>(rhs.m_value.number_unsigned);
+        }
+        else if (lhs_type == value_t::number_integer and rhs_type == value_t::number_unsigned)
+        {
+            return lhs.m_value.number_integer < static_cast<number_integer_t>(rhs.m_value.number_unsigned);
+        }
+        else if (lhs_type == value_t::number_unsigned and rhs_type == value_t::number_integer)
+        {
+            return static_cast<number_integer_t>(lhs.m_value.number_unsigned) < rhs.m_value.number_integer;
+        }
+
+        // We only reach this line if we cannot compare values. In that case,
+        // we compare types. Note we have to call the operator explicitly,
+        // because MSVC has problems otherwise.
+        return operator<(lhs_type, rhs_type);
+    }
+
+    /*!
+    @brief comparison: less than
+    @copydoc operator<(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator<(const_reference lhs, const ScalarType rhs) noexcept
+    {
+        return (lhs < basic_json(rhs));
+    }
+
+    /*!
+    @brief comparison: less than
+    @copydoc operator<(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator<(const ScalarType lhs, const_reference rhs) noexcept
+    {
+        return (basic_json(lhs) < rhs);
+    }
+
+    /*!
+    @brief comparison: less than or equal
+
+    Compares whether one JSON value @a lhs is less than or equal to another
+    JSON value by calculating `not (rhs < lhs)`.
+
+    @param[in] lhs  first JSON value to consider
+    @param[in] rhs  second JSON value to consider
+    @return whether @a lhs is less than or equal to @a rhs
+
+    @complexity Linear.
+
+    @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+    @liveexample{The example demonstrates comparing several JSON
+    types.,operator__greater}
+
+    @since version 1.0.0
+    */
+    friend bool operator<=(const_reference lhs, const_reference rhs) noexcept
+    {
+        return not (rhs < lhs);
+    }
+
+    /*!
+    @brief comparison: less than or equal
+    @copydoc operator<=(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator<=(const_reference lhs, const ScalarType rhs) noexcept
+    {
+        return (lhs <= basic_json(rhs));
+    }
+
+    /*!
+    @brief comparison: less than or equal
+    @copydoc operator<=(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator<=(const ScalarType lhs, const_reference rhs) noexcept
+    {
+        return (basic_json(lhs) <= rhs);
+    }
+
+    /*!
+    @brief comparison: greater than
+
+    Compares whether one JSON value @a lhs is greater than another
+    JSON value by calculating `not (lhs <= rhs)`.
+
+    @param[in] lhs  first JSON value to consider
+    @param[in] rhs  second JSON value to consider
+    @return whether @a lhs is greater than @a rhs
+
+    @complexity Linear.
+
+    @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+    @liveexample{The example demonstrates comparing several JSON
+    types.,operator__lessequal}
+
+    @since version 1.0.0
+    */
+    friend bool operator>(const_reference lhs, const_reference rhs) noexcept
+    {
+        return not (lhs <= rhs);
+    }
+
+    /*!
+    @brief comparison: greater than
+    @copydoc operator>(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator>(const_reference lhs, const ScalarType rhs) noexcept
+    {
+        return (lhs > basic_json(rhs));
+    }
+
+    /*!
+    @brief comparison: greater than
+    @copydoc operator>(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator>(const ScalarType lhs, const_reference rhs) noexcept
+    {
+        return (basic_json(lhs) > rhs);
+    }
+
+    /*!
+    @brief comparison: greater than or equal
+
+    Compares whether one JSON value @a lhs is greater than or equal to another
+    JSON value by calculating `not (lhs < rhs)`.
+
+    @param[in] lhs  first JSON value to consider
+    @param[in] rhs  second JSON value to consider
+    @return whether @a lhs is greater than or equal to @a rhs
+
+    @complexity Linear.
+
+    @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+    @liveexample{The example demonstrates comparing several JSON
+    types.,operator__greaterequal}
+
+    @since version 1.0.0
+    */
+    friend bool operator>=(const_reference lhs, const_reference rhs) noexcept
+    {
+        return not (lhs < rhs);
+    }
+
+    /*!
+    @brief comparison: greater than or equal
+    @copydoc operator>=(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator>=(const_reference lhs, const ScalarType rhs) noexcept
+    {
+        return (lhs >= basic_json(rhs));
+    }
+
+    /*!
+    @brief comparison: greater than or equal
+    @copydoc operator>=(const_reference, const_reference)
+    */
+    template<typename ScalarType, typename std::enable_if<
+                 std::is_scalar<ScalarType>::value, int>::type = 0>
+    friend bool operator>=(const ScalarType lhs, const_reference rhs) noexcept
+    {
+        return (basic_json(lhs) >= rhs);
+    }
+
+    /// @}
+
+    ///////////////////
+    // serialization //
+    ///////////////////
+
+    /// @name serialization
+    /// @{
+
+    /*!
+    @brief serialize to stream
+
+    Serialize the given JSON value @a j to the output stream @a o. The JSON
+    value will be serialized using the @ref dump member function.
+
+    - The indentation of the output can be controlled with the member variable
+      `width` of the output stream @a o. For instance, using the manipulator
+      `std::setw(4)` on @a o sets the indentation level to `4` and the
+      serialization result is the same as calling `dump(4)`.
+
+    - The indentation character can be controlled with the member variable
+      `fill` of the output stream @a o. For instance, the manipulator
+      `std::setfill('\\t')` sets indentation to use a tab character rather than
+      the default space character.
+
+    @param[in,out] o  stream to serialize to
+    @param[in] j  JSON value to serialize
+
+    @return the stream @a o
+
+    @throw type_error.316 if a string stored inside the JSON value is not
+                          UTF-8 encoded
+
+    @complexity Linear.
+
+    @liveexample{The example below shows the serialization with different
+    parameters to `width` to adjust the indentation level.,operator_serialize}
+
+    @since version 1.0.0; indentation character added in version 3.0.0
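+
+    A brief sketch (assuming the usual `json` alias and the `<iostream>` and
+    `<iomanip>` headers):
+
+    @code{.cpp}
+    // assumptions: using json = basic_json<>; <iostream> and <iomanip> included
+    json j = {{"answer", 42}};
+    std::cout << j << '\n';                  // compact: {"answer":42}
+    std::cout << std::setw(4) << j << '\n';  // pretty-printed with an indent of 4
+    @endcode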
+    */
+    friend std::ostream& operator<<(std::ostream& o, const basic_json& j)
+    {
+        // read width member and use it as indentation parameter if nonzero
+        const bool pretty_print = (o.width() > 0);
+        const auto indentation = (pretty_print ? o.width() : 0);
+
+        // reset width to 0 for subsequent calls to this stream
+        o.width(0);
+
+        // do the actual serialization
+        serializer s(detail::output_adapter<char>(o), o.fill());
+        s.dump(j, pretty_print, false, static_cast<unsigned int>(indentation));
+        return o;
+    }
+
+    /*!
+    @brief serialize to stream
+    @deprecated This stream operator is deprecated and will be removed in
+                version 4.0.0 of the library. Please use
+                @ref operator<<(std::ostream&, const basic_json&)
+                instead; that is, replace calls like `j >> o;` with `o << j;`.
+    @since version 1.0.0; deprecated since version 3.0.0
+    */
+    JSON_DEPRECATED
+    friend std::ostream& operator>>(const basic_json& j, std::ostream& o)
+    {
+        return o << j;
+    }
+
+    /// @}
+
+
+    /////////////////////
+    // deserialization //
+    /////////////////////
+
+    /// @name deserialization
+    /// @{
+
+    /*!
+    @brief deserialize from a compatible input
+
+    This function reads from a compatible input. Examples are:
+    - an array of 1-byte values
+    - strings with a character/literal type of size 1 byte
+    - input streams
+    - container with contiguous storage of 1-byte values. Compatible container
+      types include `std::vector`, `std::string`, `std::array`,
+      `std::valarray`, and `std::initializer_list`. Furthermore, C-style
+      arrays can be used with `std::begin()`/`std::end()`. User-defined
+      containers can be used as long as they implement random-access iterators
+      and a contiguous storage.
+
+    @pre Each element of the container has a size of 1 byte. Violating this
+    precondition yields undefined behavior. **This precondition is enforced
+    with a static assertion.**
+
+    @pre The container storage is contiguous. Violating this precondition
+    yields undefined behavior. **This precondition is enforced with an
+    assertion.**
+
+    @warning There is no way to enforce all preconditions at compile-time. If
+             the function is called with a noncompliant container and with
+             assertions switched off, the behavior is undefined and will most
+             likely yield a segmentation violation.
+
+    @param[in] i  input to read from
+    @param[in] cb  a parser callback function of type @ref parser_callback_t
+    which is used to control the deserialization by filtering unwanted values
+    (optional)
+    @param[in] allow_exceptions  whether to throw exceptions in case of a
+    parse error (optional, true by default)
+
+    @return result of the deserialization
+
+    @throw parse_error.101 if a parse error occurs; example: `""unexpected end
+    of input; expected string literal""`
+    @throw parse_error.102 if to_unicode fails or surrogate error
+    @throw parse_error.103 if to_unicode fails
+
+    @complexity Linear in the length of the input. The parser is a predictive
+    LL(1) parser. The complexity can be higher if the parser callback function
+    @a cb has a super-linear complexity.
+
+    @note A UTF-8 byte order mark is silently ignored.
+
+    @liveexample{The example below demonstrates the `parse()` function reading
+    from an array.,parse__array__parser_callback_t}
+
+    @liveexample{The example below demonstrates the `parse()` function with
+    and without callback function.,parse__string__parser_callback_t}
+
+    @liveexample{The example below demonstrates the `parse()` function with
+    and without callback function.,parse__istream__parser_callback_t}
+
+    @liveexample{The example below demonstrates the `parse()` function reading
+    from a contiguous container.,parse__contiguouscontainer__parser_callback_t}
+
+    @since version 2.0.3 (contiguous containers)
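+
+    A brief sketch (assuming the usual `json` alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json j = json::parse(R"({"happy": true, "pi": 3.141})");
+
+    // with allow_exceptions == false, a parse error yields a discarded value
+    json j_err = json::parse("[1,2,3", nullptr, false);
+    j_err.is_discarded();  // true: input was not valid JSON
+    @endcode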
+    */
+    static basic_json parse(detail::input_adapter&& i,
+                            const parser_callback_t cb = nullptr,
+                            const bool allow_exceptions = true)
+    {
+        basic_json result;
+        parser(i, cb, allow_exceptions).parse(true, result);
+        return result;
+    }
+
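+    /*!
+    @brief check if the input is valid JSON
+
+    Unlike @ref parse(), this function neither throws an exception in case of
+    invalid JSON input nor creates a JSON value from the input. A brief sketch
+    (assuming the usual `json` alias):
+
+    @code{.cpp}
+    // assumption: using json = basic_json<>;
+    json::accept("[1,2,3]");  // true
+    json::accept("[1,2,3");   // false: unterminated array
+    @endcode
+    */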
+    static bool accept(detail::input_adapter&& i)
+    {
+        return parser(i).accept(true);
+    }
+
+    /*!
+    @brief generate SAX events
+
+    The SAX event listener must follow the interface of @ref json_sax.
+
+    This function reads from a compatible input. Examples are:
+    - an array of 1-byte values
+    - strings with a character/literal type of size 1 byte
+    - input streams
+    - container with contiguous storage of 1-byte values. Compatible container
+      types include `std::vector`, `std::string`, `std::array`,
+      `std::valarray`, and `std::initializer_list`. Furthermore, C-style
+      arrays can be used with `std::begin()`/`std::end()`. User-defined
+      containers can be used as long as they implement random-access iterators
+      and contiguous storage.
+
+    @pre Each element of the container has a size of 1 byte. Violating this
+    precondition yields undefined behavior. **This precondition is enforced
+    with a static assertion.**
+
+    @pre The container storage is contiguous. Violating this precondition
+    yields undefined behavior. **This precondition is enforced with an
+    assertion.**
+
+    @warning There is no way to enforce all preconditions at compile-time. If
+             the function is called with a noncompliant container and with
+             assertions switched off, the behavior is undefined and will most
+             likely result in a segmentation violation.
+
+    @param[in] i  input to read from
+    @param[in,out] sax  SAX event listener
+    @param[in] format  the format to parse (JSON, CBOR, MessagePack, or UBJSON)
+    @param[in] strict  whether the input has to be consumed completely
+
+    @return return value of the last processed SAX event
+
+    @throw parse_error.101 if a parse error occurs; example: `"unexpected end
+    of input; expected string literal"`
+    @throw parse_error.102 if to_unicode fails or a surrogate error occurs
+    @throw parse_error.103 if to_unicode fails
+
+    @complexity Linear in the length of the input. The parser is a predictive
+    LL(1) parser. The complexity can be higher if the SAX consumer @a sax has
+    a super-linear complexity.
+
+    @note A UTF-8 byte order mark is silently ignored.
+
+    @liveexample{The example below demonstrates the `sax_parse()` function
+    reading from string and processing the events with a user-defined SAX
+    event consumer.,sax_parse}
+
+    @since version 3.2.0
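+
+    For illustration only, a sketch of a consumer that merely counts events;
+    `counting_sax` is a made-up name, and `json` abbreviates `nlohmann::json`:
+    @code{.cpp}
+    struct counting_sax
+    {
+        std::size_t events = 0;
+        bool null() { ++events; return true; }
+        bool boolean(bool) { ++events; return true; }
+        bool number_integer(json::number_integer_t) { ++events; return true; }
+        bool number_unsigned(json::number_unsigned_t) { ++events; return true; }
+        bool number_float(json::number_float_t, const std::string&) { ++events; return true; }
+        bool string(std::string&) { ++events; return true; }
+        bool start_object(std::size_t) { ++events; return true; }
+        bool key(std::string&) { ++events; return true; }
+        bool end_object() { ++events; return true; }
+        bool start_array(std::size_t) { ++events; return true; }
+        bool end_array() { ++events; return true; }
+        bool parse_error(std::size_t, const std::string&,
+                         const nlohmann::detail::exception&) { return false; }
+    };
+
+    // later, e.g. inside a function:
+    counting_sax consumer;
+    bool ok = json::sax_parse(R"([1, 2, 3])", &consumer);
+    // ok == true, consumer.events == 5 (start_array, three numbers, end_array)
+    @endcode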
+    */
+    template <typename SAX>
+    static bool sax_parse(detail::input_adapter&& i, SAX* sax,
+                          input_format_t format = input_format_t::json,
+                          const bool strict = true)
+    {
+        assert(sax);
+        switch (format)
+        {
+            case input_format_t::json:
+                return parser(std::move(i)).sax_parse(sax, strict);
+            default:
+                return detail::binary_reader<basic_json, SAX>(std::move(i)).sax_parse(format, sax, strict);
+        }
+    }
+
+    /*!
+    @brief deserialize from an iterator range with contiguous storage
+
+    This function reads from an iterator range of a container with contiguous
+    storage of 1-byte values. Compatible container types include
+    `std::vector`, `std::string`, `std::array`, `std::valarray`, and
+    `std::initializer_list`. Furthermore, C-style arrays can be used with
+    `std::begin()`/`std::end()`. User-defined containers can be used as long
+    as they implement random-access iterators and contiguous storage.
+
+    @pre The iterator range is contiguous. Violating this precondition yields
+    undefined behavior. **This precondition is enforced with an assertion.**
+    @pre Each element in the range has a size of 1 byte. Violating this
+    precondition yields undefined behavior. **This precondition is enforced
+    with a static assertion.**
+
+    @warning There is no way to enforce all preconditions at compile-time. If
+             the function is called with noncompliant iterators and with
+             assertions switched off, the behavior is undefined and will most
+             likely result in a segmentation violation.
+
+    @tparam IteratorType iterator of container with contiguous storage
+    @param[in] first  begin of the range to parse (included)
+    @param[in] last  end of the range to parse (excluded)
+    @param[in] cb  a parser callback function of type @ref parser_callback_t
+    which is used to control the deserialization by filtering unwanted values
+    (optional)
+    @param[in] allow_exceptions  whether to throw exceptions in case of a
+    parse error (optional, true by default)
+
+    @return result of the deserialization
+
+    @throw parse_error.101 in case of an unexpected token
+    @throw parse_error.102 if to_unicode fails or a surrogate error occurs
+    @throw parse_error.103 if to_unicode fails
+
+    @complexity Linear in the length of the input. The parser is a predictive
+    LL(1) parser. The complexity can be higher if the parser callback function
+    @a cb has a super-linear complexity.
+
+    @note A UTF-8 byte order mark is silently ignored.
+
+    @liveexample{The example below demonstrates the `parse()` function reading
+    from an iterator range.,parse__iteratortype__parser_callback_t}
+
+    @since version 2.0.3
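+
+    A minimal sketch (assuming `using nlohmann::json;`):
+    @code{.cpp}
+    std::vector<std::uint8_t> v = {'t', 'r', 'u', 'e'};
+    json j = json::parse(v.begin(), v.end());  // j == true
+    @endcode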
+    */
+    template<class IteratorType, typename std::enable_if<
+                 std::is_base_of<
+                     std::random_access_iterator_tag,
+                     typename std::iterator_traits<IteratorType>::iterator_category>::value, int>::type = 0>
+    static basic_json parse(IteratorType first, IteratorType last,
+                            const parser_callback_t cb = nullptr,
+                            const bool allow_exceptions = true)
+    {
+        basic_json result;
+        parser(detail::input_adapter(first, last), cb, allow_exceptions).parse(true, result);
+        return result;
+    }
+
+    template<class IteratorType, typename std::enable_if<
+                 std::is_base_of<
+                     std::random_access_iterator_tag,
+                     typename std::iterator_traits<IteratorType>::iterator_category>::value, int>::type = 0>
+    static bool accept(IteratorType first, IteratorType last)
+    {
+        return parser(detail::input_adapter(first, last)).accept(true);
+    }
+
+    template<class IteratorType, class SAX, typename std::enable_if<
+                 std::is_base_of<
+                     std::random_access_iterator_tag,
+                     typename std::iterator_traits<IteratorType>::iterator_category>::value, int>::type = 0>
+    static bool sax_parse(IteratorType first, IteratorType last, SAX* sax)
+    {
+        return parser(detail::input_adapter(first, last)).sax_parse(sax);
+    }
+
+    /*!
+    @brief deserialize from stream
+    @deprecated This stream operator is deprecated and will be removed in
+                version 4.0.0 of the library. Please use
+                @ref operator>>(std::istream&, basic_json&)
+                instead; that is, replace calls like `j << i;` with `i >> j;`.
+    @since version 1.0.0; deprecated since version 3.0.0
+    */
+    JSON_DEPRECATED
+    friend std::istream& operator<<(basic_json& j, std::istream& i)
+    {
+        return operator>>(i, j);
+    }
+
+    /*!
+    @brief deserialize from stream
+
+    Deserializes an input stream to a JSON value.
+
+    @param[in,out] i  input stream to read a serialized JSON value from
+    @param[in,out] j  JSON value to write the deserialized input to
+
+    @throw parse_error.101 in case of an unexpected token
+    @throw parse_error.102 if to_unicode fails or a surrogate error occurs
+    @throw parse_error.103 if to_unicode fails
+
+    @complexity Linear in the length of the input. The parser is a predictive
+    LL(1) parser.
+
+    @note A UTF-8 byte order mark is silently ignored.
+
+    @liveexample{The example below shows how a JSON value is constructed by
+    reading a serialization from a stream.,operator_deserialize}
+
+    @sa parse(std::istream&, const parser_callback_t) for a variant with a
+    parser callback function to filter values while parsing
+
+    @since version 1.0.0
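+
+    A minimal sketch (assuming `using nlohmann::json;` and `<sstream>`):
+    @code{.cpp}
+    std::istringstream input(R"({"name": "value"})");
+    json j;
+    input >> j;  // j["name"] == "value"
+    @endcode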
+    */
+    friend std::istream& operator>>(std::istream& i, basic_json& j)
+    {
+        parser(detail::input_adapter(i)).parse(false, j);
+        return i;
+    }
+
+    /// @}
+
+    ///////////////////////////
+    // convenience functions //
+    ///////////////////////////
+
+    /*!
+    @brief return the type as string
+
+    Returns the type name as string to be used in error messages - usually to
+    indicate that a function was called on a wrong JSON type.
+
+    @return a string representation of the @a m_type member:
+            Value type  | return value
+            ----------- | -------------
+            null        | `"null"`
+            boolean     | `"boolean"`
+            string      | `"string"`
+            number      | `"number"` (for all number types)
+            object      | `"object"`
+            array       | `"array"`
+            discarded   | `"discarded"`
+
+    @exceptionsafety No-throw guarantee: this function never throws exceptions.
+
+    @complexity Constant.
+
+    @liveexample{The following code exemplifies `type_name()` for all JSON
+    types.,type_name}
+
+    @sa @ref type() -- return the type of the JSON value
+    @sa @ref operator value_t() -- return the type of the JSON value (implicit)
+
+    @since version 1.0.0, public since 2.1.0, `const char*` and `noexcept`
+    since 3.0.0
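+
+    For example (assuming `using nlohmann::json;`):
+    @code{.cpp}
+    json j = {{"key", 1}};
+    const char* name = j.type_name();  // "object"
+    @endcode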
+    */
+    const char* type_name() const noexcept
+    {
+        switch (m_type)
+        {
+            case value_t::null:
+                return "null";
+            case value_t::object:
+                return "object";
+            case value_t::array:
+                return "array";
+            case value_t::string:
+                return "string";
+            case value_t::boolean:
+                return "boolean";
+            case value_t::discarded:
+                return "discarded";
+            default:
+                return "number";
+        }
+    }
+
+
+  private:
+    //////////////////////
+    // member variables //
+    //////////////////////
+
+    /// the type of the current element
+    value_t m_type = value_t::null;
+
+    /// the value of the current element
+    json_value m_value = {};
+
+    //////////////////////////////////////////
+    // binary serialization/deserialization //
+    //////////////////////////////////////////
+
+    /// @name binary serialization/deserialization support
+    /// @{
+
+  public:
+    /*!
+    @brief create a CBOR serialization of a given JSON value
+
+    Serializes a given JSON value @a j to a byte vector using the CBOR (Concise
+    Binary Object Representation) serialization format. CBOR is a binary
+    serialization format which aims to be more compact than JSON itself, yet
+    more efficient to parse.
+
+    The library uses the following mapping from JSON value types to
+    CBOR types according to the CBOR specification (RFC 7049):
+
+    JSON value type | value/range                                | CBOR type                          | first byte
+    --------------- | ------------------------------------------ | ---------------------------------- | ---------------
+    null            | `null`                                     | Null                               | 0xF6
+    boolean         | `true`                                     | True                               | 0xF5
+    boolean         | `false`                                    | False                              | 0xF4
+    number_integer  | -9223372036854775808..-2147483649          | Negative integer (8 bytes follow)  | 0x3B
+    number_integer  | -2147483648..-32769                        | Negative integer (4 bytes follow)  | 0x3A
+    number_integer  | -32768..-129                               | Negative integer (2 bytes follow)  | 0x39
+    number_integer  | -128..-25                                  | Negative integer (1 byte follow)   | 0x38
+    number_integer  | -24..-1                                    | Negative integer                   | 0x20..0x37
+    number_integer  | 0..23                                      | Integer                            | 0x00..0x17
+    number_integer  | 24..255                                    | Unsigned integer (1 byte follow)   | 0x18
+    number_integer  | 256..65535                                 | Unsigned integer (2 bytes follow)  | 0x19
+    number_integer  | 65536..4294967295                          | Unsigned integer (4 bytes follow)  | 0x1A
+    number_integer  | 4294967296..18446744073709551615           | Unsigned integer (8 bytes follow)  | 0x1B
+    number_unsigned | 0..23                                      | Integer                            | 0x00..0x17
+    number_unsigned | 24..255                                    | Unsigned integer (1 byte follow)   | 0x18
+    number_unsigned | 256..65535                                 | Unsigned integer (2 bytes follow)  | 0x19
+    number_unsigned | 65536..4294967295                          | Unsigned integer (4 bytes follow)  | 0x1A
+    number_unsigned | 4294967296..18446744073709551615           | Unsigned integer (8 bytes follow)  | 0x1B
+    number_float    | *any value*                                | Double-Precision Float             | 0xFB
+    string          | *length*: 0..23                            | UTF-8 string                       | 0x60..0x77
+    string          | *length*: 24..255                          | UTF-8 string (1 byte follow)       | 0x78
+    string          | *length*: 256..65535                       | UTF-8 string (2 bytes follow)      | 0x79
+    string          | *length*: 65536..4294967295                | UTF-8 string (4 bytes follow)      | 0x7A
+    string          | *length*: 4294967296..18446744073709551615 | UTF-8 string (8 bytes follow)      | 0x7B
+    array           | *size*: 0..23                              | array                              | 0x80..0x97
+    array           | *size*: 24..255                            | array (1 byte follow)              | 0x98
+    array           | *size*: 256..65535                         | array (2 bytes follow)             | 0x99
+    array           | *size*: 65536..4294967295                  | array (4 bytes follow)             | 0x9A
+    array           | *size*: 4294967296..18446744073709551615   | array (8 bytes follow)             | 0x9B
+    object          | *size*: 0..23                              | map                                | 0xA0..0xB7
+    object          | *size*: 24..255                            | map (1 byte follow)                | 0xB8
+    object          | *size*: 256..65535                         | map (2 bytes follow)               | 0xB9
+    object          | *size*: 65536..4294967295                  | map (4 bytes follow)               | 0xBA
+    object          | *size*: 4294967296..18446744073709551615   | map (8 bytes follow)               | 0xBB
+
+    @note The mapping is **complete** in the sense that any JSON value type
+          can be converted to a CBOR value.
+
+    @note If NaN or Infinity are stored inside a JSON number, they are
+          serialized properly. This behavior differs from the @ref dump()
+          function which serializes NaN or Infinity to `null`.
+
+    @note The following CBOR types are not used in the conversion:
+          - byte strings (0x40..0x5F)
+          - UTF-8 strings terminated by "break" (0x7F)
+          - arrays terminated by "break" (0x9F)
+          - maps terminated by "break" (0xBF)
+          - date/time (0xC0..0xC1)
+          - bignum (0xC2..0xC3)
+          - decimal fraction (0xC4)
+          - bigfloat (0xC5)
+          - tagged items (0xC6..0xD4, 0xD8..0xDB)
+          - expected conversions (0xD5..0xD7)
+          - simple values (0xE0..0xF3, 0xF8)
+          - undefined (0xF7)
+          - half and single-precision floats (0xF9..0xFA)
+          - break (0xFF)
+
+    @param[in] j  JSON value to serialize
+    @return CBOR serialization as byte vector
+
+    @complexity Linear in the size of the JSON value @a j.
+
+    @liveexample{The example shows the serialization of a JSON value to a byte
+    vector in CBOR format.,to_cbor}
+
+    @sa http://cbor.io
+    @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool) for the
+        analogous deserialization
+    @sa @ref to_msgpack(const basic_json&) for the related MessagePack format
+    @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+             related UBJSON format
+
+    @since version 2.0.9
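+
+    A minimal sketch (assuming `using nlohmann::json;`):
+    @code{.cpp}
+    json j = {{"compact", true}, {"schema", 0}};
+    std::vector<std::uint8_t> cbor = json::to_cbor(j);
+    @endcode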
+    */
+    static std::vector<uint8_t> to_cbor(const basic_json& j)
+    {
+        std::vector<uint8_t> result;
+        to_cbor(j, result);
+        return result;
+    }
+
+    static void to_cbor(const basic_json& j, detail::output_adapter<uint8_t> o)
+    {
+        binary_writer<uint8_t>(o).write_cbor(j);
+    }
+
+    static void to_cbor(const basic_json& j, detail::output_adapter<char> o)
+    {
+        binary_writer<char>(o).write_cbor(j);
+    }
+
+    /*!
+    @brief create a MessagePack serialization of a given JSON value
+
+    Serializes a given JSON value @a j to a byte vector using the MessagePack
+    serialization format. MessagePack is a binary serialization format which
+    aims to be more compact than JSON itself, yet more efficient to parse.
+
+    The library uses the following mapping from JSON value types to
+    MessagePack types according to the MessagePack specification:
+
+    JSON value type | value/range                       | MessagePack type | first byte
+    --------------- | --------------------------------- | ---------------- | ----------
+    null            | `null`                            | nil              | 0xC0
+    boolean         | `true`                            | true             | 0xC3
+    boolean         | `false`                           | false            | 0xC2
+    number_integer  | -9223372036854775808..-2147483649 | int64            | 0xD3
+    number_integer  | -2147483648..-32769               | int32            | 0xD2
+    number_integer  | -32768..-129                      | int16            | 0xD1
+    number_integer  | -128..-33                         | int8             | 0xD0
+    number_integer  | -32..-1                           | negative fixint  | 0xE0..0xFF
+    number_integer  | 0..127                            | positive fixint  | 0x00..0x7F
+    number_integer  | 128..255                          | uint 8           | 0xCC
+    number_integer  | 256..65535                        | uint 16          | 0xCD
+    number_integer  | 65536..4294967295                 | uint 32          | 0xCE
+    number_integer  | 4294967296..18446744073709551615  | uint 64          | 0xCF
+    number_unsigned | 0..127                            | positive fixint  | 0x00..0x7F
+    number_unsigned | 128..255                          | uint 8           | 0xCC
+    number_unsigned | 256..65535                        | uint 16          | 0xCD
+    number_unsigned | 65536..4294967295                 | uint 32          | 0xCE
+    number_unsigned | 4294967296..18446744073709551615  | uint 64          | 0xCF
+    number_float    | *any value*                       | float 64         | 0xCB
+    string          | *length*: 0..31                   | fixstr           | 0xA0..0xBF
+    string          | *length*: 32..255                 | str 8            | 0xD9
+    string          | *length*: 256..65535              | str 16           | 0xDA
+    string          | *length*: 65536..4294967295       | str 32           | 0xDB
+    array           | *size*: 0..15                     | fixarray         | 0x90..0x9F
+    array           | *size*: 16..65535                 | array 16         | 0xDC
+    array           | *size*: 65536..4294967295         | array 32         | 0xDD
+    object          | *size*: 0..15                     | fix map          | 0x80..0x8F
+    object          | *size*: 16..65535                 | map 16           | 0xDE
+    object          | *size*: 65536..4294967295         | map 32           | 0xDF
+
+    @note The mapping is **complete** in the sense that any JSON value type
+          can be converted to a MessagePack value.
+
+    @note The following values can **not** be converted to a MessagePack value:
+          - strings with more than 4294967295 bytes
+          - arrays with more than 4294967295 elements
+          - objects with more than 4294967295 elements
+
+    @note The following MessagePack types are not used in the conversion:
+          - bin 8 - bin 32 (0xC4..0xC6)
+          - ext 8 - ext 32 (0xC7..0xC9)
+          - float 32 (0xCA)
+          - fixext 1 - fixext 16 (0xD4..0xD8)
+
+    @note Any MessagePack output created via @ref to_msgpack can be successfully
+          parsed by @ref from_msgpack.
+
+    @note If NaN or Infinity are stored inside a JSON number, they are
+          serialized properly. This behavior differs from the @ref dump()
+          function which serializes NaN or Infinity to `null`.
+
+    @param[in] j  JSON value to serialize
+    @return MessagePack serialization as byte vector
+
+    @complexity Linear in the size of the JSON value @a j.
+
+    @liveexample{The example shows the serialization of a JSON value to a byte
+    vector in MessagePack format.,to_msgpack}
+
+    @sa http://msgpack.org
+    @sa @ref from_msgpack for the analogous deserialization
+    @sa @ref to_cbor(const basic_json&) for the related CBOR format
+    @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+             related UBJSON format
+
+    @since version 2.0.9
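+
+    A minimal sketch (assuming `using nlohmann::json;`):
+    @code{.cpp}
+    json j = {{"compact", true}, {"schema", 0}};
+    std::vector<std::uint8_t> msgpack = json::to_msgpack(j);
+    @endcode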
+    */
+    static std::vector<uint8_t> to_msgpack(const basic_json& j)
+    {
+        std::vector<uint8_t> result;
+        to_msgpack(j, result);
+        return result;
+    }
+
+    static void to_msgpack(const basic_json& j, detail::output_adapter<uint8_t> o)
+    {
+        binary_writer<uint8_t>(o).write_msgpack(j);
+    }
+
+    static void to_msgpack(const basic_json& j, detail::output_adapter<char> o)
+    {
+        binary_writer<char>(o).write_msgpack(j);
+    }
+
+    /*!
+    @brief create a UBJSON serialization of a given JSON value
+
+    Serializes a given JSON value @a j to a byte vector using the UBJSON
+    (Universal Binary JSON) serialization format. UBJSON aims to be more compact
+    than JSON itself, yet more efficient to parse.
+
+    The library uses the following mapping from JSON value types to
+    UBJSON types according to the UBJSON specification:
+
+    JSON value type | value/range                       | UBJSON type | marker
+    --------------- | --------------------------------- | ----------- | ------
+    null            | `null`                            | null        | `Z`
+    boolean         | `true`                            | true        | `T`
+    boolean         | `false`                           | false       | `F`
+    number_integer  | -9223372036854775808..-2147483649 | int64       | `L`
+    number_integer  | -2147483648..-32769               | int32       | `l`
+    number_integer  | -32768..-129                      | int16       | `I`
+    number_integer  | -128..127                         | int8        | `i`
+    number_integer  | 128..255                          | uint8       | `U`
+    number_integer  | 256..32767                        | int16       | `I`
+    number_integer  | 32768..2147483647                 | int32       | `l`
+    number_integer  | 2147483648..9223372036854775807   | int64       | `L`
+    number_unsigned | 0..127                            | int8        | `i`
+    number_unsigned | 128..255                          | uint8       | `U`
+    number_unsigned | 256..32767                        | int16       | `I`
+    number_unsigned | 32768..2147483647                 | int32       | `l`
+    number_unsigned | 2147483648..9223372036854775807   | int64       | `L`
+    number_float    | *any value*                       | float64     | `D`
+    string          | *with shortest length indicator*  | string      | `S`
+    array           | *see notes on optimized format*   | array       | `[`
+    object          | *see notes on optimized format*   | map         | `{`
+
+    @note The mapping is **complete** in the sense that any JSON value type
+          can be converted to a UBJSON value.
+
+    @note The following values can **not** be converted to a UBJSON value:
+          - strings with more than 9223372036854775807 bytes (theoretical)
+          - unsigned integer numbers above 9223372036854775807
+
+    @note The following markers are not used in the conversion:
+          - `Z`: no-op values are not created.
+          - `C`: single-byte strings are serialized with `S` markers.
+
+    @note Any UBJSON output created via @ref to_ubjson can be successfully parsed
+          by @ref from_ubjson.
+
+    @note If NaN or Infinity are stored inside a JSON number, they are
+          serialized properly. This behavior differs from the @ref dump()
+          function which serializes NaN or Infinity to `null`.
+
+    @note The optimized formats for containers are supported: Parameter
+          @a use_size adds size information to the beginning of a container and
+          removes the closing marker. Parameter @a use_type further checks
+          whether all elements of a container have the same type and adds the
+          type marker to the beginning of the container. The @a use_type
+          parameter must only be used together with @a use_size = true. Note
+          that @a use_size = true alone may result in larger representations -
+          the benefit of this parameter is that the receiving side is
+          immediately informed on the number of elements of the container.
+
+    @param[in] j  JSON value to serialize
+    @param[in] use_size  whether to add size annotations to container types
+    @param[in] use_type  whether to add type annotations to container types
+                         (must be combined with @a use_size = true)
+    @return UBJSON serialization as byte vector
+
+    @complexity Linear in the size of the JSON value @a j.
+
+    @liveexample{The example shows the serialization of a JSON value to a byte
+    vector in UBJSON format.,to_ubjson}
+
+    @sa http://ubjson.org
+    @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+        analogous deserialization
+    @sa @ref to_cbor(const basic_json&) for the related CBOR format
+    @sa @ref to_msgpack(const basic_json&) for the related MessagePack format
+
+    @since version 3.1.0
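+
+    A minimal sketch (assuming `using nlohmann::json;`):
+    @code{.cpp}
+    json j = {1, 2, 3};
+    auto plain     = json::to_ubjson(j);              // unannotated container
+    auto annotated = json::to_ubjson(j, true, true);  // size and type annotations
+    @endcode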
+    */
+    static std::vector<uint8_t> to_ubjson(const basic_json& j,
+                                          const bool use_size = false,
+                                          const bool use_type = false)
+    {
+        std::vector<uint8_t> result;
+        to_ubjson(j, result, use_size, use_type);
+        return result;
+    }
+
+    static void to_ubjson(const basic_json& j, detail::output_adapter<uint8_t> o,
+                          const bool use_size = false, const bool use_type = false)
+    {
+        binary_writer<uint8_t>(o).write_ubjson(j, use_size, use_type);
+    }
+
+    static void to_ubjson(const basic_json& j, detail::output_adapter<char> o,
+                          const bool use_size = false, const bool use_type = false)
+    {
+        binary_writer<char>(o).write_ubjson(j, use_size, use_type);
+    }
+
+
+    /*!
+    @brief Serializes the given JSON object `j` to BSON and returns a vector
+           containing the corresponding BSON-representation.
+
+    BSON (Binary JSON) is a binary format in which zero or more ordered key/value pairs are
+    stored as a single entity (a so-called document).
+
+    The library uses the following mapping from JSON value types to BSON types:
+
+    JSON value type | value/range                       | BSON type   | marker
+    --------------- | --------------------------------- | ----------- | ------
+    null            | `null`                            | null        | 0x0A
+    boolean         | `true`, `false`                   | boolean     | 0x08
+    number_integer  | -9223372036854775808..-2147483649 | int64       | 0x12
+    number_integer  | -2147483648..2147483647           | int32       | 0x10
+    number_integer  | 2147483648..9223372036854775807   | int64       | 0x12
+    number_unsigned | 0..2147483647                     | int32       | 0x10
+    number_unsigned | 2147483648..9223372036854775807   | int64       | 0x12
+    number_unsigned | 9223372036854775808..18446744073709551615 | --  | --
+    number_float    | *any value*                       | double      | 0x01
+    string          | *any value*                       | string      | 0x02
+    array           | *any value*                       | document    | 0x04
+    object          | *any value*                       | document    | 0x03
+
+    @warning The mapping is **incomplete**, since only JSON objects (and
+    things contained therein) can be serialized to BSON.
+    Also, integers larger than 9223372036854775807 cannot be serialized to BSON,
+    and keys may not contain U+0000, since they are serialized as
+    zero-terminated C strings.
+
+    @throw out_of_range.407  if `j.is_number_unsigned() && j.get<std::uint64_t>() > 9223372036854775807`
+    @throw out_of_range.409  if a key in `j` contains a NULL (U+0000)
+    @throw type_error.317    if `!j.is_object()`
+
+    @pre The input `j` is required to be an object: `j.is_object() == true`.
+
+    @note Any BSON output created via @ref to_bson can be successfully parsed
+          by @ref from_bson.
+
+    @param[in] j  JSON value to serialize
+    @return BSON serialization as byte vector
+
+    @complexity Linear in the size of the JSON value @a j.
+
+    @liveexample{The example shows the serialization of a JSON value to a byte
+    vector in BSON format.,to_bson}
+
+    @sa http://bsonspec.org/spec.html
+    @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for the
+        analogous deserialization
+    @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+             related UBJSON format
+    @sa @ref to_cbor(const basic_json&) for the related CBOR format
+    @sa @ref to_msgpack(const basic_json&) for the related MessagePack format
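+
+    A minimal sketch (assuming `using nlohmann::json;`); note that the value
+    must be an object:
+    @code{.cpp}
+    json j = {{"entry", 1}};
+    std::vector<std::uint8_t> bson = json::to_bson(j);
+    @endcode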
+    */
+    static std::vector<uint8_t> to_bson(const basic_json& j)
+    {
+        std::vector<uint8_t> result;
+        to_bson(j, result);
+        return result;
+    }
+
+    /*!
+    @brief Serializes the given JSON object `j` to BSON and forwards the
+           corresponding BSON-representation to the given output_adapter `o`.
+    @param j The JSON object to convert to BSON.
+    @param o The output adapter that receives the binary BSON representation.
+    @pre The input `j` shall be an object: `j.is_object() == true`
+    @sa @ref to_bson(const basic_json&)
+    */
+    static void to_bson(const basic_json& j, detail::output_adapter<uint8_t> o)
+    {
+        binary_writer<uint8_t>(o).write_bson(j);
+    }
+
+    /*!
+    @copydoc to_bson(const basic_json&, detail::output_adapter<uint8_t>)
+    */
+    static void to_bson(const basic_json& j, detail::output_adapter<char> o)
+    {
+        binary_writer<char>(o).write_bson(j);
+    }
+
+
+    /*!
+    @brief create a JSON value from an input in CBOR format
+
+    Deserializes a given input @a i to a JSON value using the CBOR (Concise
+    Binary Object Representation) serialization format.
+
+    The library maps CBOR types to JSON value types as follows:
+
+    CBOR type              | JSON value type | first byte
+    ---------------------- | --------------- | ----------
+    Integer                | number_unsigned | 0x00..0x17
+    Unsigned integer       | number_unsigned | 0x18
+    Unsigned integer       | number_unsigned | 0x19
+    Unsigned integer       | number_unsigned | 0x1A
+    Unsigned integer       | number_unsigned | 0x1B
+    Negative integer       | number_integer  | 0x20..0x37
+    Negative integer       | number_integer  | 0x38
+    Negative integer       | number_integer  | 0x39
+    Negative integer       | number_integer  | 0x3A
+    Negative integer       | number_integer  | 0x3B
+    UTF-8 string           | string          | 0x60..0x77
+    UTF-8 string           | string          | 0x78
+    UTF-8 string           | string          | 0x79
+    UTF-8 string           | string          | 0x7A
+    UTF-8 string           | string          | 0x7B
+    UTF-8 string           | string          | 0x7F
+    array                  | array           | 0x80..0x97
+    array                  | array           | 0x98
+    array                  | array           | 0x99
+    array                  | array           | 0x9A
+    array                  | array           | 0x9B
+    array                  | array           | 0x9F
+    map                    | object          | 0xA0..0xB7
+    map                    | object          | 0xB8
+    map                    | object          | 0xB9
+    map                    | object          | 0xBA
+    map                    | object          | 0xBB
+    map                    | object          | 0xBF
+    False                  | `false`         | 0xF4
+    True                   | `true`          | 0xF5
+    Null                   | `null`          | 0xF6
+    Half-Precision Float   | number_float    | 0xF9
+    Single-Precision Float | number_float    | 0xFA
+    Double-Precision Float | number_float    | 0xFB
+
+    @warning The mapping is **incomplete** in the sense that not all CBOR
+             types can be converted to a JSON value. The following CBOR types
+             are not supported and will yield parse errors (parse_error.112):
+             - byte strings (0x40..0x5F)
+             - date/time (0xC0..0xC1)
+             - bignum (0xC2..0xC3)
+             - decimal fraction (0xC4)
+             - bigfloat (0xC5)
+             - tagged items (0xC6..0xD4, 0xD8..0xDB)
+             - expected conversions (0xD5..0xD7)
+             - simple values (0xE0..0xF3, 0xF8)
+             - undefined (0xF7)
+
+    @warning CBOR allows map keys of any type, whereas JSON only allows
+             strings as keys in object values. Therefore, CBOR maps with keys
+             other than UTF-8 strings are rejected (parse_error.113).
+
+    @note Any CBOR output created via @ref to_cbor can be successfully parsed by
+          @ref from_cbor.
+
+    @param[in] i  an input in CBOR format convertible to an input adapter
+    @param[in] strict  whether to expect the input to be consumed until EOF
+                       (true by default)
+    @param[in] allow_exceptions  whether to throw exceptions in case of a
+    parse error (optional, true by default)
+
+    @return deserialized JSON value
+
+    @throw parse_error.110 if the given input ends prematurely or the end of
+    file was not reached when @a strict was set to true
+    @throw parse_error.112 if unsupported features from CBOR were
+    used in the given input @a i or if the input is not valid CBOR
+    @throw parse_error.113 if a string was expected as map key, but not found
+
+    @complexity Linear in the size of the input @a i.
+
+    @liveexample{The example shows the deserialization of a byte vector in CBOR
+    format to a JSON value.,from_cbor}
+
+    @sa http://cbor.io
+    @sa @ref to_cbor(const basic_json&) for the analogous serialization
+    @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for the
+        related MessagePack format
+    @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+        related UBJSON format
+
+    @since version 2.0.9; parameter @a start_index since 2.1.1; changed to
+           consume input adapters, removed start_index parameter, and added
+           @a strict parameter since 3.0.0; added @a allow_exceptions parameter
+           since 3.2.0
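+
+    A minimal sketch (assuming `using nlohmann::json;`):
+    @code{.cpp}
+    std::vector<std::uint8_t> v = {0xF5};  // CBOR encoding of `true`
+    json j = json::from_cbor(v);           // j == true
+    @endcode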
+    */
+    static basic_json from_cbor(detail::input_adapter&& i,
+                                const bool strict = true,
+                                const bool allow_exceptions = true)
+    {
+        basic_json result;
+        detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+        const bool res = binary_reader(detail::input_adapter(i)).sax_parse(input_format_t::cbor, &sdp, strict);
+        return res ? result : basic_json(value_t::discarded);
+    }
+
+    /*!
+    @copydoc from_cbor(detail::input_adapter&&, const bool, const bool)
+    */
+    template<typename A1, typename A2,
+             detail::enable_if_t<std::is_constructible<detail::input_adapter, A1, A2>::value, int> = 0>
+    static basic_json from_cbor(A1 && a1, A2 && a2,
+                                const bool strict = true,
+                                const bool allow_exceptions = true)
+    {
+        basic_json result;
+        detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+        const bool res = binary_reader(detail::input_adapter(std::forward<A1>(a1), std::forward<A2>(a2))).sax_parse(input_format_t::cbor, &sdp, strict);
+        return res ? result : basic_json(value_t::discarded);
+    }
+
+    /*!
+    @brief create a JSON value from an input in MessagePack format
+
+    Deserializes a given input @a i to a JSON value using the MessagePack
+    serialization format.
+
+    The library maps MessagePack types to JSON value types as follows:
+
+    MessagePack type | JSON value type | first byte
+    ---------------- | --------------- | ----------
+    positive fixint  | number_unsigned | 0x00..0x7F
+    fixmap           | object          | 0x80..0x8F
+    fixarray         | array           | 0x90..0x9F
+    fixstr           | string          | 0xA0..0xBF
+    nil              | `null`          | 0xC0
+    false            | `false`         | 0xC2
+    true             | `true`          | 0xC3
+    float 32         | number_float    | 0xCA
+    float 64         | number_float    | 0xCB
+    uint 8           | number_unsigned | 0xCC
+    uint 16          | number_unsigned | 0xCD
+    uint 32          | number_unsigned | 0xCE
+    uint 64          | number_unsigned | 0xCF
+    int 8            | number_integer  | 0xD0
+    int 16           | number_integer  | 0xD1
+    int 32           | number_integer  | 0xD2
+    int 64           | number_integer  | 0xD3
+    str 8            | string          | 0xD9
+    str 16           | string          | 0xDA
+    str 32           | string          | 0xDB
+    array 16         | array           | 0xDC
+    array 32         | array           | 0xDD
+    map 16           | object          | 0xDE
+    map 32           | object          | 0xDF
+    negative fixint  | number_integer  | 0xE0..0xFF
+
+    @warning The mapping is **incomplete** in the sense that not all
+             MessagePack types can be converted to a JSON value. The following
+             MessagePack types are not supported and will yield parse errors:
+              - bin 8 - bin 32 (0xC4..0xC6)
+              - ext 8 - ext 32 (0xC7..0xC9)
+              - fixext 1 - fixext 16 (0xD4..0xD8)
+
+    @note Any MessagePack output created via @ref to_msgpack can be successfully
+          parsed by @ref from_msgpack.
+
+    @param[in] i  an input in MessagePack format convertible to an input
+                  adapter
+    @param[in] strict  whether to expect the input to be consumed until EOF
+                       (true by default)
+    @param[in] allow_exceptions  whether to throw exceptions in case of a
+    parse error (optional, true by default)
+
+    @return deserialized JSON value
+
+    @throw parse_error.110 if the given input ends prematurely or the end of
+    file was not reached when @a strict was set to true
+    @throw parse_error.112 if unsupported features from MessagePack were
+    used in the given input @a i or if the input is not valid MessagePack
+    @throw parse_error.113 if a string was expected as map key, but not found
+
+    @complexity Linear in the size of the input @a i.
+
+    @liveexample{The example shows the deserialization of a byte vector in
+    MessagePack format to a JSON value.,from_msgpack}
+
+    @sa http://msgpack.org
+    @sa @ref to_msgpack(const basic_json&) for the analogous serialization
+    @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool) for the
+        related CBOR format
+    @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for
+        the related UBJSON format
+    @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for
+        the related BSON format
+
+    @since version 2.0.9; parameter @a start_index since 2.1.1; changed to
+           consume input adapters, removed start_index parameter, and added
+           @a strict parameter since 3.0.0; added @a allow_exceptions parameter
+           since 3.2.0
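+
+    A minimal sketch (assuming `using nlohmann::json;`):
+    @code{.cpp}
+    std::vector<std::uint8_t> v = {0xC3};  // MessagePack encoding of `true`
+    json j = json::from_msgpack(v);        // j == true
+    @endcode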
+    */
+    static basic_json from_msgpack(detail::input_adapter&& i,
+                                   const bool strict = true,
+                                   const bool allow_exceptions = true)
+    {
+        basic_json result;
+        detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+        const bool res = binary_reader(detail::input_adapter(i)).sax_parse(input_format_t::msgpack, &sdp, strict);
+        return res ? result : basic_json(value_t::discarded);
+    }
+
+    /*!
+    @copydoc from_msgpack(detail::input_adapter&&, const bool, const bool)
+    */
+    template<typename A1, typename A2,
+             detail::enable_if_t<std::is_constructible<detail::input_adapter, A1, A2>::value, int> = 0>
+    static basic_json from_msgpack(A1 && a1, A2 && a2,
+                                   const bool strict = true,
+                                   const bool allow_exceptions = true)
+    {
+        basic_json result;
+        detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+        const bool res = binary_reader(detail::input_adapter(std::forward<A1>(a1), std::forward<A2>(a2))).sax_parse(input_format_t::msgpack, &sdp, strict);
+        return res ? result : basic_json(value_t::discarded);
+    }
+
+    /*!
+    @brief create a JSON value from an input in UBJSON format
+
+    Deserializes a given input @a i to a JSON value using the UBJSON (Universal
+    Binary JSON) serialization format.
+
+    The library maps UBJSON types to JSON value types as follows:
+
+    UBJSON type | JSON value type                         | marker
+    ----------- | --------------------------------------- | ------
+    no-op       | *no value, next value is read*          | `N`
+    null        | `null`                                  | `Z`
+    false       | `false`                                 | `F`
+    true        | `true`                                  | `T`
+    float32     | number_float                            | `d`
+    float64     | number_float                            | `D`
+    uint8       | number_unsigned                         | `U`
+    int8        | number_integer                          | `i`
+    int16       | number_integer                          | `I`
+    int32       | number_integer                          | `l`
+    int64       | number_integer                          | `L`
+    string      | string                                  | `S`
+    char        | string                                  | `C`
+    array       | array (optimized values are supported)  | `[`
+    object      | object (optimized values are supported) | `{`
+
+    @note The mapping is **complete** in the sense that any UBJSON value can
+          be converted to a JSON value.
+
+    @param[in] i  an input in UBJSON format convertible to an input adapter
+    @param[in] strict  whether to expect the input to be consumed until EOF
+                       (true by default)
+    @param[in] allow_exceptions  whether to throw exceptions in case of a
+    parse error (optional, true by default)
+
+    @return deserialized JSON value
+
+    @throw parse_error.110 if the given input ends prematurely or the end of
+    file was not reached when @a strict was set to true
+    @throw parse_error.112 if a parse error occurs
+    @throw parse_error.113 if a string could not be parsed successfully
+
+    @complexity Linear in the size of the input @a i.
+
+    @liveexample{The example shows the deserialization of a byte vector in
+    UBJSON format to a JSON value.,from_ubjson}
+
+    @sa http://ubjson.org
+    @sa @ref to_ubjson(const basic_json&, const bool, const bool) for the
+             analogous serialization
+    @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool) for the
+        related CBOR format
+    @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
+        the related MessagePack format
+    @sa @ref from_bson(detail::input_adapter&&, const bool, const bool) for
+        the related BSON format
+
+    @since version 3.1.0; added @a allow_exceptions parameter since 3.2.0
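+
+    A minimal sketch (assuming `using nlohmann::json;`):
+    @code{.cpp}
+    std::vector<std::uint8_t> v = {'T'};   // UBJSON marker for `true`
+    json j = json::from_ubjson(v);         // j == true
+    @endcode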
+    */
+    static basic_json from_ubjson(detail::input_adapter&& i,
+                                  const bool strict = true,
+                                  const bool allow_exceptions = true)
+    {
+        basic_json result;
+        detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+        const bool res = binary_reader(detail::input_adapter(i)).sax_parse(input_format_t::ubjson, &sdp, strict);
+        return res ? result : basic_json(value_t::discarded);
+    }
+
+    /*!
+    @copydoc from_ubjson(detail::input_adapter&&, const bool, const bool)
+    */
+    template<typename A1, typename A2,
+             detail::enable_if_t<std::is_constructible<detail::input_adapter, A1, A2>::value, int> = 0>
+    static basic_json from_ubjson(A1 && a1, A2 && a2,
+                                  const bool strict = true,
+                                  const bool allow_exceptions = true)
+    {
+        basic_json result;
+        detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+        const bool res = binary_reader(detail::input_adapter(std::forward<A1>(a1), std::forward<A2>(a2))).sax_parse(input_format_t::ubjson, &sdp, strict);
+        return res ? result : basic_json(value_t::discarded);
+    }
+
+    /*!
+    @brief Create a JSON value from an input in BSON format
+
+    Deserializes a given input @a i to a JSON value using the BSON (Binary JSON)
+    serialization format.
+
+    The library maps BSON record types to JSON value types as follows:
+
+    BSON type       | BSON marker byte | JSON value type
+    --------------- | ---------------- | ---------------------------
+    double          | 0x01             | number_float
+    string          | 0x02             | string
+    document        | 0x03             | object
+    array           | 0x04             | array
+    binary          | 0x05             | still unsupported
+    undefined       | 0x06             | still unsupported
+    ObjectId        | 0x07             | still unsupported
+    boolean         | 0x08             | boolean
+    UTC Date-Time   | 0x09             | still unsupported
+    null            | 0x0A             | null
+    Regular Expr.   | 0x0B             | still unsupported
+    DB Pointer      | 0x0C             | still unsupported
+    JavaScript Code | 0x0D             | still unsupported
+    Symbol          | 0x0E             | still unsupported
+    JavaScript Code with scope | 0x0F  | still unsupported
+    int32           | 0x10             | number_integer
+    Timestamp       | 0x11             | still unsupported
+    int64           | 0x12             | number_integer
+    128-bit decimal float | 0x13       | still unsupported
+    Max Key         | 0x7F             | still unsupported
+    Min Key         | 0xFF             | still unsupported
+
+    @warning The mapping is **incomplete**. The unsupported mappings
+             are indicated in the table above.
+
+    @param[in] i  an input in BSON format convertible to an input adapter
+    @param[in] strict  whether to expect the input to be consumed until EOF
+                       (true by default)
+    @param[in] allow_exceptions  whether to throw exceptions in case of a
+    parse error (optional, true by default)
+
+    @return deserialized JSON value
+
+    @throw parse_error.114 if an unsupported BSON record type is encountered
+
+    @complexity Linear in the size of the input @a i.
+
+    @liveexample{The example shows the deserialization of a byte vector in
+    BSON format to a JSON value.,from_bson}
+
+    @sa http://bsonspec.org/spec.html
+    @sa @ref to_bson(const basic_json&) for the analogous serialization
+    @sa @ref from_cbor(detail::input_adapter&&, const bool, const bool) for the
+        related CBOR format
+    @sa @ref from_msgpack(detail::input_adapter&&, const bool, const bool) for
+        the related MessagePack format
+    @sa @ref from_ubjson(detail::input_adapter&&, const bool, const bool) for the
+        related UBJSON format
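+
+    A minimal sketch (assuming `using nlohmann::json;`); the shortest valid
+    BSON input is an empty document, i.e. a 4-byte length of 5 followed by a
+    0x00 terminator:
+    @code{.cpp}
+    std::vector<std::uint8_t> v = {0x05, 0x00, 0x00, 0x00, 0x00};
+    json j = json::from_bson(v);  // j == json::object()
+    @endcode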
+    */
+    static basic_json from_bson(detail::input_adapter&& i,
+                                const bool strict = true,
+                                const bool allow_exceptions = true)
+    {
+        basic_json result;
+        detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+        const bool res = binary_reader(detail::input_adapter(i)).sax_parse(input_format_t::bson, &sdp, strict);
+        return res ? result : basic_json(value_t::discarded);
+    }
+
+    /*!
+    @copydoc from_bson(detail::input_adapter&&, const bool, const bool)
+    */
+    template<typename A1, typename A2,
+             detail::enable_if_t<std::is_constructible<detail::input_adapter, A1, A2>::value, int> = 0>
+    static basic_json from_bson(A1 && a1, A2 && a2,
+                                const bool strict = true,
+                                const bool allow_exceptions = true)
+    {
+        basic_json result;
+        detail::json_sax_dom_parser<basic_json> sdp(result, allow_exceptions);
+        const bool res = binary_reader(detail::input_adapter(std::forward<A1>(a1), std::forward<A2>(a2))).sax_parse(input_format_t::bson, &sdp, strict);
+        return res ? result : basic_json(value_t::discarded);
+    }
+
+
+
+    /// @}
+
+    //////////////////////////
+    // JSON Pointer support //
+    //////////////////////////
+
+    /// @name JSON Pointer functions
+    /// @{
+
+    /*!
+    @brief access specified element via JSON Pointer
+
+    Uses a JSON pointer to retrieve a reference to the respective JSON value.
+    No bounds checking is performed. Similar to @ref operator[](const typename
+    object_t::key_type&), `null` values are created in arrays and objects if
+    necessary.
+
+    In particular:
+    - If the JSON pointer points to an object key that does not exist, it
+      is created and filled with a `null` value before a reference to it
+      is returned.
+    - If the JSON pointer points to an array index that does not exist, it
+      is created and filled with a `null` value before a reference to it
+      is returned. All indices between the current maximum and the given
+      index are also filled with `null`.
+    - The special value `-` is treated as a synonym for the index past the
+      end.
+
+    @param[in] ptr  a JSON pointer
+
+    @return reference to the element pointed to by @a ptr
+
+    @complexity Constant.
+
+    @throw parse_error.106   if an array index begins with '0'
+    @throw parse_error.109   if an array index was not a number
+    @throw out_of_range.404  if the JSON pointer can not be resolved
+
+    @liveexample{The behavior is shown in the example.,operatorjson_pointer}
+
+    @since version 2.0.0
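+
+    For example (assuming `using nlohmann::json;`):
+    @code{.cpp}
+    json j = {{"numbers", {1, 2}}};
+    j[json::json_pointer("/numbers/2")] = 3;      // extends the array to [1,2,3]
+    j[json::json_pointer("/name")] = "example";   // creates the missing key
+    @endcode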
+    */
+    reference operator[](const json_pointer& ptr)
+    {
+        return ptr.get_unchecked(this);
+    }
+
+    /*!
+    @brief access specified element via JSON Pointer
+
+    Uses a JSON pointer to retrieve a reference to the respective JSON value.
+    No bounds checking is performed. The function does not change the JSON
+    value; no `null` values are created. In particular, the special value
+    `-` yields an exception.
+
+    @param[in] ptr  JSON pointer to the desired element
+
+    @return const reference to the element pointed to by @a ptr
+
+    @complexity Constant.
+
+    @throw parse_error.106   if an array index begins with '0'
+    @throw parse_error.109   if an array index was not a number
+    @throw out_of_range.402  if the array index '-' is used
+    @throw out_of_range.404  if the JSON pointer can not be resolved
+
+    @liveexample{The behavior is shown in the example.,operatorjson_pointer_const}
+
+    @since version 2.0.0
+    */
+    const_reference operator[](const json_pointer& ptr) const
+    {
+        return ptr.get_unchecked(this);
+    }
+
+    /*!
+    @brief access specified element via JSON Pointer
+
+    Returns a reference to the element at the specified JSON pointer @a ptr,
+    with bounds checking.
+
+    @param[in] ptr  JSON pointer to the desired element
+
+    @return reference to the element pointed to by @a ptr
+
+    @throw parse_error.106 if an array index in the passed JSON pointer @a ptr
+    begins with '0'. See example below.
+
+    @throw parse_error.109 if an array index in the passed JSON pointer @a ptr
+    is not a number. See example below.
+
+    @throw out_of_range.401 if an array index in the passed JSON pointer @a ptr
+    is out of range. See example below.
+
+    @throw out_of_range.402 if the array index '-' is used in the passed JSON
+    pointer @a ptr. As `at` provides checked access (and no elements are
+    implicitly inserted), the index '-' is always invalid. See example below.
+
+    @throw out_of_range.403 if the JSON pointer describes a key of an object
+    which cannot be found. See example below.
+
+    @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved.
+    See example below.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @complexity Constant.
+
+    @since version 2.0.0
+
+    @liveexample{The behavior is shown in the example.,at_json_pointer}
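+
+    For example (assuming `using nlohmann::json;`):
+    @code{.cpp}
+    json j = {{"numbers", {1, 2}}};
+    int second = j.at(json::json_pointer("/numbers/1"));       // 2
+    // j.at(json::json_pointer("/missing")) throws out_of_range.403
+    @endcode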
+    */
+    reference at(const json_pointer& ptr)
+    {
+        return ptr.get_checked(this);
+    }
+
+    /*!
+    @brief access specified element via JSON Pointer
+
+    Returns a const reference to the element at the specified JSON pointer @a
+    ptr, with bounds checking.
+
+    @param[in] ptr  JSON pointer to the desired element
+
+    @return reference to the element pointed to by @a ptr
+
+    @throw parse_error.106 if an array index in the passed JSON pointer @a ptr
+    begins with '0'. See example below.
+
+    @throw parse_error.109 if an array index in the passed JSON pointer @a ptr
+    is not a number. See example below.
+
+    @throw out_of_range.401 if an array index in the passed JSON pointer @a ptr
+    is out of range. See example below.
+
+    @throw out_of_range.402 if the array index '-' is used in the passed JSON
+    pointer @a ptr. As `at` provides checked access (and no elements are
+    implicitly inserted), the index '-' is always invalid. See example below.
+
+    @throw out_of_range.403 if the JSON pointer describes a key of an object
+    which cannot be found. See example below.
+
+    @throw out_of_range.404 if the JSON pointer @a ptr can not be resolved.
+    See example below.
+
+    @exceptionsafety Strong guarantee: if an exception is thrown, there are no
+    changes in the JSON value.
+
+    @complexity Constant.
+
+    @since version 2.0.0
+
+    @liveexample{The behavior is shown in the example.,at_json_pointer_const}
+    */
+    const_reference at(const json_pointer& ptr) const
+    {
+        return ptr.get_checked(this);
+    }
+
+    /*!
+    @brief return flattened JSON value
+
+    The function creates a JSON object whose keys are JSON pointers (see [RFC
+    6901](https://tools.ietf.org/html/rfc6901)) and whose values are all
+    primitive. The original JSON value can be restored using the @ref
+    unflatten() function.
+
+    @return an object that maps JSON pointers to primitive values
+
+    @note Empty objects and arrays are flattened to `null` and will not be
+          reconstructed correctly by the @ref unflatten() function.
+
+    @complexity Linear in the size of the JSON value.
+
+    @liveexample{The following code shows how a JSON object is flattened to an
+    object whose keys consist of JSON pointers.,flatten}
+
+    @sa @ref unflatten() for the reverse function
+
+    @since version 2.0.0
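+
+    For example (assuming `using nlohmann::json;`):
+    @code{.cpp}
+    json j = {{"a", {{"b", 1}}}, {"c", {true, false}}};
+    json f = j.flatten();
+    // f == {"/a/b": 1, "/c/0": true, "/c/1": false}
+    @endcode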
+    */
+    basic_json flatten() const
+    {
+        basic_json result(value_t::object);
+        json_pointer::flatten("", *this, result);
+        return result;
+    }
+
+    /*!
+    @brief unflatten a previously flattened JSON value
+
+    The function restores the arbitrary nesting of a JSON value that has been
+    flattened before using the @ref flatten() function. The JSON value must
+    meet certain constraints:
+    1. The value must be an object.
+    2. The keys must be JSON pointers (see
+       [RFC 6901](https://tools.ietf.org/html/rfc6901))
+    3. The mapped values must be primitive JSON types.
+
+    @return the original JSON from a flattened version
+
+    @note Empty objects and arrays are flattened by @ref flatten() to `null`
+          values and cannot be unflattened to their original type. Apart from
+          this limitation, for a JSON value `j`, the following is always true:
+          `j == j.flatten().unflatten()`.
+
+    @complexity Linear in the size of the JSON value.
+
+    @throw type_error.314  if value is not an object
+    @throw type_error.315  if object values are not primitive
+
+    @liveexample{The following code shows how a flattened JSON object is
+    unflattened into the original nested JSON object.,unflatten}
+
+    @sa @ref flatten() for the reverse function
+
+    @since version 2.0.0
+    */
+    basic_json unflatten() const
+    {
+        return json_pointer::unflatten(*this);
+    }
+
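+    // A minimal round-trip sketch (illustrative): for values without empty
+    // objects or arrays, unflatten() restores the original nesting.
+    //
+    //     nlohmann::json j = {{"answer", {{"everything", 42}}}};
+    //     // j.flatten().unflatten() == j
+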
+    /// @}
+
+    //////////////////////////
+    // JSON Patch functions //
+    //////////////////////////
+
+    /// @name JSON Patch functions
+    /// @{
+
+    /*!
+    @brief applies a JSON patch
+
+    [JSON Patch](http://jsonpatch.com) defines a JSON document structure for
+    expressing a sequence of operations to apply to a JSON document. With
+    this function, a JSON Patch is applied to the current JSON value by
+    executing all operations from the patch.
+
+    @param[in] json_patch  JSON patch document
+    @return patched document
+
+    @note The application of a patch is atomic: Either all operations succeed
+          and the patched document is returned or an exception is thrown. In
+          any case, the original value is not changed: the patch is applied
+          to a copy of the value.
+
+    @throw parse_error.104 if the JSON patch does not consist of an array of
+    objects
+
+    @throw parse_error.105 if the JSON patch is malformed (e.g., mandatory
+    attributes are missing); example: `"operation add must have member path"`
+
+    @throw out_of_range.401 if an array index is out of range.
+
+    @throw out_of_range.403 if a JSON pointer inside the patch could not be
+    resolved successfully in the current JSON value; example: `"key baz not
+    found"`
+
+    @throw out_of_range.405 if JSON pointer has no parent ("add", "remove",
+    "move")
+
+    @throw other_error.501 if "test" operation was unsuccessful
+
+    @complexity Linear in the size of the JSON value and the length of the
+    JSON patch. As typically only a fraction of the JSON value is affected by
+    the patch, the complexity can usually be neglected.
+
+    @liveexample{The following code shows how a JSON patch is applied to a
+    value.,patch}
+
+    @sa @ref diff -- create a JSON patch by comparing two JSON values
+
+    @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902)
+    @sa [RFC 6901 (JSON Pointer)](https://tools.ietf.org/html/rfc6901)
+
+    @since version 2.0.0
+    */
+    basic_json patch(const basic_json& json_patch) const
+    {
+        // make a working copy to apply the patch to
+        basic_json result = *this;
+
+        // the valid JSON Patch operations
+        enum class patch_operations {add, remove, replace, move, copy, test, invalid};
+
+        const auto get_op = [](const std::string & op)
+        {
+            if (op == "add")
+            {
+                return patch_operations::add;
+            }
+            if (op == "remove")
+            {
+                return patch_operations::remove;
+            }
+            if (op == "replace")
+            {
+                return patch_operations::replace;
+            }
+            if (op == "move")
+            {
+                return patch_operations::move;
+            }
+            if (op == "copy")
+            {
+                return patch_operations::copy;
+            }
+            if (op == "test")
+            {
+                return patch_operations::test;
+            }
+
+            return patch_operations::invalid;
+        };
+
+        // wrapper for "add" operation; add value at ptr
+        const auto operation_add = [&result](json_pointer & ptr, basic_json val)
+        {
+            // adding to the root of the target document means replacing it
+            if (ptr.is_root())
+            {
+                result = val;
+            }
+            else
+            {
+                // make sure the top element of the pointer exists
+                json_pointer top_pointer = ptr.top();
+                if (top_pointer != ptr)
+                {
+                    result.at(top_pointer);
+                }
+
+                // get reference to parent of JSON pointer ptr
+                const auto last_path = ptr.pop_back();
+                basic_json& parent = result[ptr];
+
+                switch (parent.m_type)
+                {
+                    case value_t::null:
+                    case value_t::object:
+                    {
+                        // use operator[] to add value
+                        parent[last_path] = val;
+                        break;
+                    }
+
+                    case value_t::array:
+                    {
+                        if (last_path == "-")
+                        {
+                            // special case: append to back
+                            parent.push_back(val);
+                        }
+                        else
+                        {
+                            const auto idx = json_pointer::array_index(last_path);
+                            if (JSON_UNLIKELY(static_cast<size_type>(idx) > parent.size()))
+                            {
+                                // avoid undefined behavior
+                                JSON_THROW(out_of_range::create(401, "array index " + std::to_string(idx) + " is out of range"));
+                            }
+
+                            // default case: insert at the given offset
+                            parent.insert(parent.begin() + static_cast<difference_type>(idx), val);
+                        }
+                        break;
+                    }
+
+                    // LCOV_EXCL_START
+                    default:
+                    {
+                        // if there exists a parent it cannot be primitive
+                        assert(false);
+                    }
+                        // LCOV_EXCL_STOP
+                }
+            }
+        };
+
+        // wrapper for "remove" operation; remove value at ptr
+        const auto operation_remove = [&result](json_pointer & ptr)
+        {
+            // get reference to parent of JSON pointer ptr
+            const auto last_path = ptr.pop_back();
+            basic_json& parent = result.at(ptr);
+
+            // remove child
+            if (parent.is_object())
+            {
+                // perform range check
+                auto it = parent.find(last_path);
+                if (JSON_LIKELY(it != parent.end()))
+                {
+                    parent.erase(it);
+                }
+                else
+                {
+                    JSON_THROW(out_of_range::create(403, "key '" + last_path + "' not found"));
+                }
+            }
+            else if (parent.is_array())
+            {
+                // note erase performs range check
+                parent.erase(static_cast<size_type>(json_pointer::array_index(last_path)));
+            }
+        };
+
+        // type check: top level value must be an array
+        if (JSON_UNLIKELY(not json_patch.is_array()))
+        {
+            JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects"));
+        }
+
+        // iterate and apply the operations
+        for (const auto& val : json_patch)
+        {
+            // wrapper to get a value for an operation
+            const auto get_value = [&val](const std::string & op,
+                                          const std::string & member,
+                                          bool string_type) -> basic_json &
+            {
+                // find value
+                auto it = val.m_value.object->find(member);
+
+                // context-sensitive error message
+                const auto error_msg = (op == "op") ? "operation" : "operation '" + op + "'";
+
+                // check if desired value is present
+                if (JSON_UNLIKELY(it == val.m_value.object->end()))
+                {
+                    JSON_THROW(parse_error::create(105, 0, error_msg + " must have member '" + member + "'"));
+                }
+
+                // check if result is of type string
+                if (JSON_UNLIKELY(string_type and not it->second.is_string()))
+                {
+                    JSON_THROW(parse_error::create(105, 0, error_msg + " must have string member '" + member + "'"));
+                }
+
+                // no error: return value
+                return it->second;
+            };
+
+            // type check: every element of the array must be an object
+            if (JSON_UNLIKELY(not val.is_object()))
+            {
+                JSON_THROW(parse_error::create(104, 0, "JSON patch must be an array of objects"));
+            }
+
+            // collect mandatory members
+            const std::string op = get_value("op", "op", true);
+            const std::string path = get_value(op, "path", true);
+            json_pointer ptr(path);
+
+            switch (get_op(op))
+            {
+                case patch_operations::add:
+                {
+                    operation_add(ptr, get_value("add", "value", false));
+                    break;
+                }
+
+                case patch_operations::remove:
+                {
+                    operation_remove(ptr);
+                    break;
+                }
+
+                case patch_operations::replace:
+                {
+                    // the "path" location must exist - use at()
+                    result.at(ptr) = get_value("replace", "value", false);
+                    break;
+                }
+
+                case patch_operations::move:
+                {
+                    const std::string from_path = get_value("move", "from", true);
+                    json_pointer from_ptr(from_path);
+
+                    // the "from" location must exist - use at()
+                    basic_json v = result.at(from_ptr);
+
+                    // The move operation is functionally identical to a
+                    // "remove" operation on the "from" location, followed
+                    // immediately by an "add" operation at the target
+                    // location with the value that was just removed.
+                    operation_remove(from_ptr);
+                    operation_add(ptr, v);
+                    break;
+                }
+
+                case patch_operations::copy:
+                {
+                    const std::string from_path = get_value("copy", "from", true);
+                    const json_pointer from_ptr(from_path);
+
+                    // the "from" location must exist - use at()
+                    basic_json v = result.at(from_ptr);
+
+                    // The copy is functionally identical to an "add"
+                    // operation at the target location using the value
+                    // specified in the "from" member.
+                    operation_add(ptr, v);
+                    break;
+                }
+
+                case patch_operations::test:
+                {
+                    bool success = false;
+                    JSON_TRY
+                    {
+                        // check if "value" matches the one at "path"
+                        // the "path" location must exist - use at()
+                        success = (result.at(ptr) == get_value("test", "value", false));
+                    }
+                    JSON_INTERNAL_CATCH (out_of_range&)
+                    {
+                        // ignore out of range errors: success remains false
+                    }
+
+                    // throw an exception if test fails
+                    if (JSON_UNLIKELY(not success))
+                    {
+                        JSON_THROW(other_error::create(501, "unsuccessful: " + val.dump()));
+                    }
+
+                    break;
+                }
+
+                case patch_operations::invalid:
+                {
+                    // op must be "add", "remove", "replace", "move", "copy", or
+                    // "test"
+                    JSON_THROW(parse_error::create(105, 0, "operation value '" + op + "' is invalid"));
+                }
+            }
+        }
+
+        return result;
+    }
+
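+    // A minimal usage sketch for patch() (document and patch are illustrative
+    // only): the original value is left unchanged.
+    //
+    //     nlohmann::json doc = {{"foo", "bar"}};
+    //     nlohmann::json p = nlohmann::json::parse(
+    //         R"([{"op": "replace", "path": "/foo", "value": "baz"}])");
+    //     nlohmann::json patched = doc.patch(p);  // {"foo": "baz"}
+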
+    /*!
+    @brief creates a diff as a JSON patch
+
+    Creates a [JSON Patch](http://jsonpatch.com) so that value @a source can
+    be changed into the value @a target by calling @ref patch function.
+
+    @invariant For two JSON values @a source and @a target, the following code
+    always yields `true`:
+    @code {.cpp}
+    source.patch(diff(source, target)) == target;
+    @endcode
+
+    @note Currently, only `remove`, `add`, and `replace` operations are
+          generated.
+
+    @param[in] source  JSON value to compare from
+    @param[in] target  JSON value to compare against
+    @param[in] path    helper value to create JSON pointers
+
+    @return a JSON patch to convert the @a source to @a target
+
+    @complexity Linear in the lengths of @a source and @a target.
+
+    @liveexample{The following code shows how a JSON patch is created as a
+    diff for two JSON values.,diff}
+
+    @sa @ref patch -- apply a JSON patch
+    @sa @ref merge_patch -- apply a JSON Merge Patch
+
+    @sa [RFC 6902 (JSON Patch)](https://tools.ietf.org/html/rfc6902)
+
+    @since version 2.0.0
+    */
+    static basic_json diff(const basic_json& source, const basic_json& target,
+                           const std::string& path = "")
+    {
+        // the patch
+        basic_json result(value_t::array);
+
+        // if the values are the same, return empty patch
+        if (source == target)
+        {
+            return result;
+        }
+
+        if (source.type() != target.type())
+        {
+            // different types: replace value
+            result.push_back(
+            {
+                {"op", "replace"}, {"path", path}, {"value", target}
+            });
+        }
+        else
+        {
+            switch (source.type())
+            {
+                case value_t::array:
+                {
+                    // first pass: traverse common elements
+                    std::size_t i = 0;
+                    while (i < source.size() and i < target.size())
+                    {
+                        // recursive call to compare array values at index i
+                        auto temp_diff = diff(source[i], target[i], path + "/" + std::to_string(i));
+                        result.insert(result.end(), temp_diff.begin(), temp_diff.end());
+                        ++i;
+                    }
+
+                    // i has now reached the end of at least one array;
+                    // in a second pass, traverse the remaining elements
+
+                    // remove the remaining elements of source
+                    const auto end_index = static_cast<difference_type>(result.size());
+                    while (i < source.size())
+                    {
+                        // add operations in reverse order to avoid invalid
+                        // indices
+                        result.insert(result.begin() + end_index, object(
+                        {
+                            {"op", "remove"},
+                            {"path", path + "/" + std::to_string(i)}
+                        }));
+                        ++i;
+                    }
+
+                    // add the remaining elements of target
+                    while (i < target.size())
+                    {
+                        result.push_back(
+                        {
+                            {"op", "add"},
+                            {"path", path + "/" + std::to_string(i)},
+                            {"value", target[i]}
+                        });
+                        ++i;
+                    }
+
+                    break;
+                }
+
+                case value_t::object:
+                {
+                    // first pass: traverse the source object's elements
+                    for (auto it = source.cbegin(); it != source.cend(); ++it)
+                    {
+                        // escape the key name to be used in a JSON patch
+                        const auto key = json_pointer::escape(it.key());
+
+                        if (target.find(it.key()) != target.end())
+                        {
+                            // recursive call to compare object values at key it
+                            auto temp_diff = diff(it.value(), target[it.key()], path + "/" + key);
+                            result.insert(result.end(), temp_diff.begin(), temp_diff.end());
+                        }
+                        else
+                        {
+                            // found a key that is not in target -> remove it
+                            result.push_back(object(
+                            {
+                                {"op", "remove"}, {"path", path + "/" + key}
+                            }));
+                        }
+                    }
+
+                    // second pass: traverse the target object's elements
+                    for (auto it = target.cbegin(); it != target.cend(); ++it)
+                    {
+                        if (source.find(it.key()) == source.end())
+                        {
+                            // found a key that is not in source -> add it
+                            const auto key = json_pointer::escape(it.key());
+                            result.push_back(
+                            {
+                                {"op", "add"}, {"path", path + "/" + key},
+                                {"value", it.value()}
+                            });
+                        }
+                    }
+
+                    break;
+                }
+
+                default:
+                {
+                    // both primitive types: replace value
+                    result.push_back(
+                    {
+                        {"op", "replace"}, {"path", path}, {"value", target}
+                    });
+                    break;
+                }
+            }
+        }
+
+        return result;
+    }
+
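+    // A minimal usage sketch for diff() (values are illustrative only): the
+    // resulting patch transforms source into target, as per the invariant
+    // documented above.
+    //
+    //     nlohmann::json source = {{"a", 1}};
+    //     nlohmann::json target = {{"a", 2}, {"b", 3}};
+    //     nlohmann::json p = nlohmann::json::diff(source, target);
+    //     // source.patch(p) == target
+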
+    /// @}
+
+    ////////////////////////////////
+    // JSON Merge Patch functions //
+    ////////////////////////////////
+
+    /// @name JSON Merge Patch functions
+    /// @{
+
+    /*!
+    @brief applies a JSON Merge Patch
+
+    The merge patch format is primarily intended for use with the HTTP PATCH
+    method as a means of describing a set of modifications to a target
+    resource's content. This function applies a merge patch to the current
+    JSON value.
+
+    The function implements the following algorithm from Section 2 of
+    [RFC 7396 (JSON Merge Patch)](https://tools.ietf.org/html/rfc7396):
+
+    ```
+    define MergePatch(Target, Patch):
+      if Patch is an Object:
+        if Target is not an Object:
+          Target = {} // Ignore the contents and set it to an empty Object
+        for each Name/Value pair in Patch:
+          if Value is null:
+            if Name exists in Target:
+              remove the Name/Value pair from Target
+          else:
+            Target[Name] = MergePatch(Target[Name], Value)
+        return Target
+      else:
+        return Patch
+    ```
+
+    Here, `Target` is the current JSON value; that is, the patch is applied to
+    the value that this function is called on.
+
+    @param[in] apply_patch  the patch to apply
+
+    @complexity Linear in the size of @a apply_patch.
+
+    @liveexample{The following code shows how a JSON Merge Patch is applied to
+    a JSON document.,merge_patch}
+
+    @sa @ref patch -- apply a JSON patch
+    @sa [RFC 7396 (JSON Merge Patch)](https://tools.ietf.org/html/rfc7396)
+
+    @since version 3.0.0
+    */
+    void merge_patch(const basic_json& apply_patch)
+    {
+        if (apply_patch.is_object())
+        {
+            if (not is_object())
+            {
+                *this = object();
+            }
+            for (auto it = apply_patch.begin(); it != apply_patch.end(); ++it)
+            {
+                if (it.value().is_null())
+                {
+                    erase(it.key());
+                }
+                else
+                {
+                    operator[](it.key()).merge_patch(it.value());
+                }
+            }
+        }
+        else
+        {
+            *this = apply_patch;
+        }
+    }
+
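+    // A minimal usage sketch for merge_patch() (values are illustrative only):
+    // null entries remove keys, object entries are merged recursively.
+    //
+    //     nlohmann::json doc   = {{"a", "b"}, {"c", {{"d", "e"}}}};
+    //     nlohmann::json patch = {{"a", nullptr}, {"c", {{"f", "g"}}}};
+    //     doc.merge_patch(patch);  // doc == {"c": {"d": "e", "f": "g"}}
+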
+    /// @}
+};
+} // namespace nlohmann
+
+///////////////////////
+// nonmember support //
+///////////////////////
+
+// specialization of std::swap, and std::hash
+namespace std
+{
+
+/// hash value for JSON objects
+template<>
+struct hash<nlohmann::json>
+{
+    /*!
+    @brief return a hash value for a JSON object
+
+    @since version 1.0.0
+    */
+    std::size_t operator()(const nlohmann::json& j) const
+    {
+        // naive hashing via the string representation
+        const auto& h = hash<nlohmann::json::string_t>();
+        return h(j.dump());
+    }
+};
+
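+// A minimal usage sketch (illustrative): with this specialization, JSON values
+// can be hashed directly, e.g. as keys of unordered containers (assuming the
+// corresponding standard headers are included).
+//
+//     nlohmann::json j = {{"id", 1}};
+//     std::size_t h = std::hash<nlohmann::json>{}(j);  // hash of j.dump()
+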
+/// specialization for std::less<value_t>
+/// @note: do not remove the space after '<',
+///        see https://github.com/nlohmann/json/pull/679
+template<>
+struct less< ::nlohmann::detail::value_t>
+{
+    /*!
+    @brief compare two value_t enum values
+    @since version 3.0.0
+    */
+    bool operator()(nlohmann::detail::value_t lhs,
+                    nlohmann::detail::value_t rhs) const noexcept
+    {
+        return nlohmann::detail::operator<(lhs, rhs);
+    }
+};
+
+/*!
+@brief exchanges the values of two JSON objects
+
+@since version 1.0.0
+*/
+template<>
+inline void swap<nlohmann::json>(nlohmann::json& j1, nlohmann::json& j2) noexcept(
+    is_nothrow_move_constructible<nlohmann::json>::value and
+    is_nothrow_move_assignable<nlohmann::json>::value
+)
+{
+    j1.swap(j2);
+}
+
+} // namespace std
+
+/*!
+@brief user-defined string literal for JSON values
+
+This operator implements a user-defined string literal for JSON values. It
+can be used by appending `_json` to a string literal and returns a JSON value
+if no parse error occurred.
+
+@param[in] s  a string representation of a JSON object
+@param[in] n  the length of string @a s
+@return a JSON object
+
+@since version 1.0.0
+*/
+inline nlohmann::json operator "" _json(const char* s, std::size_t n)
+{
+    return nlohmann::json::parse(s, s + n);
+}
+
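+// A minimal usage sketch for the `_json` literal (string content is
+// illustrative only): the string is parsed when the literal is evaluated, and
+// a parse error throws just like parse() would.
+//
+//     auto j = R"({"pi": 3.141})"_json;
+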
+/*!
+@brief user-defined string literal for JSON pointer
+
+This operator implements a user-defined string literal for JSON Pointers. It
+can be used by appending `_json_pointer` to a string literal and returns a
+JSON pointer object if no parse error occurred.
+
+@param[in] s  a string representation of a JSON Pointer
+@param[in] n  the length of string @a s
+@return a JSON pointer object
+
+@since version 2.0.0
+*/
+inline nlohmann::json::json_pointer operator "" _json_pointer(const char* s, std::size_t n)
+{
+    return nlohmann::json::json_pointer(std::string(s, n));
+}
+
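+// A minimal usage sketch for the `_json_pointer` literal (the path is
+// illustrative only):
+//
+//     auto p = "/foo/0"_json_pointer;
+//     // j[p] and j.at(p) then address j["foo"][0]
+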
+// #include <nlohmann/detail/macro_unscope.hpp>
+
+
+// restore GCC/clang diagnostic settings
+#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
+    #pragma GCC diagnostic pop
+#endif
+#if defined(__clang__)
+    #pragma GCC diagnostic pop
+#endif
+
+// clean up
+#undef JSON_INTERNAL_CATCH
+#undef JSON_CATCH
+#undef JSON_THROW
+#undef JSON_TRY
+#undef JSON_LIKELY
+#undef JSON_UNLIKELY
+#undef JSON_DEPRECATED
+#undef JSON_HAS_CPP_14
+#undef JSON_HAS_CPP_17
+#undef NLOHMANN_BASIC_JSON_TPL_DECLARATION
+#undef NLOHMANN_BASIC_JSON_TPL
+
+
+#endif
diff --git a/tests/build-hook-ca-fixed.nix b/tests/build-hook-ca-fixed.nix
new file mode 100644
index 0000000000000000000000000000000000000000..ec7171ac90e94724eaad61297385db92993dd8b0
--- /dev/null
+++ b/tests/build-hook-ca-fixed.nix
@@ -0,0 +1,56 @@
+{ busybox }:
+
+with import ./config.nix;
+
+let
+
+  mkDerivation = args:
+    derivation ({
+      inherit system;
+      builder = busybox;
+      args = ["sh" "-e" args.builder or (builtins.toFile "builder-${args.name}.sh" "if [ -e .attrs.sh ]; then source .attrs.sh; fi; eval \"$buildCommand\"")];
+      outputHashMode = "recursive";
+      outputHashAlgo = "sha256";
+    } // removeAttrs args ["builder" "meta"])
+    // { meta = args.meta or {}; };
+
+  input1 = mkDerivation {
+    shell = busybox;
+    name = "build-remote-input-1";
+    buildCommand = "echo FOO > $out";
+    requiredSystemFeatures = ["foo"];
+    outputHash = "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=";
+  };
+
+  input2 = mkDerivation {
+    shell = busybox;
+    name = "build-remote-input-2";
+    buildCommand = "echo BAR > $out";
+    requiredSystemFeatures = ["bar"];
+    outputHash = "sha256-XArauVH91AVwP9hBBQNlkX9ccuPpSYx9o0zeIHb6e+Q=";
+  };
+
+  input3 = mkDerivation {
+    shell = busybox;
+    name = "build-remote-input-3";
+    buildCommand = ''
+      read x < ${input2}
+      echo $x BAZ > $out
+    '';
+    requiredSystemFeatures = ["baz"];
+    outputHash = "sha256-daKAcPp/+BYMQsVi/YYMlCKoNAxCNDsaivwSHgQqD2s=";
+  };
+
+in
+
+  mkDerivation {
+    shell = busybox;
+    name = "build-remote";
+    buildCommand =
+      ''
+        read x < ${input1}
+        read y < ${input3}
+        echo "$x $y" > $out
+      '';
+    outputHash = "sha256-5SxbkUw6xe2l9TE1uwCvTtTDysD1vhRor38OtDF0LqQ=";
+  }
diff --git a/tests/build-hook-ca.nix b/tests/build-hook-ca-floating.nix
similarity index 100%
rename from tests/build-hook-ca.nix
rename to tests/build-hook-ca-floating.nix
diff --git a/tests/build-remote-content-addressed-fixed.sh b/tests/build-remote-content-addressed-fixed.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ae744159113ab3ac130727e0e148b2f7e152aea8
--- /dev/null
+++ b/tests/build-remote-content-addressed-fixed.sh
@@ -0,0 +1,5 @@
+source common.sh
+
+file=build-hook-ca-fixed.nix
+
+source build-remote.sh
diff --git a/tests/build-remote-content-addressed-floating.sh b/tests/build-remote-content-addressed-floating.sh
index cbb75729ba46e42dce14465fd7877ed16ad7e874..13ef47d2d90a996bfc58c21a16943af9acd93297 100644
--- a/tests/build-remote-content-addressed-floating.sh
+++ b/tests/build-remote-content-addressed-floating.sh
@@ -1,7 +1,9 @@
 source common.sh
 
-file=build-hook-ca.nix
+file=build-hook-ca-floating.nix
 
 sed -i 's/experimental-features .*/& ca-derivations/' "$NIX_CONF_DIR"/nix.conf
 
+CONTENT_ADDRESSED=true
+
 source build-remote.sh
diff --git a/tests/build-remote.sh b/tests/build-remote.sh
index 04848e4b528cbc5cb3f83765266f8ed8d8f1ed6a..27d85a83d86e171640e17c5f9ab77652dd76c85d 100644
--- a/tests/build-remote.sh
+++ b/tests/build-remote.sh
@@ -1,17 +1,22 @@
-if ! canUseSandbox; then exit; fi
-if ! [[ $busybox =~ busybox ]]; then exit; fi
+if ! canUseSandbox; then exit 99; fi
+if ! [[ $busybox =~ busybox ]]; then exit 99; fi
 
 unset NIX_STORE_DIR
 unset NIX_STATE_DIR
 
 function join_by { local d=$1; shift; echo -n "$1"; shift; printf "%s" "${@/#/$d}"; }
 
+EXTRA_SYSTEM_FEATURES=()
+if [[ -n "$CONTENT_ADDRESSED" ]]; then
+    EXTRA_SYSTEM_FEATURES=("ca-derivations")
+fi
+
 builders=(
   # system-features will automatically be added to the outer URL, but not inner
   # remote-store URL.
-  "ssh://localhost?remote-store=$TEST_ROOT/machine1?system-features=foo - - 1 1 foo"
-  "$TEST_ROOT/machine2 - - 1 1 bar"
-  "ssh-ng://localhost?remote-store=$TEST_ROOT/machine3?system-features=baz - - 1 1 baz"
+  "ssh://localhost?remote-store=$TEST_ROOT/machine1?system-features=$(join_by "%20" foo ${EXTRA_SYSTEM_FEATURES[@]}) - - 1 1 $(join_by "," foo ${EXTRA_SYSTEM_FEATURES[@]})"
+  "$TEST_ROOT/machine2 - - 1 1 $(join_by "," bar ${EXTRA_SYSTEM_FEATURES[@]})"
+  "ssh-ng://localhost?remote-store=$TEST_ROOT/machine3?system-features=$(join_by "%20" baz ${EXTRA_SYSTEM_FEATURES[@]}) - - 1 1 $(join_by "," baz ${EXTRA_SYSTEM_FEATURES[@]})"
 )
 
 chmod -R +w $TEST_ROOT/machine* || true
diff --git a/tests/build.sh b/tests/build.sh
index aa54b88eb8abf50206a3e29d6f5b0e87461b857c..c77f620f75515835a54601331b09ac88eca8489c 100644
--- a/tests/build.sh
+++ b/tests/build.sh
@@ -1,7 +1,7 @@
 source common.sh
 
 expectedJSONRegex='\[\{"drvPath":".*multiple-outputs-a.drv","outputs":\{"first":".*multiple-outputs-a-first","second":".*multiple-outputs-a-second"}},\{"drvPath":".*multiple-outputs-b.drv","outputs":\{"out":".*multiple-outputs-b"}}]'
-nix build -f multiple-outputs.nix --json a.all b.all | jq --exit-status '
+nix build -f multiple-outputs.nix --json a.all b.all --no-link | jq --exit-status '
   (.[0] |
     (.drvPath | match(".*multiple-outputs-a.drv")) and
     (.outputs.first | match(".*multiple-outputs-a-first")) and
@@ -10,3 +10,10 @@ nix build -f multiple-outputs.nix --json a.all b.all | jq --exit-status '
     (.drvPath | match(".*multiple-outputs-b.drv")) and
     (.outputs.out | match(".*multiple-outputs-b")))
 '
+testNormalization () {
+    clearStore
+    outPath=$(nix-build ./simple.nix --no-out-link)
+    test "$(stat -c %Y $outPath)" -eq 1
+}
+
+testNormalization
diff --git a/tests/ca/build-with-garbage-path.sh b/tests/ca/build-with-garbage-path.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e6f8787020ecb58851c8c030f885754976b8efc6
--- /dev/null
+++ b/tests/ca/build-with-garbage-path.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+# Regression test for https://github.com/NixOS/nix/issues/4858
+
+source common.sh
+sed -i 's/experimental-features .*/& ca-derivations ca-references/' "$NIX_CONF_DIR"/nix.conf
+
+# Get the output path of `rootCA`, and put some garbage instead
+outPath="$(nix-build ./content-addressed.nix -A rootCA --no-out-link)"
+nix-store --delete "$outPath"
+touch "$outPath"
+
+# The build should correctly remove the garbage and put the expected path instead
+nix-build ./content-addressed.nix -A rootCA --no-out-link
+
+# Rebuild it. This shouldn’t overwrite the existing path
+oldInode=$(stat -c '%i' "$outPath")
+nix-build ./content-addressed.nix -A rootCA --no-out-link --arg seed 2
+newInode=$(stat -c '%i' "$outPath")
+[[ "$oldInode" == "$newInode" ]]
diff --git a/tests/ca/build.sh b/tests/ca/build.sh
index 35bf1dcf700002277ce7932dab71d8a971e1da59..c8877f87fec550f2f7cbe8274ef78ad3fe16676a 100644
--- a/tests/ca/build.sh
+++ b/tests/ca/build.sh
@@ -59,9 +59,17 @@ testNixCommand () {
     nix build --experimental-features 'nix-command ca-derivations' --file ./content-addressed.nix --no-link
 }
 
+# Regression test for https://github.com/NixOS/nix/issues/4775
+testNormalization () {
+    clearStore
+    outPath=$(buildAttr rootCA 1)
+    test "$(stat -c %Y $outPath)" -eq 1
+}
+
 # Disabled until we have it properly working
 # testRemoteCache
 clearStore
+testNormalization
 testDeterministicCA
 clearStore
 testCutoff
diff --git a/tests/ca/config.nix.in b/tests/ca/config.nix.in
new file mode 120000
index 0000000000000000000000000000000000000000..af24ddb30b013e01f1fdf363779bfdaeef14cbbe
--- /dev/null
+++ b/tests/ca/config.nix.in
@@ -0,0 +1 @@
+../config.nix.in
\ No newline at end of file
diff --git a/tests/ca/content-addressed.nix b/tests/ca/content-addressed.nix
index e5b1c4de3dd7cf389a5649ff87c929960fbf0498..d328fc92c1b54f6169f18a663e085724ddcc219c 100644
--- a/tests/ca/content-addressed.nix
+++ b/tests/ca/content-addressed.nix
@@ -1,4 +1,11 @@
-with import ../config.nix;
+with import ./config.nix;
+
+let mkCADerivation = args: mkDerivation ({
+    __contentAddressed = true;
+    outputHashMode = "recursive";
+    outputHashAlgo = "sha256";
+} // args);
+in
 
 { seed ? 0 }:
 # A simple content-addressed derivation.
@@ -14,7 +21,7 @@ rec {
       echo "Hello World" > $out/hello
     '';
   };
-  rootCA = mkDerivation {
+  rootCA = mkCADerivation {
     name = "rootCA";
     outputs = [ "out" "dev" "foo"];
     buildCommand = ''
@@ -27,11 +34,8 @@ rec {
       ln -s $out $dev
       ln -s $out $foo
     '';
-    __contentAddressed = true;
-    outputHashMode = "recursive";
-    outputHashAlgo = "sha256";
   };
-  dependentCA = mkDerivation {
+  dependentCA = mkCADerivation {
     name = "dependent";
     buildCommand = ''
       echo "building a dependent derivation"
@@ -39,20 +43,14 @@ rec {
       cat ${rootCA}/self/dep
       echo ${rootCA}/self/dep > $out/dep
     '';
-    __contentAddressed = true;
-    outputHashMode = "recursive";
-    outputHashAlgo = "sha256";
   };
-  transitivelyDependentCA = mkDerivation {
+  transitivelyDependentCA = mkCADerivation {
     name = "transitively-dependent";
     buildCommand = ''
       echo "building transitively-dependent"
       cat ${dependentCA}/dep
       echo ${dependentCA} > $out
     '';
-    __contentAddressed = true;
-    outputHashMode = "recursive";
-    outputHashAlgo = "sha256";
   };
   dependentNonCA = mkDerivation {
     name = "dependent-non-ca";
@@ -72,6 +70,14 @@ rec {
       cat ${dependentCA}/dep
       echo foo > $out
     '';
-
+  };
+  runnable = mkCADerivation rec {
+    name = "runnable-thing";
+    buildCommand = ''
+      mkdir -p $out/bin
+      echo ${rootCA} # Just to make it depend on it
+      echo "" > $out/bin/${name}
+      chmod +x $out/bin/${name}
+    '';
   };
 }
diff --git a/tests/ca/duplicate-realisation-in-closure.sh b/tests/ca/duplicate-realisation-in-closure.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ca90996419020095a4b20d70aa789e693857cf74
--- /dev/null
+++ b/tests/ca/duplicate-realisation-in-closure.sh
@@ -0,0 +1,26 @@
+source ./common.sh
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references/' "$NIX_CONF_DIR"/nix.conf
+
+export REMOTE_STORE_DIR="$TEST_ROOT/remote_store"
+export REMOTE_STORE="file://$REMOTE_STORE_DIR"
+
+rm -rf $REMOTE_STORE_DIR
+clearStore
+
+# Build dep1 and push that to the binary cache.
+# This entails building (and pushing) current-time.
+nix copy --to "$REMOTE_STORE" -f nondeterministic.nix dep1
+clearStore
+sleep 2 # To make sure that `$(date)` will be different
+# Build dep2.
+# As we’ve cleared the cache, we’ll have to rebuild current-time. And because
+# the current time isn’t the same as before, this will yield a new (different)
+# realisation
+nix build -f nondeterministic.nix dep2 --no-link
+
+# Build something that depends both on dep1 and dep2.
+# If everything goes right, we should rebuild dep2 rather than fetch it from
+# the cache (because that would mean duplicating `current-time` in the closure),
+# and have `dep1 == dep2`.
+nix build --substituters "$REMOTE_STORE" -f nondeterministic.nix toplevel --no-require-sigs --no-link
diff --git a/tests/ca/flake.nix b/tests/ca/flake.nix
new file mode 100644
index 0000000000000000000000000000000000000000..332c92a6792b15ffee55119c86c3137062f2b03b
--- /dev/null
+++ b/tests/ca/flake.nix
@@ -0,0 +1,3 @@
+{
+  outputs = { self }: import ./content-addressed.nix {};
+}
diff --git a/tests/ca/gc.sh b/tests/ca/gc.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e4f9857d6e616829f3b598615ded3874e49dc746
--- /dev/null
+++ b/tests/ca/gc.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+# Ensure that garbage collection works properly with ca derivations
+
+source common.sh
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references/' "$NIX_CONF_DIR"/nix.conf
+
+export NIX_TESTS_CA_BY_DEFAULT=1
+
+cd ..
+source gc.sh
diff --git a/tests/ca/nix-run.sh b/tests/ca/nix-run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..81402af10a4d23f5625cfc1e08d84205ed168181
--- /dev/null
+++ b/tests/ca/nix-run.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf
+
+FLAKE_PATH=path:$PWD
+
+nix run --no-write-lock-file $FLAKE_PATH#runnable
diff --git a/tests/ca/nix-shell.sh b/tests/ca/nix-shell.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7f1a3a73ec1fc641ae6281dadc0fc3b224c98624
--- /dev/null
+++ b/tests/ca/nix-shell.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf
+
+CONTENT_ADDRESSED=true
+cd ..
+source ./nix-shell.sh
+
diff --git a/tests/ca/nondeterministic.nix b/tests/ca/nondeterministic.nix
new file mode 100644
index 0000000000000000000000000000000000000000..d6d099a3e0e970b58d81b6d3ca9bce9a965399ba
--- /dev/null
+++ b/tests/ca/nondeterministic.nix
@@ -0,0 +1,35 @@
+with import ./config.nix;
+
+let mkCADerivation = args: mkDerivation ({
+    __contentAddressed = true;
+    outputHashMode = "recursive";
+    outputHashAlgo = "sha256";
+} // args);
+in
+
+rec {
+  currentTime = mkCADerivation {
+    name = "current-time";
+    buildCommand = ''
+      mkdir $out
+      echo $(date) > $out/current-time
+    '';
+  };
+  dep = seed: mkCADerivation {
+    name = "dep";
+    inherit seed;
+    buildCommand = ''
+      echo ${currentTime} > $out
+    '';
+  };
+  dep1 = dep 1;
+  dep2 = dep 2;
+  toplevel = mkCADerivation {
+    name = "toplevel";
+    buildCommand = ''
+      test ${dep1} == ${dep2}
+      touch $out
+    '';
+  };
+}
+
diff --git a/tests/ca/recursive.sh b/tests/ca/recursive.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d9281d91f97db6af37bf25c99d1f6f57d9b22146
--- /dev/null
+++ b/tests/ca/recursive.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+source common.sh
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references nix-command flakes/' "$NIX_CONF_DIR"/nix.conf
+
+export NIX_TESTS_CA_BY_DEFAULT=1
+cd ..
+source ./recursive.sh
+
+
diff --git a/tests/ca/signatures.sh b/tests/ca/signatures.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4b4e468f706724c2cf2909738099f5db528c2d12
--- /dev/null
+++ b/tests/ca/signatures.sh
@@ -0,0 +1,39 @@
+source common.sh
+
+# Globally enable the ca derivations experimental flag
+sed -i 's/experimental-features = .*/& ca-derivations ca-references/' "$NIX_CONF_DIR/nix.conf"
+
+clearStore
+clearCache
+
+nix-store --generate-binary-cache-key cache1.example.org $TEST_ROOT/sk1 $TEST_ROOT/pk1
+pk1=$(cat $TEST_ROOT/pk1)
+
+export REMOTE_STORE_DIR="$TEST_ROOT/remote_store"
+export REMOTE_STORE="file://$REMOTE_STORE_DIR"
+
+ensureCorrectlyCopied () {
+    attrPath="$1"
+    nix build --store "$REMOTE_STORE" --file ./content-addressed.nix "$attrPath"
+}
+
+testOneCopy () {
+    clearStore
+    rm -rf "$REMOTE_STORE_DIR"
+
+    attrPath="$1"
+    nix copy --to $REMOTE_STORE "$attrPath" --file ./content-addressed.nix \
+        --secret-key-files "$TEST_ROOT/sk1"
+
+    ensureCorrectlyCopied "$attrPath"
+
+    # Ensure that we can copy back what we put in the store
+    clearStore
+    nix copy --from $REMOTE_STORE \
+        --file ./content-addressed.nix "$attrPath" \
+        --trusted-public-keys $pk1
+}
+
+for attrPath in rootCA dependentCA transitivelyDependentCA dependentNonCA dependentFixedOutput; do
+    testOneCopy "$attrPath"
+done
diff --git a/tests/ca/substitute.sh b/tests/ca/substitute.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c80feaacfbba2c3f75a824343b4743789625e8bc
--- /dev/null
+++ b/tests/ca/substitute.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+
+# Ensure that binary substitution works properly with ca derivations
+
+source common.sh
+
+sed -i 's/experimental-features .*/& ca-derivations ca-references/' "$NIX_CONF_DIR"/nix.conf
+
+rm -rf $TEST_ROOT/binary_cache
+
+export REMOTE_STORE_DIR=$TEST_ROOT/binary_cache
+export REMOTE_STORE=file://$REMOTE_STORE_DIR
+
+buildDrvs () {
+    nix build --file ./content-addressed.nix -L --no-link "$@"
+}
+
+# Populate the remote cache
+clearStore
+nix copy --to $REMOTE_STORE --file ./content-addressed.nix
+
+# Restart the build on an empty store, ensuring that nothing is built (everything is substituted)
+clearStore
+buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0 transitivelyDependentCA
+# Check that the thing we’ve just substituted has its realisation stored
+nix realisation info --file ./content-addressed.nix transitivelyDependentCA
+# Check that its dependencies have it too
+nix realisation info --file ./content-addressed.nix dependentCA rootCA
+
+# Same thing, but
+# 1. With non-ca derivations
+# 2. Erasing the realisations on the remote store
+#
+# Even in that case, realising the derivations should still produce the right
+# realisations on the local store
+#
+# Regression test for #4725
+clearStore
+nix build --file ../simple.nix -L --no-link --post-build-hook ../push-to-store.sh
+clearStore
+rm -r "$REMOTE_STORE_DIR/realisations"
+nix build --file ../simple.nix -L --no-link --substitute --substituters "$REMOTE_STORE" --no-require-sigs -j0
+# There's no easy way to check whether a realisation is present on the local
+# store − short of manually querying the db, but the build environment doesn't
+# have the sqlite binary − so we instead push things again, and check that the
+# realisations have correctly been pushed to the remote store
+nix copy --to "$REMOTE_STORE" --file ../simple.nix
+if [[ -z "$(ls "$REMOTE_STORE_DIR/realisations")" ]]; then
+    echo "Realisations not rebuilt"
+    exit 1
+fi
+
+# Test the local realisation disk cache
+buildDrvs --post-build-hook ../push-to-store.sh
+clearStore
+# Add the realisations of rootCA to the cachecache
+clearCacheCache
+export _NIX_FORCE_HTTP=1
+buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0
+# Try rebuilding, but remove the realisations from the remote cache to force
+# using the cachecache
+clearStore
+rm $REMOTE_STORE_DIR/realisations/*
+buildDrvs --substitute --substituters $REMOTE_STORE --no-require-sigs -j0
diff --git a/tests/common.sh.in b/tests/common.sh.in
index de44a4da47d7fd6b4da1a091f1cedb132d76c243..d31d3fbb83a36d4251c6dcc85fc35fab7a0427ac 100644
--- a/tests/common.sh.in
+++ b/tests/common.sh.in
@@ -29,6 +29,12 @@ unset XDG_CACHE_HOME
 mkdir -p $TEST_HOME
 
 export PATH=@bindir@:$PATH
+if [[ -n "${NIX_CLIENT_PACKAGE:-}" ]]; then
+  export PATH="$NIX_CLIENT_PACKAGE/bin":$PATH
+fi
+if [[ -n "${NIX_DAEMON_PACKAGE:-}" ]]; then
+  export NIX_DAEMON_COMMAND="$NIX_DAEMON_PACKAGE/bin/nix-daemon"
+fi
 coreutils=@coreutils@
 
 export dot=@dot@
@@ -57,7 +63,6 @@ clearStore() {
     mkdir "$NIX_STORE_DIR"
     rm -rf "$NIX_STATE_DIR"
     mkdir "$NIX_STATE_DIR"
-    nix-store --init
     clearProfiles
 }
 
@@ -73,7 +78,7 @@ startDaemon() {
     # Start the daemon, wait for the socket to appear.  !!!
     # ‘nix-daemon’ should have an option to fork into the background.
     rm -f $NIX_STATE_DIR/daemon-socket/socket
-    nix daemon &
+    ${NIX_DAEMON_COMMAND:-nix daemon} &
     for ((i = 0; i < 30; i++)); do
         if [ -e $NIX_DAEMON_SOCKET_PATH ]; then break; fi
         sleep 1
diff --git a/tests/config.nix.in b/tests/config.nix.in
index a57a8c5962e3134d49ef561a4e8cb0a4ffc48eef..9b00d9ddb8f67f244c2a9d9952701e86180300b4 100644
--- a/tests/config.nix.in
+++ b/tests/config.nix.in
@@ -1,3 +1,12 @@
+let
+  contentAddressedByDefault = builtins.getEnv "NIX_TESTS_CA_BY_DEFAULT" == "1";
+  caArgs = if contentAddressedByDefault then {
+    __contentAddressed = true;
+    outputHashMode = "recursive";
+    outputHashAlgo = "sha256";
+  } else {};
+in
+
 rec {
   shell = "@bash@";
 
@@ -15,4 +24,4 @@ rec {
       PATH = path;
     } // removeAttrs args ["builder" "meta"])
     // { meta = args.meta or {}; };
-}
+} // caArgs
diff --git a/tests/config.sh b/tests/config.sh
index eaa46c395cda584c9aa6153c58f36ad41e447afe..01c78f2c38f63cc2ff6ad60bbc51569276ccd9a1 100644
--- a/tests/config.sh
+++ b/tests/config.sh
@@ -1,15 +1,41 @@
 source common.sh
 
+# Isolate the home for this test.
+# Other tests (e.g. flake registry tests) could be writing to $HOME in parallel.
+export HOME=$TEST_ROOT/userhome
+
+# Test that using XDG_CONFIG_HOME works
+# Assert the config folder didn't exist initially.
+[ ! -e "$HOME/.config" ]
+# Without XDG_CONFIG_HOME, Nix should create $HOME/.config
+unset XDG_CONFIG_HOME
+# Run against the nix registry to create the config dir
+# (Tip: this relies on removing non-existent entries being a no-op!)
+nix registry remove userhome-without-xdg
+# Verify that it was created
+[ -e "$HOME/.config" ]
+# Remove the directory it created
+rm -rf "$HOME/.config"
+# Run the same test, but with XDG_CONFIG_HOME
+export XDG_CONFIG_HOME=$TEST_ROOT/confighome
+# Assert the XDG_CONFIG_HOME/nix path does not exist yet.
+[ ! -e "$TEST_ROOT/confighome/nix" ]
+nix registry remove userhome-with-xdg
+# Verify that the confighome path has been created
+[ -e "$TEST_ROOT/confighome/nix" ]
+# Assert the .config folder hasn't been created.
+[ ! -e "$HOME/.config" ]
+
 # Test that files are loaded from XDG by default
-export XDG_CONFIG_HOME=/tmp/home
-export XDG_CONFIG_DIRS=/tmp/dir1:/tmp/dir2
+export XDG_CONFIG_HOME=$TEST_ROOT/confighome
+export XDG_CONFIG_DIRS=$TEST_ROOT/dir1:$TEST_ROOT/dir2
 files=$(nix-build --verbose --version | grep "User config" | cut -d ':' -f2- | xargs)
-[[ $files == "/tmp/home/nix/nix.conf:/tmp/dir1/nix/nix.conf:/tmp/dir2/nix/nix.conf" ]]
+[[ $files == "$TEST_ROOT/confighome/nix/nix.conf:$TEST_ROOT/dir1/nix/nix.conf:$TEST_ROOT/dir2/nix/nix.conf" ]]
 
 # Test that setting NIX_USER_CONF_FILES overrides all the default user config files
-export NIX_USER_CONF_FILES=/tmp/file1.conf:/tmp/file2.conf
+export NIX_USER_CONF_FILES=$TEST_ROOT/file1.conf:$TEST_ROOT/file2.conf
 files=$(nix-build --verbose --version | grep "User config" | cut -d ':' -f2- | xargs)
-[[ $files == "/tmp/file1.conf:/tmp/file2.conf" ]]
+[[ $files == "$TEST_ROOT/file1.conf:$TEST_ROOT/file2.conf" ]]
 
 # Test that it's possible to load the config from a custom location
 here=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")
@@ -24,4 +50,4 @@ exp_cores=$(nix show-config | grep '^cores' | cut -d '=' -f 2 | xargs)
 exp_features=$(nix show-config | grep '^experimental-features' | cut -d '=' -f 2 | xargs)
 [[ $prev != $exp_cores ]]
 [[ $exp_cores == "4242" ]]
-[[ $exp_features == "nix-command flakes" ]]
\ No newline at end of file
+[[ $exp_features == "nix-command flakes" ]]
diff --git a/tests/db-migration.sh b/tests/db-migration.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e0ff7d311fef7bf75e70fe35d7d9de67ea9389a1
--- /dev/null
+++ b/tests/db-migration.sh
@@ -0,0 +1,26 @@
+# Test that we can successfully migrate from an older db schema
+
+# Only run this if we have an older Nix available
+# XXX: This assumes that the `daemon` package is older than the `client` one
+if [[ -z "$NIX_DAEMON_PACKAGE" ]]; then
+    exit 0
+fi
+
+source common.sh
+
+# Fill the db using the older Nix
+PATH_WITH_NEW_NIX="$PATH"
+export PATH="$NIX_DAEMON_PACKAGE/bin:$PATH"
+clearStore
+nix-build simple.nix --no-out-link
+nix-store --generate-binary-cache-key cache1.example.org $TEST_ROOT/sk1 $TEST_ROOT/pk1
+dependenciesOutPath=$(nix-build dependencies.nix --no-out-link --secret-key-files "$TEST_ROOT/sk1")
+fixedOutPath=$(IMPURE_VAR1=foo IMPURE_VAR2=bar nix-build fixed.nix -A good.0 --no-out-link)
+
+# Migrate to the new schema and ensure that everything's there
+export PATH="$PATH_WITH_NEW_NIX"
+info=$(nix path-info --json $dependenciesOutPath)
+[[ $info =~ '"ultimate":true' ]]
+[[ $info =~ 'cache1.example.org' ]]
+nix verify -r "$fixedOutPath"
+nix verify -r "$dependenciesOutPath" --sigs-needed 1 --trusted-public-keys $(cat $TEST_ROOT/pk1)
diff --git a/tests/fetchGit.sh b/tests/fetchGit.sh
index 1e8963d76b0d096bbe1364bfedc0e0d39063449a..88744ee7fe8941df44adc273dca9fb4920e1bfcf 100644
--- a/tests/fetchGit.sh
+++ b/tests/fetchGit.sh
@@ -179,3 +179,13 @@ git clone --depth 1 file://$repo $TEST_ROOT/shallow
 path6=$(nix eval --impure --raw --expr "(builtins.fetchTree { type = \"git\"; url = \"file://$TEST_ROOT/shallow\"; ref = \"dev\"; shallow = true; }).outPath")
 [[ $path3 = $path6 ]]
 [[ $(nix eval --impure --expr "(builtins.fetchTree { type = \"git\"; url = \"file://$TEST_ROOT/shallow\"; ref = \"dev\"; shallow = true; }).revCount or 123") == 123 ]]
+
+# Explicit ref = "HEAD" should work, and produce the same outPath as without ref
+path7=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; }).outPath")
+path8=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; }).outPath")
+[[ $path7 = $path8 ]]
+
+# ref = "HEAD" should fetch the HEAD revision
+rev4=$(git -C $repo rev-parse HEAD)
+rev4_nix=$(nix eval --impure --raw --expr "(builtins.fetchGit { url = \"file://$repo\"; ref = \"HEAD\"; }).rev")
+[[ $rev4 = $rev4_nix ]]
diff --git a/tests/flakes.sh b/tests/flakes.sh
index 25ba2ac43b6b1e39405316089910babcbafc91d5..9764e1a6c5b40d72da57ea93ea285ad6b9153cb5 100644
--- a/tests/flakes.sh
+++ b/tests/flakes.sh
@@ -25,6 +25,7 @@ templatesDir=$TEST_ROOT/templates
 nonFlakeDir=$TEST_ROOT/nonFlake
 flakeA=$TEST_ROOT/flakeA
 flakeB=$TEST_ROOT/flakeB
+flakeGitBare=$TEST_ROOT/flakeGitBare
 
 for repo in $flake1Dir $flake2Dir $flake3Dir $flake7Dir $templatesDir $nonFlakeDir $flakeA $flakeB; do
     rm -rf $repo $repo.tmp
@@ -163,16 +164,17 @@ EOF
 # Test 'nix flake list'.
 [[ $(nix registry list | wc -l) == 7 ]]
 
-# Test 'nix flake info'.
-nix flake info flake1 | grep -q 'URL: .*flake1.*'
+# Test 'nix flake metadata'.
+nix flake metadata flake1
+nix flake metadata flake1 | grep -q 'Locked URL:.*flake1.*'
 
-# Test 'nix flake info' on a local flake.
-(cd $flake1Dir && nix flake info) | grep -q 'URL: .*flake1.*'
-(cd $flake1Dir && nix flake info .) | grep -q 'URL: .*flake1.*'
-nix flake info $flake1Dir | grep -q 'URL: .*flake1.*'
+# Test 'nix flake metadata' on a local flake.
+(cd $flake1Dir && nix flake metadata) | grep -q 'URL:.*flake1.*'
+(cd $flake1Dir && nix flake metadata .) | grep -q 'URL:.*flake1.*'
+nix flake metadata $flake1Dir | grep -q 'URL:.*flake1.*'
 
-# Test 'nix flake info --json'.
-json=$(nix flake info flake1 --json | jq .)
+# Test 'nix flake metadata --json'.
+json=$(nix flake metadata flake1 --json | jq .)
 [[ $(echo "$json" | jq -r .description) = 'Bla bla' ]]
 [[ -d $(echo "$json" | jq -r .path) ]]
 [[ $(echo "$json" | jq -r .lastModified) = $(git -C $flake1Dir log -n1 --format=%ct) ]]
@@ -180,7 +182,7 @@ hash1=$(echo "$json" | jq -r .revision)
 
 echo -n '# foo' >> $flake1Dir/flake.nix
 git -C $flake1Dir commit -a -m 'Foo'
-hash2=$(nix flake info flake1 --json --refresh | jq -r .revision)
+hash2=$(nix flake metadata flake1 --json --refresh | jq -r .revision)
 [[ $hash1 != $hash2 ]]
 
 # Test 'nix build' on a flake.
@@ -533,6 +535,21 @@ EOF
 
 (! nix flake check $flake3Dir)
 
+cat > $flake3Dir/flake.nix <<EOF
+{
+  outputs = { flake1, self }: {
+    defaultPackage = {
+        system-1 = "foo";
+        system-2 = "bar";
+    };
+  };
+}
+EOF
+
+checkRes=$(nix flake check --keep-going $flake3Dir 2>&1 && fail "nix flake check should have failed" || true)
+echo "$checkRes" | grep -q "defaultPackage.system-1"
+echo "$checkRes" | grep -q "defaultPackage.system-2"
+
 # Test 'follows' inputs.
 cat > $flake3Dir/flake.nix <<EOF
 {
@@ -604,6 +621,11 @@ nix flake update $flake3Dir
 [[ $(jq -c .nodes.flake2.inputs.flake1 $flake3Dir/flake.lock) =~ '["foo"]' ]]
 [[ $(jq .nodes.foo.locked.url $flake3Dir/flake.lock) =~ flake7 ]]
 
+# Test git+file with bare repo.
+rm -rf $flakeGitBare
+git clone --bare $flake1Dir $flakeGitBare
+nix build -o $TEST_ROOT/result git+file://$flakeGitBare
+
 # Test Mercurial flakes.
 rm -rf $flake5Dir
 hg init $flake5Dir
@@ -624,7 +646,7 @@ hg commit --config ui.username=foobar@example.org $flake5Dir -m 'Initial commit'
 nix build -o $TEST_ROOT/result hg+file://$flake5Dir
 [[ -e $TEST_ROOT/result/hello ]]
 
-(! nix flake info --json hg+file://$flake5Dir | jq -e -r .revision)
+(! nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revision)
 
 nix eval hg+file://$flake5Dir#expr
 
@@ -632,13 +654,13 @@ nix eval hg+file://$flake5Dir#expr
 
 (! nix eval hg+file://$flake5Dir#expr --no-allow-dirty)
 
-(! nix flake info --json hg+file://$flake5Dir | jq -e -r .revision)
+(! nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revision)
 
 hg commit --config ui.username=foobar@example.org $flake5Dir -m 'Add lock file'
 
-nix flake info --json hg+file://$flake5Dir --refresh | jq -e -r .revision
-nix flake info --json hg+file://$flake5Dir
-[[ $(nix flake info --json hg+file://$flake5Dir | jq -e -r .revCount) = 1 ]]
+nix flake metadata --json hg+file://$flake5Dir --refresh | jq -e -r .revision
+nix flake metadata --json hg+file://$flake5Dir
+[[ $(nix flake metadata --json hg+file://$flake5Dir | jq -e -r .revCount) = 1 ]]
 
 nix build -o $TEST_ROOT/result hg+file://$flake5Dir --no-registries --no-allow-dirty
 
@@ -648,7 +670,7 @@ tar cfz $TEST_ROOT/flake.tar.gz -C $TEST_ROOT --exclude .hg flake5
 nix build -o $TEST_ROOT/result file://$TEST_ROOT/flake.tar.gz
 
 # Building with a tarball URL containing a SRI hash should also work.
-url=$(nix flake info --json file://$TEST_ROOT/flake.tar.gz | jq -r .url)
+url=$(nix flake metadata --json file://$TEST_ROOT/flake.tar.gz | jq -r .url)
 [[ $url =~ sha256- ]]
 
 nix build -o $TEST_ROOT/result $url
@@ -674,9 +696,8 @@ nix flake lock $flake3Dir
 nix flake lock $flake3Dir --update-input flake2/flake1
 [[ $(jq -r .nodes.flake1_2.locked.rev $flake3Dir/flake.lock) =~ $hash2 ]]
 
-# Test 'nix flake list-inputs'.
-[[ $(nix flake list-inputs $flake3Dir | wc -l) == 5 ]]
-nix flake list-inputs $flake3Dir --json | jq .
+# Test 'nix flake metadata --json'.
+nix flake metadata $flake3Dir --json | jq .
 
 # Test circular flake dependencies.
 cat > $flakeA/flake.nix <<EOF
@@ -715,4 +736,4 @@ git -C $flakeB commit -a -m 'Foo'
 [[ $(nix eval --update-input b $flakeA#foo) = 1912 ]]
 
 # Test list-inputs with circular dependencies
-nix flake list-inputs $flakeA
+nix flake metadata $flakeA
diff --git a/tests/gc-runtime.sh b/tests/gc-runtime.sh
index 4c5028005c575e8fe034c138f4ece253a9390e05..6094959cbe09aba415904f239a7849eda829a9a1 100644
--- a/tests/gc-runtime.sh
+++ b/tests/gc-runtime.sh
@@ -4,7 +4,7 @@ case $system in
     *linux*)
         ;;
     *)
-        exit 0;
+        exit 99;
 esac
 
 set -m # enable job control, needed for kill
diff --git a/tests/lang/eval-okay-floor-ceil.exp b/tests/lang/eval-okay-floor-ceil.exp
new file mode 100644
index 0000000000000000000000000000000000000000..81f80420b9965b21819de254d1d3f63491fa2886
--- /dev/null
+++ b/tests/lang/eval-okay-floor-ceil.exp
@@ -0,0 +1 @@
+"23;24;23;23"
diff --git a/tests/lang/eval-okay-floor-ceil.nix b/tests/lang/eval-okay-floor-ceil.nix
new file mode 100644
index 0000000000000000000000000000000000000000..d76a0d86ea78cdf6c450b0832465bbe6613afb6d
--- /dev/null
+++ b/tests/lang/eval-okay-floor-ceil.nix
@@ -0,0 +1,9 @@
+with import ./lib.nix;
+
+let
+  n1 = builtins.floor 23.5;
+  n2 = builtins.ceil 23.5;
+  n3 = builtins.floor 23;
+  n4 = builtins.ceil 23;
+in
+  builtins.concatStringsSep ";" (map toString [ n1 n2 n3 n4 ])
diff --git a/tests/lang/parse-okay-url.nix b/tests/lang/parse-okay-url.nix
index fce3b13ee64b9a163cab46302a484b6e996eec1f..08de27d0a4ceddcaab7a864e2c00017c9bf95060 100644
--- a/tests/lang/parse-okay-url.nix
+++ b/tests/lang/parse-okay-url.nix
@@ -3,5 +3,6 @@
   http://www2.mplayerhq.hu/MPlayer/releases/fonts/font-arial-iso-8859-1.tar.bz2
   http://losser.st-lab.cs.uu.nl/~armijn/.nix/gcc-3.3.4-static-nix.tar.gz
   http://fpdownload.macromedia.com/get/shockwave/flash/english/linux/7.0r25/install_flash_player_7_linux.tar.gz
+  https://ftp5.gwdg.de/pub/linux/archlinux/extra/os/x86_64/unzip-6.0-14-x86_64.pkg.tar.zst
   ftp://ftp.gtk.org/pub/gtk/v1.2/gtk+-1.2.10.tar.gz
 ]
diff --git a/tests/linux-sandbox.sh b/tests/linux-sandbox.sh
index 70a90a9075ad589a76146a1a52b1cbe64d7fab62..eac62d46137850039651dd16983baf536fb36f66 100644
--- a/tests/linux-sandbox.sh
+++ b/tests/linux-sandbox.sh
@@ -2,13 +2,13 @@ source common.sh
 
 clearStore
 
-if ! canUseSandbox; then exit; fi
+if ! canUseSandbox; then exit 99; fi
 
 # Note: we need to bind-mount $SHELL into the chroot. Currently we
 # only support the case where $SHELL is in the Nix store, because
 # otherwise things get complicated (e.g. if it's in /bin, do we need
 # /lib as well?).
-if [[ ! $SHELL =~ /nix/store ]]; then exit; fi
+if [[ ! $SHELL =~ /nix/store ]]; then exit 99; fi
 
 chmod -R u+w $TEST_ROOT/store0 || true
 rm -rf $TEST_ROOT/store0
diff --git a/tests/local.mk b/tests/local.mk
index 07cfd7a50fca2510b3837fb18537263ca2b5d0bf..4d9d314ccafae41c3e1ec7c039f9df7e15e6898c 100644
--- a/tests/local.mk
+++ b/tests/local.mk
@@ -2,14 +2,17 @@ nix_tests = \
   hash.sh lang.sh add.sh simple.sh dependencies.sh \
   config.sh \
   gc.sh \
+  ca/gc.sh \
   gc-concurrent.sh \
   gc-auto.sh \
   referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \
   gc-runtime.sh check-refs.sh filter-source.sh \
   local-store.sh remote-store.sh export.sh export-graph.sh \
+  db-migration.sh \
   timeout.sh secure-drv-outputs.sh nix-channel.sh \
   multiple-outputs.sh import-derivation.sh fetchurl.sh optimise-store.sh \
   binary-cache.sh \
+  substitute-with-invalid-ca.sh \
   binary-cache-build-remote.sh \
   nix-profile.sh repair.sh dump-db.sh case-hack.sh \
   check-reqs.sh pass-as-file.sh tarball.sh restricted.sh \
@@ -17,6 +20,7 @@ nix_tests = \
   linux-sandbox.sh \
   build-dry.sh \
   build-remote-input-addressed.sh \
+  build-remote-content-addressed-fixed.sh \
   build-remote-content-addressed-floating.sh \
   ssh-relay.sh \
   nar-access.sh \
@@ -28,6 +32,7 @@ nix_tests = \
   signing.sh \
   shell.sh \
   brotli.sh \
+  zstd.sh \
   pure-eval.sh \
   check.sh \
   plugins.sh \
@@ -41,6 +46,13 @@ nix_tests = \
   build.sh \
   compute-levels.sh \
   ca/build.sh \
+  ca/build-with-garbage-path.sh \
+  ca/duplicate-realisation-in-closure.sh \
+  ca/substitute.sh \
+  ca/signatures.sh \
+  ca/nix-shell.sh \
+  ca/nix-run.sh \
+  ca/recursive.sh \
   ca/nix-copy.sh
   # parallel.sh
 
@@ -48,6 +60,6 @@ install-tests += $(foreach x, $(nix_tests), tests/$(x))
 
 tests-environment = NIX_REMOTE= $(bash) -e
 
-clean-files += $(d)/common.sh $(d)/config.nix
+clean-files += $(d)/common.sh $(d)/config.nix $(d)/ca/config.nix
 
-test-deps += tests/common.sh tests/config.nix tests/plugins/libplugintest.$(SO_EXT)
+test-deps += tests/common.sh tests/config.nix tests/ca/config.nix tests/plugins/libplugintest.$(SO_EXT)
diff --git a/tests/nix-shell.sh b/tests/nix-shell.sh
index 4775bafb9832d851b6def76278691aa0d57a137b..3481c2c69cba26ef82e97a81019da1128a087fc7 100644
--- a/tests/nix-shell.sh
+++ b/tests/nix-shell.sh
@@ -2,6 +2,22 @@ source common.sh
 
 clearStore
 
+if [[ -n ${CONTENT_ADDRESSED:-} ]]; then
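+    # Redefine nix-shell and 'nix develop' to pass --arg contentAddressed true,
+    # so the shell.nix-based tests below also cover content-addressed derivations.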
+    nix-shell () {
+        command nix-shell --arg contentAddressed true "$@"
+    }
+
+    nix_develop() {
+        nix develop --arg contentAddressed true "$@"
+    }
+else
+    nix_develop() {
+        nix develop "$@"
+    }
+fi
+
 # Test nix-shell -A
 export IMPURE_VAR=foo
 export SELECTED_IMPURE_VAR=baz
@@ -41,7 +55,7 @@ output=$(NIX_PATH=nixpkgs=shell.nix nix-shell --pure -p foo bar --run 'echo "$(f
 [ "$output" = "foo bar" ]
 
 # Test nix-shell shebang mode
-sed -e "s|@ENV_PROG@|$(type -p env)|" shell.shebang.sh > $TEST_ROOT/shell.shebang.sh
+sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/shell.shebang.sh
 chmod a+rx $TEST_ROOT/shell.shebang.sh
 
 output=$($TEST_ROOT/shell.shebang.sh abc def)
@@ -49,7 +63,7 @@ output=$($TEST_ROOT/shell.shebang.sh abc def)
 
 # Test nix-shell shebang mode again with metacharacters in the filename.
 # First word of filename is chosen to not match any file in the test root.
-sed -e "s|@ENV_PROG@|$(type -p env)|" shell.shebang.sh > $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh
+sed -e "s|@ENV_PROG@|$(type -P env)|" shell.shebang.sh > $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh
 chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.sh
 
 output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.sh abc def)
@@ -58,7 +72,7 @@ output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.sh abc def)
 # Test nix-shell shebang mode for ruby
 # This uses a fake interpreter that returns the arguments passed
 # This, in turn, verifies the `rc` script is valid and the `load()` script (given using `-e`) is as expected.
-sed -e "s|@SHELL_PROG@|$(type -p nix-shell)|" shell.shebang.rb > $TEST_ROOT/shell.shebang.rb
+sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/shell.shebang.rb
 chmod a+rx $TEST_ROOT/shell.shebang.rb
 
 output=$($TEST_ROOT/shell.shebang.rb abc ruby)
@@ -66,20 +80,20 @@ output=$($TEST_ROOT/shell.shebang.rb abc ruby)
 
 # Test nix-shell shebang mode for ruby again with metacharacters in the filename.
 # Note: fake interpreter only space-separates args without adding escapes to its output.
-sed -e "s|@SHELL_PROG@|$(type -p nix-shell)|" shell.shebang.rb > $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb
+sed -e "s|@SHELL_PROG@|$(type -P nix-shell)|" shell.shebang.rb > $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb
 chmod a+rx $TEST_ROOT/spaced\ \\\'\"shell.shebang.rb
 
 output=$($TEST_ROOT/spaced\ \\\'\"shell.shebang.rb abc ruby)
 [ "$output" = '-e load(ARGV.shift) -- '"$TEST_ROOT"'/spaced \'\''"shell.shebang.rb abc ruby' ]
 
 # Test 'nix develop'.
-nix develop -f shell.nix shellDrv -c bash -c '[[ -n $stdenv ]]'
+nix_develop -f shell.nix shellDrv -c bash -c '[[ -n $stdenv ]]'
 
 # Ensure `nix develop -c` preserves stdin
 echo foo | nix develop -f shell.nix shellDrv -c cat | grep -q foo
 
 # Ensure `nix develop -c` actually executes the command if stdout isn't a terminal
-nix develop -f shell.nix shellDrv -c echo foo |& grep -q foo
+nix_develop -f shell.nix shellDrv -c echo foo |& grep -q foo
 
 # Test 'nix print-dev-env'.
 source <(nix print-dev-env -f shell.nix shellDrv)
diff --git a/tests/push-to-store.sh b/tests/push-to-store.sh
index 6aadb916ba0b647d37ac2f2825cd71b026795a66..25352c751a91d57b34ff59d84b7519a47b49e834 100755
--- a/tests/push-to-store.sh
+++ b/tests/push-to-store.sh
@@ -1,4 +1,6 @@
 #!/bin/sh
 
-echo Pushing "$@" to "$REMOTE_STORE"
-printf "%s" "$OUT_PATHS" | xargs -d: nix copy --to "$REMOTE_STORE" --no-require-sigs
+set -x
+
+echo Pushing "$OUT_PATHS" to "$REMOTE_STORE"
+printf "%s" "$DRV_PATH" | xargs nix copy --to "$REMOTE_STORE" --no-require-sigs
diff --git a/tests/recursive.sh b/tests/recursive.sh
index b020ec7100493b5f9c2556a5f4f722c1e2a9f5e9..b6740877d6b4db0cb46c01138d29e28541fd91bf 100644
--- a/tests/recursive.sh
+++ b/tests/recursive.sh
@@ -1,7 +1,7 @@
 source common.sh
 
 # FIXME
-if [[ $(uname) != Linux ]]; then exit; fi
+if [[ $(uname) != Linux ]]; then exit 99; fi
 
 clearStore
 
@@ -9,9 +9,9 @@ rm -f $TEST_ROOT/result
 
 export unreachable=$(nix store add-path ./recursive.sh)
 
-NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --expr '
+NIX_BIN_DIR=$(dirname $(type -p nix)) nix --extra-experimental-features 'nix-command recursive-nix' build -o $TEST_ROOT/result -L --impure --expr '
   with import ./config.nix;
-  mkDerivation {
+  mkDerivation rec {
     name = "recursive";
     dummy = builtins.toFile "dummy" "bla bla";
     SHELL = shell;
@@ -19,11 +19,13 @@ NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command r
     # Note: this is a string without context.
     unreachable = builtins.getEnv "unreachable";
 
+    NIX_TESTS_CA_BY_DEFAULT = builtins.getEnv "NIX_TESTS_CA_BY_DEFAULT";
+
     requiredSystemFeatures = [ "recursive-nix" ];
 
     buildCommand = '\'\''
       mkdir $out
-      opts="--experimental-features nix-command"
+      opts="--experimental-features nix-command ${if (NIX_TESTS_CA_BY_DEFAULT == "1") then "--extra-experimental-features ca-derivations" else ""}"
 
       PATH=${builtins.getEnv "NIX_BIN_DIR"}:$PATH
 
@@ -46,16 +48,15 @@ NIX_BIN_DIR=$(dirname $(type -p nix)) nix --experimental-features 'nix-command r
       # Add it to our closure.
       ln -s $foobar $out/foobar
 
-      [[ $(nix $opts path-info --all | wc -l) -eq 3 ]]
+      [[ $(nix $opts path-info --all | wc -l) -eq 4 ]]
 
       # Build a derivation.
       nix $opts build -L --impure --expr '\''
-        derivation {
+        with import ${./config.nix};
+        mkDerivation {
           name = "inner1";
-          builder = builtins.getEnv "SHELL";
-          system = builtins.getEnv "system";
+          buildCommand = "echo $fnord blaat > $out";
           fnord = builtins.toFile "fnord" "fnord";
-          args = [ "-c" "echo $fnord blaat > $out" ];
         }
       '\''
 
diff --git a/tests/remote-store.sh b/tests/remote-store.sh
index f7ae1a2edd425550e1b05147fdb95041f5536ee9..31210ab47e6d93857f1dc57c009325c14da1db7a 100644
--- a/tests/remote-store.sh
+++ b/tests/remote-store.sh
@@ -23,12 +23,12 @@ startDaemon
 
 storeCleared=1 NIX_REMOTE_=$NIX_REMOTE $SHELL ./user-envs.sh
 
+nix-store --gc --max-freed 1K
+
 nix-store --dump-db > $TEST_ROOT/d1
 NIX_REMOTE= nix-store --dump-db > $TEST_ROOT/d2
 cmp $TEST_ROOT/d1 $TEST_ROOT/d2
 
-nix-store --gc --max-freed 1K
-
 killDaemon
 
 user=$(whoami)
diff --git a/tests/shell.nix b/tests/shell.nix
index 24ebcc04cbcb235f2bcaae892980c2031f8c1efc..53759f99aa2d94b8372ac9e9ca6da1e1ba419350 100644
--- a/tests/shell.nix
+++ b/tests/shell.nix
@@ -1,6 +1,20 @@
-{ inNixShell ? false }:
+{ inNixShell ? false, contentAddressed ? false }:
 
-with import ./config.nix;
+let cfg = import ./config.nix; in
+with cfg;
+
+let
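+  # When contentAddressed is true, wrap config.nix's mkDerivation so that every
+  # derivation defined in this file is built as a content-addressed derivation.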
+  mkDerivation =
+    if contentAddressed then
+      args: cfg.mkDerivation ({
+        __contentAddressed = true;
+        outputHashMode = "recursive";
+        outputHashAlgo = "sha256";
+      } // args)
+    else cfg.mkDerivation;
+in
 
 let pkgs = rec {
   setupSh = builtins.toFile "setup" ''
diff --git a/tests/shell.sh b/tests/shell.sh
index 7a9ee8ab099828d1310cd5c3806530f34601eb0d..2b85bb33782776c97c748e597c243e952bf0f843 100644
--- a/tests/shell.sh
+++ b/tests/shell.sh
@@ -6,7 +6,7 @@ clearCache
 nix shell -f shell-hello.nix hello -c hello | grep 'Hello World'
 nix shell -f shell-hello.nix hello -c hello NixOS | grep 'Hello NixOS'
 
-if ! canUseSandbox; then exit; fi
+if ! canUseSandbox; then exit 99; fi
 
 chmod -R u+w $TEST_ROOT/store0 || true
 rm -rf $TEST_ROOT/store0
diff --git a/tests/substitute-with-invalid-ca.sh b/tests/substitute-with-invalid-ca.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4d0b01e0f04b626aba008192c21b6f62a9c5f4e8
--- /dev/null
+++ b/tests/substitute-with-invalid-ca.sh
@@ -0,0 +1,41 @@
+source common.sh
+
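+# Substitution must fail when a path's narinfo metadata (URL, file/NAR hash
+# and size) does not match the NAR that is actually fetched.
+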
+BINARY_CACHE=file://$cacheDir
+
+getHash() {
+    basename "$1" | cut -d '-' -f 1
+}
+getRemoteNarInfo () {
+    echo "$cacheDir/$(getHash "$1").narinfo"
+}
+
+cat <<EOF > $TEST_HOME/good.txt
+I’m a good path
+EOF
+
+cat <<EOF > $TEST_HOME/bad.txt
+I’m a bad path
+EOF
+
+good=$(nix-store --add $TEST_HOME/good.txt)
+bad=$(nix-store --add $TEST_HOME/bad.txt)
+nix copy --to "$BINARY_CACHE" "$good"
+nix copy --to "$BINARY_CACHE" "$bad"
+nix-collect-garbage >/dev/null 2>&1
+
+# Falsifying the narinfo file for '$good'
+goodPathNarInfo=$(getRemoteNarInfo "$good")
+badPathNarInfo=$(getRemoteNarInfo "$bad")
+for fieldName in URL FileHash FileSize NarHash NarSize; do
+    sed -i "/^$fieldName/d" "$goodPathNarInfo"
+    grep -E "^$fieldName" "$badPathNarInfo" >> "$goodPathNarInfo"
+done
+
+# Copy '$good' back from the binary cache. This should fail because its
+# narinfo no longer matches the actual NAR.
+if nix copy --from "$BINARY_CACHE" "$good"; then
+    fail "Importing a path with a wrong CA field should fail"
+fi
diff --git a/tests/zstd.sh b/tests/zstd.sh
new file mode 100644
index 0000000000000000000000000000000000000000..ba7c20501fe63e658a1af81891ff5b08b03bce82
--- /dev/null
+++ b/tests/zstd.sh
@@ -0,0 +1,31 @@
+source common.sh
+
+clearStore
+clearCache
+
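+# Round-trip a closure through a file:// binary cache with zstd compression,
+# then check that .zst NARs were produced and the path's hash is unchanged.
+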
+cacheURI="file://$cacheDir?compression=zstd"
+
+outPath=$(nix-build dependencies.nix --no-out-link)
+
+nix copy --to $cacheURI $outPath
+
+HASH=$(nix hash path $outPath)
+
+clearStore
+clearCacheCache
+
+nix copy --from $cacheURI $outPath --no-check-sigs
+
+if ls $cacheDir/nar/*.zst &> /dev/null; then
+    echo "files do exist"
+else
+    echo "nars do not exist"
+    exit 1
+fi
+
+HASH2=$(nix hash path $outPath)
+
+[[ $HASH = $HASH2 ]]