Merge branch '2021-01-22-doc-updates'

- Assorted documentation updates
diff --git a/doc/Makefile b/doc/Makefile
index 0e0da56..a686d47 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -106,7 +106,7 @@
 	$(Q)$(MAKE) BUILDDIR=$(abspath $(BUILDDIR)) $(build)=doc/media clean
 
 dochelp:
-	@echo  ' Linux kernel internal documentation in different formats from ReST:'
+	@echo  ' U-Boot documentation in different formats from ReST:'
 	@echo  '  htmldocs        - HTML'
 	@echo  '  latexdocs       - LaTeX'
 	@echo  '  pdfdocs         - PDF'
diff --git a/doc/README.menu b/doc/README.menu
deleted file mode 100644
index 0f3d741..0000000
--- a/doc/README.menu
+++ /dev/null
@@ -1,124 +0,0 @@
-SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright 2010-2011 Calxeda, Inc.
- */
-
-U-Boot provides a set of interfaces for creating and using simple, text
-based menus. Menus are displayed as lists of labeled entries on the
-console, and an entry can be selected by entering its label.
-
-To use the menu code, enable CONFIG_MENU, and include "menu.h" where
-the interfaces should be available.
-
-Menus are composed of items. Each item has a key used to identify it in
-the menu, and an opaque pointer to data controlled by the consumer.
-
-If you want to show a menu, instead starting the shell, define
-CONFIG_AUTOBOOT_MENU_SHOW. You have to code the int menu_show(int bootdelay)
-function, which handle your menu. This function returns the remaining
-bootdelay.
-
-Interfaces
-----------
-#include "menu.h"
-
-/*
- * Consumers of the menu interfaces will use a struct menu * as the
- * handle for a menu. struct menu is only fully defined in menu.c,
- * preventing consumers of the menu interfaces from accessing its
- * contents directly.
- */
-struct menu;
-
-/*
- * NOTE: See comments in common/menu.c for more detailed documentation on
- * these interfaces.
- */
-
-/*
- * menu_create() - Creates a menu handle with default settings
- */
-struct menu *menu_create(char *title, int timeout, int prompt,
-				void (*item_data_print)(void *),
-				char *(*item_choice)(void *),
-				void *item_choice_data);
-
-/*
- * menu_item_add() - Adds or replaces a menu item
- */
-int menu_item_add(struct menu *m, char *item_key, void *item_data);
-
-/*
- * menu_default_set() - Sets the default choice for the menu
- */
-int menu_default_set(struct menu *m, char *item_key);
-
-/*
- * menu_default_choice() - Set *choice to point to the default item's data
- */
-int menu_default_choice(struct menu *m, void **choice);
-
-/*
- * menu_get_choice() - Returns the user's selected menu entry, or the
- * default if the menu is set to not prompt or the timeout expires.
- */
-int menu_get_choice(struct menu *m, void **choice);
-
-/*
- * menu_destroy() - frees the memory used by a menu and its items.
- */
-int menu_destroy(struct menu *m);
-
-/*
- * menu_display_statusline(struct menu *m);
- * shows a statusline for every menu_display call.
- */
-void menu_display_statusline(struct menu *m);
-
-Example Code
-------------
-This example creates a menu that always prompts, and allows the user
-to pick from a list of tools.  The item key and data are the same.
-
-#include "menu.h"
-
-char *tools[] = {
-	"Hammer",
-	"Screwdriver",
-	"Nail gun",
-	NULL
-};
-
-char *pick_a_tool(void)
-{
-	struct menu *m;
-	int i;
-	char *tool = NULL;
-
-	m = menu_create("Tools", 0, 1, NULL);
-
-	for(i = 0; tools[i]; i++) {
-		if (menu_item_add(m, tools[i], tools[i]) != 1) {
-			printf("failed to add item!");
-			menu_destroy(m);
-			return NULL;
-		}
-	}
-
-	if (menu_get_choice(m, (void **)&tool) != 1)
-		printf("Problem picking tool!\n");
-
-	menu_destroy(m);
-
-	return tool;
-}
-
-void caller(void)
-{
-	char *tool = pick_a_tool();
-
-	if (tool) {
-		printf("picked a tool: %s\n", tool);
-		use_tool(tool);
-	}
-}
diff --git a/doc/board/freescale/b4860qds.rst b/doc/board/freescale/b4860qds.rst
index de14d85..6b5b121 100644
--- a/doc/board/freescale/b4860qds.rst
+++ b/doc/board/freescale/b4860qds.rst
@@ -133,8 +133,8 @@
 B4860QDS Default Settings
 -------------------------
 
-Switch Settings
-^^^^^^^^^^^^^^^
+B4860QDS Switch Settings
+^^^^^^^^^^^^^^^^^^^^^^^^
 
 .. code-block:: none
 
@@ -166,8 +166,8 @@
 B4420QDS Default Settings
 -------------------------
 
-Switch Settings
-^^^^^^^^^^^^^^^
+B4420QDS Switch Settings
+^^^^^^^^^^^^^^^^^^^^^^^^
 
 .. code-block:: none
 
diff --git a/doc/conf.py b/doc/conf.py
index ee7f201..1b9f359 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -16,6 +16,8 @@
 import os
 import sphinx
 
+from subprocess import check_output
+
 # Get Sphinx version
 major, minor, patch = sphinx.version_info[:3]
 
@@ -31,39 +33,98 @@
 # If your documentation needs a minimal Sphinx version, state it here.
 needs_sphinx = '1.3'
 
-latex_engine = 'xelatex'
-
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include', 'kfigure']
+extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include',
+              'kfigure', 'sphinx.ext.ifconfig', 'automarkup',
+              'maintainers_include', 'sphinx.ext.autosectionlabel',
+              'kernel_abi', 'kernel_feat']
 
 #
-# cdomain is badly broken in Sphinx 3+. Leaving it out generates *most*
-# of the docs correctly, but not all.
+# cdomain is badly broken in Sphinx 3+.  Leaving it out generates *most*
+# of the docs correctly, but not all.  Scream bloody murder but allow
+# the process to proceed; hopefully somebody will fix this properly soon.
 #
 if major >= 3:
+    sys.stderr.write('''WARNING: The kernel documentation build process
+        support for Sphinx v3.0 and above is brand new. Be prepared for
+        possible issues in the generated output.
+        ''')
     if (major > 3) or (minor > 0 or patch >= 2):
-        sys.stderr.write('''The build process with Sphinx 3+ is broken.
-You will have to remove -W in doc/Makefile.
-''')
         # Sphinx c function parser is more pedantic with regards to type
         # checking. Due to that, having macros at c:function cause problems.
-        # Those needed to be escaped by using c_id_attributes[] array
+        # Those need to be escaped by using the c_id_attributes[] array
         c_id_attributes = [
+            # GCC Compiler types not parsed by Sphinx:
+            "__restrict__",
 
-            # include/linux/compiler.h
+            # include/linux/compiler_types.h:
+            "__iomem",
+            "__kernel",
+            "noinstr",
+            "notrace",
+            "__percpu",
+            "__rcu",
+            "__user",
+
+            # include/linux/compiler_attributes.h:
+            "__alias",
+            "__aligned",
+            "__aligned_largest",
+            "__always_inline",
+            "__assume_aligned",
+            "__cold",
+            "__attribute_const__",
+            "__copy",
+            "__pure",
+            "__designated_init",
+            "__visible",
+            "__printf",
+            "__scanf",
+            "__gnu_inline",
+            "__malloc",
+            "__mode",
+            "__no_caller_saved_registers",
+            "__noclone",
+            "__nonstring",
+            "__noreturn",
+            "__packed",
+            "__pure",
+            "__section",
+            "__always_unused",
             "__maybe_unused",
+            "__used",
+            "__weak",
+            "noinline",
 
             # include/efi.h
             "EFIAPI",
 
             # include/efi_loader.h
             "__efi_runtime",
+
+            # include/linux/memblock.h:
+            "__init_memblock",
+            "__meminit",
+
+            # include/linux/init.h:
+            "__init",
+            "__ref",
+
+            # include/linux/linkage.h:
+            "asmlinkage",
         ]
 
 else:
     extensions.append('cdomain')
+    if major == 1 and minor < 7:
+        sys.stderr.write('WARNING: Sphinx 1.7 or greater will be required as of '
+                         'the v2021.04 release\n')
+
+# Ensure that autosectionlabel will produce unique names
+autosectionlabel_prefix_document = True
+autosectionlabel_maxdepth = 2
 
 # The name of the math extension changed on Sphinx 1.4
 if (major == 1 and minor > 3) or (major > 1):
@@ -86,9 +147,9 @@
 master_doc = 'index'
 
 # General information about the project.
-project = 'The Linux Kernel'
-copyright = 'The kernel development community'
-author = 'The kernel development community'
+project = 'Das U-Boot'
+copyright = 'The U-Boot development community'
+author = 'The U-Boot development community'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -199,7 +260,7 @@
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+html_logo = '../tools/logos/u-boot_logo.svg'
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
@@ -229,7 +290,7 @@
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+html_use_smartypants = False
 
 # Custom sidebar templates, maps document names to template names.
 #html_sidebars = {}
@@ -279,7 +340,7 @@
 #html_search_scorer = 'scorer.js'
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'TheLinuxKerneldoc'
+htmlhelp_basename = 'TheUBootdoc'
 
 # -- Options for LaTeX output ---------------------------------------------
 
@@ -288,7 +349,7 @@
 'papersize': 'a4paper',
 
 # The font size ('10pt', '11pt' or '12pt').
-'pointsize': '8pt',
+'pointsize': '11pt',
 
 # Latex figure (float) alignment
 #'figure_align': 'htbp',
@@ -301,13 +362,24 @@
     'preamble': '''
 	% Use some font with UTF-8 support with XeLaTeX
         \\usepackage{fontspec}
-        \\setsansfont{DejaVu Serif}
-        \\setromanfont{DejaVu Sans}
+        \\setsansfont{DejaVu Sans}
+        \\setromanfont{DejaVu Serif}
         \\setmonofont{DejaVu Sans Mono}
-
      '''
 }
 
+# At least one book (translations) may have Asian characters
+# which are only displayed if xeCJK is used
+
+cjk_cmd = check_output(['fc-list', '--format="%{family[0]}\n"']).decode('utf-8', 'ignore')
+if cjk_cmd.find("Noto Sans CJK SC") >= 0:
+    print ("enabling CJK for LaTeX builder")
+    latex_elements['preamble']  += '''
+	% This is needed for translations
+        \\usepackage{xeCJK}
+        \\setCJKmainfont{Noto Sans CJK SC}
+     '''
+
 # Fix reference escape troubles with Sphinx 1.4.x
 if major == 1 and minor > 3:
     latex_elements['preamble']  += '\\renewcommand*{\\DUrole}[2]{ #2 }\n'
@@ -398,10 +470,23 @@
 #  author, documentclass [howto, manual, or own class]).
 # Sorted in alphabetical order
 latex_documents = [
-    ('index', 'u-boot-hacker-manual.tex', 'U-Boot Hacker Manual',
-     'The U-Boot development community', 'manual'),
 ]
 
+# Add all other index files from doc/ subdirectories
+for fn in os.listdir('.'):
+    doc = os.path.join(fn, "index")
+    if os.path.exists(doc + ".rst"):
+        has = False
+        for l in latex_documents:
+            if l[0] == doc:
+                has = True
+                break
+        if not has:
+            latex_documents.append((doc, fn + '.tex',
+                                    'U-Boot %s Documentation' % fn.capitalize(),
+                                    'The U-Boot development community',
+                                    'manual'))
+
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
 #latex_logo = None
@@ -428,7 +513,7 @@
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    (master_doc, 'thelinuxkernel', 'The Linux Kernel Documentation',
+    (master_doc, 'dasuboot', 'The U-Boot Documentation',
      [author], 1)
 ]
 
@@ -442,8 +527,8 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    (master_doc, 'TheLinuxKernel', 'The Linux Kernel Documentation',
-     author, 'TheLinuxKernel', 'One line description of project.',
+    (master_doc, 'DasUBoot', 'The U-Boot Documentation',
+     author, 'DasUBoot', 'One line description of project.',
      'Miscellaneous'),
 ]
 
@@ -535,13 +620,13 @@
 # Grouping the document tree into PDF files. List of tuples
 # (source start file, target name, title, author, options).
 #
-# See the Sphinx chapter of http://ralsina.me/static/manual.pdf
+# See the Sphinx chapter of https://ralsina.me/static/manual.pdf
 #
 # FIXME: Do not add the index file here; the result will be too big. Adding
 # multiple PDF files here actually tries to get the cross-referencing right
 # *between* PDF files.
 pdf_documents = [
-    ('kernel-documentation', u'Kernel', u'Kernel', u'J. Random Bozo'),
+    ('uboot-documentation', u'U-Boot', u'U-Boot', u'J. Random Bozo'),
 ]
 
 # kernel-doc extension configuration for running Sphinx directly (e.g. by Read
diff --git a/doc/develop/index.rst b/doc/develop/index.rst
index 0a7e204..beaa64d 100644
--- a/doc/develop/index.rst
+++ b/doc/develop/index.rst
@@ -3,13 +3,32 @@
 Develop U-Boot
 ==============
 
+Implementation
+--------------
 
 .. toctree::
-   :maxdepth: 2
+   :maxdepth: 1
 
-   coccinelle
    commands
-   crash_dumps
    global_data
    logging
+   menus
+
+Debugging
+---------
+
+.. toctree::
+   :maxdepth: 1
+
+   crash_dumps
    trace
+
+Testing
+-------
+
+.. toctree::
+   :maxdepth: 1
+
+   coccinelle
+   testing
+   py_testing
diff --git a/doc/develop/menus.rst b/doc/develop/menus.rst
new file mode 100644
index 0000000..dda8f96
--- /dev/null
+++ b/doc/develop/menus.rst
@@ -0,0 +1,131 @@
+.. SPDX-License-Identifier: GPL-2.0+
+.. Copyright 2010-2011 Calxeda, Inc.
+
+Menus
+=====
+
+U-Boot provides a set of interfaces for creating and using simple, text
+based menus. Menus are displayed as lists of labeled entries on the
+console, and an entry can be selected by entering its label.
+
+To use the menu code, enable CONFIG_MENU, and include "menu.h" where
+the interfaces should be available.
+
+Menus are composed of items. Each item has a key used to identify it in
+the menu, and an opaque pointer to data controlled by the consumer.
+
+If you want to show a menu instead of starting the shell, define
+CONFIG_AUTOBOOT_MENU_SHOW. You have to implement the int menu_show(int bootdelay)
+function, which handles your menu. This function returns the remaining
+bootdelay.
+
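+A minimal sketch of such a function is shown below; the menu title, the entry
+labels and the choice to return 0 are hypothetical and only for illustration.
+
+.. code-block:: c
+
+    #include "menu.h"
+
+    int menu_show(int bootdelay)
+    {
+        struct menu *m;
+        char *choice = NULL;
+
+        /* title, timeout, prompt, then the three optional callbacks */
+        m = menu_create("Boot menu", bootdelay, 1, NULL, NULL, NULL);
+        if (!m)
+            return bootdelay;
+
+        menu_item_add(m, "1", "Boot from eMMC");
+        menu_item_add(m, "2", "Boot from network");
+        menu_default_set(m, "1");
+
+        if (menu_get_choice(m, (void **)&choice) == 1)
+            printf("Selected: %s\n", choice);
+
+        menu_destroy(m);
+
+        /* In this sketch the menu consumes the whole delay. */
+        return 0;
+    }
+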
+Interfaces
+----------
+
+.. code-block:: c
+
+    #include "menu.h"
+
+    /*
+     * Consumers of the menu interfaces will use a struct menu * as the
+     * handle for a menu. struct menu is only fully defined in menu.c,
+     * preventing consumers of the menu interfaces from accessing its
+     * contents directly.
+     */
+    struct menu;
+
+    /*
+     * NOTE: See comments in common/menu.c for more detailed documentation on
+     * these interfaces.
+     */
+
+    /*
+     * menu_create() - Creates a menu handle with default settings
+     */
+    struct menu *menu_create(char *title, int timeout, int prompt,
+                             void (*item_data_print)(void *),
+                             char *(*item_choice)(void *),
+                             void *item_choice_data);
+
+    /*
+     * menu_item_add() - Adds or replaces a menu item
+     */
+    int menu_item_add(struct menu *m, char *item_key, void *item_data);
+
+    /*
+     * menu_default_set() - Sets the default choice for the menu
+     */
+    int menu_default_set(struct menu *m, char *item_key);
+
+    /*
+     * menu_default_choice() - Set *choice to point to the default item's data
+     */
+    int menu_default_choice(struct menu *m, void **choice);
+
+    /*
+     * menu_get_choice() - Returns the user's selected menu entry, or the
+     * default if the menu is set to not prompt or the timeout expires.
+     */
+    int menu_get_choice(struct menu *m, void **choice);
+
+    /*
+     * menu_destroy() - frees the memory used by a menu and its items.
+     */
+    int menu_destroy(struct menu *m);
+
+    /*
+     * menu_display_statusline(struct menu *m);
+     * shows a statusline for every menu_display call.
+     */
+    void menu_display_statusline(struct menu *m);
+
+Example Code
+------------
+
+This example creates a menu that always prompts, and allows the user
+to pick from a list of tools.  The item key and data are the same.
+
+.. code-block:: c
+
+    #include "menu.h"
+
+    char *tools[] = {
+        "Hammer",
+        "Screwdriver",
+        "Nail gun",
+        NULL
+    };
+
+    char *pick_a_tool(void)
+    {
+        struct menu *m;
+        int i;
+        char *tool = NULL;
+
+        m = menu_create("Tools", 0, 1, NULL, NULL, NULL);
+
+        for(i = 0; tools[i]; i++) {
+            if (menu_item_add(m, tools[i], tools[i]) != 1) {
+                printf("failed to add item!");
+                menu_destroy(m);
+                return NULL;
+            }
+        }
+
+        if (menu_get_choice(m, (void **)&tool) != 1)
+            printf("Problem picking tool!\n");
+
+        menu_destroy(m);
+
+        return tool;
+    }
+
+    void caller(void)
+    {
+        char *tool = pick_a_tool();
+
+        if (tool) {
+            printf("picked a tool: %s\n", tool);
+            use_tool(tool);
+        }
+    }
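+
+The item_data_print and item_choice callbacks accepted by menu_create() are
+all NULL above. A hypothetical sketch of supplying a custom print callback
+(the callback name and output format are made up for illustration) could look
+like this:
+
+.. code-block:: c
+
+    /* Called by the menu code when an item's data is displayed. */
+    static void print_tool(void *data)
+    {
+        printf("  tool: %s\n", (char *)data);
+    }
+
+    struct menu *create_tool_menu(void)
+    {
+        /* Same arguments as before, plus the print callback. */
+        return menu_create("Tools", 0, 1, print_tool, NULL, NULL);
+    }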
diff --git a/test/py/README.md b/doc/develop/py_testing.rst
similarity index 74%
rename from test/py/README.md
rename to doc/develop/py_testing.rst
index fddc104..f71e837 100644
--- a/test/py/README.md
+++ b/doc/develop/py_testing.rst
@@ -1,6 +1,8 @@
-# U-Boot pytest suite
+U-Boot pytest suite
+===================
 
-## Introduction
+Introduction
+------------
 
 This tool aims to test U-Boot by executing U-Boot shell commands using the
 console interface. A single top-level script exists to execute or attach to the
@@ -14,40 +16,42 @@
   flexible than writing it all in C.
 - It is reasonably simple to interact with U-Boot in this way.
 
-## Requirements
+Requirements
+------------
 
 The test suite is implemented using pytest. Interaction with the U-Boot console
 involves executing some binary and interacting with its stdin/stdout. You will
 need to implement various "hook" scripts that are called by the test suite at
 the appropriate time.
 
-In order to run the testsuite at a minimum we require that both python3 and
-pip for python3 be installed.  All of the required python modules are
-described in the requirements.txt file in this directory and can be installed
-with the command ```pip install -r requirements.txt```
+In order to run the test suite, at a minimum both Python 3 and pip for
+Python 3 must be installed. All of the required Python modules are
+described in the requirements.txt file in the test/py/ directory and can be
+installed via the command
+
+.. code-block:: bash
+
+   pip install -r requirements.txt
 
 In order to execute certain tests on their supported platforms other tools
-will be required.  The following is an incomplete list:
+will be required. The following is an incomplete list:
 
-| Package        |
-| -------------- |
-| gdisk          |
-| dfu-util       |
-| dtc            |
-| openssl        |
-| sudo OR guestmount |
-| e2fsprogs      |
-| util-linux     |
-| coreutils      |
-| dosfstools     |
-| efitools       |
-| mount          |
-| mtools         |
-| sbsigntool     |
-| udisks2        |
+* gdisk
+* dfu-util
+* dtc
+* openssl
+* sudo OR guestmount
+* e2fsprogs
+* util-linux
+* coreutils
+* dosfstools
+* efitools
+* mount
+* mtools
+* sbsigntool
+* udisks2
 
-
-Please use the apporirate commands for your distribution to match these tools
+Please use the appropriate commands for your distribution to match these tools
 up with the package that provides them.
 
 The test script supports either:
@@ -58,28 +62,31 @@
   physical board, attach to the board's console stream, and reset the board.
   Further details are described later.
 
-### Using `virtualenv` to provide requirements
+Using `virtualenv` to provide requirements
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 The recommended way to run the test suite, in order to ensure reproducibility
 is to use `virtualenv` to set up the necessary environment.  This can be done
 via the following commands:
 
-```bash
-$ cd /path/to/u-boot
-$ sudo apt-get install python3 python3-virtualenv
-$ virtualenv -p /usr/bin/python3 venv
-$ . ./venv/bin/activate
-$ pip install -r test/py/requirements.txt
-```
 
-## Testing sandbox
+.. code-block:: console
 
-To run the testsuite on the sandbox port (U-Boot built as a native user-space
+    $ cd /path/to/u-boot
+    $ sudo apt-get install python3 python3-virtualenv
+    $ virtualenv -p /usr/bin/python3 venv
+    $ . ./venv/bin/activate
+    $ pip install -r test/py/requirements.txt
+
+Testing sandbox
+---------------
+
+To run the test suite on the sandbox port (U-Boot built as a native user-space
 application), simply execute:
 
-```
-./test/py/test.py --bd sandbox --build
-```
+.. code-block:: bash
+
+    ./test/py/test.py --bd sandbox --build
 
 The `--bd` option tells the test suite which board type is being tested. This
 lets the test suite know which features the board has, and hence exactly what
@@ -95,7 +102,8 @@
 browser, but may be read directly as plain text, perhaps with the aid of the
 `html2text` utility.
 
-### Testing under a debugger
+Testing under a debugger
+~~~~~~~~~~~~~~~~~~~~~~~~
 
 If you need to run sandbox under a debugger, you may pass the command-line
 option `--gdbserver COMM`. This causes two things to happens:
@@ -111,19 +119,21 @@
 A usage example is:
 
 Window 1:
-```shell
-./test/py/test.py --bd sandbox --gdbserver localhost:1234
-```
+
+.. code-block:: bash
+
+    ./test/py/test.py --bd sandbox --gdbserver localhost:1234
 
 Window 2:
-```shell
-gdb ./build-sandbox/u-boot -ex 'target remote localhost:1234'
-```
+
+.. code-block:: bash
+
+    gdb ./build-sandbox/u-boot -ex 'target remote localhost:1234'
 
 Alternatively, you could leave off the `-ex` option and type the command
 manually into gdb once it starts.
 
-You can use any debugger you wish, so long as it speaks the gdb remote
+You can use any debugger you wish, as long as it speaks the gdb remote
 protocol, or any graphical wrapper around gdb.
 
 Some tests deliberately cause the sandbox process to exit, e.g. to test the
@@ -132,30 +142,39 @@
 relevant to your debugging session, you can skip them using pytest's -k
 command-line option; see the next section.
 
-## Command-line options
+Command-line options
+--------------------
 
-- `--board-type`, `--bd`, `-B` set the type of the board to be tested. For
-  example, `sandbox` or `seaboard`.
-- `--board-identity`, `--id` set the identity of the board to be tested.
-  This allows differentiation between multiple instances of the same type of
-  physical board that are attached to the same host machine. This parameter is
-  not interpreted by the test script in any way, but rather is simply passed
-  to the hook scripts described below, and may be used in any site-specific
-  way deemed necessary.
-- `--build` indicates that the test script should compile U-Boot itself
-  before running the tests. If using this option, make sure that any
-  environment variables required by the build process are already set, such as
-  `$CROSS_COMPILE`.
-- `--buildman` indicates that `--build` should use buildman to build U-Boot.
-  There is no need to set $CROSS_COMPILE` in this case since buildman handles
-  it.
-- `--build-dir` sets the directory containing the compiled U-Boot binaries.
-  If omitted, this is `${source_dir}/build-${board_type}`.
-- `--result-dir` sets the directory to write results, such as log files,
-  into. If omitted, the build directory is used.
-- `--persistent-data-dir` sets the directory used to store persistent test
-  data. This is test data that may be re-used across test runs, such as file-
-  system images.
+--board-type, --bd, -B
+  sets the type of the board to be tested. For example, `sandbox` or `seaboard`.
+
+--board-identity, --id
+  sets the identity of the board to be tested. This allows differentiation
+  between multiple instances of the same type of physical board that are
+  attached to the same host machine. This parameter is not interpreted by the
+  test script in any way, but rather is simply passed to the hook scripts
+  described below, and may be used in any site-specific way deemed necessary.
+
+--build
+  indicates that the test script should compile U-Boot itself before running
+  the tests. If using this option, make sure that any environment variables
+  required by the build process are already set, such as `$CROSS_COMPILE`.
+
+--buildman
+  indicates that `--build` should use buildman to build U-Boot. There is no need
+  to set `$CROSS_COMPILE` in this case since buildman handles it.
+
+--build-dir
+  sets the directory containing the compiled U-Boot binaries. If omitted, this
+  is `${source_dir}/build-${board_type}`.
+
+--result-dir
+  sets the directory to write results, such as log files, into.
+  If omitted, the build directory is used.
+
+--persistent-data-dir
+  sets the directory used to store persistent test data. This is test data that
+  may be re-used across test runs, such as file-system images.
 
 `pytest` also implements a number of its own command-line options. Commonly used
 options are mentioned below. Please see `pytest` documentation for complete
@@ -163,21 +182,26 @@
 test.py script passes all command-line arguments directly to `pytest` for
 processing.
 
-- `-k` selects which tests to run. The default is to run all known tests. This
+-k
+  selects which tests to run. The default is to run all known tests. This
   option takes a single argument which is used to filter test names. Simple
   logical operators are supported. For example:
-  - `'ums'` runs only tests with "ums" in their name.
-  - `'ut_dm'` runs only tests with "ut_dm" in their name. Note that in this
+
+  - `'-k ums'` runs only tests with "ums" in their name.
+  - `'-k ut_dm'` runs only tests with "ut_dm" in their name. Note that in this
     case, "ut_dm" is a parameter to a test rather than the test name. The full
     test name is e.g. "test_ut[ut_dm_leak]".
-  - `'not reset'` runs everything except tests with "reset" in their name.
-  - `'ut or hush'` runs only tests with "ut" or "hush" in their name.
-  - `'not (ut or hush)'` runs everything except tests with "ut" or "hush" in
+  - `'-k not reset'` runs everything except tests with "reset" in their name.
+  - `'-k ut or hush'` runs only tests with "ut" or "hush" in their name.
+  - `'-k not (ut or hush)'` runs everything except tests with "ut" or "hush" in
     their name.
-- `-s` prevents pytest from hiding a test's stdout. This allows you to see
+
+-s
+  prevents pytest from hiding a test's stdout. This allows you to see
   U-Boot's console log in real time on pytest's stdout.
 
-## Testing real hardware
+Testing real hardware
+---------------------
 
 The tools and techniques used to interact with real hardware will vary
 radically between different host and target systems, and the whims of the user.
@@ -187,9 +211,11 @@
 suite. This keeps the test suite simple and isolated from system variances
 unrelated to U-Boot features.
 
-### Hook scripts
+Hook scripts
+~~~~~~~~~~~~
 
-#### Environment variables
+Environment variables
+'''''''''''''''''''''
 
 The following environment variables are set when running hook scripts:
 
@@ -202,16 +228,18 @@
 - `UBOOT_RESULT_DIR` the test result directory.
 - `UBOOT_PERSISTENT_DATA_DIR` the test persistent data directory.
 
-#### `u-boot-test-console`
+u-boot-test-console
+'''''''''''''''''''
 
 This script provides access to the U-Boot console. The script's stdin/stdout
 should be connected to the board's console. This process should continue to run
 indefinitely, until killed. The test suite will run this script in parallel
 with all other hooks.
 
-This script may be implemented e.g. by exec()ing `cu`, `kermit`, `conmux`, etc.
+This script may be implemented e.g. by executing `cu`, `kermit`, `conmux`, etc.
+via exec().
 
-If you are able to run U-Boot under a hardware simulator such as qemu, then
+If you are able to run U-Boot under a hardware simulator such as QEMU, then
 you would likely spawn that simulator from this script. However, note that
 `u-boot-test-reset` may be called multiple times per test script run, and must
 cause U-Boot to start execution from scratch each time. Hopefully your
@@ -219,10 +247,11 @@
 simulator from `u-boot-test-reset` instead, while arranging for this console
 process to always communicate with the current simulator instance.
 
-#### `u-boot-test-flash`
+u-boot-test-flash
+'''''''''''''''''
 
 Prior to running the test suite against a board, some arrangement must be made
-so that the board executes the particular U-Boot binary to be tested. Often,
+so that the board executes the particular U-Boot binary to be tested. Often
 this involves writing the U-Boot binary to the board's flash ROM. The test
 suite calls this hook script for that purpose.
 
@@ -248,7 +277,8 @@
 This script will typically be implemented by calling out to some SoC- or
 board-specific vendor flashing utility.
 
-#### `u-boot-test-reset`
+u-boot-test-reset
+'''''''''''''''''
 
 Whenever the test suite needs to reset the target board, this script is
 executed. This is guaranteed to happen at least once, prior to executing the
@@ -261,20 +291,22 @@
 
 The semantics of this script require that when it is executed, U-Boot will
 start running from scratch. If the U-Boot binary to be tested has been written
-to flash, pulsing the board's reset signal is likely all this script need do.
-However, in some scenarios, this script may perform other actions. For
+to flash, pulsing the board's reset signal is likely all this script needs to
+do. However, in some scenarios, this script may perform other actions. For
 example, it may call out to some SoC- or board-specific vendor utility in order
 to download the U-Boot binary directly into RAM and execute it. This would
 avoid the need for `u-boot-test-flash` to actually write U-Boot to flash, thus
 saving wear on the flash chip(s).
 
-#### Examples
+Examples
+''''''''
 
 https://github.com/swarren/uboot-test-hooks contains some working example hook
 scripts, and may be useful as a reference when implementing hook scripts for
 your platform. These scripts are not considered part of U-Boot itself.
 
-### Board-type-specific configuration
+Board-type-specific configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Each board has a different configuration and behaviour. Many of these
 differences can be automatically detected by parsing the `.config` file in the
@@ -286,7 +318,8 @@
 contained in these scripts must be purely derived from U-Boot source code.
 Hence, these configuration files are part of the U-Boot source tree too.
 
-### Execution environment configuration
+Execution environment configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Each user's hardware setup may enable testing different subsets of the features
 implemented by a particular board's configuration of U-Boot. For example, a
@@ -304,14 +337,16 @@
 should set `$PYTHONPATH` prior to running the test script to allow these
 modules to be loaded.
 
-### Board module parameter usage
+Board module parameter usage
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 The test scripts rely on the following variables being defined by the board
 module:
 
-- None at present.
+- none at present
 
-### U-Boot `.config` feature usage
+U-Boot `.config` feature usage
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 The test scripts rely on various U-Boot `.config` features, either directly in
 order to test those features, or indirectly in order to query information from
@@ -328,7 +363,8 @@
 - `@pytest.mark.buildconfigspec(...`
 - `@pytest.mark.notbuildconfigspec(...`
 
-### Complete invocation example
+Complete invocation example
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Assuming that you have installed the hook scripts into $HOME/ubtest/bin, and
 any required environment configuration Python modules into $HOME/ubtest/py,
@@ -336,32 +372,33 @@
 
 If U-Boot has already been built:
 
-```bash
-PATH=$HOME/ubtest/bin:$PATH \
+.. code-block:: bash
+
+    PATH=$HOME/ubtest/bin:$PATH \
     PYTHONPATH=${HOME}/ubtest/py/${HOSTNAME}:${PYTHONPATH} \
     ./test/py/test.py --bd seaboard
-```
 
 If you want the test script to compile U-Boot for you too, then you likely
 need to set `$CROSS_COMPILE` to allow this, and invoke the test script as
 follows:
 
-```bash
-CROSS_COMPILE=arm-none-eabi- \
+.. code-block:: bash
+
+    CROSS_COMPILE=arm-none-eabi- \
     PATH=$HOME/ubtest/bin:$PATH \
     PYTHONPATH=${HOME}/ubtest/py/${HOSTNAME}:${PYTHONPATH} \
     ./test/py/test.py --bd seaboard --build
-```
 
 or, using buildman to handle it:
 
-```bash
+.. code-block:: bash
+
     PATH=$HOME/ubtest/bin:$PATH \
     PYTHONPATH=${HOME}/ubtest/py/${HOSTNAME}:${PYTHONPATH} \
     ./test/py/test.py --bd seaboard --build --buildman
-```
 
-## Writing tests
+Writing tests
+-------------
 
 Please refer to the pytest documentation for details of writing pytest tests.
 Details specific to the U-Boot test suite are described below.
diff --git a/test/README b/doc/develop/testing.rst
similarity index 100%
rename from test/README
rename to doc/develop/testing.rst
diff --git a/doc/sphinx/automarkup.py b/doc/sphinx/automarkup.py
new file mode 100644
index 0000000..953b24b
--- /dev/null
+++ b/doc/sphinx/automarkup.py
@@ -0,0 +1,290 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2019 Jonathan Corbet <corbet@lwn.net>
+#
+# Apply kernel-specific tweaks after the initial document processing
+# has been done.
+#
+from docutils import nodes
+import sphinx
+from sphinx import addnodes
+if sphinx.version_info[0] < 2 or \
+   sphinx.version_info[0] == 2 and sphinx.version_info[1] < 1:
+    from sphinx.environment import NoUri
+else:
+    from sphinx.errors import NoUri
+import re
+from itertools import chain
+
+#
+# Python 2 lacks re.ASCII...
+#
+try:
+    ascii_p3 = re.ASCII
+except AttributeError:
+    ascii_p3 = 0
+
+#
+# Regex nastiness.  Of course.
+# Try to identify "function()" that's not already marked up some
+# other way.  Sphinx doesn't like a lot of stuff right after a
+# :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last
+# bit tries to restrict matches to things that won't create trouble.
+#
+RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3)
+
+#
+# Sphinx 2 uses the same :c:type role for struct, union, enum and typedef
+#
+RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)',
+                             flags=ascii_p3)
+
+#
+# Sphinx 3 uses a different C role for each one of struct, union, enum and
+# typedef
+#
+RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
+RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
+RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
+RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
+
+#
+# Detects a reference to a documentation page of the form Documentation/... with
+# an optional extension
+#
+RE_doc = re.compile(r'\bDocumentation(/[\w\-_/]+)(\.\w+)*')
+
+RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
+
+#
+# Reserved C words that we should skip when cross-referencing
+#
+Skipnames = [ 'for', 'if', 'register', 'sizeof', 'struct', 'unsigned' ]
+
+
+#
+# Many places in the docs refer to common system calls.  It is
+# pointless to try to cross-reference them and, as has been known
+# to happen, somebody defining a function by these names can lead
+# to the creation of incorrect and confusing cross references.  So
+# just don't even try with these names.
+#
+Skipfuncs = [ 'open', 'close', 'read', 'write', 'fcntl', 'mmap',
+              'select', 'poll', 'fork', 'execve', 'clone', 'ioctl',
+              'socket' ]
+
+c_namespace = ''
+
+def markup_refs(docname, app, node):
+    t = node.astext()
+    done = 0
+    repl = [ ]
+    #
+    # Associate each regex with the function that will markup its matches
+    #
+    markup_func_sphinx2 = {RE_doc: markup_doc_ref,
+                           RE_function: markup_c_ref,
+                           RE_generic_type: markup_c_ref}
+
+    markup_func_sphinx3 = {RE_doc: markup_doc_ref,
+                           RE_function: markup_func_ref_sphinx3,
+                           RE_struct: markup_c_ref,
+                           RE_union: markup_c_ref,
+                           RE_enum: markup_c_ref,
+                           RE_typedef: markup_c_ref}
+
+    if sphinx.version_info[0] >= 3:
+        markup_func = markup_func_sphinx3
+    else:
+        markup_func = markup_func_sphinx2
+
+    match_iterators = [regex.finditer(t) for regex in markup_func]
+    #
+    # Sort all references by the starting position in text
+    #
+    sorted_matches = sorted(chain(*match_iterators), key=lambda m: m.start())
+    for m in sorted_matches:
+        #
+        # Include any text prior to match as a normal text node.
+        #
+        if m.start() > done:
+            repl.append(nodes.Text(t[done:m.start()]))
+
+        #
+        # Call the function associated with the regex that matched this text and
+        # append its return to the text
+        #
+        repl.append(markup_func[m.re](docname, app, m))
+
+        done = m.end()
+    if done < len(t):
+        repl.append(nodes.Text(t[done:]))
+    return repl
+
+#
+# In sphinx3 we can cross-reference to C macro and function, each one with its
+# own C role, but both match the same regex, so we try both.
+#
+def markup_func_ref_sphinx3(docname, app, match):
+    class_str = ['c-func', 'c-macro']
+    reftype_str = ['function', 'macro']
+
+    cdom = app.env.domains['c']
+    #
+    # Go through the dance of getting an xref out of the C domain
+    #
+    base_target = match.group(2)
+    target_text = nodes.Text(match.group(0))
+    xref = None
+    possible_targets = [base_target]
+    # Check if this document has a namespace, and if so, try
+    # cross-referencing inside it first.
+    if c_namespace:
+        possible_targets.insert(0, c_namespace + "." + base_target)
+
+    if base_target not in Skipnames:
+        for target in possible_targets:
+            if target not in Skipfuncs:
+                for class_s, reftype_s in zip(class_str, reftype_str):
+                    lit_text = nodes.literal(classes=['xref', 'c', class_s])
+                    lit_text += target_text
+                    pxref = addnodes.pending_xref('', refdomain = 'c',
+                                                  reftype = reftype_s,
+                                                  reftarget = target, modname = None,
+                                                  classname = None)
+                    #
+                    # XXX The Latex builder will throw NoUri exceptions here,
+                    # work around that by ignoring them.
+                    #
+                    try:
+                        xref = cdom.resolve_xref(app.env, docname, app.builder,
+                                                 reftype_s, target, pxref,
+                                                 lit_text)
+                    except NoUri:
+                        xref = None
+
+                    if xref:
+                        return xref
+
+    return target_text
+
+def markup_c_ref(docname, app, match):
+    class_str = {# Sphinx 2 only
+                 RE_function: 'c-func',
+                 RE_generic_type: 'c-type',
+                 # Sphinx 3+ only
+                 RE_struct: 'c-struct',
+                 RE_union: 'c-union',
+                 RE_enum: 'c-enum',
+                 RE_typedef: 'c-type',
+                 }
+    reftype_str = {# Sphinx 2 only
+                   RE_function: 'function',
+                   RE_generic_type: 'type',
+                   # Sphinx 3+ only
+                   RE_struct: 'struct',
+                   RE_union: 'union',
+                   RE_enum: 'enum',
+                   RE_typedef: 'type',
+                   }
+
+    cdom = app.env.domains['c']
+    #
+    # Go through the dance of getting an xref out of the C domain
+    #
+    base_target = match.group(2)
+    target_text = nodes.Text(match.group(0))
+    xref = None
+    possible_targets = [base_target]
+    # Check if this document has a namespace, and if so, try
+    # cross-referencing inside it first.
+    if c_namespace:
+        possible_targets.insert(0, c_namespace + "." + base_target)
+
+    if base_target not in Skipnames:
+        for target in possible_targets:
+            if not (match.re == RE_function and target in Skipfuncs):
+                lit_text = nodes.literal(classes=['xref', 'c', class_str[match.re]])
+                lit_text += target_text
+                pxref = addnodes.pending_xref('', refdomain = 'c',
+                                              reftype = reftype_str[match.re],
+                                              reftarget = target, modname = None,
+                                              classname = None)
+                #
+                # XXX The Latex builder will throw NoUri exceptions here,
+                # work around that by ignoring them.
+                #
+                try:
+                    xref = cdom.resolve_xref(app.env, docname, app.builder,
+                                             reftype_str[match.re], target, pxref,
+                                             lit_text)
+                except NoUri:
+                    xref = None
+
+                if xref:
+                    return xref
+
+    return target_text
+
+#
+# Try to replace a documentation reference of the form Documentation/... with a
+# cross reference to that page
+#
+def markup_doc_ref(docname, app, match):
+    stddom = app.env.domains['std']
+    #
+    # Go through the dance of getting an xref out of the std domain
+    #
+    target = match.group(1)
+    xref = None
+    pxref = addnodes.pending_xref('', refdomain = 'std', reftype = 'doc',
+                                  reftarget = target, modname = None,
+                                  classname = None, refexplicit = False)
+    #
+    # XXX The Latex builder will throw NoUri exceptions here,
+    # work around that by ignoring them.
+    #
+    try:
+        xref = stddom.resolve_xref(app.env, docname, app.builder, 'doc',
+                                   target, pxref, None)
+    except NoUri:
+        xref = None
+    #
+    # Return the xref if we got it; otherwise just return the plain text.
+    #
+    if xref:
+        return xref
+    else:
+        return nodes.Text(match.group(0))
+
+def get_c_namespace(app, docname):
+    source = app.env.doc2path(docname)
+    with open(source) as f:
+        for l in f:
+            match = RE_namespace.search(l)
+            if match:
+                return match.group(1)
+    return ''
+
+def auto_markup(app, doctree, name):
+    global c_namespace
+    c_namespace = get_c_namespace(app, name)
+    #
+    # This loop could eventually be improved on.  Someday maybe we
+    # want a proper tree traversal with a lot of awareness of which
+    # kinds of nodes to prune.  But this works well for now.
+    #
+    # The nodes.literal test catches ``literal text``, its purpose is to
+    # avoid adding cross-references to functions that have been explicitly
+    # marked with c:func:.
+    #
+    for para in doctree.traverse(nodes.paragraph):
+        for node in para.traverse(nodes.Text):
+            if not isinstance(node.parent, nodes.literal):
+                node.parent.replace(node, markup_refs(name, app, node))
+
+def setup(app):
+    app.connect('doctree-resolved', auto_markup)
+    return {
+        'parallel_read_safe': True,
+        'parallel_write_safe': True,
+        }
diff --git a/doc/sphinx/cdomain.py b/doc/sphinx/cdomain.py
index cbac8e6..014a522 100644
--- a/doc/sphinx/cdomain.py
+++ b/doc/sphinx/cdomain.py
@@ -40,14 +40,94 @@
 from sphinx.domains.c import c_funcptr_sig_re, c_sig_re
 from sphinx.domains.c import CObject as Base_CObject
 from sphinx.domains.c import CDomain as Base_CDomain
+from itertools import chain
+import re
 
-__version__  = '1.0'
+__version__  = '1.1'
 
 # Get Sphinx version
 major, minor, patch = sphinx.version_info[:3]
 
+# Namespace to be prepended to the full name
+namespace = None
+
+#
+# Handle trivial newer c domain tags that are part of Sphinx 3.1 c domain tags
+# - Store the namespace if ".. c:namespace::" tag is found
+#
+RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
+
+def markup_namespace(match):
+    global namespace
+
+    namespace = match.group(1)
+
+    return ""
+
+#
+# Handle c:macro for function-style declaration
+#
+RE_macro = re.compile(r'^\s*..\s*c:macro::\s*(\S+)\s+(\S.*)\s*$')
+def markup_macro(match):
+    return ".. c:function:: " + match.group(1) + ' ' + match.group(2)
+
+#
+# Handle newer c domain tags that are evaluated as .. c:type: for
+# backward-compatibility with Sphinx < 3.0
+#
+RE_ctype = re.compile(r'^\s*..\s*c:(struct|union|enum|enumerator|alias)::\s*(.*)$')
+
+def markup_ctype(match):
+    return ".. c:type:: " + match.group(2)
+
+#
+# Handle newer c domain tags that are evaluated as :c:type: for
+# backward-compatibility with Sphinx < 3.0
+#
+RE_ctype_refs = re.compile(r':c:(var|struct|union|enum|enumerator)::`([^\`]+)`')
+def markup_ctype_refs(match):
+    return ":c:type:`" + match.group(2) + '`'
+
+#
+# Simply convert :c:expr: and :c:texpr: into a literal block.
+#
+RE_expr = re.compile(r':c:(expr|texpr):`([^\`]+)`')
+def markup_c_expr(match):
+    return '\ ``' + match.group(2) + '``\ '
+
+#
+# Parse Sphinx 3.x C markups, replacing them by backward-compatible ones
+#
+def c_markups(app, docname, source):
+    result = ""
+    markup_func = {
+        RE_namespace: markup_namespace,
+        RE_expr: markup_c_expr,
+        RE_macro: markup_macro,
+        RE_ctype: markup_ctype,
+        RE_ctype_refs: markup_ctype_refs,
+    }
+
+    lines = iter(source[0].splitlines(True))
+    for n in lines:
+        match_iterators = [regex.finditer(n) for regex in markup_func]
+        matches = sorted(chain(*match_iterators), key=lambda m: m.start())
+        for m in matches:
+            n = n[:m.start()] + markup_func[m.re](m) + n[m.end():]
+
+        result = result + n
+
+    source[0] = result
+
+#
+# Now implements support for the cdomain namespacing logic
+#
+
 def setup(app):
 
+    # Handle easy Sphinx 3.1+ simple new tags: :c:expr and .. c:namespace::
+    app.connect('source-read', c_markups)
+
     if (major == 1 and minor < 8):
         app.override_domain(CDomain)
     else:
@@ -75,6 +155,8 @@
         function-like macro, the name of the macro is returned. Otherwise
         ``False`` is returned.  """
 
+        global namespace
+
         if not self.objtype == 'function':
             return False
 
@@ -107,11 +189,16 @@
             param += nodes.emphasis(argname, argname)
             paramlist += param
 
+        if namespace:
+            fullname = namespace + "." + fullname
+
         return fullname
 
     def handle_signature(self, sig, signode):
         """Transform a C signature into RST nodes."""
 
+        global namespace
+
         fullname = self.handle_func_like_macro(sig, signode)
         if not fullname:
             fullname = super(CObject, self).handle_signature(sig, signode)
@@ -122,6 +209,10 @@
             else:
                 # FIXME: handle :name: value of other declaration types?
                 pass
+        else:
+            if namespace:
+                fullname = namespace + "." + fullname
+
         return fullname
 
     def add_target_and_index(self, name, sig, signode):
diff --git a/doc/sphinx/kernel_abi.py b/doc/sphinx/kernel_abi.py
new file mode 100644
index 0000000..f3da859
--- /dev/null
+++ b/doc/sphinx/kernel_abi.py
@@ -0,0 +1,194 @@
+# -*- coding: utf-8; mode: python -*-
+# coding=utf-8
+# SPDX-License-Identifier: GPL-2.0
+#
+u"""
+    kernel-abi
+    ~~~~~~~~~~
+
+    Implementation of the ``kernel-abi`` reST-directive.
+
+    :copyright:  Copyright (C) 2016  Markus Heiser
+    :copyright:  Copyright (C) 2016-2020  Mauro Carvalho Chehab
+    :maintained-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+    :license:    GPL Version 2, June 1991 see Linux/COPYING for details.
+
+    The ``kernel-abi`` (:py:class:`KernelCmd`) directive calls the
+    scripts/get_abi.pl script to parse the Kernel ABI files.
+
+    Overview of directive's argument and options.
+
+    .. code-block:: rst
+
+        .. kernel-abi:: <ABI directory location>
+            :debug:
+
+    The argument ``<ABI directory location>`` is required. It contains the
+    location of the ABI files to be parsed.
+
+    ``debug``
+      Inserts a code-block with the *raw* reST. Sometimes it is helpful to see
+      what reST is generated.
+
+"""
+
+import codecs
+import os
+import subprocess
+import sys
+import re
+import kernellog
+
+from os import path
+
+from docutils import nodes, statemachine
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import directives, Directive
+from docutils.utils.error_reporting import ErrorString
+
+#
+# AutodocReporter is only good up to Sphinx 1.7
+#
+import sphinx
+
+Use_SSI = sphinx.__version__[:3] >= '1.7'
+if Use_SSI:
+    from sphinx.util.docutils import switch_source_input
+else:
+    from sphinx.ext.autodoc import AutodocReporter
+
+__version__  = '1.0'
+
+def setup(app):
+
+    app.add_directive("kernel-abi", KernelCmd)
+    return dict(
+        version = __version__
+        , parallel_read_safe = True
+        , parallel_write_safe = True
+    )
+
+class KernelCmd(Directive):
+
+    u"""KernelABI (``kernel-abi``) directive"""
+
+    required_arguments = 1
+    optional_arguments = 2
+    has_content = False
+    final_argument_whitespace = True
+
+    option_spec = {
+        "debug"     : directives.flag,
+        "rst"       : directives.unchanged
+    }
+
+    def run(self):
+
+        doc = self.state.document
+        if not doc.settings.file_insertion_enabled:
+            raise self.warning("docutils: file insertion disabled")
+
+        env = doc.settings.env
+        cwd = path.dirname(doc.current_source)
+        cmd = "get_abi.pl rest --enable-lineno --dir "
+        cmd += self.arguments[0]
+
+        if 'rst' in self.options:
+            cmd += " --rst-source"
+
+        srctree = path.abspath(os.environ["srctree"])
+
+        fname = cmd
+
+        # extend PATH with $(srctree)/scripts
+        path_env = os.pathsep.join([
+            srctree + os.sep + "scripts",
+            os.environ["PATH"]
+        ])
+        shell_env = os.environ.copy()
+        shell_env["PATH"]    = path_env
+        shell_env["srctree"] = srctree
+
+        lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
+        nodeList = self.nestedParse(lines, self.arguments[0])
+        return nodeList
+
+    def runCmd(self, cmd, **kwargs):
+        u"""Run command ``cmd`` and return it's stdout as unicode."""
+
+        try:
+            proc = subprocess.Popen(
+                cmd
+                , stdout = subprocess.PIPE
+                , stderr = subprocess.PIPE
+                , **kwargs
+            )
+            out, err = proc.communicate()
+
+            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
+
+            if proc.returncode != 0:
+                raise self.severe(
+                    u"command '%s' failed with return code %d"
+                    % (cmd, proc.returncode)
+                )
+        except OSError as exc:
+            raise self.severe(u"problems with '%s' directive: %s."
+                              % (self.name, ErrorString(exc)))
+        return out
+
+    def nestedParse(self, lines, fname):
+        content = ViewList()
+        node = nodes.section()
+
+        if "debug" in self.options:
+            code_block = "\n\n.. code-block:: rst\n    :linenos:\n"
+            for l in lines.split("\n"):
+                code_block += "\n    " + l
+            lines = code_block + "\n\n"
+
+        line_regex = re.compile("^#define LINENO (\S+)\#([0-9]+)$")
+        ln = 0
+        n = 0
+        f = fname
+
+        for line in lines.split("\n"):
+            n = n + 1
+            match = line_regex.search(line)
+            if match:
+                new_f = match.group(1)
+
+                # Sphinx parser is lazy: it stops parsing contents in the
+                # middle, if it is too big. So, handle it per input file
+                if new_f != f and content:
+                    self.do_parse(content, node)
+                    content = ViewList()
+
+                f = new_f
+
+                # sphinx counts lines from 0
+                ln = int(match.group(2)) - 1
+            else:
+                content.append(line, f, ln)
+
+        kernellog.info(self.state.document.settings.env.app, "%s: parsed %i lines" % (fname, n))
+
+        if content:
+            self.do_parse(content, node)
+
+        return node.children
+
+    def do_parse(self, content, node):
+        if Use_SSI:
+            with switch_source_input(self.state, content):
+                self.state.nested_parse(content, 0, node, match_titles=1)
+        else:
+            buf  = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
+
+            self.state.memo.title_styles  = []
+            self.state.memo.section_level = 0
+            self.state.memo.reporter      = AutodocReporter(content, self.state.memo.reporter)
+            try:
+                self.state.nested_parse(content, 0, node, match_titles=1)
+            finally:
+                self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf
diff --git a/doc/sphinx/kernel_feat.py b/doc/sphinx/kernel_feat.py
new file mode 100644
index 0000000..2fee04f
--- /dev/null
+++ b/doc/sphinx/kernel_feat.py
@@ -0,0 +1,169 @@
+# coding=utf-8
+# SPDX-License-Identifier: GPL-2.0
+#
+u"""
+    kernel-feat
+    ~~~~~~~~~~~
+
+    Implementation of the ``kernel-feat`` reST-directive.
+
+    :copyright:  Copyright (C) 2016  Markus Heiser
+    :copyright:  Copyright (C) 2016-2019  Mauro Carvalho Chehab
+    :maintained-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+    :license:    GPL Version 2, June 1991 see Linux/COPYING for details.
+
+    The ``kernel-feat`` (:py:class:`KernelFeat`) directive calls the
+    scripts/get_feat.pl script to parse the Kernel feature files.
+
+    Overview of directive's argument and options.
+
+    .. code-block:: rst
+
+        .. kernel-feat:: <ABI directory location>
+            :debug:
+
+    The argument ``<ABI directory location>`` is required. It contains the
+    location of the ABI files to be parsed.
+
+    ``debug``
+      Inserts a code-block with the *raw* reST. Sometimes it is helpful to see
+      what reST is generated.
+
+"""
+
+import codecs
+import os
+import subprocess
+import sys
+
+from os import path
+
+from docutils import nodes, statemachine
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import directives, Directive
+from docutils.utils.error_reporting import ErrorString
+
+#
+# AutodocReporter is only good up to Sphinx 1.7
+#
+import sphinx
+
+Use_SSI = sphinx.__version__[:3] >= '1.7'
+if Use_SSI:
+    from sphinx.util.docutils import switch_source_input
+else:
+    from sphinx.ext.autodoc import AutodocReporter
+
+__version__  = '1.0'
+
+def setup(app):
+
+    app.add_directive("kernel-feat", KernelFeat)
+    return dict(
+        version = __version__
+        , parallel_read_safe = True
+        , parallel_write_safe = True
+    )
+
+class KernelFeat(Directive):
+
+    u"""KernelFeat (``kernel-feat``) directive"""
+
+    required_arguments = 1
+    optional_arguments = 2
+    has_content = False
+    final_argument_whitespace = True
+
+    option_spec = {
+        "debug"     : directives.flag
+    }
+
+    def warn(self, message, **replace):
+        replace["fname"]   = self.state.document.current_source
+        replace["line_no"] = replace.get("line_no", self.lineno)
+        message = ("%(fname)s:%(line_no)s: [kernel-feat WARN] : " + message) % replace
+        self.state.document.settings.env.app.warn(message, prefix="")
+
+    def run(self):
+
+        doc = self.state.document
+        if not doc.settings.file_insertion_enabled:
+            raise self.warning("docutils: file insertion disabled")
+
+        env = doc.settings.env
+        cwd = path.dirname(doc.current_source)
+        cmd = "get_feat.pl rest --dir "
+        cmd += self.arguments[0]
+
+        if len(self.arguments) > 1:
+            cmd += " --arch " + self.arguments[1]
+
+        srctree = path.abspath(os.environ["srctree"])
+
+        fname = cmd
+
+        # extend PATH with $(srctree)/scripts
+        path_env = os.pathsep.join([
+            srctree + os.sep + "scripts",
+            os.environ["PATH"]
+        ])
+        shell_env = os.environ.copy()
+        shell_env["PATH"]    = path_env
+        shell_env["srctree"] = srctree
+
+        lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
+        nodeList = self.nestedParse(lines, fname)
+        return nodeList
+
+    def runCmd(self, cmd, **kwargs):
+        u"""Run command ``cmd`` and return it's stdout as unicode."""
+
+        try:
+            proc = subprocess.Popen(
+                cmd
+                , stdout = subprocess.PIPE
+                , stderr = subprocess.PIPE
+                , **kwargs
+            )
+            out, err = proc.communicate()
+
+            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')
+
+            if proc.returncode != 0:
+                raise self.severe(
+                    u"command '%s' failed with return code %d"
+                    % (cmd, proc.returncode)
+                )
+        except OSError as exc:
+            raise self.severe(u"problems with '%s' directive: %s."
+                              % (self.name, ErrorString(exc)))
+        return out
+
+    def nestedParse(self, lines, fname):
+        content = ViewList()
+        node    = nodes.section()
+
+        if "debug" in self.options:
+            code_block = "\n\n.. code-block:: rst\n    :linenos:\n"
+            for l in lines.split("\n"):
+                code_block += "\n    " + l
+            lines = code_block + "\n\n"
+
+        for c, l in enumerate(lines.split("\n")):
+            content.append(l, fname, c)
+
+        buf  = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
+
+        if Use_SSI:
+            with switch_source_input(self.state, content):
+                self.state.nested_parse(content, 0, node, match_titles=1)
+        else:
+            self.state.memo.title_styles  = []
+            self.state.memo.section_level = 0
+            self.state.memo.reporter      = AutodocReporter(content, self.state.memo.reporter)
+            try:
+                self.state.nested_parse(content, 0, node, match_titles=1)
+            finally:
+                self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf
+
+        return node.children
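
A usage sketch for the new directive (the directory argument is a placeholder; per run() above, an optional second argument is forwarded to get_feat.pl as --arch, and :debug: dumps the generated reST as a literal block):

    .. kernel-feat:: <feature directory location>
        :debug:
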
diff --git a/doc/sphinx/kerneldoc.py b/doc/sphinx/kerneldoc.py
index 4bcbd6a..e9857ab 100644
--- a/doc/sphinx/kerneldoc.py
+++ b/doc/sphinx/kerneldoc.py
@@ -62,6 +62,7 @@
         'export': directives.unchanged,
         'internal': directives.unchanged,
         'identifiers': directives.unchanged,
+        'no-identifiers': directives.unchanged,
         'functions': directives.unchanged,
     }
     has_content = False
@@ -70,6 +71,11 @@
         env = self.state.document.settings.env
         cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']
 
+        # Pass the version string to kernel-doc, as it needs to use a different
+        # dialect, depending on what the C domain supports for each specific
+        # Sphinx version.
+        cmd += ['-sphinx-version', sphinx.__version__]
+
         filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
         export_file_patterns = []
 
@@ -99,6 +105,12 @@
             else:
                 cmd += ['-no-doc-sections']
 
+        if 'no-identifiers' in self.options:
+            no_identifiers = self.options.get('no-identifiers').split()
+            if no_identifiers:
+                for i in no_identifiers:
+                    cmd += ['-nosymbol', i]
+
         for pattern in export_file_patterns:
             for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
                 env.note_dependency(os.path.abspath(f))
@@ -136,7 +148,8 @@
                     lineoffset = int(match.group(1)) - 1
                     # we must eat our comments since the upset the markup
                 else:
-                    result.append(line, filename, lineoffset)
+                    doc = env.srcdir + "/" + env.docname + ":" + str(self.lineno)
+                    result.append(line, doc + ": " + filename, lineoffset)
                     lineoffset += 1
 
             node = nodes.section()
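
A minimal sketch of the new kernel-doc directive option as used from a reST source (the header path and symbol names are hypothetical); each listed name is passed to scripts/kernel-doc as a separate -nosymbol argument:

    .. kernel-doc:: include/example.h
       :no-identifiers: example_helper example_private_fn
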
diff --git a/doc/sphinx/kernellog.py b/doc/sphinx/kernellog.py
index af924f5..8ac7d27 100644
--- a/doc/sphinx/kernellog.py
+++ b/doc/sphinx/kernellog.py
@@ -25,4 +25,8 @@
     else:
         app.verbose(message)
 
-
+def info(app, message):
+    if UseLogging:
+        logger.info(message)
+    else:
+        app.info(message)
diff --git a/doc/sphinx/kfigure.py b/doc/sphinx/kfigure.py
index fbfe669..7887048 100644
--- a/doc/sphinx/kfigure.py
+++ b/doc/sphinx/kfigure.py
@@ -29,7 +29,7 @@
 
     Used tools:
 
-    * ``dot(1)``: Graphviz (http://www.graphviz.org). If Graphviz is not
+    * ``dot(1)``: Graphviz (https://www.graphviz.org). If Graphviz is not
       available, the DOT language is inserted as literal-block.
 
     * SVG to PDF: To generate PDF, you need at least one of this tools:
@@ -41,7 +41,7 @@
     * generate PDF from SVG / used by PDF (LaTeX) builder
 
     * generate SVG (html-builder) and PDF (latex-builder) from DOT files.
-      DOT: see http://www.graphviz.org/content/dot-language
+      DOT: see https://www.graphviz.org/content/dot-language
 
     """
 
@@ -182,7 +182,7 @@
         kernellog.verbose(app, "use dot(1) from: " + dot_cmd)
     else:
         kernellog.warn(app, "dot(1) not found, for better output quality install "
-                       "graphviz from http://www.graphviz.org")
+                       "graphviz from https://www.graphviz.org")
     if convert_cmd:
         kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
     else:
diff --git a/doc/sphinx/load_config.py b/doc/sphinx/load_config.py
index 301a21a..eeb394b 100644
--- a/doc/sphinx/load_config.py
+++ b/doc/sphinx/load_config.py
@@ -21,6 +21,29 @@
         and os.path.normpath(namespace["__file__"]) != os.path.normpath(config_file) ):
         config_file = os.path.abspath(config_file)
 
+        # Let's avoid one conf.py file just due to latex_documents
+        start = config_file.find('Documentation/')
+        if start >= 0:
+            start = config_file.find('/', start + 1)
+
+        end = config_file.rfind('/')
+        if start >= 0 and end > 0:
+            dir = config_file[start + 1:end]
+
+            print("source directory: %s" % dir)
+            new_latex_docs = []
+            latex_documents = namespace['latex_documents']
+
+            for l in latex_documents:
+                if l[0].find(dir + '/') == 0:
+                    has = True
+                    fn = l[0][len(dir) + 1:]
+                    new_latex_docs.append((fn, l[1], l[2], l[3], l[4]))
+                    break
+
+            namespace['latex_documents'] = new_latex_docs
+
+        # If there is an extra conf.py file, load it
         if os.path.isfile(config_file):
             sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
             config = namespace.copy()
@@ -29,4 +52,6 @@
             del config['__file__']
             namespace.update(config)
         else:
-            sys.stderr.write("WARNING: additional sphinx-config not found: %s\n" % config_file)
+            config = namespace.copy()
+            config['tags'].add("subproject")
+            namespace.update(config)
diff --git a/doc/sphinx/maintainers_include.py b/doc/sphinx/maintainers_include.py
new file mode 100755
index 0000000..dc8fed4
--- /dev/null
+++ b/doc/sphinx/maintainers_include.py
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0
+# -*- coding: utf-8; mode: python -*-
+# pylint: disable=R0903, C0330, R0914, R0912, E0401
+
+u"""
+    maintainers-include
+    ~~~~~~~~~~~~~~~~~~~
+
+    Implementation of the ``maintainers-include`` reST-directive.
+
+    :copyright:  Copyright (C) 2019  Kees Cook <keescook@chromium.org>
+    :license:    GPL Version 2, June 1991 see linux/COPYING for details.
+
+    The ``maintainers-include`` reST-directive performs extensive parsing
+    specific to the Linux kernel's standard "MAINTAINERS" file, in an
+    effort to avoid needing to heavily mark up the original plain text.
+"""
+
+import sys
+import re
+import os.path
+
+from docutils import statemachine
+from docutils.utils.error_reporting import ErrorString
+from docutils.parsers.rst import Directive
+from docutils.parsers.rst.directives.misc import Include
+
+__version__  = '1.0'
+
+def setup(app):
+    app.add_directive("maintainers-include", MaintainersInclude)
+    return dict(
+        version = __version__,
+        parallel_read_safe = True,
+        parallel_write_safe = True
+    )
+
+class MaintainersInclude(Include):
+    u"""MaintainersInclude (``maintainers-include``) directive"""
+    required_arguments = 0
+
+    def parse_maintainers(self, path):
+        """Parse all the MAINTAINERS lines into ReST for human-readability"""
+
+        result = list()
+        result.append(".. _maintainers:")
+        result.append("")
+
+        # Poor man's state machine.
+        descriptions = False
+        maintainers = False
+        subsystems = False
+
+        # Field letter to field name mapping.
+        field_letter = None
+        fields = dict()
+
+        prev = None
+        field_prev = ""
+        field_content = ""
+
+        for line in open(path):
+            if sys.version_info.major == 2:
+                line = unicode(line, 'utf-8')
+            # Have we reached the end of the preformatted Descriptions text?
+            if descriptions and line.startswith('Maintainers'):
+                descriptions = False
+                # Ensure a blank line following the last "|"-prefixed line.
+                result.append("")
+
+            # Start subsystem processing? This is to skip processing the text
+            # between the Maintainers heading and the first subsystem name.
+            if maintainers and not subsystems:
+                if re.search('^[A-Z0-9]', line):
+                    subsystems = True
+
+            # Drop needless input whitespace.
+            line = line.rstrip()
+
+            # Linkify all non-wildcard refs to ReST files in Documentation/.
+            pat = '(Documentation/([^\s\?\*]*)\.rst)'
+            m = re.search(pat, line)
+            if m:
+                # maintainers.rst is in a subdirectory, so include "../".
+                line = re.sub(pat, ':doc:`%s <../%s>`' % (m.group(2), m.group(2)), line)
+
+            # Check state machine for output rendering behavior.
+            output = None
+            if descriptions:
+                # Escape the escapes in preformatted text.
+                output = "| %s" % (line.replace("\\", "\\\\"))
+                # Look for and record field letter to field name mappings:
+                #   R: Designated *reviewer*: FullName <address@domain>
+                m = re.search("\s(\S):\s", line)
+                if m:
+                    field_letter = m.group(1)
+                if field_letter and not field_letter in fields:
+                    m = re.search("\*([^\*]+)\*", line)
+                    if m:
+                        fields[field_letter] = m.group(1)
+            elif subsystems:
+                # Skip empty lines: subsystem parser adds them as needed.
+                if len(line) == 0:
+                    continue
+                # Subsystem fields are batched into "field_content"
+                if line[1] != ':':
+                    # Render a subsystem entry as:
+                    #   SUBSYSTEM NAME
+                    #   ~~~~~~~~~~~~~~
+
+                    # Flush pending field content.
+                    output = field_content + "\n\n"
+                    field_content = ""
+
+                    # Collapse whitespace in subsystem name.
+                    heading = re.sub("\s+", " ", line)
+                    output = output + "%s\n%s" % (heading, "~" * len(heading))
+                    field_prev = ""
+                else:
+                    # Render a subsystem field as:
+                    #   :Field: entry
+                    #           entry...
+                    field, details = line.split(':', 1)
+                    details = details.strip()
+
+                    # Mark paths (and regexes) as literal text for improved
+                    # readability and to escape any escapes.
+                    if field in ['F', 'N', 'X', 'K']:
+                        # But only if not already marked :)
+                        if not ':doc:' in details:
+                            details = '``%s``' % (details)
+
+                    # Comma separate email field continuations.
+                    if field == field_prev and field_prev in ['M', 'R', 'L']:
+                        field_content = field_content + ","
+
+                    # Do not repeat field names, so that field entries
+                    # will be collapsed together.
+                    if field != field_prev:
+                        output = field_content + "\n"
+                        field_content = ":%s:" % (fields.get(field, field))
+                    field_content = field_content + "\n\t%s" % (details)
+                    field_prev = field
+            else:
+                output = line
+
+            # Re-split on any added newlines in any above parsing.
+            if output != None:
+                for separated in output.split('\n'):
+                    result.append(separated)
+
+            # Update the state machine when we find heading separators.
+            if line.startswith('----------'):
+                if prev.startswith('Descriptions'):
+                    descriptions = True
+                if prev.startswith('Maintainers'):
+                    maintainers = True
+
+            # Retain previous line for state machine transitions.
+            prev = line
+
+        # Flush pending field contents.
+        if field_content != "":
+            for separated in field_content.split('\n'):
+                result.append(separated)
+
+        output = "\n".join(result)
+        # For debugging the pre-rendered results...
+        #print(output, file=open("/tmp/MAINTAINERS.rst", "w"))
+
+        self.state_machine.insert_input(
+          statemachine.string2lines(output), path)
+
+    def run(self):
+        """Include the MAINTAINERS file as part of this reST file."""
+        if not self.state.document.settings.file_insertion_enabled:
+            raise self.warning('"%s" directive disabled.' % self.name)
+
+        # Walk up source path directories to find Documentation/../
+        path = self.state_machine.document.attributes['source']
+        path = os.path.realpath(path)
+        tail = path
+        while tail != "Documentation" and tail != "":
+            (path, tail) = os.path.split(path)
+
+        # Append "MAINTAINERS"
+        path = os.path.join(path, "MAINTAINERS")
+
+        try:
+            self.state.document.settings.record_dependencies.add(path)
+            lines = self.parse_maintainers(path)
+        except IOError as error:
+            raise self.severe('Problems with "%s" directive path:\n%s.' %
+                      (self.name, ErrorString(error)))
+
+        return []
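
The directive takes no arguments; a page that wants the rendered MAINTAINERS content would simply contain (sketch):

    .. maintainers-include::
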
diff --git a/doc/sphinx/parallel-wrapper.sh b/doc/sphinx/parallel-wrapper.sh
new file mode 100644
index 0000000..e54c44c
--- /dev/null
+++ b/doc/sphinx/parallel-wrapper.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Figure out if we should follow a specific parallelism from the make
+# environment (as exported by scripts/jobserver-exec), or fall back to
+# the "auto" parallelism when "-jN" is not specified at the top-level
+# "make" invocation.
+
+sphinx="$1"
+shift || true
+
+parallel="$PARALLELISM"
+if [ -z "$parallel" ] ; then
+	# If no parallelism is specified at the top-level make, then
+	# fall back to the expected "-jauto" mode that the "htmldocs"
+	# target has had.
+	auto=$(perl -e 'open IN,"'"$sphinx"' --version 2>&1 |";
+			while (<IN>) {
+				if (m/([\d\.]+)/) {
+					print "auto" if ($1 >= "1.7")
+				}
+			}
+			close IN')
+	if [ -n "$auto" ] ; then
+		parallel="$auto"
+	fi
+fi
+# Only if some parallelism has been determined do we add the -jN option.
+if [ -n "$parallel" ] ; then
+	parallel="-j$parallel"
+fi
+
+exec "$sphinx" $parallel "$@"
diff --git a/doc/sphinx/parse-headers.pl b/doc/sphinx/parse-headers.pl
index c518050..b063f2f 100755
--- a/doc/sphinx/parse-headers.pl
+++ b/doc/sphinx/parse-headers.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/env perl
 use strict;
 use Text::Tabs;
 use Getopt::Long;
@@ -110,7 +110,7 @@
 	    ) {
 		my $s = $1;
 
-		$structs{$s} = "struct :c:type:`$s`\\ ";
+		$structs{$s} = "struct $s\\ ";
 		next;
 	}
 }
@@ -393,7 +393,7 @@
 
 Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>.
 
-License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.
+License GPLv2: GNU GPL version 2 <https://gnu.org/licenses/gpl.html>.
 
 This is free software: you are free to change and redistribute it.
 There is NO WARRANTY, to the extent permitted by law.
diff --git a/doc/sphinx/requirements.txt b/doc/sphinx/requirements.txt
index 742be3e..5030d34 100644
--- a/doc/sphinx/requirements.txt
+++ b/doc/sphinx/requirements.txt
@@ -1,3 +1,4 @@
-docutils==0.12
-Sphinx==1.4.9
+docutils
+Sphinx==2.4.4
 sphinx_rtd_theme
+six
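
These requirements are typically installed into a Python virtual environment before building the documentation, e.g.:

    pip install -r doc/sphinx/requirements.txt
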
diff --git a/doc/uImage.FIT/source_file_format.txt b/doc/uImage.FIT/source_file_format.txt
index 633f227..00ed3eb 100644
--- a/doc/uImage.FIT/source_file_format.txt
+++ b/doc/uImage.FIT/source_file_format.txt
@@ -126,12 +126,10 @@
     load addresses supplied within sub-image nodes. May be omitted when no
     entry or load addresses are used.
 
-  Mandatory node:
+  Mandatory nodes:
   - images : This node contains a set of sub-nodes, each of them representing
     single component sub-image (like kernel, ramdisk, etc.). At least one
     sub-image is required.
-
-  Optional node:
   - configurations : Contains a set of available configuration nodes and
     defines a default configuration.
 
@@ -169,8 +167,8 @@
     to "none".
 
   Conditionally mandatory property:
-  - os : OS name, mandatory for types "kernel" and "ramdisk". Valid OS names
-    are: "openbsd", "netbsd", "freebsd", "4_4bsd", "linux", "svr4", "esix",
+  - os : OS name, mandatory for type "kernel". Valid OS names are:
+    "openbsd", "netbsd", "freebsd", "4_4bsd", "linux", "svr4", "esix",
     "solaris", "irix", "sco", "dell", "ncr", "lynxos", "vxworks", "psos", "qnx",
     "u-boot", "rtems", "unity", "integrity".
   - arch : Architecture name, mandatory for types: "standalone", "kernel",
@@ -179,10 +177,13 @@
     "sparc64", "m68k", "microblaze", "nios2", "blackfin", "avr32", "st200",
     "sandbox".
   - entry : entry point address, address size is determined by
-    '#address-cells' property of the root node. Mandatory for for types:
-    "standalone" and "kernel".
+    '#address-cells' property of the root node.
+    Mandatory for types: "firmware" and "kernel".
   - load : load address, address size is determined by '#address-cells'
-    property of the root node. Mandatory for types: "standalone" and "kernel".
+    property of the root node.
+    Mandatory for types: "firmware" and "kernel".
+  - compatible : compatible method for loading the image.
+    Mandatory for type "fpga", and for images that do not specify a load address.
 
   Optional nodes:
   - hash-1 : Each hash sub-node represents separate hash or checksum
@@ -205,9 +206,8 @@
 6) '/configurations' node
 -------------------------
 
-The 'configurations' node is optional. If present, it allows to create a
-convenient, labeled boot configurations, which combine together kernel images
-with their ramdisks and fdt blobs.
+The 'configurations' node creates convenient, labeled boot configurations,
+which combine kernel images with their ramdisks and fdt blobs.
 
 The 'configurations' node has has the following structure:
 
@@ -236,27 +236,22 @@
 o config-1
   |- description = "configuration description"
   |- kernel = "kernel sub-node unit name"
-  |- ramdisk = "ramdisk sub-node unit name"
   |- fdt = "fdt sub-node unit-name" [, "fdt overlay sub-node unit-name", ...]
-  |- fpga = "fpga sub-node unit-name"
   |- loadables = "loadables sub-node unit-name"
   |- compatible = "vendor,board-style device tree compatible string"
 
 
   Mandatory properties:
   - description : Textual configuration description.
-  - kernel : Unit name of the corresponding kernel image (image sub-node of a
-    "kernel" type).
+  - kernel or firmware: Unit name of the corresponding kernel or firmware
+    (u-boot, op-tee, etc) image. If both "kernel" and "firmware" are specified,
+    control is passed to the firmware image.
 
   Optional properties:
-  - ramdisk : Unit name of the corresponding ramdisk image (component image
-    node of a "ramdisk" type).
   - fdt : Unit name of the corresponding fdt blob (component image node of a
     "fdt type"). Additional fdt overlay nodes can be supplied which signify
     that the resulting device tree blob is generated by the first base fdt
     blob with all subsequent overlays applied.
-  - setup : Unit name of the corresponding setup binary (used for booting
-    an x86 kernel). This contains the setup.bin file built by the kernel.
   - fpga : Unit name of the corresponding fpga bitstream blob
     (component image node of a "fpga type").
   - loadables : Unit name containing a list of additional binaries to be
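
To make the kernel-or-firmware rule concrete, a configuration that hands control to a firmware image rather than a kernel might look roughly like this (unit names are illustrative), using the notation from the structure overview above:

    o config-2
      |- description = "boot U-Boot with FPGA bitstream"
      |- firmware = "firmware sub-node unit name"
      |- fpga = "fpga sub-node unit-name"
      |- fdt = "fdt sub-node unit-name"
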
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 516cf1d..6dda8cb 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -56,6 +56,13 @@
   -rst			Output reStructuredText format.
   -none			Do not output documentation, only warnings.
 
+Output format selection modifier (affects only ReST output):
+
+  -sphinx-version	Use the ReST C domain dialect compatible with a
+			specific Sphinx version.
+			If not specified, kernel-doc will auto-detect using
+			the sphinx-build version found on PATH.
+
 Output selection (mutually exclusive):
   -export		Only output documentation for symbols that have been
 			exported using EXPORT_SYMBOL() or EXPORT_SYMBOL_GPL()
@@ -66,9 +73,8 @@
   -function NAME	Only output documentation for the given function(s)
 			or DOC: section title(s). All other functions and DOC:
 			sections are ignored. May be specified multiple times.
-  -nofunction NAME	Do NOT output documentation for the given function(s);
-			only output documentation for the other functions and
-			DOC: sections. May be specified multiple times.
+  -nosymbol NAME	Exclude the specified symbols from the output
+		        documentation. May be specified multiple times.
 
 Output selection modifiers:
   -no-doc-sections	Do not output DOC: sections.
@@ -81,6 +87,7 @@
 Other parameters:
   -v			Verbose output, more warnings and other information.
   -h			Print this help.
+  -Werror		Treat warnings as errors.
 
 EOF
     print $message;
@@ -213,7 +220,9 @@
 my $type_constant2 = '\%([-_\w]+)';
 my $type_func = '(\w+)\(\)';
 my $type_param = '\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)';
+my $type_param_ref = '([\!]?)\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)';
 my $type_fp_param = '\@(\w+)\(\)';  # Special RST handling for func ptr params
+my $type_fp_param2 = '\@(\w+->\S+)\(\)';  # Special RST handling for structs with func ptr params
 my $type_env = '(\$\w+)';
 my $type_enum = '\&(enum\s*([_\w]+))';
 my $type_struct = '\&(struct\s*([_\w]+))';
@@ -236,6 +245,7 @@
                       [$type_typedef, "\\\\fI\$1\\\\fP"],
                       [$type_union, "\\\\fI\$1\\\\fP"],
                       [$type_param, "\\\\fI\$1\\\\fP"],
+                      [$type_param_ref, "\\\\fI\$1\$2\\\\fP"],
                       [$type_member, "\\\\fI\$1\$2\$3\\\\fP"],
                       [$type_fallback, "\\\\fI\$1\\\\fP"]
 		     );
@@ -249,14 +259,15 @@
                        [$type_member_func, "\\:c\\:type\\:`\$1\$2\$3\\\\(\\\\) <\$1>`"],
                        [$type_member, "\\:c\\:type\\:`\$1\$2\$3 <\$1>`"],
 		       [$type_fp_param, "**\$1\\\\(\\\\)**"],
-                       [$type_func, "\\:c\\:func\\:`\$1()`"],
+		       [$type_fp_param2, "**\$1\\\\(\\\\)**"],
+                       [$type_func, "\$1()"],
                        [$type_enum, "\\:c\\:type\\:`\$1 <\$2>`"],
                        [$type_struct, "\\:c\\:type\\:`\$1 <\$2>`"],
                        [$type_typedef, "\\:c\\:type\\:`\$1 <\$2>`"],
                        [$type_union, "\\:c\\:type\\:`\$1 <\$2>`"],
                        # in rst this can refer to any type
                        [$type_fallback, "\\:c\\:type\\:`\$1`"],
-                       [$type_param, "**\$1**"]
+                       [$type_param_ref, "**\$1\$2**"]
 		      );
 my $blankline_rst = "\n";
 
@@ -266,9 +277,12 @@
 }
 
 my $kernelversion;
+my ($sphinx_major, $sphinx_minor, $sphinx_patch);
+
 my $dohighlight = "";
 
 my $verbose = 0;
+my $Werror = 0;
 my $output_mode = "rst";
 my $output_preformatted = 0;
 my $no_doc_sections = 0;
@@ -280,12 +294,11 @@
 use constant {
     OUTPUT_ALL          => 0, # output all symbols and doc sections
     OUTPUT_INCLUDE      => 1, # output only specified symbols
-    OUTPUT_EXCLUDE      => 2, # output everything except specified symbols
-    OUTPUT_EXPORTED     => 3, # output exported symbols
-    OUTPUT_INTERNAL     => 4, # output non-exported symbols
+    OUTPUT_EXPORTED     => 2, # output exported symbols
+    OUTPUT_INTERNAL     => 3, # output non-exported symbols
 };
 my $output_selection = OUTPUT_ALL;
-my $show_not_found = 0;
+my $show_not_found = 0;	# No longer used
 
 my @export_file_list;
 
@@ -307,6 +320,7 @@
 # CAVEAT EMPTOR!  Some of the others I localised may not want to be, which
 # could cause "use of undefined value" or other bugs.
 my ($function, %function_table, %parametertypes, $declaration_purpose);
+my %nosymbol_table = ();
 my $declaration_start_line;
 my ($type, $declaration_name, $return_type);
 my ($newsection, $newcontents, $prototype, $brcount, %source_map);
@@ -315,9 +329,21 @@
 	$verbose = "$ENV{'KBUILD_VERBOSE'}";
 }
 
+if (defined($ENV{'KDOC_WERROR'})) {
+	$Werror = "$ENV{'KDOC_WERROR'}";
+}
+
+if (defined($ENV{'KCFLAGS'})) {
+	my $kcflags = "$ENV{'KCFLAGS'}";
+
+	if ($kcflags =~ /Werror/) {
+		$Werror = 1;
+	}
+}
+
 # Generated docbook code is inserted in a template at a point where
 # docbook v3.1 requires a non-zero sequence of RefEntry's; see:
-# http://www.oasis-open.org/docbook/documentation/reference/html/refentry.html
+# https://www.oasis-open.org/docbook/documentation/reference/html/refentry.html
 # We keep track of number of generated entries and generate a dummy
 # if needs be to ensure the expanded template can be postprocessed
 # into html.
@@ -327,13 +353,14 @@
 
 # Parser states
 use constant {
-    STATE_NORMAL        => 0, # normal code
-    STATE_NAME          => 1, # looking for function name
-    STATE_BODY_MAYBE    => 2, # body - or maybe more description
-    STATE_BODY          => 3, # the body of the comment
-    STATE_PROTO         => 4, # scanning prototype
-    STATE_DOCBLOCK      => 5, # documentation block
-    STATE_INLINE        => 6, # gathering documentation outside main block
+    STATE_NORMAL        => 0,        # normal code
+    STATE_NAME          => 1,        # looking for function name
+    STATE_BODY_MAYBE    => 2,        # body - or maybe more description
+    STATE_BODY          => 3,        # the body of the comment
+    STATE_BODY_WITH_BLANK_LINE => 4, # the body, which has a blank line
+    STATE_PROTO         => 5,        # scanning prototype
+    STATE_DOCBLOCK      => 6,        # documentation block
+    STATE_INLINE        => 7,        # gathering doc outside main block
 };
 my $state;
 my $in_doc_sect;
@@ -413,10 +440,9 @@
 	$output_selection = OUTPUT_INCLUDE;
 	$function = shift @ARGV;
 	$function_table{$function} = 1;
-    } elsif ($cmd eq "nofunction") { # output all except specific functions
-	$output_selection = OUTPUT_EXCLUDE;
-	$function = shift @ARGV;
-	$function_table{$function} = 1;
+    } elsif ($cmd eq "nosymbol") { # Exclude specific symbols
+	my $symbol = shift @ARGV;
+	$nosymbol_table{$symbol} = 1;
     } elsif ($cmd eq "export") { # only exported symbols
 	$output_selection = OUTPUT_EXPORTED;
 	%function_table = ();
@@ -428,6 +454,8 @@
 	push(@export_file_list, $file);
     } elsif ($cmd eq "v") {
 	$verbose = 1;
+    } elsif ($cmd eq "Werror") {
+	$Werror = 1;
     } elsif (($cmd eq "h") || ($cmd eq "help")) {
 	usage();
     } elsif ($cmd eq 'no-doc-sections') {
@@ -435,7 +463,24 @@
     } elsif ($cmd eq 'enable-lineno') {
 	    $enable_lineno = 1;
     } elsif ($cmd eq 'show-not-found') {
-	$show_not_found = 1;
+	$show_not_found = 1;  # A no-op but don't fail
+    } elsif ($cmd eq "sphinx-version") {
+	my $ver_string = shift @ARGV;
+	if ($ver_string =~ m/^(\d+)(\.\d+)?(\.\d+)?/) {
+	    $sphinx_major = $1;
+	    if (defined($2)) {
+		$sphinx_minor = substr($2,1);
+	    } else {
+		$sphinx_minor = 0;
+	    }
+	    if (defined($3)) {
+		$sphinx_patch = substr($3,1)
+	    } else {
+		$sphinx_patch = 0;
+	    }
+	} else {
+	    die "Sphinx version should be in either major.minor or major.minor.patch format\n";
+	}
     } else {
 	# Unknown argument
         usage();
@@ -444,6 +489,51 @@
 
 # continue execution near EOF;
 
+# The C domain dialect changed on Sphinx 3. So, we need to check the
+# version in order to produce the right tags.
+sub findprog($)
+{
+	foreach(split(/:/, $ENV{PATH})) {
+		return "$_/$_[0]" if(-x "$_/$_[0]");
+	}
+}
+
+sub get_sphinx_version()
+{
+	my $ver;
+
+	my $cmd = "sphinx-build";
+	if (!findprog($cmd)) {
+		my $cmd = "sphinx-build3";
+		if (!findprog($cmd)) {
+			$sphinx_major = 1;
+			$sphinx_minor = 2;
+			$sphinx_patch = 0;
+			printf STDERR "Warning: Sphinx version not found. Using default (Sphinx version %d.%d.%d)\n",
+			       $sphinx_major, $sphinx_minor, $sphinx_patch;
+			return;
+		}
+	}
+
+	open IN, "$cmd --version 2>&1 |";
+	while (<IN>) {
+		if (m/^\s*sphinx-build\s+([\d]+)\.([\d\.]+)(\+\/[\da-f]+)?$/) {
+			$sphinx_major = $1;
+			$sphinx_minor = $2;
+			$sphinx_patch = $3;
+			last;
+		}
+		# Sphinx 1.2.x uses a different format
+		if (m/^\s*Sphinx.*\s+([\d]+)\.([\d\.]+)$/) {
+			$sphinx_major = $1;
+			$sphinx_minor = $2;
+			$sphinx_patch = $3;
+			last;
+		}
+	}
+	close IN;
+}
+
 # get kernel version from env
 sub get_kernel_version() {
     my $version = 'unknown kernel version';
@@ -510,11 +600,11 @@
         return;
     }
 
+    return if (defined($nosymbol_table{$name}));
+
     if (($output_selection == OUTPUT_ALL) ||
-	($output_selection == OUTPUT_INCLUDE &&
-	 defined($function_table{$name})) ||
-	($output_selection == OUTPUT_EXCLUDE &&
-	 !defined($function_table{$name})))
+	(($output_selection == OUTPUT_INCLUDE) &&
+	 defined($function_table{$name})))
     {
 	dump_section($file, $name, $contents);
 	output_blockhead({'sectionlist' => \@sectionlist,
@@ -597,10 +687,10 @@
 	$type = $args{'parametertypes'}{$parameter};
 	if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
 	    # pointer-to-function
-	    print ".BI \"" . $parenth . $1 . "\" " . $parameter . " \") (" . $2 . ")" . $post . "\"\n";
+	    print ".BI \"" . $parenth . $1 . "\" " . " \") (" . $2 . ")" . $post . "\"\n";
 	} else {
 	    $type =~ s/([^\*])$/$1 /;
-	    print ".BI \"" . $parenth . $type . "\" " . $parameter . " \"" . $post . "\"\n";
+	    print ".BI \"" . $parenth . $type . "\" " . " \"" . $post . "\"\n";
 	}
 	$count++;
 	$parenth = "";
@@ -740,6 +830,8 @@
     my ($parameter, $section);
 
     foreach $section (@{$args{'sectionlist'}}) {
+	next if (defined($nosymbol_table{$section}));
+
 	if ($output_selection != OUTPUT_INCLUDE) {
 	    print "**$section**\n\n";
 	}
@@ -825,16 +917,37 @@
     my ($parameter, $section);
     my $oldprefix = $lineprefix;
     my $start = "";
+    my $is_macro = 0;
 
-    if ($args{'typedef'}) {
-	print ".. c:type:: ". $args{'function'} . "\n\n";
-	print_lineno($declaration_start_line);
-	print "   **Typedef**: ";
-	$lineprefix = "";
-	output_highlight_rst($args{'purpose'});
-	$start = "\n\n**Syntax**\n\n  ``";
+    if ($sphinx_major < 3) {
+	if ($args{'typedef'}) {
+	    print ".. c:type:: ". $args{'function'} . "\n\n";
+	    print_lineno($declaration_start_line);
+	    print "   **Typedef**: ";
+	    $lineprefix = "";
+	    output_highlight_rst($args{'purpose'});
+	    $start = "\n\n**Syntax**\n\n  ``";
+	    $is_macro = 1;
+	} else {
+	    print ".. c:function:: ";
+	}
     } else {
-	print ".. c:function:: ";
+	if ($args{'typedef'} || $args{'functiontype'} eq "") {
+	    $is_macro = 1;
+	    print ".. c:macro:: ". $args{'function'} . "\n\n";
+	} else {
+	    print ".. c:function:: ";
+	}
+
+	if ($args{'typedef'}) {
+	    print_lineno($declaration_start_line);
+	    print "   **Typedef**: ";
+	    $lineprefix = "";
+	    output_highlight_rst($args{'purpose'});
+	    $start = "\n\n**Syntax**\n\n  ``";
+	} else {
+	    print "``" if ($is_macro);
+	}
     }
     if ($args{'functiontype'} ne "") {
 	$start .= $args{'functiontype'} . " " . $args{'function'} . " (";
@@ -853,15 +966,17 @@
 
 	if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
 	    # pointer-to-function
-	    print $1 . $parameter . ") (" . $2;
+	    print $1 . $parameter . ") (" . $2 . ")";
 	} else {
-	    print $type . " " . $parameter;
+	    print $type;
 	}
     }
-    if ($args{'typedef'}) {
-	print ");``\n\n";
+    if ($is_macro) {
+	print ")``\n\n";
     } else {
 	print ")\n\n";
+    }
+    if (!$args{'typedef'}) {
 	print_lineno($declaration_start_line);
 	$lineprefix = "   ";
 	output_highlight_rst($args{'purpose'});
@@ -876,7 +991,7 @@
 	$type = $args{'parametertypes'}{$parameter};
 
 	if ($type ne "") {
-	    print "``$type $parameter``\n";
+	    print "``$type``\n";
 	} else {
 	    print "``$parameter``\n";
 	}
@@ -917,9 +1032,14 @@
     my ($parameter);
     my $oldprefix = $lineprefix;
     my $count;
-    my $name = "enum " . $args{'enum'};
 
-    print "\n\n.. c:type:: " . $name . "\n\n";
+    if ($sphinx_major < 3) {
+	my $name = "enum " . $args{'enum'};
+	print "\n\n.. c:type:: " . $name . "\n\n";
+    } else {
+	my $name = $args{'enum'};
+	print "\n\n.. c:enum:: " . $name . "\n\n";
+    }
     print_lineno($declaration_start_line);
     $lineprefix = "   ";
     output_highlight_rst($args{'purpose'});
@@ -945,8 +1065,13 @@
     my %args = %{$_[0]};
     my ($parameter);
     my $oldprefix = $lineprefix;
-    my $name = "typedef " . $args{'typedef'};
+    my $name;
 
+    if ($sphinx_major < 3) {
+	$name = "typedef " . $args{'typedef'};
+    } else {
+	$name = $args{'typedef'};
+    }
     print "\n\n.. c:type:: " . $name . "\n\n";
     print_lineno($declaration_start_line);
     $lineprefix = "   ";
@@ -961,9 +1086,18 @@
     my %args = %{$_[0]};
     my ($parameter);
     my $oldprefix = $lineprefix;
-    my $name = $args{'type'} . " " . $args{'struct'};
 
-    print "\n\n.. c:type:: " . $name . "\n\n";
+    if ($sphinx_major < 3) {
+	my $name = $args{'type'} . " " . $args{'struct'};
+	print "\n\n.. c:type:: " . $name . "\n\n";
+    } else {
+	my $name = $args{'struct'};
+	if ($args{'type'} eq 'union') {
+	    print "\n\n.. c:union:: " . $name . "\n\n";
+	} else {
+	    print "\n\n.. c:struct:: " . $name . "\n\n";
+	}
+    }
     print_lineno($declaration_start_line);
     $lineprefix = "   ";
     output_highlight_rst($args{'purpose'});
@@ -1022,12 +1156,14 @@
     my $name = shift;
     my $functype = shift;
     my $func = "output_${functype}_$output_mode";
+
+    return if (defined($nosymbol_table{$name}));
+
     if (($output_selection == OUTPUT_ALL) ||
 	(($output_selection == OUTPUT_INCLUDE ||
 	  $output_selection == OUTPUT_EXPORTED) &&
 	 defined($function_table{$name})) ||
-	(($output_selection == OUTPUT_EXCLUDE ||
-	  $output_selection == OUTPUT_INTERNAL) &&
+	($output_selection == OUTPUT_INTERNAL &&
 	 !($functype eq "function" && defined($function_table{$name}))))
     {
 	&$func(@_);
@@ -1062,7 +1198,7 @@
     my $x = shift;
     my $file = shift;
 
-    if ($x =~ /(struct|union)\s+(\w+)\s*\{(.*)\}(\s*(__packed|__aligned|__attribute__\s*\(\([a-z0-9,_\s\(\)]*\)\)))*/) {
+    if ($x =~ /(struct|union)\s+(\w+)\s*\{(.*)\}(\s*(__packed|__aligned|____cacheline_aligned_in_smp|____cacheline_aligned|__attribute__\s*\(\([a-z0-9,_\s\(\)]*\)\)))*/) {
 	my $decl_type = $1;
 	$declaration_name = $2;
 	my $members = $3;
@@ -1073,11 +1209,15 @@
 	# strip comments:
 	$members =~ s/\/\*.*?\*\///gos;
 	# strip attributes
-	$members =~ s/\s*__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)//gi;
-	$members =~ s/\s*__aligned\s*\([^;]*\)//gos;
-	$members =~ s/\s*__packed\s*//gos;
-	$members =~ s/\s*CRYPTO_MINALIGN_ATTR//gos;
+	$members =~ s/\s*__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)/ /gi;
+	$members =~ s/\s*__aligned\s*\([^;]*\)/ /gos;
+	$members =~ s/\s*__packed\s*/ /gos;
+	$members =~ s/\s*CRYPTO_MINALIGN_ATTR/ /gos;
+	$members =~ s/\s*____cacheline_aligned_in_smp/ /gos;
+	$members =~ s/\s*____cacheline_aligned/ /gos;
+
 	# replace DECLARE_BITMAP
+	$members =~ s/__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)/DECLARE_BITMAP($1, __ETHTOOL_LINK_MODE_MASK_NBITS)/gos;
 	$members =~ s/DECLARE_BITMAP\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos;
 	# replace DECLARE_HASHTABLE
 	$members =~ s/DECLARE_HASHTABLE\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[1 << (($2) - 1)\]/gos;
@@ -1204,6 +1344,8 @@
 	my $functype = shift;
 	my $name = shift;
 
+	return 0 if (defined($nosymbol_table{$name}));
+
 	return 1 if ($output_selection == OUTPUT_ALL);
 
 	if ($output_selection == OUTPUT_EXPORTED) {
@@ -1227,27 +1369,28 @@
 			return 0;
 		}
 	}
-	if ($output_selection == OUTPUT_EXCLUDE) {
-		if (!defined($function_table{$name})) {
-			return 1;
-		} else {
-			return 0;
-		}
-	}
 	die("Please add the new output type at show_warnings()");
 }
 
 sub dump_enum($$) {
     my $x = shift;
     my $file = shift;
+    my $members;
+
 
     $x =~ s@/\*.*?\*/@@gos;	# strip comments.
     # strip #define macros inside enums
     $x =~ s@#\s*((define|ifdef)\s+|endif)[^;]*;@@gos;
 
-    if ($x =~ /enum\s+(\w+)\s*\{(.*)\}/) {
+    if ($x =~ /typedef\s+enum\s*\{(.*)\}\s*(\w*)\s*;/) {
+	$declaration_name = $2;
+	$members = $1;
+    } elsif ($x =~ /enum\s+(\w*)\s*\{(.*)\}/) {
 	$declaration_name = $1;
-	my $members = $2;
+	$members = $2;
+    }
+
+    if ($members) {
 	my %_members;
 
 	$members =~ s/\s+$//;
@@ -1282,27 +1425,31 @@
 			    'sections' => \%sections,
 			    'purpose' => $declaration_purpose
 			   });
-    }
-    else {
+    } else {
 	print STDERR "${file}:$.: error: Cannot parse enum!\n";
 	++$errors;
     }
 }
 
+my $typedef_type = qr { ((?:\s+[\w\*]+\b){1,8})\s* }x;
+my $typedef_ident = qr { \*?\s*(\w\S+)\s* }x;
+my $typedef_args = qr { \s*\((.*)\); }x;
+
+my $typedef1 = qr { typedef$typedef_type\($typedef_ident\)$typedef_args }x;
+my $typedef2 = qr { typedef$typedef_type$typedef_ident$typedef_args }x;
+
 sub dump_typedef($$) {
     my $x = shift;
     my $file = shift;
 
     $x =~ s@/\*.*?\*/@@gos;	# strip comments.
 
-    # Parse function prototypes
-    if ($x =~ /typedef\s+(\w+)\s*\(\*\s*(\w\S+)\s*\)\s*\((.*)\);/ ||
-	$x =~ /typedef\s+(\w+)\s*(\w\S+)\s*\s*\((.*)\);/) {
-
-	# Function typedefs
+    # Parse function typedef prototypes
+    if ($x =~ $typedef1 || $x =~ $typedef2) {
 	$return_type = $1;
 	$declaration_name = $2;
 	my $args = $3;
+	$return_type =~ s/^\s+//;
 
 	create_parameterlist($args, ',', $file, $declaration_name);
 
@@ -1369,25 +1516,27 @@
     foreach my $arg (split($splitter, $args)) {
 	# strip comments
 	$arg =~ s/\/\*.*\*\///;
+	$arg =~ s/\s*EFIAPI\s*/ /g;
 	# strip leading/trailing spaces
 	$arg =~ s/^\s*//;
 	$arg =~ s/\s*$//;
 	$arg =~ s/\s+/ /;
 
+
 	if ($arg =~ /^#/) {
 	    # Treat preprocessor directive as a typeless variable just to fill
 	    # corresponding data structures "correctly". Catch it later in
 	    # output_* subs.
-	    push_parameter($arg, "", $file);
+	    push_parameter($arg, "", "", $file);
 	} elsif ($arg =~ m/\(.+\)\s*\(/) {
 	    # pointer-to-function
 	    $arg =~ tr/#/,/;
-	    $arg =~ m/[^\(]+\([\w\s]*\*?\s*([\w\.]*)\s*\)/;
+	    $arg =~ m/[^\(]+\(\s*\*?\s*([\w\.]*)\s*\)/;
 	    $param = $1;
 	    $type = $arg;
 	    $type =~ s/([^\(]+\(\*?)\s*$param/$1/;
 	    save_struct_actual($param);
-	    push_parameter($param, $type, $file, $declaration_name);
+	    push_parameter($param, $type, $arg, $file, $declaration_name);
 	} elsif ($arg) {
 	    $arg =~ s/\s*:\s*/:/g;
 	    $arg =~ s/\s*\[/\[/g;
@@ -1412,26 +1561,28 @@
 	    foreach $param (@args) {
 		if ($param =~ m/^(\*+)\s*(.*)/) {
 		    save_struct_actual($2);
-		    push_parameter($2, "$type $1", $file, $declaration_name);
+
+		    push_parameter($2, "$type $1", $arg, $file, $declaration_name);
 		}
 		elsif ($param =~ m/(.*?):(\d+)/) {
 		    if ($type ne "") { # skip unnamed bit-fields
 			save_struct_actual($1);
-			push_parameter($1, "$type:$2", $file, $declaration_name)
+			push_parameter($1, "$type:$2", $arg, $file, $declaration_name)
 		    }
 		}
 		else {
 		    save_struct_actual($param);
-		    push_parameter($param, $type, $file, $declaration_name);
+		    push_parameter($param, $type, $arg, $file, $declaration_name);
 		}
 	    }
 	}
     }
 }
 
-sub push_parameter($$$$) {
+sub push_parameter($$$$$) {
 	my $param = shift;
 	my $type = shift;
+	my $org_arg = shift;
 	my $file = shift;
 	my $declaration_name = shift;
 
@@ -1449,6 +1600,10 @@
 	      # handles unnamed variable parameters
 	      $param = "...";
 	    }
+	    elsif ($param =~ /\w\.\.\.$/) {
+	      # for named variable parameters of the form `x...`, remove the dots
+	      $param =~ s/\.\.\.$//;
+	    }
 	    if (!defined $parameterdescs{$param} || $parameterdescs{$param} eq "") {
 		$parameterdescs{$param} = "variable arguments";
 	    }
@@ -1491,8 +1646,8 @@
 	# "[blah" in a parameter string;
 	###$param =~ s/\s*//g;
 	push @parameterlist, $param;
-	$type =~ s/\s\s+/ /g;
-	$parametertypes{$param} = $type;
+	$org_arg =~ s/\s\s+/ /g;
+	$parametertypes{$param} = $org_arg;
 }
 
 sub check_sections($$$$$) {
@@ -1566,6 +1721,8 @@
     my $file = shift;
     my $noret = 0;
 
+    print_lineno($new_start_line);
+
     $prototype =~ s/^static +//;
     $prototype =~ s/^extern +//;
     $prototype =~ s/^asmlinkage +//;
@@ -1580,6 +1737,7 @@
     $prototype =~ s/__must_check +//;
     $prototype =~ s/__weak +//;
     $prototype =~ s/__sched +//;
+    $prototype =~ s/__printf\s*\(\s*\d*\s*,\s*\d*\s*\) +//;
     my $define = $prototype =~ s/^#\s*define\s+//; #ak added
     $prototype =~ s/__attribute__\s*\(\(
             (?:
@@ -1640,30 +1798,48 @@
 	return;
     }
 
-	my $prms = join " ", @parameterlist;
-	check_sections($file, $declaration_name, "function", $sectcheck, $prms);
+    my $prms = join " ", @parameterlist;
+    check_sections($file, $declaration_name, "function", $sectcheck, $prms);
 
-        # This check emits a lot of warnings at the moment, because many
-        # functions don't have a 'Return' doc section. So until the number
-        # of warnings goes sufficiently down, the check is only performed in
-        # verbose mode.
-        # TODO: always perform the check.
-        if ($verbose && !$noret) {
-                check_return_section($file, $declaration_name, $return_type);
-        }
+    # This check emits a lot of warnings at the moment, because many
+    # functions don't have a 'Return' doc section. So until the number
+    # of warnings goes sufficiently down, the check is only performed in
+    # verbose mode.
+    # TODO: always perform the check.
+    if ($verbose && !$noret) {
+	    check_return_section($file, $declaration_name, $return_type);
+    }
 
-    output_declaration($declaration_name,
-		       'function',
-		       {'function' => $declaration_name,
-			'module' => $modulename,
-			'functiontype' => $return_type,
-			'parameterlist' => \@parameterlist,
-			'parameterdescs' => \%parameterdescs,
-			'parametertypes' => \%parametertypes,
-			'sectionlist' => \@sectionlist,
-			'sections' => \%sections,
-			'purpose' => $declaration_purpose
-		       });
+    # The function parser can be called with a typedef parameter.
+    # Handle it.
+    if ($return_type =~ /typedef/) {
+	output_declaration($declaration_name,
+			   'function',
+			   {'function' => $declaration_name,
+			    'typedef' => 1,
+			    'module' => $modulename,
+			    'functiontype' => $return_type,
+			    'parameterlist' => \@parameterlist,
+			    'parameterdescs' => \%parameterdescs,
+			    'parametertypes' => \%parametertypes,
+			    'sectionlist' => \@sectionlist,
+			    'sections' => \%sections,
+			    'purpose' => $declaration_purpose
+			   });
+    } else {
+	output_declaration($declaration_name,
+			   'function',
+			   {'function' => $declaration_name,
+			    'module' => $modulename,
+			    'functiontype' => $return_type,
+			    'parameterlist' => \@parameterlist,
+			    'parameterdescs' => \%parameterdescs,
+			    'parametertypes' => \%parametertypes,
+			    'sectionlist' => \@sectionlist,
+			    'sections' => \%sections,
+			    'purpose' => $declaration_purpose
+			   });
+    }
 }
 
 sub reset_state {
@@ -1758,6 +1934,11 @@
 	$prototype =~ s@/\*.*?\*/@@gos;	# strip comments.
 	$prototype =~ s@[\r\n]+@ @gos; # strip newlines/cr's.
 	$prototype =~ s@^\s+@@gos; # strip leading spaces
+
+	 # Handle prototypes for function pointers like:
+	 # int (*pcs_config)(struct foo)
+	$prototype =~ s@^(\S+\s+)\(\s*\*(\S+)\)@$1$2@gos;
+
 	if ($prototype =~ /SYSCALL_DEFINE/) {
 		syscall_munge();
 	}
@@ -1836,6 +2017,7 @@
 
     while (<IN>) {
 	if (/$export_symbol/) {
+	    next if (defined($nosymbol_table{$2}));
 	    $function_table{$2} = 1;
 	}
     }
@@ -1867,7 +2049,7 @@
     if (/$doc_block/o) {
 	$state = STATE_DOCBLOCK;
 	$contents = "";
-	$new_start_line = $. + 1;
+	$new_start_line = $.;
 
 	if ( $1 eq "" ) {
 	    $section = $section_intro;
@@ -1935,6 +2117,25 @@
 sub process_body($$) {
     my $file = shift;
 
+    # Until all named variable macro parameters are
+    # documented using the bare name (`x`) rather than with
+    # dots (`x...`), strip the dots:
+    if ($section =~ /\w\.\.\.$/) {
+	$section =~ s/\.\.\.$//;
+
+	if ($verbose) {
+	    print STDERR "${file}:$.: warning: Variable macro arguments should be documented without dots\n";
+	    ++$warnings;
+	}
+    }
+
+    if ($state == STATE_BODY_WITH_BLANK_LINE && /^\s*\*\s?\S/) {
+	dump_section($file, $section, $contents);
+	$section = $section_default;
+	$new_start_line = $.;
+	$contents = "";
+    }
+
     if (/$doc_sect/i) { # case insensitive for supported section names
 	$newsection = $1;
 	$newcontents = $2;
@@ -1987,19 +2188,23 @@
 	$prototype = "";
 	$state = STATE_PROTO;
 	$brcount = 0;
+        $new_start_line = $. + 1;
     } elsif (/$doc_content/) {
-	# miguel-style comment kludge, look for blank lines after
-	# @parameter line to signify start of description
 	if ($1 eq "") {
-	    if ($section =~ m/^@/ || $section eq $section_context) {
+	    if ($section eq $section_context) {
 		dump_section($file, $section, $contents);
 		$section = $section_default;
 		$contents = "";
 		$new_start_line = $.;
+		$state = STATE_BODY;
 	    } else {
+		if ($section ne $section_default) {
+		    $state = STATE_BODY_WITH_BLANK_LINE;
+		} else {
+		    $state = STATE_BODY;
+		}
 		$contents .= "\n";
 	    }
-	    $state = STATE_BODY;
 	} elsif ($state == STATE_BODY_MAYBE) {
 	    # Continued declaration purpose
 	    chomp($declaration_purpose);
@@ -2131,7 +2336,7 @@
 
     $file = map_filename($orig_file);
 
-    if (!open(IN,"<$file")) {
+    if (!open(IN_FILE,"<$file")) {
 	print STDERR "Error: Cannot open file $file\n";
 	++$errors;
 	return;
@@ -2140,9 +2345,9 @@
     $. = 1;
 
     $section_counter = 0;
-    while (<IN>) {
+    while (<IN_FILE>) {
 	while (s/\\\s*$//) {
-	    $_ .= <IN>;
+	    $_ .= <IN_FILE>;
 	}
 	# Replace tabs by spaces
         while ($_ =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e) {};
@@ -2151,7 +2356,8 @@
 	    process_normal();
 	} elsif ($state == STATE_NAME) {
 	    process_name($file, $_);
-	} elsif ($state == STATE_BODY || $state == STATE_BODY_MAYBE) {
+	} elsif ($state == STATE_BODY || $state == STATE_BODY_MAYBE ||
+		 $state == STATE_BODY_WITH_BLANK_LINE) {
 	    process_body($file, $_);
 	} elsif ($state == STATE_INLINE) { # scanning for inline parameters
 	    process_inline($file, $_);
@@ -2163,17 +2369,24 @@
     }
 
     # Make sure we got something interesting.
-    if ($initial_section_counter == $section_counter) {
-	if ($output_mode ne "none") {
+    if ($initial_section_counter == $section_counter &&
+	$output_mode ne "none") {
+	if ($output_selection == OUTPUT_INCLUDE) {
+	    print STDERR "${file}:1: warning: '$_' not found\n"
+		for keys %function_table;
+	}
+	else {
 	    print STDERR "${file}:1: warning: no structured comments found\n";
 	}
-	if (($output_selection == OUTPUT_INCLUDE) && ($show_not_found == 1)) {
-	    print STDERR "    Was looking for '$_'.\n" for keys %function_table;
-	}
     }
+    close IN_FILE;
 }
 
 
+if ($output_mode eq "rst") {
+	get_sphinx_version() if (!$sphinx_major);
+}
+
 $kernelversion = get_kernel_version();
 
 # generate a sequence of code that will splice in highlighting information
@@ -2220,4 +2433,9 @@
   print STDERR "$warnings warnings\n";
 }
 
-exit($output_mode eq "none" ? 0 : $errors);
+if ($Werror && $warnings) {
+    print STDERR "$warnings warnings as Errors\n";
+    exit($warnings);
+} else {
+    exit($output_mode eq "none" ? 0 : $errors)
+}
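
A command-line sketch exercising the new kernel-doc switches (the header path and excluded symbol are placeholders):

    ./scripts/kernel-doc -rst -sphinx-version 2.4.4 -nosymbol internal_helper -Werror include/example.h

-sphinx-version selects the C domain dialect, -nosymbol drops the named symbol from the output, and -Werror makes the script exit non-zero when any warnings were emitted.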