Merge tag 'dm-next-9mar23' of https://source.denx.de/u-boot/custodians/u-boot-dm into next

binman x509, separate tools dir and other improvements
patman parallel patch generation
fdt fixes and tests
PyPi support for U-Boot tools
buildman reproducible builds
diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml
index 30025ff..61ada4d 100644
--- a/.azure-pipelines.yml
+++ b/.azure-pipelines.yml
@@ -232,6 +232,16 @@
       # have no matches.
       - script: git grep u-boot,dm- -- '*.dts*' && exit 1 || exit 0
 
+  - job: check_packing_of_python_tools
+    displayName: 'Check we can package the Python tools'
+    pool:
+      vmImage: $(ubuntu_vm)
+    container:
+      image: $(ci_runner_image)
+      options: $(container_option)
+    steps:
+      - script: make pip
+
 - stage: test_py
   jobs:
   - job: test_py
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index e320a24..a891387 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -251,6 +251,12 @@
     # have no matches.
     - git grep u-boot,dm- -- '*.dts*' && exit 1 || exit 0
 
+# Check we can package the Python tools
+Check packing of Python tools:
+  stage: testsuites
+  script:
+    - make pip
+
 # Test sandbox with test.py
 sandbox test.py:
   variables:
diff --git a/Makefile b/Makefile
index e575061..c00683b 100644
--- a/Makefile
+++ b/Makefile
@@ -522,7 +522,7 @@
 no-dot-config-targets := clean clobber mrproper distclean \
 			 help %docs check% coccicheck \
 			 ubootversion backup tests check pcheck qcheck tcheck \
-			 pylint pylint_err
+			 pylint pylint_err _pip pip pip_test pip_release
 
 config-targets := 0
 mixed-targets  := 0
@@ -2272,6 +2272,21 @@
 	F=`basename $(srctree)` ; cd .. ; \
 	gtar --force-local -zcvf `LC_ALL=C date "+$$F-%Y-%m-%d-%T.tar.gz"` $$F
 
+PHONY += _pip pip pip_release
+
+pip_release: PIP_ARGS="--real"
+pip_test: PIP_ARGS=""
+pip: PIP_ARGS="-n"
+
+pip pip_test pip_release: _pip
+
+_pip:
+	scripts/make_pip.sh u_boot_pylib ${PIP_ARGS}
+	scripts/make_pip.sh patman ${PIP_ARGS}
+	scripts/make_pip.sh buildman ${PIP_ARGS}
+	scripts/make_pip.sh dtoc ${PIP_ARGS}
+	scripts/make_pip.sh binman ${PIP_ARGS}
+
 help:
 	@echo  'Cleaning targets:'
 	@echo  '  clean		  - Remove most generated files but keep the config'
@@ -2305,6 +2320,11 @@
 	@echo  "  cfg		  - Don't build, just create the .cfg files"
 	@echo  "  envtools	  - Build only the target-side environment tools"
 	@echo  ''
+	@echo  'PyPi / pip targets:'
+	@echo  '  pip             - Check building of PyPi packages'
+	@echo  '  pip_test        - Build PyPi packages and upload to test server'
+	@echo  '  pip_release     - Build PyPi packages and upload to release server'
+	@echo  ''
 	@echo  'Static analysers'
 	@echo  '  checkstack      - Generate a list of stack hogs'
 	@echo  '  coccicheck      - Execute static code analysis with Coccinelle'
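As a rough usage sketch of the new targets (not part of the patch; the
TWINE_PASSWORD value is the placeholder from scripts/make_pip.sh and is only
needed when uploading):

    make pip                                  # dry run: just build the packages
    TWINE_PASSWORD=pypi-xxx make pip_test     # build and upload to the test server
    TWINE_PASSWORD=pypi-xxx make pip_release  # build and upload to the real server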
diff --git a/boot/bootdev-uclass.c b/boot/bootdev-uclass.c
index 8103a11..d34b7e3 100644
--- a/boot/bootdev-uclass.c
+++ b/boot/bootdev-uclass.c
@@ -629,11 +629,11 @@
 			if (++iter->cur_prio == BOOTDEVP_COUNT)
 				return log_msg_ret("fin", -ENODEV);
 
-			if (iter->flags & BOOTFLOWF_HUNT) {
+			if (iter->flags & BOOTFLOWIF_HUNT) {
 				/* hunt to find new bootdevs */
 				ret = bootdev_hunt_prio(iter->cur_prio,
 							iter->flags &
-							BOOTFLOWF_SHOW);
+							BOOTFLOWIF_SHOW);
 				log_debug("- hunt ret %d\n", ret);
 				if (ret)
 					return log_msg_ret("hun", ret);
@@ -657,7 +657,7 @@
 		       struct udevice **devp, int *method_flagsp)
 {
 	struct udevice *bootstd, *dev = NULL;
-	bool show = iter->flags & BOOTFLOWF_SHOW;
+	bool show = iter->flags & BOOTFLOWIF_SHOW;
 	int method_flags;
 	int ret;
 
@@ -668,7 +668,7 @@
 	}
 
 	/* hunt for any pre-scan devices */
-	if (iter->flags & BOOTFLOWF_HUNT) {
+	if (iter->flags & BOOTFLOWIF_HUNT) {
 		ret = bootdev_hunt_prio(BOOTDEVP_1_PRE_SCAN, show);
 		if (ret)
 			return log_msg_ret("pre", ret);
@@ -676,7 +676,7 @@
 
 	/* Handle scanning a single device */
 	if (IS_ENABLED(CONFIG_BOOTSTD_FULL) && label) {
-		if (iter->flags & BOOTFLOWF_HUNT) {
+		if (iter->flags & BOOTFLOWIF_HUNT) {
 			ret = bootdev_hunt(label, show);
 			if (ret)
 				return log_msg_ret("hun", ret);
@@ -687,11 +687,11 @@
 
 		log_debug("method_flags: %x\n", method_flags);
 		if (method_flags & BOOTFLOW_METHF_SINGLE_UCLASS)
-			iter->flags |= BOOTFLOWF_SINGLE_UCLASS;
+			iter->flags |= BOOTFLOWIF_SINGLE_UCLASS;
 		else if (method_flags & BOOTFLOW_METHF_SINGLE_DEV)
-			iter->flags |= BOOTFLOWF_SINGLE_DEV;
+			iter->flags |= BOOTFLOWIF_SINGLE_DEV;
 		else
-			iter->flags |= BOOTFLOWF_SINGLE_MEDIA;
+			iter->flags |= BOOTFLOWIF_SINGLE_MEDIA;
 		log_debug("Selected label: %s, flags %x\n", label, iter->flags);
 	} else {
 		bool ok;
diff --git a/boot/bootflow.c b/boot/bootflow.c
index 60791e6..8f2cb87 100644
--- a/boot/bootflow.c
+++ b/boot/bootflow.c
@@ -139,8 +139,8 @@
 		if (dev && iter->num_devs < iter->max_devs)
 			iter->dev_used[iter->num_devs++] = dev;
 
-		if ((iter->flags & (BOOTFLOWF_SHOW | BOOTFLOWF_SINGLE_DEV)) ==
-		    BOOTFLOWF_SHOW) {
+		if ((iter->flags & (BOOTFLOWIF_SHOW | BOOTFLOWIF_SINGLE_DEV)) ==
+		    BOOTFLOWIF_SHOW) {
 			if (dev)
 				printf("Scanning bootdev '%s':\n", dev->name);
 			else if (IS_ENABLED(CONFIG_BOOTMETH_GLOBAL) &&
@@ -215,7 +215,7 @@
 	iter->max_part = 0;
 
 	/* ...select next bootdev */
-	if (iter->flags & BOOTFLOWF_SINGLE_DEV) {
+	if (iter->flags & BOOTFLOWIF_SINGLE_DEV) {
 		ret = -ENOENT;
 	} else {
 		int method_flags;
@@ -227,7 +227,7 @@
 			ret = bootdev_setup_iter(iter, NULL, &dev,
 						 &method_flags);
 		} else if (IS_ENABLED(CONFIG_BOOTSTD_FULL) &&
-			   (iter->flags & BOOTFLOWF_SINGLE_UCLASS)) {
+			   (iter->flags & BOOTFLOWIF_SINGLE_UCLASS)) {
 			/* Move to the next bootdev in this uclass */
 			uclass_find_next_device(&dev);
 			if (!dev) {
@@ -236,7 +236,7 @@
 				ret = -ENODEV;
 			}
 		} else if (IS_ENABLED(CONFIG_BOOTSTD_FULL) &&
-			   iter->flags & BOOTFLOWF_SINGLE_MEDIA) {
+			   iter->flags & BOOTFLOWIF_SINGLE_MEDIA) {
 			log_debug("next in single\n");
 			method_flags = 0;
 			do {
@@ -328,7 +328,7 @@
 		 * For 'all' we return all bootflows, even
 		 * those with errors
 		 */
-		if (iter->flags & BOOTFLOWF_ALL)
+		if (iter->flags & BOOTFLOWIF_ALL)
 			return log_msg_ret("all", ret);
 	}
 	if (ret)
@@ -344,14 +344,14 @@
 	int ret;
 
 	if (dev || label)
-		flags |= BOOTFLOWF_SKIP_GLOBAL;
+		flags |= BOOTFLOWIF_SKIP_GLOBAL;
 	bootflow_iter_init(iter, flags);
 
 	/*
 	 * Set up the ordering of bootmeths. This sets iter->doing_global and
 	 * iter->first_glob_method if we are starting with the global bootmeths
 	 */
-	ret = bootmeth_setup_iter_order(iter, !(flags & BOOTFLOWF_SKIP_GLOBAL));
+	ret = bootmeth_setup_iter_order(iter, !(flags & BOOTFLOWIF_SKIP_GLOBAL));
 	if (ret)
 		return log_msg_ret("obmeth", -ENODEV);
 
@@ -373,7 +373,7 @@
 	if (ret) {
 		log_debug("check - ret=%d\n", ret);
 		if (ret != BF_NO_MORE_PARTS && ret != -ENOSYS) {
-			if (iter->flags & BOOTFLOWF_ALL)
+			if (iter->flags & BOOTFLOWIF_ALL)
 				return log_msg_ret("all", ret);
 		}
 		iter->err = ret;
@@ -402,7 +402,7 @@
 				return 0;
 			iter->err = ret;
 			if (ret != BF_NO_MORE_PARTS && ret != -ENOSYS) {
-				if (iter->flags & BOOTFLOWF_ALL)
+				if (iter->flags & BOOTFLOWIF_ALL)
 					return log_msg_ret("all", ret);
 			}
 		} else {
@@ -467,6 +467,9 @@
 
 	printf("** Booting bootflow '%s' with %s\n", bflow->name,
 	       bflow->method->name);
+	if (IS_ENABLED(CONFIG_OF_HAS_PRIOR_STAGE) &&
+	    (bflow->flags & BOOTFLOWF_USE_PRIOR_FDT))
+		printf("Using prior-stage device tree\n");
 	ret = bootflow_boot(bflow);
 	if (!IS_ENABLED(CONFIG_BOOTSTD_FULL)) {
 		printf("Boot failed (err=%d)\n", ret);
diff --git a/boot/bootmeth_efi.c b/boot/bootmeth_efi.c
index 67c972e..6a97ac0 100644
--- a/boot/bootmeth_efi.c
+++ b/boot/bootmeth_efi.c
@@ -147,25 +147,60 @@
 	return 0;
 }
 
-static void distro_efi_get_fdt_name(char *fname, int size)
+/**
+ * distro_efi_get_fdt_name() - Get the filename for reading the .dtb file
+ *
+ * @fname: Place to put filename
+ * @size: Max size of filename
+ * @seq: Sequence number, to cycle through options (0=first)
+ * Returns: 0 on success, -ENOENT if the "fdtfile" env var does not exist,
+ * -EINVAL if there are no more options, -EALREADY if the control FDT should be
+ * used
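+ *
+ * For example, with "fdtfile" set to board.dtb (an illustrative name), the
+ * filenames produced for seq 0, 1 and 2 are "/dtb/board.dtb", "/board.dtb"
+ * and "/dtb/current/board.dtb" respectively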
+ */
+static int distro_efi_get_fdt_name(char *fname, int size, int seq)
 {
 	const char *fdt_fname;
+	const char *prefix;
+
+	/* select the prefix */
+	switch (seq) {
+	case 0:
+		/* this is the default */
+		prefix = "/dtb";
+		break;
+	case 1:
+		prefix = "";
+		break;
+	case 2:
+		prefix = "/dtb/current";
+		break;
+	default:
+		return log_msg_ret("pref", -EINVAL);
+	}
 
 	fdt_fname = env_get("fdtfile");
 	if (fdt_fname) {
-		snprintf(fname, size, "dtb/%s", fdt_fname);
+		snprintf(fname, size, "%s/%s", prefix, fdt_fname);
 		log_debug("Using device tree: %s\n", fname);
-	} else {
+	} else if (IS_ENABLED(CONFIG_OF_HAS_PRIOR_STAGE)) {
+		strcpy(fname, "<prior>");
+		return log_msg_ret("pref", -EALREADY);
+	/* Use this fallback only for 32-bit ARM */
+	} else if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64)) {
 		const char *soc = env_get("soc");
 		const char *board = env_get("board");
 		const char *boardver = env_get("boardver");
 
 		/* cf the code in label_boot() which seems very complex */
-		snprintf(fname, size, "dtb/%s%s%s%s.dtb",
+		snprintf(fname, size, "%s/%s%s%s%s.dtb", prefix,
 			 soc ? soc : "", soc ? "-" : "", board ? board : "",
 			 boardver ? boardver : "");
 		log_debug("Using default device tree: %s\n", fname);
+	} else {
+		return log_msg_ret("env", -ENOENT);
 	}
+
+	return 0;
 }
 
 static int distro_efi_read_bootflow_file(struct udevice *dev,
@@ -174,7 +209,7 @@
 	struct blk_desc *desc = NULL;
 	ulong fdt_addr, size;
 	char fname[256];
-	int ret;
+	int ret, seq;
 
 	/* We require a partition table */
 	if (!bflow->part)
@@ -196,13 +231,26 @@
 	if (ret)
 		return log_msg_ret("read", -EINVAL);
 
-	distro_efi_get_fdt_name(fname, sizeof(fname));
+	fdt_addr = env_get_hex("fdt_addr_r", 0);
+
+	/* try the various available names */
+	ret = -ENOENT;
+	for (seq = 0; ret; seq++) {
+		ret = distro_efi_get_fdt_name(fname, sizeof(fname), seq);
+		if (ret == -EALREADY) {
+			bflow->flags = BOOTFLOWF_USE_PRIOR_FDT;
+			break;
+		}
+		if (ret)
+			return log_msg_ret("nam", ret);
+		ret = bootmeth_common_read_file(dev, bflow, fname, fdt_addr,
+						&size);
+	}
+
 	bflow->fdt_fname = strdup(fname);
 	if (!bflow->fdt_fname)
 		return log_msg_ret("fil", -ENOMEM);
 
-	fdt_addr = env_get_hex("fdt_addr_r", 0);
-	ret = bootmeth_common_read_file(dev, bflow, fname, fdt_addr, &size);
 	if (!ret) {
 		bflow->fdt_size = size;
 		bflow->fdt_addr = fdt_addr;
@@ -277,7 +325,11 @@
 	fdt_addr = hextoul(fdt_addr_str, NULL);
 	sprintf(file_addr, "%lx", fdt_addr);
 
-	distro_efi_get_fdt_name(fname, sizeof(fname));
+	/* We only allow the first prefix with PXE */
+	ret = distro_efi_get_fdt_name(fname, sizeof(fname), 0);
+	if (ret)
+		return log_msg_ret("nam", ret);
+
 	bflow->fdt_fname = strdup(fname);
 	if (!bflow->fdt_fname)
 		return log_msg_ret("fil", -ENOMEM);
diff --git a/cmd/bootflow.c b/cmd/bootflow.c
index 3548bbb..42f6e14 100644
--- a/cmd/bootflow.c
+++ b/cmd/bootflow.c
@@ -135,13 +135,13 @@
 
 	flags = 0;
 	if (list)
-		flags |= BOOTFLOWF_SHOW;
+		flags |= BOOTFLOWIF_SHOW;
 	if (all)
-		flags |= BOOTFLOWF_ALL;
+		flags |= BOOTFLOWIF_ALL;
 	if (no_global)
-		flags |= BOOTFLOWF_SKIP_GLOBAL;
+		flags |= BOOTFLOWIF_SKIP_GLOBAL;
 	if (!no_hunter)
-		flags |= BOOTFLOWF_HUNT;
+		flags |= BOOTFLOWIF_HUNT;
 
 	/*
 	 * If we have a device, just scan for bootflows attached to that device
diff --git a/cmd/fdt.c b/cmd/fdt.c
index 1972490..f38fe90 100644
--- a/cmd/fdt.c
+++ b/cmd/fdt.c
@@ -77,7 +77,17 @@
 
 		sprintf(buf, "0x%08X", fdt32_to_cpu(*(fdt32_t *)nodep));
 		env_set(var, buf);
-	} else if (len%4 == 0 && len <= 20) {
+	} else if (len % 4 == 0 && index >= 0) {
+		/* Needed to print integer arrays. */
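+		/* e.g. 'fdt get value var /node regs 1' reads the second cell */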
+		const unsigned int *nodec = (const unsigned int *)nodep;
+		char buf[11];
+
+		if (index * 4 >= len)
+			return 1;
+
+		sprintf(buf, "0x%08X", fdt32_to_cpu(*(nodec + index)));
+		env_set(var, buf);
+	} else if (len % 4 == 0 && len <= 20) {
 		/* Needed to print things like sha1 hashes. */
 		char buf[41];
 		int i;
@@ -446,15 +456,17 @@
 		} else {
 			nodep = fdt_getprop(
 				working_fdt, nodeoffset, prop, &len);
-			if (len == 0) {
-				/* no property value */
-				env_set(var, "");
-				return 0;
-			} else if (nodep && len > 0) {
+			if (nodep && len >= 0) {
 				if (subcmd[0] == 'v') {
-					int index = 0;
+					int index = -1;
 					int ret;
 
+					if (len == 0) {
+						/* no property value */
+						env_set(var, "");
+						return 0;
+					}
+
 					if (argc == 7)
 						index = simple_strtoul(argv[6], NULL, 10);
 
@@ -464,9 +476,10 @@
 						return ret;
 				} else if (subcmd[0] == 'a') {
 					/* Get address */
-					char buf[11];
+					char buf[19];
 
-					sprintf(buf, "0x%p", nodep);
+					snprintf(buf, sizeof(buf), "0x%lx",
+						 (ulong)map_to_sysmem(nodep));
 					env_set(var, buf);
 				} else if (subcmd[0] == 's') {
 					/* Get size */
@@ -545,16 +558,16 @@
 		if (argc > 3) {
 			err = fdt_delprop(working_fdt, nodeoffset, argv[3]);
 			if (err < 0) {
-				printf("libfdt fdt_delprop():  %s\n",
+				printf("libfdt fdt_delprop(): %s\n",
 					fdt_strerror(err));
-				return err;
+				return CMD_RET_FAILURE;
 			}
 		} else {
 			err = fdt_del_node(working_fdt, nodeoffset);
 			if (err < 0) {
-				printf("libfdt fdt_del_node():  %s\n",
+				printf("libfdt fdt_del_node(): %s\n",
 					fdt_strerror(err));
-				return err;
+				return CMD_RET_FAILURE;
 			}
 		}
 
@@ -595,7 +608,12 @@
 	 * Set boot cpu id
 	 */
 	} else if (strncmp(argv[1], "boo", 3) == 0) {
-		unsigned long tmp = hextoul(argv[2], NULL);
+		unsigned long tmp;
+
+		if (argc != 3)
+			return CMD_RET_USAGE;
+
+		tmp = hextoul(argv[2], NULL);
 		fdt_set_boot_cpuid_phys(working_fdt, tmp);
 
 	/*
@@ -604,6 +622,10 @@
 	} else if (strncmp(argv[1], "me", 2) == 0) {
 		uint64_t addr, size;
 		int err;
+
+		if (argc != 4)
+			return CMD_RET_USAGE;
+
 		addr = simple_strtoull(argv[2], NULL, 16);
 		size = simple_strtoull(argv[3], NULL, 16);
 		err = fdt_fixup_memory(working_fdt, addr, size);
@@ -642,18 +664,18 @@
 			err = fdt_add_mem_rsv(working_fdt, addr, size);
 
 			if (err < 0) {
-				printf("libfdt fdt_add_mem_rsv():  %s\n",
+				printf("libfdt fdt_add_mem_rsv(): %s\n",
 					fdt_strerror(err));
-				return err;
+				return CMD_RET_FAILURE;
 			}
 		} else if (argv[2][0] == 'd') {
 			unsigned long idx = hextoul(argv[3], NULL);
 			int err = fdt_del_mem_rsv(working_fdt, idx);
 
 			if (err < 0) {
-				printf("libfdt fdt_del_mem_rsv():  %s\n",
+				printf("libfdt fdt_del_mem_rsv(): %s\n",
 					fdt_strerror(err));
-				return err;
+				return CMD_RET_FAILURE;
 			}
 		} else {
 			/* Unrecognized command */
@@ -878,41 +900,33 @@
 static int is_printable_string(const void *data, int len)
 {
 	const char *s = data;
+	const char *ss, *se;
 
 	/* zero length is not */
 	if (len == 0)
 		return 0;
 
-	/* must terminate with zero or '\n' */
-	if (s[len - 1] != '\0' && s[len - 1] != '\n')
+	/* must terminate with zero */
+	if (s[len - 1] != '\0')
 		return 0;
 
-	/* printable or a null byte (concatenated strings) */
-	while (((*s == '\0') || isprint(*s) || isspace(*s)) && (len > 0)) {
-		/*
-		 * If we see a null, there are three possibilities:
-		 * 1) If len == 1, it is the end of the string, printable
-		 * 2) Next character also a null, not printable.
-		 * 3) Next character not a null, continue to check.
-		 */
-		if (s[0] == '\0') {
-			if (len == 1)
-				return 1;
-			if (s[1] == '\0')
-				return 0;
-		}
+	se = s + len;
+
+	while (s < se) {
+		ss = s;
+		while (s < se && *s && isprint((unsigned char)*s))
+			s++;
+
+		/* not zero, or not done yet */
+		if (*s != '\0' || s == ss)
+			return 0;
+
 		s++;
-		len--;
 	}
 
-	/* Not the null termination, or not done yet: not printable */
-	if (*s != '\0' || (len != 0))
-		return 0;
-
 	return 1;
 }
 
-
 /*
  * Print the property in the best format, a heuristic guess.  Print as
  * a string, concatenated strings, a byte, word, double word, or (if all
@@ -1135,8 +1149,8 @@
 	"                                        <start>/<size> - initrd start addr/size\n"
 #if defined(CONFIG_FIT_SIGNATURE)
 	"fdt checksign [<addr>]              - check FIT signature\n"
-	"                                        <start> - addr of key blob\n"
-	"                                                  default gd->fdt_blob\n"
+	"                                      <addr> - address of key blob\n"
+	"                                               default gd->fdt_blob\n"
 #endif
 	"NOTE: Dereference aliases by omitting the leading '/', "
 		"e.g. fdt print ethernet0.";
diff --git a/doc/build/reproducible.rst b/doc/build/reproducible.rst
index 5423080..8b030f4 100644
--- a/doc/build/reproducible.rst
+++ b/doc/build/reproducible.rst
@@ -23,3 +23,5 @@
 
     ./u-boot -T
     U-Boot 2023.01 (Jan 01 2023 - 00:00:00 +0000)
+
+The same effect can be obtained with buildman using the `-r` flag.
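+
+For example, to ask buildman for a reproducible build of a single board (the
+board name here is purely illustrative)::
+
+    buildman -r sandbox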
diff --git a/doc/develop/bootstd.rst b/doc/develop/bootstd.rst
index dabe987..5dfa6cf 100644
--- a/doc/develop/bootstd.rst
+++ b/doc/develop/bootstd.rst
@@ -489,22 +489,22 @@
 Then the iterator is set up to according to the parameters given:
 
 - When `dev` is provided, then a single bootdev is scanned. In this case,
-  `BOOTFLOWF_SKIP_GLOBAL` and `BOOTFLOWF_SINGLE_DEV` are set. No hunters are
+  `BOOTFLOWIF_SKIP_GLOBAL` and `BOOTFLOWIF_SINGLE_DEV` are set. No hunters are
   used in this case
 
 - Otherwise, when `label` is provided, then a single label or named bootdev is
-  scanned. In this case `BOOTFLOWF_SKIP_GLOBAL` is set and there are three
+  scanned. In this case `BOOTFLOWIF_SKIP_GLOBAL` is set and there are three
   options (with an effect on the `iter_incr()` function described later):
 
   - If `label` indicates a numeric bootdev number (e.g. "2") then
     `BOOTFLOW_METHF_SINGLE_DEV` is set. In this case, moving to the next bootdev
    simply stops, since there is only one. No hunters are used.
   - If `label` indicates a particular media device (e.g. "mmc1") then
-    `BOOTFLOWF_SINGLE_MEDIA` is set. In this case, moving to the next bootdev
+    `BOOTFLOWIF_SINGLE_MEDIA` is set. In this case, moving to the next bootdev
     processes just the children of the media device. Hunters are used, in this
     example just the "mmc" hunter.
   - If `label` indicates a media uclass (e.g. "mmc") then
-    `BOOTFLOWF_SINGLE_UCLASS` is set. In this case, all bootdevs in that uclass
+    `BOOTFLOWIF_SINGLE_UCLASS` is set. In this case, all bootdevs in that uclass
     are used. Hunters are used, in this example just the "mmc" hunter
 
 - Otherwise, none of the above flags is set and iteration is set up to work
@@ -543,7 +543,7 @@
 With the iterator ready, `bootflow_scan_first()` checks whether the current
 settings produce a valid bootflow. This is handled by `bootflow_check()`, which
 either returns 0 (if it got something) or an error if not (more on that later).
-If the `BOOTFLOWF_ALL` iterator flag is set, even errors are returned as
+If the `BOOTFLOWIF_ALL` iterator flag is set, even errors are returned as
 incomplete bootflows, but normally an error results in moving onto the next
 iteration.
 
@@ -651,7 +651,7 @@
 Based on what the bootdev or bootmeth responds with, `bootflow_check()` either
 returns a valid bootflow, or a partial one with an error. A partial bootflow
 is one that has some fields set up, but did not reach the `BOOTFLOWST_READY`
-state. As noted before, if the `BOOTFLOWF_ALL` iterator flag is set, then all
+state. As noted before, if the `BOOTFLOWIF_ALL` iterator flag is set, then all
 bootflows are returned, even partial ones. This can help with debugging.
 
 So at this point you can see that total control over whether a bootflow can
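As a rough sketch (not part of this patch) of how client code drives this
iteration, based on the bootflow_scan_first()/bootflow_scan_next() declarations
in include/bootflow.h and the calls made in test/boot/bootflow.c:

    struct bootflow_iter iter;
    struct bootflow bflow;
    int ret;

    /* show each bootdev as it is scanned and run the hunters as needed */
    for (ret = bootflow_scan_first(NULL, NULL, &iter,
                                   BOOTFLOWIF_SHOW | BOOTFLOWIF_HUNT, &bflow);
         !ret;
         ret = bootflow_scan_next(&iter, &bflow))
        printf("found '%s'\n", bflow.name);

    /* -ENODEV means there are no more bootflows; release the iterator */
    bootflow_iter_uninit(&iter);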
diff --git a/include/bootflow.h b/include/bootflow.h
index f516bf8..f20f575 100644
--- a/include/bootflow.h
+++ b/include/bootflow.h
@@ -37,6 +37,18 @@
 };
 
 /**
+ * enum bootflow_flags_t - flags for bootflows
+ *
+ * @BOOTFLOWF_USE_PRIOR_FDT: Indicates that an FDT was not found by the bootmeth
+ *	and it is using the prior-stage FDT, which is the U-Boot control FDT.
+ *	This is only possible with the EFI bootmeth (distro-efi) and only when
+ *	CONFIG_OF_HAS_PRIOR_STAGE is enabled
+ */
+enum bootflow_flags_t {
+	BOOTFLOWF_USE_PRIOR_FDT	= 1 << 0,
+};
+
+/**
  * struct bootflow - information about a bootflow
  *
  * This is connected into two separate linked lists:
@@ -68,6 +80,7 @@
  * @fdt_fname: Filename of FDT file
  * @fdt_size: Size of FDT file
  * @fdt_addr: Address of loaded fdt
+ * @flags: Flags for the bootflow (see enum bootflow_flags_t)
  */
 struct bootflow {
 	struct list_head bm_node;
@@ -90,39 +103,40 @@
 	char *fdt_fname;
 	int fdt_size;
 	ulong fdt_addr;
+	int flags;
 };
 
 /**
- * enum bootflow_flags_t - flags for the bootflow iterator
+ * enum bootflow_iter_flags_t - flags for the bootflow iterator
  *
- * @BOOTFLOWF_FIXED: Only used fixed/internal media
- * @BOOTFLOWF_SHOW: Show each bootdev before scanning it; show each hunter
+ * @BOOTFLOWIF_FIXED: Only used fixed/internal media
+ * @BOOTFLOWIF_SHOW: Show each bootdev before scanning it; show each hunter
  * before using it
- * @BOOTFLOWF_ALL: Return bootflows with errors as well
- * @BOOTFLOWF_HUNT: Hunt for new bootdevs using the bootdrv hunters
+ * @BOOTFLOWIF_ALL: Return bootflows with errors as well
+ * @BOOTFLOWIF_HUNT: Hunt for new bootdevs using the bootdrv hunters
  *
  * Internal flags:
- * @BOOTFLOWF_SINGLE_DEV: (internal) Just scan one bootdev
- * @BOOTFLOWF_SKIP_GLOBAL: (internal) Don't scan global bootmeths
- * @BOOTFLOWF_SINGLE_UCLASS: (internal) Keep scanning through all devices in
+ * @BOOTFLOWIF_SINGLE_DEV: (internal) Just scan one bootdev
+ * @BOOTFLOWIF_SKIP_GLOBAL: (internal) Don't scan global bootmeths
+ * @BOOTFLOWIF_SINGLE_UCLASS: (internal) Keep scanning through all devices in
  * this uclass (used with things like "mmc")
- * @BOOTFLOWF_SINGLE_MEDIA: (internal) Scan one media device in the uclass (used
+ * @BOOTFLOWIF_SINGLE_MEDIA: (internal) Scan one media device in the uclass (used
  * with things like "mmc1")
  */
-enum bootflow_flags_t {
-	BOOTFLOWF_FIXED		= 1 << 0,
-	BOOTFLOWF_SHOW		= 1 << 1,
-	BOOTFLOWF_ALL		= 1 << 2,
-	BOOTFLOWF_HUNT		= 1 << 3,
+enum bootflow_iter_flags_t {
+	BOOTFLOWIF_FIXED		= 1 << 0,
+	BOOTFLOWIF_SHOW			= 1 << 1,
+	BOOTFLOWIF_ALL			= 1 << 2,
+	BOOTFLOWIF_HUNT			= 1 << 3,
 
 	/*
 	 * flags used internally by standard boot - do not set these when
 	 * calling bootflow_scan_bootdev() etc.
 	 */
-	BOOTFLOWF_SINGLE_DEV	= 1 << 16,
-	BOOTFLOWF_SKIP_GLOBAL	= 1 << 17,
-	BOOTFLOWF_SINGLE_UCLASS	= 1 << 18,
-	BOOTFLOWF_SINGLE_MEDIA	= 1 << 19,
+	BOOTFLOWIF_SINGLE_DEV		= 1 << 16,
+	BOOTFLOWIF_SKIP_GLOBAL		= 1 << 17,
+	BOOTFLOWIF_SINGLE_UCLASS	= 1 << 18,
+	BOOTFLOWIF_SINGLE_MEDIA		= 1 << 19,
 };
 
 /**
@@ -164,9 +178,9 @@
  * updated to a larger value, no less than the number of available partitions.
  * This ensures that iteration works through all partitions on the bootdev.
  *
- * @flags: Flags to use (see enum bootflow_flags_t). If BOOTFLOWF_GLOBAL_FIRST is
- *	enabled then the global bootmeths are being scanned, otherwise we have
- *	moved onto the bootdevs
+ * @flags: Flags to use (see enum bootflow_iter_flags_t). If
+ *	BOOTFLOWIF_GLOBAL_FIRST is enabled then the global bootmeths are being
+ *	scanned, otherwise we have moved onto the bootdevs
  * @dev: Current bootdev, NULL if none. This is only ever updated in
  * bootflow_iter_set_dev()
  * @part: Current partition number (0 for whole device)
@@ -233,7 +247,7 @@
  * This sets everything to the starting point, ready for use.
  *
  * @iter: Place to store private info (inited by this call)
- * @flags: Flags to use (see enum bootflow_flags_t)
+ * @flags: Flags to use (see enum bootflow_iter_flags_t)
  */
 void bootflow_iter_init(struct bootflow_iter *iter, int flags);
 
@@ -259,15 +273,16 @@
 /**
  * bootflow_scan_first() - find the first bootflow for a device or label
  *
- * If @flags includes BOOTFLOWF_ALL then bootflows with errors are returned too
+ * If @flags includes BOOTFLOWIF_ALL then bootflows with errors are returned too
  *
  * @dev:	Boot device to scan, NULL to work through all of them until it
  *	finds one that can supply a bootflow
  * @label:	Label to control the scan, NULL to work through all devices
  *	until it finds one that can supply a bootflow
  * @iter:	Place to store private info (inited by this call)
- * @flags:	Flags for iterator (enum bootflow_flags_t). Note that if @dev
- * is NULL, then BOOTFLOWF_SKIP_GLOBAL is set automatically by this function
+ * @flags:	Flags for iterator (enum bootflow_iter_flags_t). Note that if
+ *	@dev is NULL, then BOOTFLOWIF_SKIP_GLOBAL is set automatically by this
+ *	function
  * @bflow:	Place to put the bootflow if found
  * Return: 0 if found,  -ENODEV if no device, other -ve on other error
  *	(iteration can continue)
diff --git a/include/test/ut.h b/include/test/ut.h
index 4d00b4e..2b0dab3 100644
--- a/include/test/ut.h
+++ b/include/test/ut.h
@@ -334,6 +334,10 @@
 		return CMD_RET_FAILURE;					\
 	}								\
 
+/* Assert that the next console output line is empty */
+#define ut_assert_nextline_empty()					\
+	ut_assert_nextline("%s", "")
+
 /**
  * ut_check_free() - Return the number of bytes free in the malloc() pool
  *
diff --git a/scripts/event_dump.py b/scripts/event_dump.py
index d87823f..0117457 100755
--- a/scripts/event_dump.py
+++ b/scripts/event_dump.py
@@ -15,7 +15,7 @@
 sys.path.insert(1, os.path.join(our_path, '../tools'))
 
 from binman import elf
-from patman import tools
+from u_boot_pylib import tools
 
 # A typical symbol looks like this:
 #   _u_boot_list_2_evspy_info_2_EVT_MISC_INIT_F_3_sandbox_misc_init_f
diff --git a/scripts/make_pip.sh b/scripts/make_pip.sh
new file mode 100755
index 0000000..4602dcf
--- /dev/null
+++ b/scripts/make_pip.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+
+# Packages a U-Boot tool
+#
+# Usage: make_pip.sh <tool_name> [--real]
+#
+# Where tool_name is one of patman, buildman, dtoc, binman, u_boot_pylib
+#
+# and --real means to upload to the real server (otherwise the test one is used)
+#
+# The username for upload is always __token__ so set TWINE_PASSWORD to your
+# password before running this script:
+#
+# export TWINE_PASSWORD=pypi-xxx
+#
+# To test your new packages:
+#
+# pip install -i https://test.pypi.org/simple/ <tool_name>
+#
+
+# DO NOT use patman or binman
+
+set -xe
+
+# Repo to upload to
+repo="--repository testpypi"
+
+# Non-empty to do the actual upload
+upload=1
+
+tool="$1"
+shift
+flags="$*"
+
+if [[ "${tool}" =~ ^(patman|buildman|dtoc|binman|u_boot_pylib)$ ]]; then
+	echo "Building dist package for tool ${tool}"
+else
+	echo "Unknown tool ${tool}: use patman, buildman, dtoc, binman or u_boot_pylib"
+	exit 1
+fi
+
+for flag in "${flags}"; do
+	if [ "${flag}" == "--real" ]; then
+		echo "Using real server"
+		repo=
+	fi
+	if [ "${flag}" == "-n" ]; then
+		echo "Doing dry run"
+		upload=
+	fi
+done
+
+if [ -n "${upload}" ]; then
+	if [ -z "${TWINE_PASSWORD}" ]; then
+		echo "Please set TWINE_PASSWORD to your password and retry"
+		exit 1
+	fi
+fi
+
+# Create a temp dir to work in
+dir=$(mktemp -d)
+
+# Copy in some basic files
+cp -v tools/${tool}/pyproject.toml ${dir}
+cp -v Licenses/gpl-2.0.txt ${dir}/LICENSE
+readme="tools/${tool}/README.*"
+
+# Copy in the README, dropping some Sphinx constructs that PyPi doesn't like
+cat ${readme} | sed -E 's/:(doc|ref):`.*`//; /sectionauthor/d; /toctree::/d' \
+	> ${dir}/$(basename ${readme})
+
+# Copy the top-level Python and doc files
+dest=${dir}/src/${tool}
+mkdir -p ${dest}
+cp -v tools/$tool/{*.py,*.rst} ${dest}
+
+# Copy over the subdirectories, including any sub files. Drop any cache files
+# and other such things
+pushd tools/${tool}
+for subdir in $(find . -maxdepth 1 -type d | \
+		grep -vE "(__pycache__|home|usr|scratch|\.$|pyproject)"); do
+	pathname="${dest}/${subdir}"
+	echo "Copy ${pathname}"
+	cp -a ${subdir} ${pathname}
+done
+popd
+
+# Remove cache files that accidentally made it through
+find ${dest} -name __pycache__ -type f -exec rm {} \;
+find ${dest} -depth -name __pycache__ -exec rmdir {} \;
+
+# Remove test files
+rm -rf ${dest}/*test*
+
+mkdir ${dir}/tests
+cd ${dir}
+
+# Make sure the tools are up to date
+python3 -m pip install --upgrade build
+python3 -m pip install --upgrade twine
+
+# Build the PyPi package
+python3 -m build
+
+echo "Completed build of ${tool}"
+
+# Use --skip-existing to work even if the version is already present
+if [ -n "${upload}" ]; then
+	echo "Uploading from ${dir}"
+	python3 -m twine upload ${repo} -u __token__ dist/*
+	echo "Completed upload of ${tool}"
+fi
+
+rm -rf "${dir}"
+
+echo -e "done\n\n"
diff --git a/test/boot/bootdev.c b/test/boot/bootdev.c
index e1eb8cc..4fe9fd7 100644
--- a/test/boot/bootdev.c
+++ b/test/boot/bootdev.c
@@ -289,7 +289,7 @@
 
 	/* try again but enable hunting, which brings in SCSI */
 	bootflow_iter_uninit(&iter);
-	ut_assertok(bootflow_scan_first(NULL, NULL, &iter, BOOTFLOWF_HUNT,
+	ut_assertok(bootflow_scan_first(NULL, NULL, &iter, BOOTFLOWIF_HUNT,
 					&bflow));
 	ut_asserteq(-ENODEV, bootflow_scan_next(&iter, &bflow));
 	ut_asserteq(7, iter.num_devs);
@@ -427,8 +427,8 @@
 
 	ut_assertok(bootstd_test_drop_bootdev_order(uts));
 	ut_assertok(bootflow_scan_first(NULL, NULL, &iter,
-					BOOTFLOWF_SHOW | BOOTFLOWF_HUNT |
-					BOOTFLOWF_SKIP_GLOBAL, &bflow));
+					BOOTFLOWIF_SHOW | BOOTFLOWIF_HUNT |
+					BOOTFLOWIF_SKIP_GLOBAL, &bflow));
 	ut_asserteq(BIT(MMC_HUNTER) | BIT(1), std->hunters_used);
 
 	return 0;
@@ -649,7 +649,7 @@
 	iter.part = 0;
 	uclass_first_device(UCLASS_BOOTMETH, &bflow.method);
 	iter.cur_prio = 0;
-	iter.flags = BOOTFLOWF_SHOW;
+	iter.flags = BOOTFLOWIF_SHOW;
 
 	dev = NULL;
 	console_record_reset_enable();
@@ -662,7 +662,7 @@
 	ut_assert_console_end();
 
 	/* now try again with hunting enabled */
-	iter.flags = BOOTFLOWF_SHOW | BOOTFLOWF_HUNT;
+	iter.flags = BOOTFLOWIF_SHOW | BOOTFLOWIF_HUNT;
 	iter.cur_prio = 0;
 	iter.part = 0;
 
diff --git a/test/boot/bootflow.c b/test/boot/bootflow.c
index b9284fc..fd0e1d6 100644
--- a/test/boot/bootflow.c
+++ b/test/boot/bootflow.c
@@ -277,7 +277,7 @@
 	/* The first device is mmc2.bootdev which has no media */
 	ut_asserteq(-EPROTONOSUPPORT,
 		    bootflow_scan_first(NULL, NULL, &iter,
-					BOOTFLOWF_ALL | BOOTFLOWF_SKIP_GLOBAL, &bflow));
+					BOOTFLOWIF_ALL | BOOTFLOWIF_SKIP_GLOBAL, &bflow));
 	ut_asserteq(2, iter.num_methods);
 	ut_asserteq(0, iter.cur_method);
 	ut_asserteq(0, iter.part);
diff --git a/test/cmd/fdt.c b/test/cmd/fdt.c
index 7974c88..8ae8a52 100644
--- a/test/cmd/fdt.c
+++ b/test/cmd/fdt.c
@@ -15,6 +15,13 @@
 #include <test/ut.h>
 
 DECLARE_GLOBAL_DATA_PTR;
+/*
+ * Missing tests:
+ * fdt boardsetup         - Do board-specific set up
+ * fdt checksign [<addr>] - check FIT signature
+ *                          <addr> - address of key blob
+ *                                   default gd->fdt_blob
+ */
 
 /* Declare a new fdt test */
 #define FDT_TEST(_name, _flags)	UNIT_TEST(_name, _flags, fdt_test)
@@ -39,6 +46,102 @@
 	return 0;
 }
 
+/**
+ * make_fuller_fdt() - Create an FDT with root node and properties
+ *
+ * The size is set to the minimum needed
+ *
+ * @uts: Test state
+ * @fdt: Place to write FDT
+ * @size: Maximum size of space for fdt
+ */
+static int make_fuller_fdt(struct unit_test_state *uts, void *fdt, int size)
+{
+	fdt32_t regs[2] = { cpu_to_fdt32(0x1234), cpu_to_fdt32(0x1000) };
+
+	/*
+	 * Assemble the following DT for test purposes:
+	 *
+	 * / {
+	 * 	#address-cells = <0x00000001>;
+	 * 	#size-cells = <0x00000001>;
+	 * 	compatible = "u-boot,fdt-test";
+	 * 	model = "U-Boot FDT test";
+	 *
+	 *	aliases {
+	 *		badalias = "/bad/alias";
+	 *		subnodealias = "/test-node@1234/subnode";
+	 *		testnodealias = "/test-node@1234";
+	 *	};
+	 *
+	 * 	test-node@1234 {
+	 * 		#address-cells = <0x00000000>;
+	 * 		#size-cells = <0x00000000>;
+	 * 		compatible = "u-boot,fdt-test-device1";
+	 * 		clock-names = "fixed", "i2c", "spi", "uart2", "uart1";
+	 * 		u-boot,empty-property;
+	 * 		clock-frequency = <0x00fde800>;
+	 * 		regs = <0x00001234 0x00001000>;
+	 *
+	 * 		subnode {
+	 * 			#address-cells = <0x00000000>;
+	 * 			#size-cells = <0x00000000>;
+	 * 			compatible = "u-boot,fdt-subnode-test-device";
+	 * 		};
+	 * 	};
+	 * };
+	 */
+
+	ut_assertok(fdt_create(fdt, size));
+	ut_assertok(fdt_finish_reservemap(fdt));
+	ut_assert(fdt_begin_node(fdt, "") >= 0);
+
+	ut_assertok(fdt_property_u32(fdt, "#address-cells", 1));
+	ut_assertok(fdt_property_u32(fdt, "#size-cells", 1));
+	/* <string> */
+	ut_assertok(fdt_property_string(fdt, "compatible", "u-boot,fdt-test"));
+	/* <string> */
+	ut_assertok(fdt_property_string(fdt, "model", "U-Boot FDT test"));
+
+	ut_assert(fdt_begin_node(fdt, "aliases") >= 0);
+	/* <string> */
+	ut_assertok(fdt_property_string(fdt, "badalias", "/bad/alias"));
+	/* <string> */
+	ut_assertok(fdt_property_string(fdt, "subnodealias", "/test-node@1234/subnode"));
+	/* <string> */
+	ut_assertok(fdt_property_string(fdt, "testnodealias", "/test-node@1234"));
+	ut_assertok(fdt_end_node(fdt));
+
+	ut_assert(fdt_begin_node(fdt, "test-node@1234") >= 0);
+	ut_assertok(fdt_property_cell(fdt, "#address-cells", 0));
+	ut_assertok(fdt_property_cell(fdt, "#size-cells", 0));
+	/* <string> */
+	ut_assertok(fdt_property_string(fdt, "compatible", "u-boot,fdt-test-device1"));
+	/* <stringlist> */
+	ut_assertok(fdt_property(fdt, "clock-names", "fixed\0i2c\0spi\0uart2\0uart1\0", 26));
+	/* <empty> */
+	ut_assertok(fdt_property(fdt, "u-boot,empty-property", NULL, 0));
+	/*
+	 * <u32>
+	 * This value is deliberate as it used to break cmd/fdt.c
+	 * is_printable_string() implementation.
+	 */
+	ut_assertok(fdt_property_u32(fdt, "clock-frequency", 16640000));
+	/* <prop-encoded-array> */
+	ut_assertok(fdt_property(fdt, "regs", &regs, sizeof(regs)));
+	ut_assert(fdt_begin_node(fdt, "subnode") >= 0);
+	ut_assertok(fdt_property_cell(fdt, "#address-cells", 0));
+	ut_assertok(fdt_property_cell(fdt, "#size-cells", 0));
+	ut_assertok(fdt_property_string(fdt, "compatible", "u-boot,fdt-subnode-test-device"));
+	ut_assertok(fdt_end_node(fdt));
+	ut_assertok(fdt_end_node(fdt));
+
+	ut_assertok(fdt_end_node(fdt));
+	ut_assertok(fdt_finish(fdt));
+
+	return 0;
+}
+
 /* Test 'fdt addr' getting/setting address */
 static int fdt_test_addr(struct unit_test_state *uts)
 {
@@ -108,7 +211,7 @@
 FDT_TEST(fdt_test_addr, UT_TESTF_CONSOLE_REC);
 
 /* Test 'fdt addr' resizing an fdt */
-static int fdt_test_resize(struct unit_test_state *uts)
+static int fdt_test_addr_resize(struct unit_test_state *uts)
 {
 	char fdt[256];
 	const int newsize = sizeof(fdt) / 2;
@@ -140,60 +243,733 @@
 
 	return 0;
 }
-FDT_TEST(fdt_test_resize, UT_TESTF_CONSOLE_REC);
+FDT_TEST(fdt_test_addr_resize, UT_TESTF_CONSOLE_REC);
 
-/* Test 'fdt get' reading an fdt */
-static int fdt_test_get(struct unit_test_state *uts)
+static int fdt_test_move(struct unit_test_state *uts)
 {
-	ulong addr;
+	char fdt[256];
+	ulong addr, newaddr = 0x10000;
+	const int size = sizeof(fdt);
+	uint32_t ts;
+	void *buf;
 
-	addr = map_to_sysmem(gd->fdt_blob);
+	/* Original source DT */
+	ut_assertok(make_test_fdt(uts, fdt, size));
+	ts = fdt_totalsize(fdt);
+	addr = map_to_sysmem(fdt);
 	set_working_fdt_addr(addr);
 
-	/* Test getting default element of /clk-test node clock-names property */
+	/* Moved target DT location */
+	buf = map_sysmem(newaddr, size);
+	memset(buf, 0, size);
+
+	/* Test moving the working FDT to a new location */
 	ut_assertok(console_record_reset_enable());
-	ut_assertok(run_command("fdt get value fdflt /clk-test clock-names", 0));
-	ut_asserteq_str("fixed", env_get("fdflt"));
+	ut_assertok(run_commandf("fdt move %08x %08x %x", addr, newaddr, ts));
+	ut_assert_nextline("Working FDT set to %lx", newaddr);
 	ut_assertok(ut_check_console_end(uts));
 
-	/* Test getting 0th element of /clk-test node clock-names property */
+	/* Compare the source and destination DTs */
 	ut_assertok(console_record_reset_enable());
-	ut_assertok(run_command("fdt get value fzero /clk-test clock-names 0", 0));
-	ut_asserteq_str("fixed", env_get("fzero"));
+	ut_assertok(run_commandf("cmp.b %08x %08x %x", addr, newaddr, ts));
+	ut_assert_nextline("Total of %d byte(s) were the same", ts);
 	ut_assertok(ut_check_console_end(uts));
 
-	/* Test getting 1st element of /clk-test node clock-names property */
+	return 0;
+}
+FDT_TEST(fdt_test_move, UT_TESTF_CONSOLE_REC);
+
+static int fdt_test_resize(struct unit_test_state *uts)
+{
+	char fdt[256];
+	const unsigned int newsize = 0x2000;
+	uint32_t ts;
+	ulong addr;
+
+	/* Original source DT */
+	ut_assertok(make_test_fdt(uts, fdt, sizeof(fdt)));
+	fdt_shrink_to_minimum(fdt, 0);	/* Resize with 0 extra bytes */
+	ts = fdt_totalsize(fdt);
+	addr = map_to_sysmem(fdt);
+	set_working_fdt_addr(addr);
+
+	/* Test resizing the working FDT and verify the new space was added */
 	ut_assertok(console_record_reset_enable());
-	ut_assertok(run_command("fdt get value fone /clk-test clock-names 1", 0));
-	ut_asserteq_str("i2c", env_get("fone"));
+	ut_assertok(run_commandf("fdt resize %x", newsize));
+	ut_asserteq(ts + newsize, fdt_totalsize(fdt));
 	ut_assertok(ut_check_console_end(uts));
 
-	/* Test getting 2nd element of /clk-test node clock-names property */
+	return 0;
+}
+FDT_TEST(fdt_test_resize, UT_TESTF_CONSOLE_REC);
+
+/* Test 'fdt get value' reading an fdt */
+static int fdt_test_get_value_string(struct unit_test_state *uts,
+				     const char *node, const char *prop,
+				     const char *idx,  const char *strres,
+				     const int intres)
+{
 	ut_assertok(console_record_reset_enable());
-	ut_assertok(run_command("fdt get value ftwo /clk-test clock-names 2", 0));
-	ut_asserteq_str("spi", env_get("ftwo"));
+	ut_assertok(run_commandf("fdt get value var %s %s %s",
+				 node, prop, idx ? : ""));
+	if (strres) {
+		ut_asserteq_str(strres, env_get("var"));
+	} else {
+		ut_asserteq(intres, env_get_hex("var", 0x1234));
+	}
 	ut_assertok(ut_check_console_end(uts));
 
-	/* Test missing 10th element of /clk-test node clock-names property */
+	return 0;
+}
+
+static int fdt_test_get_value_common(struct unit_test_state *uts,
+				     const char *node)
+{
+	/* Test getting default element of $node node clock-names property */
+	fdt_test_get_value_string(uts, node, "clock-names", NULL, "fixed", 0);
+
+	/* Test getting 0th element of $node node clock-names property */
+	fdt_test_get_value_string(uts, node, "clock-names", "0", "fixed", 0);
+
+	/* Test getting 1st element of $node node clock-names property */
+	fdt_test_get_value_string(uts, node, "clock-names", "1", "i2c", 0);
+
+	/* Test getting 2nd element of $node node clock-names property */
+	fdt_test_get_value_string(uts, node, "clock-names", "2", "spi", 0);
+
+	/*
+	 * Test getting default element of $node node regs property.
+	 * The result here is highly unusual: the non-indexed value read from
+	 * an integer array is a string of concatenated values from the array,
+	 * but only if the array is shorter than 40 characters. Anything
+	 * longer is an error. This is a special case for handling hashes.
+	 */
+	fdt_test_get_value_string(uts, node, "regs", NULL, "3412000000100000", 0);
+
+	/* Test getting 0th element of $node node regs property */
+	fdt_test_get_value_string(uts, node, "regs", "0", NULL, 0x1234);
+
+	/* Test getting 1st element of $node node regs property */
+	fdt_test_get_value_string(uts, node, "regs", "1", NULL, 0x1000);
+
+	/* Test missing 10th element of $node node clock-names property */
 	ut_assertok(console_record_reset_enable());
-	ut_asserteq(1, run_command("fdt get value ftwo /clk-test clock-names 10", 0));
+	ut_asserteq(1, run_commandf("fdt get value ften %s clock-names 10", node));
 	ut_assertok(ut_check_console_end(uts));
 
-	/* Test getting default element of /clk-test node nonexistent property */
+	/* Test missing 10th element of $node node regs property */
 	ut_assertok(console_record_reset_enable());
-	ut_asserteq(1, run_command("fdt get value fnone /clk-test nonexistent", 1));
+	ut_asserteq(1, run_commandf("fdt get value ften %s regs 10", node));
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting default element of $node node nonexistent property */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_commandf("fdt get value fnone %s nonexistent", node));
 	ut_assert_nextline("libfdt fdt_getprop(): FDT_ERR_NOTFOUND");
 	ut_assertok(ut_check_console_end(uts));
 
+	return 0;
+}
+
+static int fdt_test_get_value(struct unit_test_state *uts)
+{
+	char fdt[4096];
+	ulong addr;
+	int ret;
+
+	ut_assertok(make_fuller_fdt(uts, fdt, sizeof(fdt)));
+	addr = map_to_sysmem(fdt);
+	set_working_fdt_addr(addr);
+
+	ret = fdt_test_get_value_common(uts, "/test-node@1234");
+	if (!ret)
+		ret = fdt_test_get_value_common(uts, "testnodealias");
+	if (ret)
+		return ret;
+
 	/* Test getting default element of /nonexistent node */
 	ut_assertok(console_record_reset_enable());
 	ut_asserteq(1, run_command("fdt get value fnode /nonexistent nonexistent", 1));
 	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
 	ut_assertok(ut_check_console_end(uts));
 
+	/* Test getting default element of bad alias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get value vbadalias badalias nonexistent", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting default element of nonexistent alias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get value vnoalias noalias nonexistent", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_BADPATH");
+	ut_assertok(ut_check_console_end(uts));
+
 	return 0;
 }
-FDT_TEST(fdt_test_get, UT_TESTF_CONSOLE_REC);
+FDT_TEST(fdt_test_get_value, UT_TESTF_CONSOLE_REC);
+
+static int fdt_test_get_name(struct unit_test_state *uts)
+{
+	char fdt[4096];
+	ulong addr;
+
+	ut_assertok(make_fuller_fdt(uts, fdt, sizeof(fdt)));
+	addr = map_to_sysmem(fdt);
+	set_working_fdt_addr(addr);
+
+	/* Test getting name of node 0 in /, which is /aliases node */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_command("fdt get name nzero / 0", 0));
+	ut_asserteq_str("aliases", env_get("nzero"));
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting name of node 1 in /, which is /test-node@1234 node */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_command("fdt get name none / 1", 0));
+	ut_asserteq_str("test-node@1234", env_get("none"));
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting name of node -1 in /, which is /aliases node, same as 0 */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_command("fdt get name nmone / -1", 0));
+	ut_asserteq_str("aliases", env_get("nmone"));
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting name of node 2 in /, which does not exist */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get name ntwo / 2", 1));
+	ut_assert_nextline("libfdt node not found");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting name of node 0 in /test-node@1234, which is /subnode node */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_command("fdt get name snzero /test-node@1234 0", 0));
+	ut_asserteq_str("subnode", env_get("snzero"));
+	ut_assertok(run_command("fdt get name asnzero testnodealias 0", 0));
+	ut_asserteq_str("subnode", env_get("asnzero"));
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting name of node 1 in /test-node@1234, which does not exist */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get name snone /test-node@1234 1", 1));
+	ut_assert_nextline("libfdt node not found");
+	ut_asserteq(1, run_command("fdt get name asnone testnodealias 1", 1));
+	ut_assert_nextline("libfdt node not found");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting name of node -1 in /test-node@1234, which is /subnode node, same as 0 */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_command("fdt get name snmone /test-node@1234 -1", 0));
+	ut_asserteq_str("subnode", env_get("snmone"));
+	ut_assertok(run_command("fdt get name asnmone testnodealias -1", 0));
+	ut_asserteq_str("subnode", env_get("asnmone"));
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting name of nonexistent node */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get name nonode /nonexistent 0", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting name of bad alias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get name vbadalias badalias 0", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting name of nonexistent alias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get name vnoalias noalias 0", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_BADPATH");
+	ut_assertok(ut_check_console_end(uts));
+
+	return 0;
+}
+FDT_TEST(fdt_test_get_name, UT_TESTF_CONSOLE_REC);
+
+static int fdt_test_get_addr_common(struct unit_test_state *uts, char *fdt,
+				    const char *path, const char *prop)
+{
+	unsigned int offset;
+	int path_offset;
+	void *prop_ptr;
+	int len = 0;
+
+	path_offset = fdt_path_offset(fdt, path);
+	ut_assert(path_offset >= 0);
+	prop_ptr = (void *)fdt_getprop(fdt, path_offset, prop, &len);
+	ut_assertnonnull(prop_ptr);
+	offset = (char *)prop_ptr - fdt;
+
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_commandf("fdt get addr pstr %s %s", path, prop));
+	ut_asserteq((ulong)map_sysmem(env_get_hex("fdtaddr", 0x1234), 0),
+		    (ulong)(map_sysmem(env_get_hex("pstr", 0x1234), 0) - offset));
+	ut_assertok(ut_check_console_end(uts));
+
+	return 0;
+}
+
+static int fdt_test_get_addr(struct unit_test_state *uts)
+{
+	char fdt[4096];
+	ulong addr;
+
+	ut_assertok(make_fuller_fdt(uts, fdt, sizeof(fdt)));
+	addr = map_to_sysmem(fdt);
+	set_working_fdt_addr(addr);
+
+	/* Test getting address of root node / string property "compatible" */
+	fdt_test_get_addr_common(uts, fdt, "/", "compatible");
+
+	/* Test getting address of node /test-node@1234 stringlist property "clock-names" */
+	fdt_test_get_addr_common(uts, fdt, "/test-node@1234", "clock-names");
+	fdt_test_get_addr_common(uts, fdt, "testnodealias", "clock-names");
+
+	/* Test getting address of node /test-node@1234 u32 property "clock-frequency" */
+	fdt_test_get_addr_common(uts, fdt, "/test-node@1234", "clock-frequency");
+	fdt_test_get_addr_common(uts, fdt, "testnodealias", "clock-frequency");
+
+	/* Test getting address of node /test-node@1234 empty property "u-boot,empty-property" */
+	fdt_test_get_addr_common(uts, fdt, "/test-node@1234", "u-boot,empty-property");
+	fdt_test_get_addr_common(uts, fdt, "testnodealias", "u-boot,empty-property");
+
+	/* Test getting address of node /test-node@1234 array property "regs" */
+	fdt_test_get_addr_common(uts, fdt, "/test-node@1234", "regs");
+	fdt_test_get_addr_common(uts, fdt, "testnodealias", "regs");
+
+	/* Test getting address of node /test-node@1234/subnode non-existent property "noprop" */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get addr pnoprop /test-node@1234/subnode noprop", 1));
+	ut_assert_nextline("libfdt fdt_getprop(): FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting address of non-existent node /test-node@1234/nonode@1 property "noprop" */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get addr pnonode /test-node@1234/nonode@1 noprop", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	return 0;
+}
+FDT_TEST(fdt_test_get_addr, UT_TESTF_CONSOLE_REC);
+
+static int fdt_test_get_size_common(struct unit_test_state *uts,
+				     const char *path, const char *prop,
+				     const unsigned int val)
+{
+	ut_assertok(console_record_reset_enable());
+	if (prop) {
+		ut_assertok(run_commandf("fdt get size sstr %s %s", path, prop));
+	} else {
+		ut_assertok(run_commandf("fdt get size sstr %s", path));
+	}
+	ut_asserteq(val, env_get_hex("sstr", 0x1234));
+	ut_assertok(ut_check_console_end(uts));
+
+	return 0;
+}
+
+static int fdt_test_get_size(struct unit_test_state *uts)
+{
+	char fdt[4096];
+	ulong addr;
+
+	ut_assertok(make_fuller_fdt(uts, fdt, sizeof(fdt)));
+	addr = map_to_sysmem(fdt);
+	set_working_fdt_addr(addr);
+
+	/* Test getting size of root node / string property "compatible" */
+	fdt_test_get_size_common(uts, "/", "compatible", 16);
+
+	/* Test getting size of node /test-node@1234 stringlist property "clock-names" */
+	fdt_test_get_size_common(uts, "/test-node@1234", "clock-names", 26);
+	fdt_test_get_size_common(uts, "testnodealias", "clock-names", 26);
+
+	/* Test getting size of node /test-node@1234 u32 property "clock-frequency" */
+	fdt_test_get_size_common(uts, "/test-node@1234", "clock-frequency", 4);
+	fdt_test_get_size_common(uts, "testnodealias", "clock-frequency", 4);
+
+	/* Test getting size of node /test-node@1234 empty property "u-boot,empty-property" */
+	fdt_test_get_size_common(uts, "/test-node@1234", "u-boot,empty-property", 0);
+	fdt_test_get_size_common(uts, "testnodealias", "u-boot,empty-property", 0);
+
+	/* Test getting size of node /test-node@1234 array property "regs" */
+	fdt_test_get_size_common(uts, "/test-node@1234", "regs", 8);
+	fdt_test_get_size_common(uts, "testnodealias", "regs", 8);
+
+	/* Test getting node count of node / */
+	fdt_test_get_size_common(uts, "/", NULL, 2);
+
+	/* Test getting node count of node /test-node@1234/subnode */
+	fdt_test_get_size_common(uts, "/test-node@1234/subnode", NULL, 0);
+	fdt_test_get_size_common(uts, "subnodealias", NULL, 0);
+
+	/* Test getting size of node /test-node@1234/subnode non-existent property "noprop" */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get size pnoprop /test-node@1234/subnode noprop", 1));
+	ut_assert_nextline("libfdt fdt_getprop(): FDT_ERR_NOTFOUND");
+	ut_asserteq(1, run_command("fdt get size pnoprop subnodealias noprop", 1));
+	ut_assert_nextline("libfdt fdt_getprop(): FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting size of non-existent node /test-node@1234/nonode@1 property "noprop" */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get size pnonode /test-node@1234/nonode@1 noprop", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting node count of non-existent node /test-node@1234/nonode@1 */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get size pnonode /test-node@1234/nonode@1", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting node count of bad alias badalias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get size pnonode badalias noprop", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test getting node count of non-existent alias noalias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt get size pnonode noalias", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_BADPATH");
+	ut_assertok(ut_check_console_end(uts));
+
+	return 0;
+}
+FDT_TEST(fdt_test_get_size, UT_TESTF_CONSOLE_REC);
+
+static int fdt_test_set_single(struct unit_test_state *uts,
+			       const char *path, const char *prop,
+			       const char *sval, int ival, bool integer)
+{
+	/*
+	 * Set single element string/integer/<empty> property into DT, that is:
+	 * => fdt set /path property string
+	 * => fdt set /path property integer
+	 * => fdt set /path property
+	 */
+	ut_assertok(console_record_reset_enable());
+	if (sval) {
+		ut_assertok(run_commandf("fdt set %s %s %s", path, prop, sval));
+	} else if (integer) {
+		ut_assertok(run_commandf("fdt set %s %s <%d>", path, prop, ival));
+	} else {
+		ut_assertok(run_commandf("fdt set %s %s", path, prop));
+	}
+
+	/* Validate the property is present and has the correct value. */
+	ut_assertok(run_commandf("fdt get value svar %s %s", path, prop));
+	if (sval) {
+		ut_asserteq_str(sval, env_get("svar"));
+	} else if (integer) {
+		ut_asserteq(ival, env_get_hex("svar", 0x1234));
+	} else {
+		ut_assertnull(env_get("svar"));
+	}
+	ut_assertok(ut_check_console_end(uts));
+
+	return 0;
+}
+
+static int fdt_test_set_multi(struct unit_test_state *uts,
+			      const char *path, const char *prop,
+			      const char *sval1, const char *sval2,
+			      int ival1, int ival2)
+{
+	/*
+	 * Set multi element string/integer array property in DT, that is:
+	 * => fdt set /path property <string1 string2>
+	 * => fdt set /path property <integer1 integer2>
+	 *
+	 * The set is done twice here deliberately. The first set adds the
+	 * property with an extra trailing element in its array to make the
+	 * array longer, the second set is the expected final content of the
+	 * array property. The longer array is used to verify that the new
+	 * array is correctly sized and that a read past the new array length
+	 * triggers a failure.
+	 */
+	ut_assertok(console_record_reset_enable());
+	if (sval1 && sval2) {
+		ut_assertok(run_commandf("fdt set %s %s %s %s end", path, prop, sval1, sval2));
+		ut_assertok(run_commandf("fdt set %s %s %s %s", path, prop, sval1, sval2));
+	} else {
+		ut_assertok(run_commandf("fdt set %s %s <%d %d 10>", path, prop, ival1, ival2));
+		ut_assertok(run_commandf("fdt set %s %s <%d %d>", path, prop, ival1, ival2));
+	}
+
+	/*
+	 * Validate the property is present and has the correct value.
+	 *
+	 * The "end/10" above and "svarn" below are used to validate that a
+	 * previous 'fdt set' to a longer array does not pollute the newly-set
+	 * shorter array.
+	 */
+	ut_assertok(run_commandf("fdt get value svar1 %s %s 0", path, prop));
+	ut_assertok(run_commandf("fdt get value svar2 %s %s 1", path, prop));
+	ut_asserteq(1, run_commandf("fdt get value svarn %s %s 2", path, prop));
+	if (sval1 && sval2) {
+		ut_asserteq_str(sval1, env_get("svar1"));
+		ut_asserteq_str(sval2, env_get("svar2"));
+		ut_assertnull(env_get("svarn"));
+	} else {
+		ut_asserteq(ival1, env_get_hex("svar1", 0x1234));
+		ut_asserteq(ival2, env_get_hex("svar2", 0x1234));
+		ut_assertnull(env_get("svarn"));
+	}
+	ut_assertok(ut_check_console_end(uts));
+
+	return 0;
+}
+
+static int fdt_test_set_node(struct unit_test_state *uts,
+			     const char *path, const char *prop)
+{
+	fdt_test_set_single(uts, path, prop, "new", 0, false);
+	fdt_test_set_single(uts, path, prop, "rewrite", 0, false);
+	fdt_test_set_single(uts, path, prop, NULL, 42, true);
+	fdt_test_set_single(uts, path, prop, NULL, 0, false);
+	fdt_test_set_multi(uts, path, prop, NULL, NULL, 42, 1701);
+	fdt_test_set_multi(uts, path, prop, NULL, NULL, 74656, 9);
+	fdt_test_set_multi(uts, path, prop, "42", "1701", 0, 0);
+	fdt_test_set_multi(uts, path, prop, "74656", "9", 0, 0);
+
+	return 0;
+}
+
+static int fdt_test_set(struct unit_test_state *uts)
+{
+	char fdt[8192];
+	ulong addr;
+
+	ut_assertok(make_fuller_fdt(uts, fdt, sizeof(fdt)));
+	fdt_shrink_to_minimum(fdt, 4096);	/* Resize with 4096 extra bytes */
+	addr = map_to_sysmem(fdt);
+	set_working_fdt_addr(addr);
+
+	/* Test setting of root node / existing property "compatible" */
+	fdt_test_set_node(uts, "/", "compatible");
+
+	/* Test setting of root node / new property "newproperty" */
+	fdt_test_set_node(uts, "/", "newproperty");
+
+	/* Test setting of subnode existing property "compatible" */
+	fdt_test_set_node(uts, "/test-node@1234/subnode", "compatible");
+	fdt_test_set_node(uts, "subnodealias", "compatible");
+
+	/* Test setting of subnode new property "newproperty" */
+	fdt_test_set_node(uts, "/test-node@1234/subnode", "newproperty");
+	fdt_test_set_node(uts, "subnodealias", "newproperty");
+
+	/* Test setting property of non-existent node */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt set /no-node noprop", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test setting property of non-existent alias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt set noalias noprop", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_BADPATH");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test setting property of bad alias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_command("fdt set badalias noprop", 1));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	return 0;
+}
+FDT_TEST(fdt_test_set, UT_TESTF_CONSOLE_REC);
+
+static int fdt_test_mknode(struct unit_test_state *uts)
+{
+	char fdt[8192];
+	ulong addr;
+
+	ut_assertok(make_fuller_fdt(uts, fdt, sizeof(fdt)));
+	fdt_shrink_to_minimum(fdt, 4096);	/* Resize with 4096 extra bytes */
+	addr = map_to_sysmem(fdt);
+	set_working_fdt_addr(addr);
+
+	/* Test creation of new node in / */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_commandf("fdt mknode / newnode"));
+	ut_assertok(run_commandf("fdt list /newnode"));
+	ut_assert_nextline("newnode {");
+	ut_assert_nextline("};");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test creation of new node in /test-node@1234 */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_commandf("fdt mknode /test-node@1234 newsubnode"));
+	ut_assertok(run_commandf("fdt list /test-node@1234/newsubnode"));
+	ut_assert_nextline("newsubnode {");
+	ut_assert_nextline("};");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test creation of new node in /test-node@1234 by alias */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_commandf("fdt mknode testnodealias newersubnode"));
+	ut_assertok(run_commandf("fdt list testnodealias/newersubnode"));
+	ut_assert_nextline("newersubnode {");
+	ut_assert_nextline("};");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test creation of new node in /test-node@1234 over existing node */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_commandf("fdt mknode testnodealias newsubnode"));
+	ut_assert_nextline("libfdt fdt_add_subnode(): FDT_ERR_EXISTS");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test creation of new node in /test-node@1234 by alias over existing node */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_commandf("fdt mknode testnodealias newersubnode"));
+	ut_assert_nextline("libfdt fdt_add_subnode(): FDT_ERR_EXISTS");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test creation of new node in non-existent node */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_commandf("fdt mknode /no-node newnosubnode"));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test creation of new node in non-existent alias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_commandf("fdt mknode noalias newfailsubnode"));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_BADPATH");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test creation of new node in bad alias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_commandf("fdt mknode badalias newbadsubnode"));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	return 0;
+}
+FDT_TEST(fdt_test_mknode, UT_TESTF_CONSOLE_REC);
+
+static int fdt_test_rm(struct unit_test_state *uts)
+{
+	char fdt[4096];
+	ulong addr;
+
+	ut_assertok(make_fuller_fdt(uts, fdt, sizeof(fdt)));
+	addr = map_to_sysmem(fdt);
+	set_working_fdt_addr(addr);
+
+	/* Test removal of property in root node / */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_commandf("fdt print / compatible"));
+	ut_assert_nextline("compatible = \"u-boot,fdt-test\"");
+	ut_assertok(run_commandf("fdt rm / compatible"));
+	ut_asserteq(1, run_commandf("fdt print / compatible"));
+	ut_assert_nextline("libfdt fdt_getprop(): FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test removal of property clock-names in subnode /test-node@1234 */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_commandf("fdt print /test-node@1234 clock-names"));
+	ut_assert_nextline("clock-names = \"fixed\", \"i2c\", \"spi\", \"uart2\", \"uart1\"");
+	ut_assertok(run_commandf("fdt rm /test-node@1234 clock-names"));
+	ut_asserteq(1, run_commandf("fdt print /test-node@1234 clock-names"));
+	ut_assert_nextline("libfdt fdt_getprop(): FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test removal of property u-boot,empty-property in subnode /test-node@1234 by alias */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_commandf("fdt print testnodealias u-boot,empty-property"));
+	ut_assert_nextline("testnodealias u-boot,empty-property");
+	ut_assertok(run_commandf("fdt rm testnodealias u-boot,empty-property"));
+	ut_asserteq(1, run_commandf("fdt print testnodealias u-boot,empty-property"));
+	ut_assert_nextline("libfdt fdt_getprop(): FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test removal of non-existent property noprop in subnode /test-node@1234 */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_commandf("fdt rm /test-node@1234 noprop"));
+	ut_assert_nextline("libfdt fdt_delprop(): FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test removal of non-existent node /no-node@5678 */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_commandf("fdt rm /no-node@5678"));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test removal of subnode /test-node@1234/subnode by alias */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_commandf("fdt rm subnodealias"));
+	ut_asserteq(1, run_commandf("fdt print /test-node@1234/subnode"));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test removal of node by non-existent alias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_commandf("fdt rm noalias"));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_BADPATH");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test removal of node by bad alias */
+	ut_assertok(console_record_reset_enable());
+	ut_asserteq(1, run_commandf("fdt rm badalias"));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test removal of node /test-node@1234 */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_commandf("fdt rm /test-node@1234"));
+	ut_asserteq(1, run_commandf("fdt print /test-node@1234"));
+	ut_assert_nextline("libfdt fdt_path_offset() returned FDT_ERR_NOTFOUND");
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test removal of node / */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_commandf("fdt rm /"));
+	ut_asserteq(1, run_commandf("fdt print /"));
+	ut_assertok(ut_check_console_end(uts));
+
+	return 0;
+}
+FDT_TEST(fdt_test_rm, UT_TESTF_CONSOLE_REC);
+
+static int fdt_test_bootcpu(struct unit_test_state *uts)
+{
+	char fdt[256];
+	ulong addr;
+	int i;
+
+	ut_assertok(make_test_fdt(uts, fdt, sizeof(fdt)));
+	addr = map_to_sysmem(fdt);
+	set_working_fdt_addr(addr);
+
+	/* Test getting default bootcpu entry */
+	ut_assertok(console_record_reset_enable());
+	ut_assertok(run_commandf("fdt header get bootcpu boot_cpuid_phys"));
+	ut_asserteq(0, env_get_ulong("bootcpu", 10, 0x1234));
+	ut_assertok(ut_check_console_end(uts));
+
+	/* Test setting and getting new bootcpu entry, twice, to test overwrite */
+	for (i = 42; i <= 43; i++) {
+		ut_assertok(console_record_reset_enable());
+		ut_assertok(run_commandf("fdt bootcpu %d", i));
+		ut_assertok(ut_check_console_end(uts));
+
+		/* Test getting new bootcpu entry */
+		ut_assertok(console_record_reset_enable());
+		ut_assertok(run_commandf("fdt header get bootcpu boot_cpuid_phys"));
+		ut_asserteq(i, env_get_ulong("bootcpu", 10, 0x1234));
+		ut_assertok(ut_check_console_end(uts));
+	}
+
+	return 0;
+}
+FDT_TEST(fdt_test_bootcpu, UT_TESTF_CONSOLE_REC);
 
 int do_ut_fdt(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[])
 {
diff --git a/test/py/requirements.txt b/test/py/requirements.txt
index fae8b59..e241780 100644
--- a/test/py/requirements.txt
+++ b/test/py/requirements.txt
@@ -1,5 +1,6 @@
 atomicwrites==1.4.1
 attrs==19.3.0
+concurrencytest==0.1.2
 coverage==4.5.4
 extras==1.0.0
 filelock==3.0.12
diff --git a/test/run b/test/run
index c4ab046..93b556f 100755
--- a/test/run
+++ b/test/run
@@ -76,6 +76,7 @@
 
 run_test "binman" ./tools/binman/binman --toolpath ${TOOLS_DIR} test
 run_test "patman" ./tools/patman/patman test
+run_test "u_boot_pylib" ./tools/u_boot_pylib/u_boot_pylib
 
 run_test "buildman" ./tools/buildman/buildman -t ${skip}
 run_test "fdt" ./tools/dtoc/test_fdt -t
diff --git a/tools/binman/binman.rst b/tools/binman/binman.rst
index 2bcb7d3..e65fbff 100644
--- a/tools/binman/binman.rst
+++ b/tools/binman/binman.rst
@@ -95,6 +95,19 @@
 - binary - an input binary that goes into the image
 
 
+Installation
+------------
+
+You can install binman using::
+
+   pip install binary-manager
+
+This name is used since `binman` conflicts with an existing package on PyPI.
+
+If you are using binman within the U-Boot tree, it may be easiest to add a
+symlink from your local `~/.bin` directory to `/path/to/tools/binman/binman`.
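+
+For example, assuming `~/.bin` is on your PATH, something like this creates
+the symlink (adjust the source path to match your U-Boot tree)::
+
+   ln -s /path/to/tools/binman/binman ~/.bin/binman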
+
+
 Relationship to FIT
 -------------------
 
@@ -838,6 +851,14 @@
     is the symbol to lookup (relative to elf-base-sym) and <offset> is an offset
     to add to that value.
 
+preserve:
+    Indicates that this entry should be preserved by any firmware updates. This
+    flag should be checked by the updater when it is deciding which entries to
+    update. This flag is normally attached to sections but can be attached to
+    a single entry in a section if the updater supports it. Note that binman
+    itself has no control over the updater's behaviour, so this is just a
+    signal. It is not enforced by binman.
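+
+    For example, a section might be marked like this in the binman image
+    description (the node name `ro-fw` is only illustrative)::
+
+        ro-fw {
+            preserve;
+        };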
+
 Examples of the above options can be found in the tests. See the
 tools/binman/test directory.
 
@@ -1326,6 +1347,22 @@
 
     $ binman replace -i image.bin "*u-boot*" -I indir
 
+It is possible to replace whole sections as well, but in that case any
+information about entries within the section may become outdated. This is
+because Binman cannot know whether things have moved around or been resized
+within the section once you have updated its data.
+
+Technical note: With 'allow-repack', Binman writes information about the
+original offset and size properties of each entry, if any were specified, in
+the 'orig-offset' and 'orig-size' properties. This allows Binman to distinguish
+between an entry which ended up being packed at an offset (or assigned a size)
+and an entry which had a particular offset / size requested in the Binman
+configuration. Where a particular offset / size was requested, this is treated
+as set in stone, so Binman will ensure it doesn't change. Without this feature,
+repacking an entry might cause it to disobey the original constraints provided
+when it was created.
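+
+As a rough illustration (the entry and offset below are just an example), an
+entry pinned in the image description like this::
+
+    u-boot {
+        offset = <0x10000>;
+    };
+
+has `orig-offset = <0x10000>` recorded when the image is built with
+'allow-repack', so a later repack keeps it at that offset instead of choosing
+a new one.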
+
 
 .. _`BinmanLogging`:
 
@@ -1407,6 +1444,16 @@
 a particular tool. Some tools are built from source code, in which case you will
 need to have at least the `build-essential` and `git` packages installed.
 
+Tools are fetched into the `~/.binman-tools` directory. This directory is
+automatically added to the toolpath so there is no need to use `--toolpath` to
+specify it. If you want to use these tools outside binman, you may want to
+add this directory to your `PATH`. For example, if you use bash, add this to
+the end of `.bashrc`::
+
+   PATH="$HOME/.binman-tools:$PATH"
+
+To select a custom directory, use the `--tooldir` option.
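+
+For example, something like this fetches any missing tools into a custom
+directory (the path is just an example)::
+
+   binman --tooldir /tmp/bintools tool -f missing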
+
 Bintool Documentation
 =====================
 
@@ -1425,8 +1472,9 @@
 
 Usage::
 
-    binman [-h] [-B BUILD_DIR] [-D] [-H] [--toolpath TOOLPATH] [-T THREADS]
-        [--test-section-timeout] [-v VERBOSITY] [-V]
+    binman [-h] [-B BUILD_DIR] [-D] [--tooldir TOOLDIR] [-H]
+        [--toolpath TOOLPATH] [-T THREADS] [--test-section-timeout]
+        [-v VERBOSITY] [-V]
         {build,bintool-docs,entry-docs,ls,extract,replace,test,tool} ...
 
 Binman provides the following commands:
@@ -1451,11 +1499,13 @@
 -D, --debug
     Enabling debugging (provides a full traceback on error)
 
+--tooldir TOOLDIR     Set the directory to store tools
+
 -H, --full-help
     Display the README file
 
 --toolpath TOOLPATH
-    Add a path to the directories containing tools
+    Add a path to the list of directories containing tools
 
 -T THREADS, --threads THREADS
     Number of threads to use (0=single-thread). Note that -T0 is useful for
@@ -1663,6 +1713,12 @@
 -m, --map
     Output a map file for the updated image
 
+-O OUTDIR, --outdir OUTDIR
+    Path to directory to use for intermediate and output files
+
+-p, --preserve
+    Preserve temporary output directory even if option -O is not given
+
 This replaces one or more entries in an existing image. See
 `Replacing files in an image`_.
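+
+For example (directory names are illustrative), this replaces all matching
+entries using files from `indir`, keeping intermediate files in `workdir`::
+
+    binman replace -i image.bin -I indir -O workdir -p "*u-boot*"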
 
diff --git a/tools/binman/bintool.py b/tools/binman/bintool.py
index 8fda13f..8162968 100644
--- a/tools/binman/bintool.py
+++ b/tools/binman/bintool.py
@@ -18,10 +18,10 @@
 import tempfile
 import urllib.error
 
-from patman import command
-from patman import terminal
-from patman import tools
-from patman import tout
+from u_boot_pylib import command
+from u_boot_pylib import terminal
+from u_boot_pylib import tools
+from u_boot_pylib import tout
 
 BINMAN_DIR = os.path.dirname(os.path.realpath(__file__))
 
@@ -43,8 +43,6 @@
 # Status of tool fetching
 FETCHED, FAIL, PRESENT, STATUS_COUNT = range(4)
 
-DOWNLOAD_DESTDIR = os.path.join(os.getenv('HOME'), 'bin')
-
 class Bintool:
     """Tool which operates on binaries to help produce entry contents
 
@@ -53,6 +51,10 @@
     # List of bintools to regard as missing
     missing_list = []
 
+    # Directory to store tools. Note that this is set up by set_tool_dir(),
+    # which must be called before this class is used.
+    tooldir = ''
+
     def __init__(self, name, desc, version_regex=None, version_args='-V'):
         self.name = name
         self.desc = desc
@@ -112,6 +114,11 @@
         obj = cls(name)
         return obj
 
+    @classmethod
+    def set_tool_dir(cls, pathname):
+        """Set the path to use to store and find tools"""
+        cls.tooldir = pathname
+
     def show(self):
         """Show a line of information about a bintool"""
         if self.is_present():
@@ -208,7 +215,8 @@
             return FAIL
         if result is not True:
             fname, tmpdir = result
-            dest = os.path.join(DOWNLOAD_DESTDIR, self.name)
+            dest = os.path.join(self.tooldir, self.name)
+            os.makedirs(self.tooldir, exist_ok=True)
             print(f"- writing to '{dest}'")
             shutil.move(fname, dest)
             if tmpdir:
@@ -389,7 +397,7 @@
 
     @classmethod
     def apt_install(cls, package):
-        """Install a bintool using the 'aot' tool
+        """Install a bintool using the 'apt' tool
 
         This requires use of sudo so it may request a password
 
diff --git a/tools/binman/bintool_test.py b/tools/binman/bintool_test.py
index 7efb839..f9b16d4 100644
--- a/tools/binman/bintool_test.py
+++ b/tools/binman/bintool_test.py
@@ -16,10 +16,10 @@
 from binman import bintool
 from binman.bintool import Bintool
 
-from patman import command
-from patman import terminal
-from patman import test_util
-from patman import tools
+from u_boot_pylib import command
+from u_boot_pylib import terminal
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
 
 # pylint: disable=R0904
 class TestBintool(unittest.TestCase):
@@ -134,12 +134,14 @@
         dirname = os.path.join(self._indir, 'download_dir')
         os.mkdir(dirname)
         fname = os.path.join(dirname, 'downloaded')
+
+        # Rely on bintool to create this directory
         destdir = os.path.join(self._indir, 'dest_dir')
-        os.mkdir(destdir)
+
         dest_fname = os.path.join(destdir, '_testing')
         self.seq = 0
 
-        with unittest.mock.patch.object(bintool, 'DOWNLOAD_DESTDIR', destdir):
+        with unittest.mock.patch.object(bintool.Bintool, 'tooldir', destdir):
             with unittest.mock.patch.object(tools, 'download',
                                             side_effect=handle_download):
                 with test_util.capture_sys_output() as (stdout, _):
@@ -250,7 +252,7 @@
         btest = Bintool.create('_testing')
         col = terminal.Color()
         self.fname = None
-        with unittest.mock.patch.object(bintool, 'DOWNLOAD_DESTDIR',
+        with unittest.mock.patch.object(bintool.Bintool, 'tooldir',
                                         self._indir):
             with unittest.mock.patch.object(tools, 'run', side_effect=fake_run):
                 with test_util.capture_sys_output() as (stdout, _):
@@ -344,8 +346,11 @@
 
     def test_failed_command(self):
         """Check that running a command that does not exist returns None"""
-        btool = Bintool.create('_testing')
-        result = btool.run_cmd_result('fred')
+        destdir = os.path.join(self._indir, 'dest_dir')
+        os.mkdir(destdir)
+        with unittest.mock.patch.object(bintool.Bintool, 'tooldir', destdir):
+            btool = Bintool.create('_testing')
+            result = btool.run_cmd_result('fred')
         self.assertIsNone(result)
 
 
diff --git a/tools/binman/bintools.rst b/tools/binman/bintools.rst
index edb373a..c30e7eb 100644
--- a/tools/binman/bintools.rst
+++ b/tools/binman/bintools.rst
@@ -10,6 +10,20 @@
 
 
 
+Bintool: bzip2: Compression/decompression using the bzip2 algorithm
+-------------------------------------------------------------------
+
+This bintool supports running `bzip2` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+    man bzip2
+
+
+
 Bintool: cbfstool: Coreboot filesystem (CBFS) tool
 --------------------------------------------------
 
@@ -58,6 +72,20 @@
 
 
 
+Bintool: gzip: Compression/decompression using the gzip algorithm
+-----------------------------------------------------------------
+
+This bintool supports running `gzip` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+    man gzip
+
+
+
 Bintool: ifwitool: Handles the 'ifwitool' tool
 ----------------------------------------------
 
@@ -101,6 +129,20 @@
 
 
 
+Bintool: lzop: Compression/decompression using the lzop algorithm
+-----------------------------------------------------------------
+
+This bintool supports running `lzop` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+    man lzop
+
+
+
 Bintool: mkimage: Image generation for U-Boot
 ---------------------------------------------
 
@@ -113,3 +155,31 @@
 
 
 
+Bintool: xz: Compression/decompression using the xz algorithm
+-------------------------------------------------------------
+
+This bintool supports running `xz` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+    man xz
+
+
+
+Bintool: zstd: Compression/decompression using the zstd algorithm
+-----------------------------------------------------------------
+
+This bintool supports running `zstd` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+    man zstd
+
+
+
diff --git a/tools/binman/btool/lz4.py b/tools/binman/btool/lz4.py
index dc9e379..fd520d1 100644
--- a/tools/binman/btool/lz4.py
+++ b/tools/binman/btool/lz4.py
@@ -60,7 +60,7 @@
 import tempfile
 
 from binman import bintool
-from patman import tools
+from u_boot_pylib import tools
 
 # pylint: disable=C0103
 class Bintoollz4(bintool.Bintool):
diff --git a/tools/binman/btool/lzma_alone.py b/tools/binman/btool/lzma_alone.py
index 52a960f..1fda2f6 100644
--- a/tools/binman/btool/lzma_alone.py
+++ b/tools/binman/btool/lzma_alone.py
@@ -37,7 +37,7 @@
 import tempfile
 
 from binman import bintool
-from patman import tools
+from u_boot_pylib import tools
 
 # pylint: disable=C0103
 class Bintoollzma_alone(bintool.Bintool):
diff --git a/tools/binman/btool/openssl.py b/tools/binman/btool/openssl.py
new file mode 100644
index 0000000..3a4dbdd
--- /dev/null
+++ b/tools/binman/btool/openssl.py
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for openssl
+
+openssl provides a number of features useful for signing images
+
+Documentation is at https://www.openssl.org/docs/
+
+Source code is at https://www.openssl.org/
+"""
+
+import hashlib
+
+from binman import bintool
+from u_boot_pylib import tools
+
+class Bintoolopenssl(bintool.Bintool):
+    """openssl tool
+
+    This bintool supports creating new openssl certificates.
+
+    It also supports fetching a binary openssl
+
+    Documentation about openssl is at https://www.openssl.org/
+    """
+    def __init__(self, name):
+        super().__init__(
+            name, 'openssl cryptography toolkit',
+            version_regex=r'OpenSSL (.*) \(', version_args='version')
+
+    def x509_cert(self, cert_fname, input_fname, key_fname, cn, revision,
+                  config_fname):
+        """Create a certificate
+
+        Args:
+            cert_fname (str): Filename of certificate to create
+            input_fname (str): Filename containing data to sign
+            key_fname (str): Filename of .pem file
+            cn (str): Common name
+            revision (int): Revision number
+            config_fname (str): Filename to write the openssl config into
+
+        Returns:
+            str: Tool output
+        """
+        indata = tools.read_file(input_fname)
+        hashval = hashlib.sha512(indata).hexdigest()
+        with open(config_fname, 'w', encoding='utf-8') as outf:
+            print(f'''[ req ]
+distinguished_name     = req_distinguished_name
+x509_extensions        = v3_ca
+prompt                 = no
+dirstring_type         = nobmp
+
+[ req_distinguished_name ]
+CN                     = {cert_fname}
+
+[ v3_ca ]
+basicConstraints       = CA:true
+1.3.6.1.4.1.294.1.3    = ASN1:SEQUENCE:swrv
+1.3.6.1.4.1.294.1.34   = ASN1:SEQUENCE:sysfw_image_integrity
+
+[ swrv ]
+swrv = INTEGER:{revision}
+
+[ sysfw_image_integrity ]
+shaType                = OID:2.16.840.1.101.3.4.2.3
+shaValue               = FORMAT:HEX,OCT:{hashval}
+imageSize              = INTEGER:{len(indata)}
+''', file=outf)
+        args = ['req', '-new', '-x509', '-key', key_fname, '-nodes',
+                '-outform', 'DER', '-out', cert_fname, '-config', config_fname,
+                '-sha512']
+        return self.run_cmd(*args)
+
+    def fetch(self, method):
+        """Fetch handler for openssl
+
+        This installs the openssl package using the apt utility.
+
+        Args:
+            method (FETCH_...): Method to use
+
+        Returns:
+            True if the file was fetched and now installed, None if a method
+            other than FETCH_BIN was requested
+
+        Raises:
+            ValueError: Fetching could not be completed
+        """
+        if method != bintool.FETCH_BIN:
+            return None
+        return self.apt_install('openssl')
diff --git a/tools/binman/cbfs_util.py b/tools/binman/cbfs_util.py
index 7bd3d89..fc56b40 100644
--- a/tools/binman/cbfs_util.py
+++ b/tools/binman/cbfs_util.py
@@ -22,8 +22,8 @@
 
 from binman import bintool
 from binman import elf
-from patman import command
-from patman import tools
+from u_boot_pylib import command
+from u_boot_pylib import tools
 
 # Set to True to enable printing output while working
 DEBUG = False
diff --git a/tools/binman/cbfs_util_test.py b/tools/binman/cbfs_util_test.py
index e0f792f..ee951d1 100755
--- a/tools/binman/cbfs_util_test.py
+++ b/tools/binman/cbfs_util_test.py
@@ -20,8 +20,8 @@
 from binman import cbfs_util
 from binman.cbfs_util import CbfsWriter
 from binman import elf
-from patman import test_util
-from patman import tools
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
 
 U_BOOT_DATA           = b'1234'
 U_BOOT_DTB_DATA       = b'udtb'
diff --git a/tools/binman/cmdline.py b/tools/binman/cmdline.py
index 986d6f1..1b7bbe8 100644
--- a/tools/binman/cmdline.py
+++ b/tools/binman/cmdline.py
@@ -7,7 +7,13 @@
 
 import argparse
 from argparse import ArgumentParser
+import os
 from binman import state
+import pathlib
+
+BINMAN_DIR = pathlib.Path(__file__).parent
+HAS_TESTS = (BINMAN_DIR / "ftest.py").exists()
 
 def make_extract_parser(subparsers):
     """make_extract_parser: Make a subparser for the 'extract' command
@@ -67,6 +73,14 @@
             options provides access to the options (e.g. option.debug)
             args is a list of string arguments
     """
+    def _AddPreserve(pars):
+        pars.add_argument('-O', '--outdir', type=str,
+            action='store', help='Path to directory to use for intermediate '
+            'and output files')
+        pars.add_argument('-p', '--preserve', action='store_true',\
+            help='Preserve temporary output directory even if option -O is not '
+                 'given')
+
     if '-H' in argv:
         argv.append('build')
 
@@ -80,8 +94,11 @@
         help='Enabling debugging (provides a full traceback on error)')
     parser.add_argument('-H', '--full-help', action='store_true',
         default=False, help='Display the README file')
+    parser.add_argument('--tooldir', type=str,
+        default=os.path.join(os.getenv('HOME'), '.binman-tools'),
+        help='Set the directory to store tools')
     parser.add_argument('--toolpath', type=str, action='append',
-        help='Add a path to the directories containing tools')
+        help='Add a path to the list of directories containing tools')
     parser.add_argument('-T', '--threads', type=int,
           default=None, help='Number of threads to use (0=single-thread)')
     parser.add_argument('--test-section-timeout', action='store_true',
@@ -118,12 +135,7 @@
     build_parser.add_argument('-n', '--no-expanded', action='store_true',
             help="Don't use 'expanded' versions of entries where available; "
                  "normally 'u-boot' becomes 'u-boot-expanded', for example")
-    build_parser.add_argument('-O', '--outdir', type=str,
-        action='store', help='Path to directory to use for intermediate and '
-        'output files')
-    build_parser.add_argument('-p', '--preserve', action='store_true',\
-        help='Preserve temporary output directory even if option -O is not '
-             'given')
+    _AddPreserve(build_parser)
     build_parser.add_argument('-u', '--update-fdt', action='store_true',
         default=False, help='Update the binman node with offset/size info')
     build_parser.add_argument('--update-fdt-in-elf', type=str,
@@ -160,26 +172,30 @@
         help='Path to directory to use for input files')
     replace_parser.add_argument('-m', '--map', action='store_true',
         default=False, help='Output a map file for the updated image')
+    _AddPreserve(replace_parser)
     replace_parser.add_argument('paths', type=str, nargs='*',
                                 help='Paths within file to replace (wildcard)')
 
-    test_parser = subparsers.add_parser('test', help='Run tests')
-    test_parser.add_argument('-P', '--processes', type=int,
-        help='set number of processes to use for running tests')
-    test_parser.add_argument('-T', '--test-coverage', action='store_true',
-        default=False, help='run tests and check for 100%% coverage')
-    test_parser.add_argument('-X', '--test-preserve-dirs', action='store_true',
-        help='Preserve and display test-created input directories; also '
-             'preserve the output directory if a single test is run (pass test '
-             'name at the end of the command line')
-    test_parser.add_argument('tests', nargs='*',
-                             help='Test names to run (omit for all)')
+    if HAS_TESTS:
+        test_parser = subparsers.add_parser('test', help='Run tests')
+        test_parser.add_argument('-P', '--processes', type=int,
+            help='set number of processes to use for running tests')
+        test_parser.add_argument('-T', '--test-coverage', action='store_true',
+            default=False, help='run tests and check for 100%% coverage')
+        test_parser.add_argument(
+            '-X', '--test-preserve-dirs', action='store_true',
+            help='Preserve and display test-created input directories; also '
+                 'preserve the output directory if a single test is run (pass '
+                 'test name at the end of the command line')
+        test_parser.add_argument('tests', nargs='*',
+                                 help='Test names to run (omit for all)')
 
     tool_parser = subparsers.add_parser('tool', help='Check bintools')
     tool_parser.add_argument('-l', '--list', action='store_true',
                              help='List all known bintools')
-    tool_parser.add_argument('-f', '--fetch', action='store_true',
-                             help='fetch a bintool from a known location (or: all/missing)')
+    tool_parser.add_argument(
+        '-f', '--fetch', action='store_true',
+        help='fetch a bintool from a known location (or: all/missing)')
     tool_parser.add_argument('bintools', type=str, nargs='*')
 
     return parser.parse_args(argv)
diff --git a/tools/binman/control.py b/tools/binman/control.py
index e647400..2f2b489 100644
--- a/tools/binman/control.py
+++ b/tools/binman/control.py
@@ -7,19 +7,20 @@
 
 from collections import OrderedDict
 import glob
+import importlib.resources
 import os
 import pkg_resources
 import re
 
 import sys
-from patman import tools
 
 from binman import bintool
 from binman import cbfs_util
-from patman import command
 from binman import elf
 from binman import entry
-from patman import tout
+from u_boot_pylib import command
+from u_boot_pylib import tools
+from u_boot_pylib import tout
 
 # These are imported if needed since they import libfdt
 state = None
@@ -402,6 +403,8 @@
     image_fname = os.path.abspath(image_fname)
     image = Image.FromFile(image_fname)
 
+    image.mark_build_done()
+
     # Replace an entry from a single file, as a special case
     if input_fname:
         if not entry_paths:
@@ -641,19 +644,29 @@
     global state
 
     if args.full_help:
-        tools.print_full_help(
-            os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'README.rst')
-        )
+        with importlib.resources.path('binman', 'README.rst') as readme:
+            tools.print_full_help(str(readme))
         return 0
 
     # Put these here so that we can import this module without libfdt
     from binman.image import Image
     from binman import state
 
+    tool_paths = []
+    if args.toolpath:
+        tool_paths += args.toolpath
+    if args.tooldir:
+        tool_paths.append(args.tooldir)
+    tools.set_tool_paths(tool_paths or None)
+    bintool.Bintool.set_tool_dir(args.tooldir)
+
     if args.cmd in ['ls', 'extract', 'replace', 'tool']:
         try:
             tout.init(args.verbosity)
-            tools.prepare_output_dir(None)
+            if args.cmd == 'replace':
+                tools.prepare_output_dir(args.outdir, args.preserve)
+            else:
+                tools.prepare_output_dir(None)
             if args.cmd == 'ls':
                 ListEntries(args.image, args.paths)
 
@@ -667,7 +680,6 @@
                                allow_resize=not args.fix_size, write_map=args.map)
 
             if args.cmd == 'tool':
-                tools.set_tool_paths(args.toolpath)
                 if args.list:
                     bintool.Bintool.list_all()
                 elif args.fetch:
@@ -719,7 +731,6 @@
         try:
             tools.set_input_dirs(args.indir)
             tools.prepare_output_dir(args.outdir, args.preserve)
-            tools.set_tool_paths(args.toolpath)
             state.SetEntryArgs(args.entry_arg)
             state.SetThreads(args.threads)
 
diff --git a/tools/binman/elf.py b/tools/binman/elf.py
index 3cc8a38..5816284 100644
--- a/tools/binman/elf.py
+++ b/tools/binman/elf.py
@@ -13,9 +13,9 @@
 import struct
 import tempfile
 
-from patman import command
-from patman import tools
-from patman import tout
+from u_boot_pylib import command
+from u_boot_pylib import tools
+from u_boot_pylib import tout
 
 ELF_TOOLS = True
 try:
diff --git a/tools/binman/elf_test.py b/tools/binman/elf_test.py
index 8cb55eb..c980839 100644
--- a/tools/binman/elf_test.py
+++ b/tools/binman/elf_test.py
@@ -12,10 +12,10 @@
 import unittest
 
 from binman import elf
-from patman import command
-from patman import test_util
-from patman import tools
-from patman import tout
+from u_boot_pylib import command
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
+from u_boot_pylib import tout
 
 binman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
 
diff --git a/tools/binman/entries.rst b/tools/binman/entries.rst
index 7a04a61..9a52b22 100644
--- a/tools/binman/entries.rst
+++ b/tools/binman/entries.rst
@@ -887,6 +887,11 @@
 from the FMAP by using the offset information. This convention does not
 seem to be documented, but is used in Chromium OS.
 
+To mark an area as preserved, use the normal 'preserve' flag in the entry.
+This will result in the corresponding FMAP area having the
+FMAP_AREA_PRESERVE flag. This flag does not automatically propagate down to
+child entries.
+
 CBFS entries appear as a single entry, i.e. the sub-entries are ignored.
 
 
@@ -2271,6 +2276,24 @@
 
 
 
+.. _etype_x509_cert:
+
+Entry: x509-cert: An entry which contains an X509 certificate
+-------------------------------------------------------------
+
+Properties / Entry arguments:
+    - content: List of phandles to entries to sign
+
+Output files:
+    - input.<unique_name> - input file passed to openssl
+    - cert.<unique_name> - output file generated by openssl (which is
+        used as the entry contents)
+
+openssl signs the provided data, writing the signature in this entry. This
+allows verification that the data is genuine.
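+
+A minimal, illustrative image-description node might look like this, where
+`u_boot` is a label on the entry to be signed (the signing key and any other
+required entry arguments are not shown)::
+
+    x509-cert {
+        content = <&u_boot>;
+    };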
+
+
+
 .. _etype_x86_reset16:
 
 Entry: x86-reset16: x86 16-bit reset code for U-Boot
diff --git a/tools/binman/entry.py b/tools/binman/entry.py
index 5eacc5f..b10a433 100644
--- a/tools/binman/entry.py
+++ b/tools/binman/entry.py
@@ -14,9 +14,9 @@
 from binman import bintool
 from binman import elf
 from dtoc import fdt_util
-from patman import tools
-from patman.tools import to_hex, to_hex_size
-from patman import tout
+from u_boot_pylib import tools
+from u_boot_pylib.tools import to_hex, to_hex_size
+from u_boot_pylib import tout
 
 modules = {}
 
@@ -100,6 +100,14 @@
             appear in the map
         optional (bool): True if this entry contains an optional external blob
         overlap (bool): True if this entry overlaps with others
+        preserve (bool): True if this entry should be preserved when updating
+            firmware. This means that it will not be changed by the update.
+            This is just a signal: enforcement of this is up to the updater.
+            This flag does not automatically propagate down to child entries.
+        build_done (bool): Indicates that the entry data has been built and does
+            not need to be done again. This is only used with 'binman replace',
+            to stop sections from being rebuilt if their entries have not been
+            replaced
     """
     fake_dir = None
 
@@ -148,6 +156,8 @@
         self.overlap = False
         self.elf_base_sym = None
         self.offset_from_elf = None
+        self.preserve = False
+        self.build_done = False
 
     @staticmethod
     def FindEntryClass(etype, expanded):
@@ -310,6 +320,8 @@
         self.offset_from_elf = fdt_util.GetPhandleNameOffset(self._node,
                                                              'offset-from-elf')
 
+        self.preserve = fdt_util.GetBool(self._node, 'preserve')
+
     def GetDefaultFilename(self):
         return None
 
@@ -1006,6 +1018,7 @@
         else:
             self.contents_size = self.pre_reset_size
         ok = self.ProcessContentsUpdate(data)
+        self.build_done = False
         self.Detail('WriteData: size=%x, ok=%s' % (len(data), ok))
         section_ok = self.section.WriteChildData(self)
         return ok and section_ok
@@ -1027,6 +1040,14 @@
             True if the section could be updated successfully, False if the
                 data is such that the section could not update
         """
+        self.build_done = False
+        entry = self.section
+
+        # Now we must rebuild all sections above this one
+        while entry and entry != entry.section:
+            entry.build_done = False
+            entry = entry.section
+
         return True
 
     def GetSiblingOrder(self):
@@ -1104,7 +1125,7 @@
         If there are faked blobs, the entries are added to the list
 
         Args:
-            fake_blobs_list: List of Entry objects to be added to
+            faked_blobs_list: List of Entry objects to be added to
         """
         # This is meaningless for anything other than blobs
         pass
@@ -1349,3 +1370,11 @@
         val = elf.GetSymbolOffset(entry.elf_fname, sym_name,
                                   entry.elf_base_sym)
         return val + offset
+
+    def mark_build_done(self):
+        """Mark an entry as already built"""
+        self.build_done = True
+        entries = self.GetEntries()
+        if entries:
+            for entry in entries.values():
+                entry.mark_build_done()
diff --git a/tools/binman/entry_test.py b/tools/binman/entry_test.py
index a6fbf62..ac6582c 100644
--- a/tools/binman/entry_test.py
+++ b/tools/binman/entry_test.py
@@ -14,7 +14,7 @@
 from binman.etype.blob import Entry_blob
 from dtoc import fdt
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 class TestEntry(unittest.TestCase):
     def setUp(self):
diff --git a/tools/binman/etype/_testing.py b/tools/binman/etype/_testing.py
index 1c1efb2..e092d98 100644
--- a/tools/binman/etype/_testing.py
+++ b/tools/binman/etype/_testing.py
@@ -9,7 +9,7 @@
 
 from binman.entry import Entry, EntryArg
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 
 class Entry__testing(Entry):
diff --git a/tools/binman/etype/atf_fip.py b/tools/binman/etype/atf_fip.py
index 6ecd95b..73a3f85 100644
--- a/tools/binman/etype/atf_fip.py
+++ b/tools/binman/etype/atf_fip.py
@@ -11,7 +11,7 @@
 from binman.etype.section import Entry_section
 from binman.fip_util import FIP_TYPES, FipReader, FipWriter, UUID_LEN
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_atf_fip(Entry_section):
     """ARM Trusted Firmware's Firmware Image Package (FIP)
@@ -270,4 +270,4 @@
         # Recreate the data structure, leaving the data for this child alone,
         # so that child.data is used to pack into the FIP.
         self.ObtainContents(skip_entry=child)
-        return True
+        return super().WriteChildData(child)
diff --git a/tools/binman/etype/blob.py b/tools/binman/etype/blob.py
index c7ddced..064fae5 100644
--- a/tools/binman/etype/blob.py
+++ b/tools/binman/etype/blob.py
@@ -8,8 +8,8 @@
 from binman.entry import Entry
 from binman import state
 from dtoc import fdt_util
-from patman import tools
-from patman import tout
+from u_boot_pylib import tools
+from u_boot_pylib import tout
 
 class Entry_blob(Entry):
     """Arbitrary binary blob
@@ -102,7 +102,7 @@
         If there are faked blobs, the entries are added to the list
 
         Args:
-            fake_blobs_list: List of Entry objects to be added to
+            faked_blobs_list: List of Entry objects to be added to
         """
         if self.faked:
             faked_blobs_list.append(self)
diff --git a/tools/binman/etype/blob_ext.py b/tools/binman/etype/blob_ext.py
index fba6271..ca26530 100644
--- a/tools/binman/etype/blob_ext.py
+++ b/tools/binman/etype/blob_ext.py
@@ -9,8 +9,8 @@
 
 from binman.etype.blob import Entry_blob
 from dtoc import fdt_util
-from patman import tools
-from patman import tout
+from u_boot_pylib import tools
+from u_boot_pylib import tout
 
 class Entry_blob_ext(Entry_blob):
     """Externally built binary blob
@@ -26,11 +26,3 @@
     def __init__(self, section, etype, node):
         Entry_blob.__init__(self, section, etype, node)
         self.external = True
-
-    def SetAllowFakeBlob(self, allow_fake):
-        """Set whether the entry allows to create a fake blob
-
-        Args:
-            allow_fake_blob: True if allowed, False if not allowed
-        """
-        self.allow_fake = allow_fake
diff --git a/tools/binman/etype/blob_ext_list.py b/tools/binman/etype/blob_ext_list.py
index f00202e..1bfcf67 100644
--- a/tools/binman/etype/blob_ext_list.py
+++ b/tools/binman/etype/blob_ext_list.py
@@ -9,8 +9,8 @@
 
 from binman.etype.blob import Entry_blob
 from dtoc import fdt_util
-from patman import tools
-from patman import tout
+from u_boot_pylib import tools
+from u_boot_pylib import tout
 
 class Entry_blob_ext_list(Entry_blob):
     """List of externally built binary blobs
diff --git a/tools/binman/etype/cbfs.py b/tools/binman/etype/cbfs.py
index 832f8d0..575aa62 100644
--- a/tools/binman/etype/cbfs.py
+++ b/tools/binman/etype/cbfs.py
@@ -295,7 +295,7 @@
         # Recreate the data structure, leaving the data for this child alone,
         # so that child.data is used to pack into the FIP.
         self.ObtainContents(skip_entry=child)
-        return True
+        return super().WriteChildData(child)
 
     def AddBintools(self, btools):
         super().AddBintools(btools)
diff --git a/tools/binman/etype/fdtmap.py b/tools/binman/etype/fdtmap.py
index 33c9d03..f1f6217 100644
--- a/tools/binman/etype/fdtmap.py
+++ b/tools/binman/etype/fdtmap.py
@@ -9,8 +9,8 @@
 """
 
 from binman.entry import Entry
-from patman import tools
-from patman import tout
+from u_boot_pylib import tools
+from u_boot_pylib import tout
 
 FDTMAP_MAGIC   = b'_FDTMAP_'
 FDTMAP_HDR_LEN = 16
diff --git a/tools/binman/etype/files.py b/tools/binman/etype/files.py
index 2081bc7..c8757ea 100644
--- a/tools/binman/etype/files.py
+++ b/tools/binman/etype/files.py
@@ -11,7 +11,7 @@
 
 from binman.etype.section import Entry_section
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 # This is imported if needed
 state = None
diff --git a/tools/binman/etype/fill.py b/tools/binman/etype/fill.py
index c91d015..7c93d4e 100644
--- a/tools/binman/etype/fill.py
+++ b/tools/binman/etype/fill.py
@@ -5,7 +5,7 @@
 
 from binman.entry import Entry
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_fill(Entry):
     """An entry which is filled to a particular byte value
diff --git a/tools/binman/etype/fit.py b/tools/binman/etype/fit.py
index cd29435..03fe88e 100644
--- a/tools/binman/etype/fit.py
+++ b/tools/binman/etype/fit.py
@@ -12,7 +12,7 @@
 from binman import elf
 from dtoc import fdt_util
 from dtoc.fdt import Fdt
-from patman import tools
+from u_boot_pylib import tools
 
 # Supported operations, with the fit,operation property
 OP_GEN_FDT_NODES, OP_SPLIT_ELF = range(2)
@@ -453,6 +453,8 @@
             args.update({'align': fdt_util.fdt32_to_cpu(align.value)})
         if self.mkimage.run(reset_timestamp=True, output_fname=output_fname,
                             **args) is None:
+            if not self.GetAllowMissing():
+                self.Raise("Missing tool: 'mkimage'")
             # Bintool is missing; just use empty data as the output
             self.record_missing_bintool(self.mkimage)
             return tools.get_bytes(0, 1024)
@@ -775,6 +777,8 @@
         Args:
             image_pos (int): Position of this entry in the image
         """
+        if self.build_done:
+            return
         super().SetImagePos(image_pos)
 
         # If mkimage is missing we'll have empty data,
@@ -823,8 +827,11 @@
         self.mkimage = self.AddBintool(btools, 'mkimage')
 
     def CheckMissing(self, missing_list):
-        # We must use our private entry list for this since generator notes
+        # We must use our private entry list for this since generator nodes
         # which are removed from self._entries will otherwise not show up as
         # missing
         for entry in self._priv_entries.values():
             entry.CheckMissing(missing_list)
+
+    def CheckEntries(self):
+        pass
diff --git a/tools/binman/etype/fmap.py b/tools/binman/etype/fmap.py
index 0c57620..3669d91 100644
--- a/tools/binman/etype/fmap.py
+++ b/tools/binman/etype/fmap.py
@@ -7,9 +7,9 @@
 
 from binman.entry import Entry
 from binman import fmap_util
-from patman import tools
-from patman.tools import to_hex_size
-from patman import tout
+from u_boot_pylib import tools
+from u_boot_pylib.tools import to_hex_size
+from u_boot_pylib import tout
 
 
 class Entry_fmap(Entry):
@@ -33,6 +33,11 @@
     from the FMAP by using the offset information. This convention does not
     seem to be documented, but is used in Chromium OS.
 
+    To mark an area as preserved, use the normal 'preserve' flag in the entry.
+    This will result in the corresponding FMAP area having the
+    FMAP_AREA_PRESERVE flag. This flag does not automatically propagate down to
+    child entries.
+
     CBFS entries appear as a single entry, i.e. the sub-entries are ignored.
     """
     def __init__(self, section, etype, node):
@@ -48,6 +53,12 @@
             entries = entry.GetEntries()
             tout.debug("fmap: Add entry '%s' type '%s' (%s subentries)" %
                        (entry.GetPath(), entry.etype, to_hex_size(entries)))
+
+            # Collect any flag (separate lines to ensure code coverage)
+            flags = 0
+            if entry.preserve:
+                flags = fmap_util.FMAP_AREA_PRESERVE
+
             if entries and entry.etype != 'cbfs':
                 # Create an area for the section, which encompasses all entries
                 # within it
@@ -59,7 +70,7 @@
                 # Drop @ symbols in name
                 name = entry.name.replace('@', '')
                 areas.append(
-                    fmap_util.FmapArea(pos, entry.size or 0, name, 0))
+                    fmap_util.FmapArea(pos, entry.size or 0, name, flags))
                 for subentry in entries.values():
                     _AddEntries(areas, subentry)
             else:
@@ -67,7 +78,7 @@
                 if pos is not None:
                     pos -= entry.section.GetRootSkipAtStart()
                 areas.append(fmap_util.FmapArea(pos or 0, entry.size or 0,
-                                                entry.name, 0))
+                                                entry.name, flags))
 
         entries = self.GetImage().GetEntries()
         areas = []
diff --git a/tools/binman/etype/gbb.py b/tools/binman/etype/gbb.py
index ba2a362..cca18af 100644
--- a/tools/binman/etype/gbb.py
+++ b/tools/binman/etype/gbb.py
@@ -8,11 +8,11 @@
 
 from collections import OrderedDict
 
-from patman import command
+from u_boot_pylib import command
 from binman.entry import Entry, EntryArg
 
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 # Build GBB flags.
 # (src/platform/vboot_reference/firmware/include/gbb_header.h)
diff --git a/tools/binman/etype/intel_ifwi.py b/tools/binman/etype/intel_ifwi.py
index 04fad40..6513b97 100644
--- a/tools/binman/etype/intel_ifwi.py
+++ b/tools/binman/etype/intel_ifwi.py
@@ -10,7 +10,7 @@
 from binman.entry import Entry
 from binman.etype.blob_ext import Entry_blob_ext
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_intel_ifwi(Entry_blob_ext):
     """Intel Integrated Firmware Image (IFWI) file
diff --git a/tools/binman/etype/mkimage.py b/tools/binman/etype/mkimage.py
index cb264c3c..27a0c4b 100644
--- a/tools/binman/etype/mkimage.py
+++ b/tools/binman/etype/mkimage.py
@@ -9,7 +9,7 @@
 
 from binman.entry import Entry
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_mkimage(Entry):
     """Binary produced by mkimage
diff --git a/tools/binman/etype/null.py b/tools/binman/etype/null.py
index c10d482..263fb52 100644
--- a/tools/binman/etype/null.py
+++ b/tools/binman/etype/null.py
@@ -5,7 +5,7 @@
 
 from binman.entry import Entry
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_null(Entry):
     """An entry which has no contents of its own
diff --git a/tools/binman/etype/pre_load.py b/tools/binman/etype/pre_load.py
index b622281..bd3545b 100644
--- a/tools/binman/etype/pre_load.py
+++ b/tools/binman/etype/pre_load.py
@@ -8,7 +8,7 @@
 import os
 import struct
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 from binman.entry import Entry
 from binman.etype.collection import Entry_collection
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py
index 57b91ff..c36edd1 100644
--- a/tools/binman/etype/section.py
+++ b/tools/binman/etype/section.py
@@ -16,9 +16,9 @@
 from binman.entry import Entry
 from binman import state
 from dtoc import fdt_util
-from patman import tools
-from patman import tout
-from patman.tools import to_hex_size
+from u_boot_pylib import tools
+from u_boot_pylib import tout
+from u_boot_pylib.tools import to_hex_size
 
 
 class Entry_section(Entry):
@@ -172,7 +172,7 @@
     def IsSpecialSubnode(self, node):
         """Check if a node is a special one used by the section itself
 
-        Some notes are used for hashing / signatures and do not add entries to
+        Some nodes are used for hashing / signatures and do not add entries to
         the actual section.
 
         Returns:
@@ -397,10 +397,13 @@
             This excludes any padding. If the section is compressed, the
             compressed data is returned
         """
-        data = self.BuildSectionData(required)
-        if data is None:
-            return None
-        self.SetContents(data)
+        if not self.build_done:
+            data = self.BuildSectionData(required)
+            if data is None:
+                return None
+            self.SetContents(data)
+        else:
+            data = self.data
         if self._filename:
             tools.write_file(tools.get_output_filename(self._filename), data)
         return data
@@ -427,8 +430,11 @@
             self._SortEntries()
         self._extend_entries()
 
-        data = self.BuildSectionData(True)
-        self.SetContents(data)
+        if self.build_done:
+            self.size = None
+        else:
+            data = self.BuildSectionData(True)
+            self.SetContents(data)
 
         self.CheckSize()
 
@@ -810,6 +816,9 @@
     def LoadData(self, decomp=True):
         for entry in self._entries.values():
             entry.LoadData(decomp)
+        data = self.ReadData(decomp)
+        self.contents_size = len(data)
+        self.ProcessContentsUpdate(data)
         self.Detail('Loaded data')
 
     def GetImage(self):
@@ -866,10 +875,15 @@
         return data
 
     def WriteData(self, data, decomp=True):
-        self.Raise("Replacing sections is not implemented yet")
+        ok = super().WriteData(data, decomp)
+
+        # The section contents are now fixed and cannot be rebuilt from the
+        # containing entries.
+        self.mark_build_done()
+        return ok
 
     def WriteChildData(self, child):
-        return True
+        return super().WriteChildData(child)
 
     def SetAllowMissing(self, allow_missing):
         """Set whether a section allows missing external blobs
@@ -885,7 +899,7 @@
         """Set whether a section allows to create a fake blob
 
         Args:
-            allow_fake_blob: True if allowed, False if not allowed
+            allow_fake: True if allowed, False if not allowed
         """
         super().SetAllowFakeBlob(allow_fake)
         for entry in self._entries.values():
@@ -909,7 +923,7 @@
         If there are faked blobs, the entries are added to the list
 
         Args:
-            fake_blobs_list: List of Entry objects to be added to
+            faked_blobs_list: List of Entry objects to be added to
         """
         for entry in self._entries.values():
             entry.CheckFakedBlobs(faked_blobs_list)
diff --git a/tools/binman/etype/text.py b/tools/binman/etype/text.py
index c55e023..e4deb4a 100644
--- a/tools/binman/etype/text.py
+++ b/tools/binman/etype/text.py
@@ -7,7 +7,7 @@
 
 from binman.entry import Entry, EntryArg
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 
 class Entry_text(Entry):
diff --git a/tools/binman/etype/u_boot_dtb_with_ucode.py b/tools/binman/etype/u_boot_dtb_with_ucode.py
index 047d310..f7225ce 100644
--- a/tools/binman/etype/u_boot_dtb_with_ucode.py
+++ b/tools/binman/etype/u_boot_dtb_with_ucode.py
@@ -7,7 +7,7 @@
 
 from binman.entry import Entry
 from binman.etype.blob_dtb import Entry_blob_dtb
-from patman import tools
+from u_boot_pylib import tools
 
 # This is imported if needed
 state = None
diff --git a/tools/binman/etype/u_boot_elf.py b/tools/binman/etype/u_boot_elf.py
index 3ec774f..f4d86aa 100644
--- a/tools/binman/etype/u_boot_elf.py
+++ b/tools/binman/etype/u_boot_elf.py
@@ -9,7 +9,7 @@
 from binman.etype.blob import Entry_blob
 
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_u_boot_elf(Entry_blob):
     """U-Boot ELF image
diff --git a/tools/binman/etype/u_boot_env.py b/tools/binman/etype/u_boot_env.py
index c38340b..c027e93 100644
--- a/tools/binman/etype/u_boot_env.py
+++ b/tools/binman/etype/u_boot_env.py
@@ -8,7 +8,7 @@
 
 from binman.etype.blob import Entry_blob
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_u_boot_env(Entry_blob):
     """An entry which contains a U-Boot environment
diff --git a/tools/binman/etype/u_boot_spl_bss_pad.py b/tools/binman/etype/u_boot_spl_bss_pad.py
index 680d198..1ffeb39 100644
--- a/tools/binman/etype/u_boot_spl_bss_pad.py
+++ b/tools/binman/etype/u_boot_spl_bss_pad.py
@@ -10,7 +10,7 @@
 from binman import elf
 from binman.entry import Entry
 from binman.etype.blob import Entry_blob
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_u_boot_spl_bss_pad(Entry_blob):
     """U-Boot SPL binary padded with a BSS region
diff --git a/tools/binman/etype/u_boot_spl_expanded.py b/tools/binman/etype/u_boot_spl_expanded.py
index 319f670..fcd0dd1 100644
--- a/tools/binman/etype/u_boot_spl_expanded.py
+++ b/tools/binman/etype/u_boot_spl_expanded.py
@@ -5,7 +5,7 @@
 # Entry-type module for expanded U-Boot SPL binary
 #
 
-from patman import tout
+from u_boot_pylib import tout
 
 from binman import state
 from binman.etype.blob_phase import Entry_blob_phase
diff --git a/tools/binman/etype/u_boot_tpl_bss_pad.py b/tools/binman/etype/u_boot_tpl_bss_pad.py
index 47f4b23..29c6a95 100644
--- a/tools/binman/etype/u_boot_tpl_bss_pad.py
+++ b/tools/binman/etype/u_boot_tpl_bss_pad.py
@@ -10,7 +10,7 @@
 from binman import elf
 from binman.entry import Entry
 from binman.etype.blob import Entry_blob
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_u_boot_tpl_bss_pad(Entry_blob):
     """U-Boot TPL binary padded with a BSS region
diff --git a/tools/binman/etype/u_boot_tpl_expanded.py b/tools/binman/etype/u_boot_tpl_expanded.py
index 55fde3c..58db4f3 100644
--- a/tools/binman/etype/u_boot_tpl_expanded.py
+++ b/tools/binman/etype/u_boot_tpl_expanded.py
@@ -5,7 +5,7 @@
 # Entry-type module for expanded U-Boot TPL binary
 #
 
-from patman import tout
+from u_boot_pylib import tout
 
 from binman import state
 from binman.etype.blob_phase import Entry_blob_phase
diff --git a/tools/binman/etype/u_boot_tpl_with_ucode_ptr.py b/tools/binman/etype/u_boot_tpl_with_ucode_ptr.py
index c7f3f9d..86f9578 100644
--- a/tools/binman/etype/u_boot_tpl_with_ucode_ptr.py
+++ b/tools/binman/etype/u_boot_tpl_with_ucode_ptr.py
@@ -7,11 +7,11 @@
 
 import struct
 
-from patman import command
 from binman.entry import Entry
 from binman.etype.blob import Entry_blob
 from binman.etype.u_boot_with_ucode_ptr import Entry_u_boot_with_ucode_ptr
-from patman import tools
+from u_boot_pylib import command
+from u_boot_pylib import tools
 
 class Entry_u_boot_tpl_with_ucode_ptr(Entry_u_boot_with_ucode_ptr):
     """U-Boot TPL with embedded microcode pointer
diff --git a/tools/binman/etype/u_boot_ucode.py b/tools/binman/etype/u_boot_ucode.py
index 6945411..97ed7d7 100644
--- a/tools/binman/etype/u_boot_ucode.py
+++ b/tools/binman/etype/u_boot_ucode.py
@@ -7,7 +7,7 @@
 
 from binman.entry import Entry
 from binman.etype.blob import Entry_blob
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_u_boot_ucode(Entry_blob):
     """U-Boot microcode block
diff --git a/tools/binman/etype/u_boot_vpl_bss_pad.py b/tools/binman/etype/u_boot_vpl_bss_pad.py
index b2ce2a3..bba38cc 100644
--- a/tools/binman/etype/u_boot_vpl_bss_pad.py
+++ b/tools/binman/etype/u_boot_vpl_bss_pad.py
@@ -10,7 +10,7 @@
 from binman import elf
 from binman.entry import Entry
 from binman.etype.blob import Entry_blob
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_u_boot_vpl_bss_pad(Entry_blob):
     """U-Boot VPL binary padded with a BSS region
diff --git a/tools/binman/etype/u_boot_vpl_expanded.py b/tools/binman/etype/u_boot_vpl_expanded.py
index 92c64f0..deff5a3 100644
--- a/tools/binman/etype/u_boot_vpl_expanded.py
+++ b/tools/binman/etype/u_boot_vpl_expanded.py
@@ -5,7 +5,7 @@
 # Entry-type module for expanded U-Boot VPL binary
 #
 
-from patman import tout
+from u_boot_pylib import tout
 
 from binman import state
 from binman.etype.blob_phase import Entry_blob_phase
diff --git a/tools/binman/etype/u_boot_with_ucode_ptr.py b/tools/binman/etype/u_boot_with_ucode_ptr.py
index e275698..41731fd 100644
--- a/tools/binman/etype/u_boot_with_ucode_ptr.py
+++ b/tools/binman/etype/u_boot_with_ucode_ptr.py
@@ -11,8 +11,8 @@
 from binman.entry import Entry
 from binman.etype.blob import Entry_blob
 from dtoc import fdt_util
-from patman import tools
-from patman import command
+from u_boot_pylib import tools
+from u_boot_pylib import command
 
 class Entry_u_boot_with_ucode_ptr(Entry_blob):
     """U-Boot with embedded microcode pointer
diff --git a/tools/binman/etype/vblock.py b/tools/binman/etype/vblock.py
index 04cb722..4adb9a4 100644
--- a/tools/binman/etype/vblock.py
+++ b/tools/binman/etype/vblock.py
@@ -13,7 +13,7 @@
 from binman.etype.collection import Entry_collection
 
 from dtoc import fdt_util
-from patman import tools
+from u_boot_pylib import tools
 
 class Entry_vblock(Entry_collection):
     """An entry which contains a Chromium OS verified boot block
diff --git a/tools/binman/etype/x509_cert.py b/tools/binman/etype/x509_cert.py
new file mode 100644
index 0000000..f80a6ec
--- /dev/null
+++ b/tools/binman/etype/x509_cert.py
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2023 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+# Support for an X509 certificate, used to sign a set of entries
+
+from collections import OrderedDict
+import os
+
+from binman.entry import EntryArg
+from binman.etype.collection import Entry_collection
+
+from dtoc import fdt_util
+from u_boot_pylib import tools
+
+class Entry_x509_cert(Entry_collection):
+    """An entry which contains an X509 certificate
+
+    Properties / Entry arguments:
+        - content: List of phandles to entries to sign
+
+    Output files:
+        - input.<unique_name> - input file passed to openssl
+        - cert.<unique_name> - output file generated by openssl (which is
+            used as the entry contents)
+
+    openssl signs the provided data, writing the signature in this entry. This
+    allows verification that the data is genuine.
+    """
+    def __init__(self, section, etype, node):
+        super().__init__(section, etype, node)
+        self.openssl = None
+
+    def ReadNode(self):
+        super().ReadNode()
+        self._cert_ca = fdt_util.GetString(self._node, 'cert-ca')
+        self._cert_rev = fdt_util.GetInt(self._node, 'cert-revision-int', 0)
+        self.key_fname = self.GetEntryArgsOrProps([
+            EntryArg('keyfile', str)], required=True)[0]
+
+    def GetCertificate(self, required):
+        """Get the contents of this entry
+
+        Args:
+            required: True if the data must be present, False if it is OK to
+                return None
+
+        Returns:
+            bytes content of the entry, which is the signed certificate for
+                the provided data
+        """
+        # Join up the data files to be signed
+        input_data = self.GetContents(required)
+        if input_data is None:
+            return None
+
+        uniq = self.GetUniqueName()
+        output_fname = tools.get_output_filename('cert.%s' % uniq)
+        input_fname = tools.get_output_filename('input.%s' % uniq)
+        config_fname = tools.get_output_filename('config.%s' % uniq)
+        tools.write_file(input_fname, input_data)
+        stdout = self.openssl.x509_cert(
+            cert_fname=output_fname,
+            input_fname=input_fname,
+            key_fname=self.key_fname,
+            cn=self._cert_ca,
+            revision=self._cert_rev,
+            config_fname=config_fname)
+        if stdout is not None:
+            data = tools.read_file(output_fname)
+        else:
+            # Bintool is missing; just use 4KB of zero data
+            self.record_missing_bintool(self.openssl)
+            data = tools.get_bytes(0, 4096)
+        return data
+
+    def ObtainContents(self):
+        data = self.GetCertificate(False)
+        if data is None:
+            return False
+        self.SetContents(data)
+        return True
+
+    def ProcessContents(self):
+        # The blob may have changed due to WriteSymbols()
+        data = self.GetCertificate(True)
+        return self.ProcessContentsUpdate(data)
+
+    def AddBintools(self, btools):
+        super().AddBintools(btools)
+        self.openssl = self.AddBintool(btools, 'openssl')
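
The new x509-cert entry follows binman's usual pattern for optional external
tools: if openssl runs, its output file becomes the entry contents; if it is
missing, zero-filled placeholder bytes are used and the missing bintool is
recorded. A minimal sketch of that fallback, with illustrative names rather
than binman's own code:

def contents_or_placeholder(tool_stdout, output_fname, read_file, size=4096):
    """Return the tool's output file if it ran, else zero-filled bytes.

    Illustrative only: 'read_file' stands in for u_boot_pylib.tools.read_file
    and the 4096-byte placeholder matches the size chosen by the entry above.
    """
    if tool_stdout is not None:
        return read_file(output_fname)
    return bytes(size)
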
diff --git a/tools/binman/fdt_test.py b/tools/binman/fdt_test.py
index 94347b1..7ef8729 100644
--- a/tools/binman/fdt_test.py
+++ b/tools/binman/fdt_test.py
@@ -12,7 +12,7 @@
 from dtoc import fdt
 from dtoc import fdt_util
 from dtoc.fdt import FdtScan
-from patman import tools
+from u_boot_pylib import tools
 
 class TestFdt(unittest.TestCase):
     @classmethod
diff --git a/tools/binman/fip_util.py b/tools/binman/fip_util.py
index 95eee32..b5caab2 100755
--- a/tools/binman/fip_util.py
+++ b/tools/binman/fip_util.py
@@ -37,8 +37,8 @@
 sys.path.insert(2, os.path.join(OUR_PATH, '..'))
 
 # pylint: disable=C0413
-from patman import command
-from patman import tools
+from u_boot_pylib import command
+from u_boot_pylib import tools
 
 # The TOC header, at the start of the FIP
 HEADER_FORMAT = '<IIQ'
diff --git a/tools/binman/fip_util_test.py b/tools/binman/fip_util_test.py
index cf6d000..56aa56f 100755
--- a/tools/binman/fip_util_test.py
+++ b/tools/binman/fip_util_test.py
@@ -20,10 +20,10 @@
 sys.path.insert(2, os.path.join(OUR_PATH, '..'))
 
 # pylint: disable=C0413
-from patman import test_util
-from patman import tools
 from binman import bintool
 from binman import fip_util
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
 
 FIPTOOL = bintool.Bintool.create('fiptool')
 HAVE_FIPTOOL = FIPTOOL.is_present()
diff --git a/tools/binman/fmap_util.py b/tools/binman/fmap_util.py
index 1ce63d1..40f2dbf 100644
--- a/tools/binman/fmap_util.py
+++ b/tools/binman/fmap_util.py
@@ -10,7 +10,7 @@
 import struct
 import sys
 
-from patman import tools
+from u_boot_pylib import tools
 
 # constants imported from lib/fmap.h
 FMAP_SIGNATURE = b'__FMAP__'
@@ -45,6 +45,9 @@
     'flags',
 )
 
+# Flags supported by areas (bits 2:0 are unused so not included here)
+FMAP_AREA_PRESERVE = 1 << 3  # Preserved by any firmware updates
+
 # These are the two data structures supported by flashrom, a header (which
 # appears once at the start) and an area (which is repeated until the end of
 # the list of areas)
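
FMAP_AREA_PRESERVE is an ordinary bit flag, so consumers just test it against
an area's flags word. A small standalone example (the constant is duplicated
here rather than imported from binman):

FMAP_AREA_PRESERVE = 1 << 3  # repeated here so the example is standalone

def area_is_preserved(flags):
    """Return True if an fmap area is marked to survive firmware updates."""
    return bool(flags & FMAP_AREA_PRESERVE)

assert area_is_preserved(FMAP_AREA_PRESERVE)
assert not area_is_preserved(0)
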
diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py
index 062f54a..f1e14c6 100644
--- a/tools/binman/ftest.py
+++ b/tools/binman/ftest.py
@@ -34,10 +34,10 @@
 from binman.etype import fdtmap
 from binman.etype import image_header
 from binman.image import Image
-from patman import command
-from patman import test_util
-from patman import tools
-from patman import tout
+from u_boot_pylib import command
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
+from u_boot_pylib import tout
 
 # Contents of test files, corresponding to different entry types
 U_BOOT_DATA           = b'1234'
@@ -1702,7 +1702,7 @@
         self.assertEqual(b'SECTION0', fentry.name)
         self.assertEqual(0, fentry.offset)
         self.assertEqual(16, fentry.size)
-        self.assertEqual(0, fentry.flags)
+        self.assertEqual(fmap_util.FMAP_AREA_PRESERVE, fentry.flags)
 
         fentry = next(fiter)
         self.assertEqual(b'RO_U_BOOT', fentry.name)
@@ -1750,7 +1750,7 @@
 
     def _HandleGbbCommand(self, pipe_list):
         """Fake calls to the futility utility"""
-        if pipe_list[0][0] == 'futility':
+        if 'futility' in pipe_list[0][0]:
             fname = pipe_list[0][-1]
             # Append our GBB data to the file, which will happen every time the
             # futility command is called.
@@ -1812,7 +1812,7 @@
         self._hash_data is False, it writes VBLOCK_DATA, else it writes a hash
         of the input data (here, 'input.vblock').
         """
-        if pipe_list[0][0] == 'futility':
+        if 'futility' in pipe_list[0][0]:
             fname = pipe_list[0][3]
             with open(fname, 'wb') as fd:
                 if self._hash_data:
@@ -3999,9 +3999,17 @@
             self.assertEqual(expected, data[image_pos:image_pos+size])
 
     def testFitMissing(self):
+        """Test that binman complains if mkimage is missing"""
+        with self.assertRaises(ValueError) as e:
+            self._DoTestFile('162_fit_external.dts',
+                             force_missing_bintools='mkimage')
+        self.assertIn("Node '/binman/fit': Missing tool: 'mkimage'",
+                      str(e.exception))
+
+    def testFitMissingOK(self):
         """Test that binman still produces a FIT image if mkimage is missing"""
         with test_util.capture_sys_output() as (_, stderr):
-            self._DoTestFile('162_fit_external.dts',
+            self._DoTestFile('162_fit_external.dts', allow_missing=True,
                              force_missing_bintools='mkimage')
         err = stderr.getvalue()
         self.assertRegex(err, "Image 'image'.*missing bintools.*: mkimage")
@@ -5811,13 +5819,61 @@
         self.assertEqual(expected_fdtmap, fdtmap)
 
     def testReplaceSectionSimple(self):
-        """Test replacing a simple section with arbitrary data"""
+        """Test replacing a simple section with same-sized data"""
         new_data = b'w' * len(COMPRESS_DATA + U_BOOT_DATA)
-        with self.assertRaises(ValueError) as exc:
-            self._RunReplaceCmd('section', new_data,
-                                dts='241_replace_section_simple.dts')
+        data, expected_fdtmap, image = self._RunReplaceCmd('section',
+            new_data, dts='241_replace_section_simple.dts')
+        self.assertEqual(new_data, data)
+
+        entries = image.GetEntries()
+        self.assertIn('section', entries)
+        entry = entries['section']
+        self.assertEqual(len(new_data), entry.size)
+
+    def testReplaceSectionLarger(self):
+        """Test replacing a simple section with larger data"""
+        new_data = b'w' * (len(COMPRESS_DATA + U_BOOT_DATA) + 1)
+        data, expected_fdtmap, image = self._RunReplaceCmd('section',
+            new_data, dts='241_replace_section_simple.dts')
+        self.assertEqual(new_data, data)
+
+        entries = image.GetEntries()
+        self.assertIn('section', entries)
+        entry = entries['section']
+        self.assertEqual(len(new_data), entry.size)
+        fentry = entries['fdtmap']
+        self.assertEqual(entry.offset + entry.size, fentry.offset)
+
+    def testReplaceSectionSmaller(self):
+        """Test replacing a simple section with smaller data"""
+        new_data = b'w' * (len(COMPRESS_DATA + U_BOOT_DATA) - 1) + b'\0'
+        data, expected_fdtmap, image = self._RunReplaceCmd('section',
+            new_data, dts='241_replace_section_simple.dts')
+        self.assertEqual(new_data, data)
+
+        # The new size is the same as the old, just with a pad byte at the end
+        entries = image.GetEntries()
+        self.assertIn('section', entries)
+        entry = entries['section']
+        self.assertEqual(len(new_data), entry.size)
+
+    def testReplaceSectionSmallerAllow(self):
+        """Test failing to replace a simple section with smaller data"""
+        new_data = b'w' * (len(COMPRESS_DATA + U_BOOT_DATA) - 1)
+        try:
+            state.SetAllowEntryContraction(True)
+            with self.assertRaises(ValueError) as exc:
+                self._RunReplaceCmd('section', new_data,
+                                    dts='241_replace_section_simple.dts')
+        finally:
+            state.SetAllowEntryContraction(False)
+
+        # Since we have no information about the position of things within the
+        # section, we cannot adjust the position of /section/u-boot, so it
+        # ends up outside the section
         self.assertIn(
-            "Node '/section': Replacing sections is not implemented yet",
+            "Node '/section/u-boot': Offset 0x24 (36) size 0x4 (4) is outside "
+            "the section '/section' starting at 0x0 (0) of size 0x27 (39)",
             str(exc.exception))
 
     def testMkimageImagename(self):
@@ -6353,10 +6409,11 @@
             'tee-os-path': 'missing.bin',
         }
         test_subdir = os.path.join(self._indir, TEST_FDT_SUBDIR)
-        data = self._DoReadFileDtb(
-            '276_fit_firmware_loadables.dts',
-            entry_args=entry_args,
-            extra_indirs=[test_subdir])[0]
+        with test_util.capture_sys_output() as (stdout, stderr):
+            data = self._DoReadFileDtb(
+                '276_fit_firmware_loadables.dts',
+                entry_args=entry_args,
+                extra_indirs=[test_subdir])[0]
 
         dtb = fdt.Fdt.FromData(data)
         dtb.Scan()
@@ -6386,6 +6443,128 @@
         self.assertEqual(['u-boot', 'atf-2'],
                          fdt_util.GetStringList(node, 'loadables'))
 
+    def testTooldir(self):
+        """Test that we can specify the tooldir"""
+        with test_util.capture_sys_output() as (stdout, stderr):
+            self.assertEqual(0, self._DoBinman('--tooldir', 'fred',
+                                               'tool', '-l'))
+        self.assertEqual('fred', bintool.Bintool.tooldir)
+
+        # Check that the toolpath is updated correctly
+        self.assertEqual(['fred'], tools.tool_search_paths)
+
+        # Try with a few toolpaths; the tooldir should be at the end
+        with test_util.capture_sys_output() as (stdout, stderr):
+            self.assertEqual(0, self._DoBinman(
+                '--toolpath', 'mary', '--toolpath', 'anna', '--tooldir', 'fred',
+                'tool', '-l'))
+        self.assertEqual(['mary', 'anna', 'fred'], tools.tool_search_paths)
+
+    def testReplaceSectionEntry(self):
+        """Test replacing an entry in a section"""
+        expect_data = b'w' * len(U_BOOT_DATA + COMPRESS_DATA)
+        entry_data, expected_fdtmap, image = self._RunReplaceCmd('section/blob',
+            expect_data, dts='241_replace_section_simple.dts')
+        self.assertEqual(expect_data, entry_data)
+
+        entries = image.GetEntries()
+        self.assertIn('section', entries)
+        section = entries['section']
+
+        sect_entries = section.GetEntries()
+        self.assertIn('blob', sect_entries)
+        entry = sect_entries['blob']
+        self.assertEqual(len(expect_data), entry.size)
+
+        fname = tools.get_output_filename('image-updated.bin')
+        data = tools.read_file(fname)
+
+        new_blob_data = data[entry.image_pos:entry.image_pos + len(expect_data)]
+        self.assertEqual(expect_data, new_blob_data)
+
+        self.assertEqual(U_BOOT_DATA,
+                         data[entry.image_pos + len(expect_data):]
+                         [:len(U_BOOT_DATA)])
+
+    def testReplaceSectionDeep(self):
+        """Test replacing an entry in two levels of sections"""
+        expect_data = b'w' * len(U_BOOT_DATA + COMPRESS_DATA)
+        entry_data, expected_fdtmap, image = self._RunReplaceCmd(
+            'section/section/blob', expect_data,
+            dts='278_replace_section_deep.dts')
+        self.assertEqual(expect_data, entry_data)
+
+        entries = image.GetEntries()
+        self.assertIn('section', entries)
+        section = entries['section']
+
+        subentries = section.GetEntries()
+        self.assertIn('section', subentries)
+        section = subentries['section']
+
+        sect_entries = section.GetEntries()
+        self.assertIn('blob', sect_entries)
+        entry = sect_entries['blob']
+        self.assertEqual(len(expect_data), entry.size)
+
+        fname = tools.get_output_filename('image-updated.bin')
+        data = tools.read_file(fname)
+
+        new_blob_data = data[entry.image_pos:entry.image_pos + len(expect_data)]
+        self.assertEqual(expect_data, new_blob_data)
+
+        self.assertEqual(U_BOOT_DATA,
+                         data[entry.image_pos + len(expect_data):]
+                         [:len(U_BOOT_DATA)])
+
+    def testReplaceFitSibling(self):
+        """Test an image with a FIT inside where we replace its sibling"""
+        fname = TestFunctional._MakeInputFile('once', b'available once')
+        self._DoReadFileRealDtb('277_replace_fit_sibling.dts')
+        os.remove(fname)
+
+        try:
+            tmpdir, updated_fname = self._SetupImageInTmpdir()
+
+            fname = os.path.join(tmpdir, 'update-blob')
+            expected = b'w' * (len(COMPRESS_DATA + U_BOOT_DATA) + 1)
+            tools.write_file(fname, expected)
+
+            self._DoBinman('replace', '-i', updated_fname, 'blob', '-f', fname)
+            data = tools.read_file(updated_fname)
+            start = len(U_BOOT_DTB_DATA)
+            self.assertEqual(expected, data[start:start + len(expected)])
+            map_fname = os.path.join(tmpdir, 'image-updated.map')
+            self.assertFalse(os.path.exists(map_fname))
+        finally:
+            shutil.rmtree(tmpdir)
+
+    def testX509Cert(self):
+        """Test creating an X509 certificate"""
+        keyfile = self.TestFile('key.key')
+        entry_args = {
+            'keyfile': keyfile,
+        }
+        data = self._DoReadFileDtb('279_x509_cert.dts',
+                                   entry_args=entry_args)[0]
+        cert = data[:-4]
+        self.assertEqual(U_BOOT_DATA, data[-4:])
+
+        # TODO: verify the signature
+
+    def testX509CertMissing(self):
+        """Test that binman still produces an image if openssl is missing"""
+        keyfile = self.TestFile('key.key')
+        entry_args = {
+            'keyfile': 'keyfile',
+        }
+        with test_util.capture_sys_output() as (_, stderr):
+            self._DoTestFile('279_x509_cert.dts',
+                             force_missing_bintools='openssl',
+                             entry_args=entry_args)
+        err = stderr.getvalue()
+        self.assertRegex(err, "Image 'image'.*missing bintools.*: openssl")
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/tools/binman/image.py b/tools/binman/image.py
index 9415963..8ebf71d 100644
--- a/tools/binman/image.py
+++ b/tools/binman/image.py
@@ -18,8 +18,8 @@
 from binman.etype import section
 from dtoc import fdt
 from dtoc import fdt_util
-from patman import tools
-from patman import tout
+from u_boot_pylib import tools
+from u_boot_pylib import tout
 
 class Image(section.Entry_section):
     """A Image, representing an output from binman
diff --git a/tools/binman/image_test.py b/tools/binman/image_test.py
index e351fa8..bd51c1e 100644
--- a/tools/binman/image_test.py
+++ b/tools/binman/image_test.py
@@ -7,7 +7,7 @@
 import unittest
 
 from binman.image import Image
-from patman.test_util import capture_sys_output
+from u_boot_pylib.test_util import capture_sys_output
 
 class TestImage(unittest.TestCase):
     def testInvalidFormat(self):
diff --git a/tools/binman/main.py b/tools/binman/main.py
index 14432a8..92d2431 100755
--- a/tools/binman/main.py
+++ b/tools/binman/main.py
@@ -34,7 +34,7 @@
 sys.path.insert(2, our1_path)
 
 from binman import bintool
-from patman import test_util
+from u_boot_pylib import test_util
 
 # Bring in the libfdt module
 sys.path.insert(2, 'scripts/dtc/pylibfdt')
@@ -44,7 +44,7 @@
 
 from binman import cmdline
 from binman import control
-from patman import test_util
+from u_boot_pylib import test_util
 
 def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
     """Run the functional tests and any embedded doctests
@@ -85,7 +85,7 @@
 
     return (0 if result.wasSuccessful() else 1)
 
-def RunTestCoverage(toolpath):
+def RunTestCoverage(toolpath, build_dir):
     """Run the tests and check that we get 100% coverage"""
     glob_list = control.GetEntryModules(False)
     all_set = set([os.path.splitext(os.path.basename(item))[0]
@@ -95,8 +95,9 @@
         for path in toolpath:
             extra_args += ' --toolpath %s' % path
     test_util.run_test_coverage('tools/binman/binman', None,
-            ['*test*', '*main.py', 'tools/patman/*', 'tools/dtoc/*'],
-            args.build_dir, all_set, extra_args or None)
+            ['*test*', '*main.py', 'tools/patman/*', 'tools/dtoc/*',
+             'tools/u_boot_pylib/*'],
+            build_dir, all_set, extra_args or None)
 
 def RunBinman(args):
     """Main entry point to binman once arguments are parsed
@@ -116,7 +117,7 @@
 
     if args.cmd == 'test':
         if args.test_coverage:
-            RunTestCoverage(args.toolpath)
+            RunTestCoverage(args.toolpath, args.build_dir)
         else:
             ret_code = RunTests(args.debug, args.verbosity, args.processes,
                                 args.test_preserve_dirs, args.tests,
@@ -140,8 +141,12 @@
     return ret_code
 
 
-if __name__ == "__main__":
+def start_binman():
     args = cmdline.ParseArgs(sys.argv[1:])
 
     ret_code = RunBinman(args)
     sys.exit(ret_code)
+
+
+if __name__ == "__main__":
+    start_binman()
diff --git a/tools/binman/pyproject.toml b/tools/binman/pyproject.toml
new file mode 100644
index 0000000..b4b54fb
--- /dev/null
+++ b/tools/binman/pyproject.toml
@@ -0,0 +1,29 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "binary-manager"
+version = "0.0.2"
+authors = [
+  { name="Simon Glass", email="sjg@chromium.org" },
+]
+dependencies = ["pylibfdt", "u_boot_pylib", "dtoc"]
+description = "Binman firmware-packaging tool"
+readme = "README.rst"
+requires-python = ">=3.7"
+classifiers = [
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
+    "Operating System :: OS Independent",
+]
+
+[project.urls]
+"Homepage" = "https://u-boot.readthedocs.io/en/latest/develop/package/index.html"
+"Bug Tracker" = "https://source.denx.de/groups/u-boot/-/issues"
+
+[project.scripts]
+binman = "binman.main:start_binman"
+
+[tool.setuptools.package-data]
+binman = ["*.rst"]
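
The script entry `binman = "binman.main:start_binman"` makes pip generate a
wrapper that imports the module and calls the named function. A rough,
standard-library-only sketch of what such a wrapper does (illustrative, not
the generated code):

import sys
from importlib import import_module

def run_entry_point(spec='binman.main:start_binman'):
    """Resolve a 'module:function' spec and call it (illustrative only)."""
    module_name, func_name = spec.split(':')
    return getattr(import_module(module_name), func_name)()

if __name__ == '__main__':
    sys.exit(run_entry_point())
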
diff --git a/tools/binman/state.py b/tools/binman/state.py
index 56e5bf8..3e78cf3 100644
--- a/tools/binman/state.py
+++ b/tools/binman/state.py
@@ -13,8 +13,8 @@
 
 from dtoc import fdt
 import os
-from patman import tools
-from patman import tout
+from u_boot_pylib import tools
+from u_boot_pylib import tout
 
 OUR_PATH = os.path.dirname(os.path.realpath(__file__))
 
@@ -306,7 +306,7 @@
     """Yield all the nodes that need to be updated in all device trees
 
     The property referenced by this node is added to any device trees which
-    have the given node. Due to removable of unwanted notes, SPL and TPL may
+    have the given node. Due to removal of unwanted nodes, SPL and TPL may
     not have this node.
 
     Args:
diff --git a/tools/binman/test/067_fmap.dts b/tools/binman/test/067_fmap.dts
index 9c0e293..24fa635 100644
--- a/tools/binman/test/067_fmap.dts
+++ b/tools/binman/test/067_fmap.dts
@@ -11,6 +11,7 @@
 			name-prefix = "ro-";
 			size = <0x10>;
 			pad-byte = <0x21>;
+			preserve;
 
 			u-boot {
 			};
diff --git a/tools/binman/test/277_replace_fit_sibling.dts b/tools/binman/test/277_replace_fit_sibling.dts
new file mode 100644
index 0000000..fc941a8
--- /dev/null
+++ b/tools/binman/test/277_replace_fit_sibling.dts
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		allow-repack;
+
+		u-boot {
+		};
+
+		blob {
+			filename = "compress";
+		};
+
+		fit {
+			description = "test-desc";
+			#address-cells = <1>;
+
+			images {
+				kernel {
+					description = "Vanilla Linux kernel";
+					type = "kernel";
+					arch = "ppc";
+					os = "linux";
+					compression = "gzip";
+					load = <00000000>;
+					entry = <00000000>;
+					hash-1 {
+						algo = "crc32";
+					};
+					blob-ext {
+						filename = "once";
+					};
+				};
+				fdt-1 {
+					description = "Flattened Device Tree blob";
+					type = "flat_dt";
+					arch = "ppc";
+					compression = "none";
+					hash-1 {
+						algo = "crc32";
+					};
+					u-boot-spl-dtb {
+					};
+				};
+			};
+
+			configurations {
+				default = "conf-1";
+				conf-1 {
+					description = "Boot Linux kernel with FDT blob";
+					kernel = "kernel";
+					fdt = "fdt-1";
+				};
+			};
+		};
+
+		fdtmap {
+		};
+	};
+};
diff --git a/tools/binman/test/278_replace_section_deep.dts b/tools/binman/test/278_replace_section_deep.dts
new file mode 100644
index 0000000..fba2d7d
--- /dev/null
+++ b/tools/binman/test/278_replace_section_deep.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+/ {
+	binman {
+		allow-repack;
+
+		u-boot-dtb {
+		};
+
+		section {
+			section {
+				blob {
+					filename = "compress";
+				};
+			};
+
+			u-boot {
+			};
+		};
+
+		fdtmap {
+		};
+	};
+};
diff --git a/tools/binman/test/279_x509_cert.dts b/tools/binman/test/279_x509_cert.dts
new file mode 100644
index 0000000..7123817
--- /dev/null
+++ b/tools/binman/test/279_x509_cert.dts
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		x509-cert {
+			cert-ca = "IOT2050 Firmware Signature";
+			cert-revision-int = <0>;
+			content = <&u_boot>;
+		};
+
+		u_boot: u-boot {
+		};
+	};
+};
diff --git a/tools/binman/test/key.key b/tools/binman/test/key.key
new file mode 100644
index 0000000..9de3be1
--- /dev/null
+++ b/tools/binman/test/key.key
@@ -0,0 +1,52 @@
+-----BEGIN PRIVATE KEY-----
+MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCSDLMHq1Jw3U+G
+H2wutSGrT4Xhs5Yy7uhR/rDOiuKTW3zkVdfSIliye3Nnwrl/nNUFzEJ+4t/AiDaJ
+Qk5KddTAJnOkw5SYBvFsTDhMR4HH6AyfzaaVl+AAGOg4LXwZzGYKncgOY5u6ZyMB
+SzHxozJmmoqYaCIi4Iv2VZRZw1YPBoT6sv38RQSET5ci/g+89Sfb85ZPHPu6PLlz
+ZTufG+yzAhIDsIvNpt2YlCnQ1TqoZxXsztxN1bKIP68xvlAQHSAB8+x4y0tYPE1I
+UT1DK22FMgz5iyBp6ksFaqI06fITtJjPKG13z8sXXgb4/rJ5I0lhsn1ySsHQ0zLw
+/CX4La2/VMA0Bw6GLFRhu/rOycqKfmwLm25bExV8xL6lwFohxbzBQgYr93ujGFyQ
+AXBDOphvZcdXP3CHAcEViVRjrsBWNz8wyf7X8h2FIU16kAd30WuspjmnGuvRZ6Gn
+SNDVO2tbEKvwkg6liYWy4IXtWcvooMtkhYyvFudcxRPgxEUTQ00biYfJ59ukqD7I
+hyT7pq1bZDCVnAt6dUUPWZutrbBacsyITs01hyiPxvAAQ7XRoInmW1DLqHZ+gCJU
+YJ0TaiAI8AmnjypMWRUo19l0zIgPdva8EJ+mz+kKFsZszo1nwuxQL7oUSUCb0hfB
+2k3WxNthBi3QpspUKPtKweIg9ITtIwIDAQABAoICAA9dS6ZGZTVfauLKwnhFcOXT
+R1vfpzDzhjg+CX6pCL4E1WY2C67dEySvrQvg5d/hcV2bR/GOT4izK72T3qWhsMCI
+KwlN0/+MV3CTsiaALUyJAm77VQeOwy9vb1qdml0ibie2wpmU7AiXmgykSvxHNWGq
+52KyLckqgz7mcOVikdah0nKHSwXzgs6iit1RCfnQdqGChjELdQX6Jm5X24ZZCzUn
+xhpiQ8reP5iyGZYRIIsf0SQo/O8pSI9h173tbgHL9paOATYR+Pqu2Vh+x2meE3b8
+NXY5Jy9NSRgoSCk15VQiXyMH90Av+YcbSrN+I+tvhWREQUM5Txt3ZHgKprntoEYE
+XLHAr9cvmIzLNeApt2z/g4t80xFBpIvTG3+SV/rthmq0KGCLW2kPkdujOiIwdNFF
+6fJ6ikphKAbx2NgUY+6AM5AoOh5QPMqvCdsPwO21YG1WoxmiUpNTaYMlR1fDofr/
+A/z2bFH4SiJPkHXRT2KBiJh4ZZWNzP6hOqGy+jreOpWh5IAyn7cKx6t3I28Q9df0
+tK/1PLgR8WWu6G4uHtF5lKL+LgqFCTbSu9JtLQVQntD7Qyd98sF5o23QQWyA19uU
+TVGxtkVaP1y7v+gtC+xMTW9MbGIeJiqMZuZ3xXJVvUNg1/2BDd+VAfPCOq6xGHC7
+s9MFqwUsLCAFFebXC8oVAoIBAQDKGc/o21Ags2t61IJaJjU7YwrsRywhZR+vUz5F
+xtqH4jt9AkdWpDkKbO7xNMQ2OFdnobq5mkM+iW6Jvc1fi4gm1HDyP296nPKZdFrJ
+UgGfTxOhxFLp7gsJ2F0GX5eDJYvqUTBeYB3wrQkCc+t7fLg2oS+gKGIIn2CP07Mx
+Bist3eCcDvL6QIxYS43u+ptTyAItyUYn8KwvCxlIEfjxowsxfhRWuU+Mr4A4hfGB
+64xSI1YU1AYZLMucOtK/mmlscfO8isdcyfea0GJn4VLRnNvAKL5g627IdErWHs3u
+KgYWAXtVKzHrf4hO8dpVgIzO69wAsqZEvKYGmTJhfyvBN9DdAoIBAQC5AA7s2XOX
+raVymhPwEy4I/2w9NuMFmTavOREBp/gA9uaWBdqAWn1rRJiJ5plgdcnOBFPSGBnc
+thkuWBRqkklQ0YPKhNBT48CZGBN7VUsvyTZD1+IXLW1TmY5UGT0p6/dAYkoJHnvX
+TAHl1tfmeHxVCJWV6Shf5LfJJwsAiykxzetkzmeaycy2s9GKCnkc2uFxyhKnfM0/
+SLwTuXQIJvHuErTYA4jjVOG9EGYW2/uKScPBLpB1YTliAUIvByDy6suCN5pVZGT8
+xVLTYec9lXjhfyhysOAjhD3w77Jh7Exft91fEK50k2ZkqYYnh+mYZcnR52msVSBS
+3YL9kK/9dNX/AoIBABcEaZFzqOSQiqUqns31nApvdUcDtBr5kWo+aNE5nJntQiky
+oT1U5soxLeV6xP4H3KyI1uNcllwA+v3lCAbhtVf2ygZNAz1LsrWXct+K33RtZSb/
+XRIXclpksfOP34moNQ8yv/d/qulGS8hju2YNBk3yfaIX91JUFINM8ROcSD6pDnO3
+oCSwRUupDzkwgZBBLz5Xtg3Gc1XIRdDXeyrKDvRMD7Tw1gaH1mqZlq/dS9XvAFbO
+7wLe/zGD4YzA4VDgiYnnpF0FA5Y2NX7vQqds3fo8qbIQHkXmOL+6Mmn1j0viT1Gb
+4cuYcsXK9brXMTI/2oaZ0iXx9la6C+reuPUAjmECggEBAInEvlips0hgW1ZV4cUm
+M2El/dA0YKoZqDyjDcQi9zCYra1JXKe7O603XzVK0iugbBGM7XMG2bOgtG3r0ABx
+QkH6VN/rOk1OzW31HQT6xswmVs/9I/TIsqLQNsrwJLlkbTO4PpQ97FGv27Xy4cNT
+NJwKkYMbKCMJa8hT2ACmoZ3iUIs4nrUJ1Pa2QLRBCmJvqfYYWv35lcur+cvijsNH
+ZWE68wvuzfEllBo87RnW5qLcPfhOGewf5CDU+RmWgHYGXllx2PAAnKgUtpKOVStq
+daPQEyoeCDzKzWnwxvHfjBy4CxYxkQllf5o1GJ+1ukLwgnRbljltB25OYa89IaJp
+cLcCggEAa5vbegzMKYPjR3zcVjnvhRsLXQi1vMtbUqOQ5wYMwGIef4v3QHNoF7EA
+aNpWQ/qgCTQUzl3qoQCkRiVmVBBr60Fs5y7sfA92eBxQIV5hxJftH3vmiKqeWeqm
+ila9DNw84MNAIqI2u6R3K/ur9fkSswDr3nzvFjuheW5V/M/6zAUtJZXr4iUih929
+uhf2dn6pSLR+epJ5023CVaI2zwz+U6PDEATKy9HjeKab3tQMHxQkT/5IWcLqrVTs
+0rMobIgONzQqYDi2sO05YvgNBxvX3pUvqNlthcOtauT8BoE6wxLYm7ZcWYLPn15A
+wR0+2mDpx+HDyu76q3M+KxXG2U8sJg==
+-----END PRIVATE KEY-----
diff --git a/tools/binman/test/key.pem b/tools/binman/test/key.pem
new file mode 100644
index 0000000..7a7b84a
--- /dev/null
+++ b/tools/binman/test/key.pem
@@ -0,0 +1,32 @@
+-----BEGIN CERTIFICATE-----
+MIIFcTCCA1kCFB/17qhcvpyKhG+jfS2c0qG1yjruMA0GCSqGSIb3DQEBCwUAMHUx
+CzAJBgNVBAYTAk5aMRMwEQYDVQQIDApDYW50ZXJidXJ5MRUwEwYDVQQHDAxDaHJp
+c3RjaHVyY2gxITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEXMBUG
+A1UEAwwOTXkgQ29tbW9uIE5hbWUwHhcNMjMwMjEzMDM1MzMzWhcNMjQwMjEzMDM1
+MzMzWjB1MQswCQYDVQQGEwJOWjETMBEGA1UECAwKQ2FudGVyYnVyeTEVMBMGA1UE
+BwwMQ2hyaXN0Y2h1cmNoMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBM
+dGQxFzAVBgNVBAMMDk15IENvbW1vbiBOYW1lMIICIjANBgkqhkiG9w0BAQEFAAOC
+Ag8AMIICCgKCAgEAkgyzB6tScN1Phh9sLrUhq0+F4bOWMu7oUf6wzorik1t85FXX
+0iJYsntzZ8K5f5zVBcxCfuLfwIg2iUJOSnXUwCZzpMOUmAbxbEw4TEeBx+gMn82m
+lZfgABjoOC18GcxmCp3IDmObumcjAUsx8aMyZpqKmGgiIuCL9lWUWcNWDwaE+rL9
+/EUEhE+XIv4PvPUn2/OWTxz7ujy5c2U7nxvsswISA7CLzabdmJQp0NU6qGcV7M7c
+TdWyiD+vMb5QEB0gAfPseMtLWDxNSFE9QytthTIM+YsgaepLBWqiNOnyE7SYzyht
+d8/LF14G+P6yeSNJYbJ9ckrB0NMy8Pwl+C2tv1TANAcOhixUYbv6zsnKin5sC5tu
+WxMVfMS+pcBaIcW8wUIGK/d7oxhckAFwQzqYb2XHVz9whwHBFYlUY67AVjc/MMn+
+1/IdhSFNepAHd9FrrKY5pxrr0Wehp0jQ1TtrWxCr8JIOpYmFsuCF7VnL6KDLZIWM
+rxbnXMUT4MRFE0NNG4mHyefbpKg+yIck+6atW2QwlZwLenVFD1mbra2wWnLMiE7N
+NYcoj8bwAEO10aCJ5ltQy6h2foAiVGCdE2ogCPAJp48qTFkVKNfZdMyID3b2vBCf
+ps/pChbGbM6NZ8LsUC+6FElAm9IXwdpN1sTbYQYt0KbKVCj7SsHiIPSE7SMCAwEA
+ATANBgkqhkiG9w0BAQsFAAOCAgEAJAJoia6Vq4vXP/0bCgW3o9TOMmFYhI/xPxoh
+Gd7was9R7BOrMGO+/3E7DZtjycZYL0r9nOtr9S/BBreuZ4vkk/PSoGaSnG8ST4jC
+Ajk7ew/32RGOgA/oIzgKj1SPkBtvW+x+76sjUkGKsxmABBUhycIY7K0U8McTTfJ7
+gJ164VXmdG7qFMWmRy4Ry9QGXkDsbMSOZ485X7zbphjK5OZXEujP7GMUgg1lP479
+NqC1g+1m/A3PIB767lVYA7APQsrckHdRqOTkK9TYRQ3mvyE2wruhqE6lx8G/UyFh
+RZjZ3lh2bx07UWIlyMabnGDMrM4FCnesqVyVAc8VAbkdXkeJI9r6DdFw+dzIY0P1
+il+MlYpZNwRyNv2W5SCPilyuhuPOSrSnsSHx64puCIvwG/4xA30Jw8nviJuyGSef
+7uE+W7SD9E/hQHi/S9KRsYVoo7a6X9ADiwNsRNzVnuqc7K3mv/C5E9s6uFTNoObe
+fUBA7pL3Fmvc5pYatxTFI85ajBpe/la6AA+7HX/8PXEphmp6GhFCcfsq+DL03vTM
+DqIJL1i/JXggwqvvdcfaSeMDIOIzO89yUGGwwuj9rqMeEY99qDtljgy1EljjrB5i
+0j4Jg4O0OEd2KIOD7nz4do1tLNlRcpysDZeXIiwAI7Dd3wWMsgpOQxs0zqWyqDVq
+mCKa5Tw=
+-----END CERTIFICATE-----
diff --git a/tools/buildman/builder.py b/tools/buildman/builder.py
index c2a6902..d81752e 100644
--- a/tools/buildman/builder.py
+++ b/tools/buildman/builder.py
@@ -19,10 +19,10 @@
 
 from buildman import builderthread
 from buildman import toolchain
-from patman import command
 from patman import gitutil
-from patman import terminal
-from patman.terminal import tprint
+from u_boot_pylib import command
+from u_boot_pylib import terminal
+from u_boot_pylib.terminal import tprint
 
 # This indicates an new int or hex Kconfig property with no default
 # It hangs the build since the 'conf' tool cannot proceed without valid input.
@@ -194,6 +194,8 @@
         work_in_output: Use the output directory as the work directory and
             don't write to a separate output directory.
         thread_exceptions: List of exceptions raised by thread jobs
+        no_lto (bool): True to set the NO_LTO flag when building
+        reproducible_builds (bool): True to set SOURCE_DATE_EPOCH=0 for builds
 
     Private members:
         _base_board_dict: Last-summarised Dict of boards
@@ -253,7 +255,7 @@
                  config_only=False, squash_config_y=False,
                  warnings_as_errors=False, work_in_output=False,
                  test_thread_exceptions=False, adjust_cfg=None,
-                 allow_missing=False):
+                 allow_missing=False, no_lto=False, reproducible_builds=False):
         """Create a new Builder object
 
         Args:
@@ -292,6 +294,7 @@
                     C=val to set the value of C (val must have quotes if C is
                         a string Kconfig
             allow_missing: Run build with BINMAN_ALLOW_MISSING=1
+            no_lto (bool): True to set the NO_LTO flag when building
 
         """
         self.toolchains = toolchains
@@ -331,6 +334,8 @@
         self.adjust_cfg = adjust_cfg
         self.allow_missing = allow_missing
         self._ide = False
+        self.no_lto = no_lto
+        self.reproducible_builds = reproducible_builds
 
         if not self.squash_config_y:
             self.config_filenames += EXTRA_CONFIG_FILENAMES
diff --git a/tools/buildman/builderthread.py b/tools/buildman/builderthread.py
index 680efae..879ff138 100644
--- a/tools/buildman/builderthread.py
+++ b/tools/buildman/builderthread.py
@@ -10,8 +10,8 @@
 import threading
 
 from buildman import cfgutil
-from patman import command
 from patman import gitutil
+from u_boot_pylib import command
 
 RETURN_CODE_RETRY = -1
 BASE_ELF_FILENAMES = ['u-boot', 'spl/u-boot-spl', 'tpl/u-boot-tpl']
@@ -255,6 +255,10 @@
                     args.append('KCFLAGS=-Werror')
                 if self.builder.allow_missing:
                     args.append('BINMAN_ALLOW_MISSING=1')
+                if self.builder.no_lto:
+                    args.append('NO_LTO=1')
+                if self.builder.reproducible_builds:
+                    args.append('SOURCE_DATE_EPOCH=0')
                 config_args = ['%s_defconfig' % brd.target]
                 config_out = ''
                 args.extend(self.builder.toolchains.GetMakeArguments(brd))
@@ -273,14 +277,19 @@
 
                 # If we need to reconfigure, do that now
                 cfg_file = os.path.join(out_dir, '.config')
+                cmd_list = []
                 if do_config or adjust_cfg:
                     config_out = ''
                     if self.mrproper:
                         result = self.Make(commit, brd, 'mrproper', cwd,
                                 'mrproper', *args, env=env)
                         config_out += result.combined
+                        cmd_list.append([self.builder.gnu_make, 'mrproper',
+                                         *args])
                     result = self.Make(commit, brd, 'config', cwd,
                             *(args + config_args), env=env)
+                    cmd_list.append([self.builder.gnu_make] + args +
+                                    config_args)
                     config_out += result.combined
                     do_config = False   # No need to configure next time
                     if adjust_cfg:
@@ -290,6 +299,7 @@
                         args.append('cfg')
                     result = self.Make(commit, brd, 'build', cwd, *args,
                             env=env)
+                    cmd_list.append([self.builder.gnu_make] + args)
                     if (result.return_code == 2 and
                         ('Some images are invalid' in result.stderr)):
                         # This is handled later by the check for output in
@@ -303,6 +313,7 @@
                 result.stderr = result.stderr.replace(src_dir + '/', '')
                 if self.builder.verbose_build:
                     result.stdout = config_out + result.stdout
+                result.cmd_list = cmd_list
             else:
                 result.return_code = 1
                 result.stderr = 'No tool chain for %s\n' % brd.arch
@@ -378,6 +389,12 @@
             with open(os.path.join(build_dir, 'out-env'), 'wb') as fd:
                 for var in sorted(env.keys()):
                     fd.write(b'%s="%s"' % (var, env[var]))
+
+            with open(os.path.join(build_dir, 'out-cmd'), 'w',
+                      encoding='utf-8') as fd:
+                for cmd in result.cmd_list:
+                    print(' '.join(cmd), file=fd)
+
             lines = []
             for fname in BASE_ELF_FILENAMES:
                 cmd = ['%snm' % self.toolchain.cross, '--size-sort', fname]
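
The out-cmd file written above records one make invocation per line in each
board's build directory, which makes it easy to inspect from a script. A
hedged example, assuming the current/<board> layout used by buildman's
functional tests:

import os

def read_build_commands(output_dir, board):
    """Return the recorded make invocations for one board, one per line.

    The 'current/<board>' layout is an assumption based on buildman's
    functional tests; adjust the path for branch builds.
    """
    fname = os.path.join(output_dir, 'current', board, 'out-cmd')
    with open(fname, encoding='utf-8') as fd:
        return fd.read().splitlines()
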
diff --git a/tools/buildman/buildman.rst b/tools/buildman/buildman.rst
index 2a83cb7..c8b0db3 100644
--- a/tools/buildman/buildman.rst
+++ b/tools/buildman/buildman.rst
@@ -1023,14 +1023,15 @@
 final binary. This information varies each time U-Boot is built. This causes
 various files to be rebuilt even if no source changes are made, which in turn
 requires that the final U-Boot binary be re-linked. This unnecessary work can
-be avoided by turning off the timestamp feature. This can be achieved by
-setting the SOURCE_DATE_EPOCH environment variable to 0.
+be avoided by turning off the timestamp feature. This can be achieved using
+the `-r` flag, which enables reproducible builds by setting
+`SOURCE_DATE_EPOCH=0` when building.
 
 Combining all of these options together yields the command-line shown below.
 This will provide the quickest possible feedback regarding the current content
 of the source tree, thus allowing rapid tested evolution of the code::
 
-    SOURCE_DATE_EPOCH=0 ./tools/buildman/buildman -P tegra
+    ./tools/buildman/buildman -Pr tegra
 
 
 Checking configuration
@@ -1108,6 +1109,8 @@
 value for 'altbootcmd', but lost one for ' altbootcmd'.
 
 The -U option uses the u-boot.env files which are produced by a build.
+Internally, buildman writes out an out-env file into the build directory for
+later comparison.
 
 
 Building with clang
@@ -1121,6 +1124,20 @@
    buildman -O clang-7 --board sandbox
 
 
+Building without LTO
+--------------------
+
+Link-time optimisation (LTO) is designed to reduce code size by globally
+optimising the U-Boot build. Unfortunately it can dramatically slow down
+builds, which is particularly noticeable when building many boards or commits.
+
+Use the -L (--no-lto) flag to disable LTO.
+
+.. code-block:: bash
+
+   buildman -L --board sandbox
+
+
 Doing a simple build
 --------------------
 
@@ -1298,6 +1315,14 @@
 since it may be dropped altogether in future.
 
 
+Checking the command
+--------------------
+
+Buildman writes out the toolchain information to a `toolchain` file within the
+output directory. It also writes the commands used to build U-Boot to an
+`out-cmd` file. You can check these if you suspect something strange is
+happening.
+
 TODO
 ----
 
diff --git a/tools/buildman/cfgutil.py b/tools/buildman/cfgutil.py
index ab74a8e..a340e01 100644
--- a/tools/buildman/cfgutil.py
+++ b/tools/buildman/cfgutil.py
@@ -7,7 +7,7 @@
 
 import re
 
-from patman import tools
+from u_boot_pylib import tools
 
 RE_LINE = re.compile(r'(# )?CONFIG_([A-Z0-9_]+)(=(.*)| is not set)')
 RE_CFG = re.compile(r'(~?)(CONFIG_)?([A-Z0-9_]+)(=.*)?')
diff --git a/tools/buildman/cmdline.py b/tools/buildman/cmdline.py
index c485994..a9cda24 100644
--- a/tools/buildman/cmdline.py
+++ b/tools/buildman/cmdline.py
@@ -3,6 +3,11 @@
 #
 
 from optparse import OptionParser
+import os
+import pathlib
+
+BUILDMAN_DIR = pathlib.Path(__file__).parent
+HAS_TESTS = os.path.exists(BUILDMAN_DIR / "test.py")
 
 def ParseArgs():
     """Parse command line arguments from sys.argv[]
@@ -71,6 +76,8 @@
           default=False, help="Don't convert y to 1 in configs")
     parser.add_option('-l', '--list-error-boards', action='store_true',
           default=False, help='Show a list of boards next to each error/warning')
+    parser.add_option('-L', '--no-lto', action='store_true',
+          default=False, help='Disable Link-time Optimisation (LTO) for builds')
     parser.add_option('--list-tool-chains', action='store_true', default=False,
           help='List available tool chains (use -v to see probing detail)')
     parser.add_option('-m', '--mrproper', action='store_true',
@@ -95,18 +102,21 @@
           default=False, help="Use full toolchain path in CROSS_COMPILE")
     parser.add_option('-P', '--per-board-out-dir', action='store_true',
           default=False, help="Use an O= (output) directory per board rather than per thread")
+    parser.add_option('-r', '--reproducible-builds', action='store_true',
+          help='Set SOURCE_DATE_EPOCH=0 to support a reproducible build')
     parser.add_option('-R', '--regen-board-list', action='store_true',
           help='Force regeneration of the list of boards, like the old boards.cfg file')
     parser.add_option('-s', '--summary', action='store_true',
           default=False, help='Show a build summary')
     parser.add_option('-S', '--show-sizes', action='store_true',
           default=False, help='Show image size variation in summary')
-    parser.add_option('--skip-net-tests', action='store_true', default=False,
-                      help='Skip tests which need the network')
     parser.add_option('--step', type='int',
           default=1, help='Only build every n commits (0=just first and last)')
-    parser.add_option('-t', '--test', action='store_true', dest='test',
-                      default=False, help='run tests')
+    if HAS_TESTS:
+        parser.add_option('--skip-net-tests', action='store_true', default=False,
+                          help='Skip tests which need the network')
+        parser.add_option('-t', '--test', action='store_true', dest='test',
+                          default=False, help='run tests')
     parser.add_option('-T', '--threads', type='int',
           default=None,
           help='Number of builder threads to use (0=single-thread)')
diff --git a/tools/buildman/control.py b/tools/buildman/control.py
index 87e7d0e..35f44c0 100644
--- a/tools/buildman/control.py
+++ b/tools/buildman/control.py
@@ -3,6 +3,7 @@
 #
 
 import multiprocessing
+import importlib.resources
 import os
 import shutil
 import subprocess
@@ -13,12 +14,12 @@
 from buildman import cfgutil
 from buildman import toolchain
 from buildman.builder import Builder
-from patman import command
 from patman import gitutil
 from patman import patchstream
-from patman import terminal
-from patman import tools
-from patman.terminal import tprint
+from u_boot_pylib import command
+from u_boot_pylib import terminal
+from u_boot_pylib import tools
+from u_boot_pylib.terminal import tprint
 
 def GetPlural(count):
     """Returns a plural 's' if count is not 1"""
@@ -152,9 +153,8 @@
     global builder
 
     if options.full_help:
-        tools.print_full_help(
-            os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
-                         'README.rst'))
+        with importlib.resources.path('buildman', 'README.rst') as readme:
+            tools.print_full_help(str(readme))
         return 0
 
     gitutil.setup()
@@ -261,9 +261,9 @@
             count += 1   # Build upstream commit also
 
     if not count:
-        str = ("No commits found to process in branch '%s': "
+        msg = ("No commits found to process in branch '%s': "
                "set branch's upstream or use -c flag" % options.branch)
-        sys.exit(col.build(col.RED, str))
+        sys.exit(col.build(col.RED, msg))
     if options.work_in_output:
         if len(selected) != 1:
             sys.exit(col.build(col.RED,
@@ -338,6 +338,14 @@
             shutil.rmtree(output_dir)
     adjust_cfg = cfgutil.convert_list_to_dict(options.adjust_cfg)
 
+    # Drop LOCALVERSION_AUTO since it changes the version string on every commit
+    if options.reproducible_builds:
+        # If these are mentioned, leave the local version alone
+        if 'LOCALVERSION' in adjust_cfg or 'LOCALVERSION_AUTO' in adjust_cfg:
+            print('Not dropping LOCALVERSION_AUTO for reproducible build')
+        else:
+            adjust_cfg['LOCALVERSION_AUTO'] = '~'
+
     builder = Builder(toolchains, output_dir, options.git_dir,
             options.threads, options.jobs, gnu_make=gnu_make, checkout=True,
             show_unknown=options.show_unknown, step=options.step,
@@ -351,7 +359,8 @@
             work_in_output=options.work_in_output,
             test_thread_exceptions=test_thread_exceptions,
             adjust_cfg=adjust_cfg,
-            allow_missing=allow_missing)
+            allow_missing=allow_missing, no_lto=options.no_lto,
+            reproducible_builds=options.reproducible_builds)
     builder.force_config_on_failure = not options.quick
     if make_func:
         builder.do_make = make_func
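
The reproducible-build handling above relies on the adjust_cfg convention
that a value of '~' marks a Kconfig option to be unset, and it defers to any
LOCALVERSION adjustment the user already requested. A condensed sketch of
that decision (illustrative function name):

def drop_localversion_auto(adjust_cfg, reproducible):
    """Unset CONFIG_LOCALVERSION_AUTO for reproducible builds.

    Condensed restatement of the control.py logic above: a value of '~' in
    adjust_cfg marks an option to be unset, and the user's own LOCALVERSION
    adjustments take precedence.
    """
    if not reproducible:
        return
    if 'LOCALVERSION' in adjust_cfg or 'LOCALVERSION_AUTO' in adjust_cfg:
        print('Not dropping LOCALVERSION_AUTO for reproducible build')
    else:
        adjust_cfg['LOCALVERSION_AUTO'] = '~'
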
diff --git a/tools/buildman/func_test.py b/tools/buildman/func_test.py
index 559e4ed..ebd78f2 100644
--- a/tools/buildman/func_test.py
+++ b/tools/buildman/func_test.py
@@ -14,11 +14,11 @@
 from buildman import cmdline
 from buildman import control
 from buildman import toolchain
-from patman import command
 from patman import gitutil
-from patman import terminal
-from patman import test_util
-from patman import tools
+from u_boot_pylib import command
+from u_boot_pylib import terminal
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
 
 settings_data = '''
 # Buildman settings file
@@ -415,17 +415,19 @@
             kwargs: Arguments to pass to command.run_pipe()
         """
         self._make_calls += 1
+        out_dir = ''
+        for arg in args:
+            if arg.startswith('O='):
+                out_dir = arg[2:]
         if stage == 'mrproper':
             return command.CommandResult(return_code=0)
         elif stage == 'config':
+            fname = os.path.join(cwd or '', out_dir, '.config')
+            tools.write_file(fname, b'CONFIG_SOMETHING=1')
             return command.CommandResult(return_code=0,
                     combined='Test configuration complete')
         elif stage == 'build':
             stderr = ''
-            out_dir = ''
-            for arg in args:
-                if arg.startswith('O='):
-                    out_dir = arg[2:]
             fname = os.path.join(cwd or '', out_dir, 'u-boot')
             tools.write_file(fname, b'U-Boot')
 
@@ -723,3 +725,57 @@
                          control.get_allow_missing(False, False, 2, True))
         self.assertEqual(False,
                          control.get_allow_missing(False, True, 2, True))
+
+    def check_command(self, *extra_args):
+        """Run a command with the extra arguments and return the commands used
+
+        Args:
+            extra_args (list of str): List of extra arguments
+
+        Returns:
+            list of str: Lines returned in the out-cmd file
+        """
+        self._RunControl('-o', self._output_dir, *extra_args)
+        board0_dir = os.path.join(self._output_dir, 'current', 'board0')
+        self.assertTrue(os.path.exists(os.path.join(board0_dir, 'done')))
+        cmd_fname = os.path.join(board0_dir, 'out-cmd')
+        self.assertTrue(os.path.exists(cmd_fname))
+        data = tools.read_file(cmd_fname)
+
+        config_fname = os.path.join(board0_dir, '.config')
+        self.assertTrue(os.path.exists(config_fname))
+        cfg_data = tools.read_file(config_fname)
+
+        return data.splitlines(), cfg_data
+
+    def testCmdFile(self):
+        """Test that the -cmd-out file is produced"""
+        lines = self.check_command()[0]
+        self.assertEqual(2, len(lines))
+        self.assertRegex(lines[0], b'make O=/.*board0_defconfig')
+        self.assertRegex(lines[1], b'make O=/.*-s.*')
+
+    def testNoLto(self):
+        """Test that the --no-lto flag works"""
+        lines = self.check_command('-L')[0]
+        self.assertIn(b'NO_LTO=1', lines[0])
+
+    def testReproducible(self):
+        """Test that the -r flag works"""
+        lines, cfg_data = self.check_command('-r')
+        self.assertIn(b'SOURCE_DATE_EPOCH=0', lines[0])
+
+        # We should see CONFIG_LOCALVERSION_AUTO unset
+        self.assertEqual(b'''CONFIG_SOMETHING=1
+# CONFIG_LOCALVERSION_AUTO is not set
+''', cfg_data)
+
+        with test_util.capture_sys_output() as (stdout, stderr):
+            lines, cfg_data = self.check_command('-r', '-a', 'LOCALVERSION')
+        self.assertIn(b'SOURCE_DATE_EPOCH=0', lines[0])
+
+        # We should see CONFIG_LOCALVERSION_AUTO unset
+        self.assertEqual(b'''CONFIG_SOMETHING=1
+CONFIG_LOCALVERSION=y
+''', cfg_data)
+        self.assertIn('Not dropping LOCALVERSION_AUTO', stdout.getvalue())
diff --git a/tools/buildman/main.py b/tools/buildman/main.py
index 67c560c..5e1f68d 100755
--- a/tools/buildman/main.py
+++ b/tools/buildman/main.py
@@ -25,8 +25,8 @@
 from buildman import toolchain
 from patman import patchstream
 from patman import gitutil
-from patman import terminal
-from patman import test_util
+from u_boot_pylib import terminal
+from u_boot_pylib import test_util
 
 def RunTests(skip_net_tests, verboose, args):
     from buildman import func_test
@@ -46,17 +46,22 @@
 
     return (0 if result.wasSuccessful() else 1)
 
-options, args = cmdline.ParseArgs()
+def run_buildman():
+    options, args = cmdline.ParseArgs()
 
-if not options.debug:
-    sys.tracebacklimit = 0
+    if not options.debug:
+        sys.tracebacklimit = 0
 
-# Run our meagre tests
-if options.test:
-    RunTests(options.skip_net_tests, options.verbose, args)
+    # Run our meagre tests
+    if cmdline.HAS_TESTS and options.test:
+        RunTests(options.skip_net_tests, options.verbose, args)
 
-# Build selected commits for selected boards
-else:
-    bsettings.Setup(options.config_file)
-    ret_code = control.DoBuildman(options, args)
-    sys.exit(ret_code)
+    # Build selected commits for selected boards
+    else:
+        bsettings.Setup(options.config_file)
+        ret_code = control.DoBuildman(options, args)
+        sys.exit(ret_code)
+
+
+if __name__ == "__main__":
+    run_buildman()
diff --git a/tools/buildman/pyproject.toml b/tools/buildman/pyproject.toml
new file mode 100644
index 0000000..4d75e77
--- /dev/null
+++ b/tools/buildman/pyproject.toml
@@ -0,0 +1,29 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "buildman"
+version = "0.0.2"
+authors = [
+  { name="Simon Glass", email="sjg@chromium.org" },
+]
+dependencies = ["u_boot_pylib", "patch-manager"]
+description = "Buildman build tool for U-Boot"
+readme = "README.rst"
+requires-python = ">=3.7"
+classifiers = [
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
+    "Operating System :: OS Independent",
+]
+
+[project.urls]
+"Homepage" = "https://u-boot.readthedocs.io/en/latest/build/buildman.html"
+"Bug Tracker" = "https://source.denx.de/groups/u-boot/-/issues"
+
+[project.scripts]
+buildman = "buildman.main:run_buildman"
+
+[tool.setuptools.package-data]
+buildman = ["*.rst"]
diff --git a/tools/buildman/test.py b/tools/buildman/test.py
index daf5467..9fa6445 100644
--- a/tools/buildman/test.py
+++ b/tools/buildman/test.py
@@ -17,10 +17,10 @@
 from buildman import control
 from buildman import toolchain
 from patman import commit
-from patman import command
-from patman import terminal
-from patman import test_util
-from patman import tools
+from u_boot_pylib import command
+from u_boot_pylib import terminal
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
 
 use_network = True
 
diff --git a/tools/buildman/toolchain.py b/tools/buildman/toolchain.py
index ea1ad1b..8f9130b 100644
--- a/tools/buildman/toolchain.py
+++ b/tools/buildman/toolchain.py
@@ -11,9 +11,9 @@
 import urllib.request, urllib.error, urllib.parse
 
 from buildman import bsettings
-from patman import command
-from patman import terminal
-from patman import tools
+from u_boot_pylib import command
+from u_boot_pylib import terminal
+from u_boot_pylib import tools
 
 (PRIORITY_FULL_PREFIX, PRIORITY_PREFIX_GCC, PRIORITY_PREFIX_GCC_PATH,
     PRIORITY_CALC) = list(range(4))
@@ -156,9 +156,8 @@
         Returns:
             Value of that environment variable or arguments
         """
-        wrapper = self.GetWrapper()
         if which == VAR_CROSS_COMPILE:
-            return wrapper + os.path.join(self.path, self.cross)
+            return self.GetWrapper() + self.cross
         elif which == VAR_PATH:
             return self.path
         elif which == VAR_ARCH:
diff --git a/tools/concurrencytest/.gitignore b/tools/concurrencytest/.gitignore
deleted file mode 100644
index 0d20b64..0000000
--- a/tools/concurrencytest/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.pyc
diff --git a/tools/concurrencytest/README.md b/tools/concurrencytest/README.md
deleted file mode 100644
index 2d7fe75..0000000
--- a/tools/concurrencytest/README.md
+++ /dev/null
@@ -1,74 +0,0 @@
-concurrencytest
-===============
-
-![testing goats](https://raw.github.com/cgoldberg/concurrencytest/master/testing-goats.png "testing goats")
-
-Python testtools extension for running unittest suites concurrently.
-
-----
-
-Install from PyPI:
-```
-pip install concurrencytest
-```
-
-----
-
-Requires:
-
-* [testtools](https://pypi.python.org/pypi/testtools) : `pip install testtools`
-* [python-subunit](https://pypi.python.org/pypi/python-subunit) : `pip install python-subunit`
-
-----
-
-Example:
-
-```python
-import time
-import unittest
-
-from concurrencytest import ConcurrentTestSuite, fork_for_tests
-
-
-class SampleTestCase(unittest.TestCase):
-    """Dummy tests that sleep for demo."""
-
-    def test_me_1(self):
-        time.sleep(0.5)
-
-    def test_me_2(self):
-        time.sleep(0.5)
-
-    def test_me_3(self):
-        time.sleep(0.5)
-
-    def test_me_4(self):
-        time.sleep(0.5)
-
-
-# Load tests from SampleTestCase defined above
-suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
-runner = unittest.TextTestRunner()
-
-# Run tests sequentially
-runner.run(suite)
-
-# Run same tests across 4 processes
-suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
-concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(4))
-runner.run(concurrent_suite)
-```
-Output:
-
-```
-....
-----------------------------------------------------------------------
-Ran 4 tests in 2.003s
-
-OK
-....
-----------------------------------------------------------------------
-Ran 4 tests in 0.504s
-
-OK
-```
diff --git a/tools/concurrencytest/__init__.py b/tools/concurrencytest/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tools/concurrencytest/__init__.py
+++ /dev/null
diff --git a/tools/concurrencytest/concurrencytest.py b/tools/concurrencytest/concurrencytest.py
deleted file mode 100644
index 1c4f03f..0000000
--- a/tools/concurrencytest/concurrencytest.py
+++ /dev/null
@@ -1,221 +0,0 @@
-#!/usr/bin/env python
-# SPDX-License-Identifier: GPL-2.0+
-#
-# Modified by: Corey Goldberg, 2013
-#
-# Original code from:
-#   Bazaar (bzrlib.tests.__init__.py, v2.6, copied Jun 01 2013)
-#   Copyright (C) 2005-2011 Canonical Ltd
-
-"""Python testtools extension for running unittest suites concurrently.
-
-The `testtools` project provides a ConcurrentTestSuite class, but does
-not provide a `make_tests` implementation needed to use it.
-
-This allows you to parallelize a test run across a configurable number
-of worker processes. While this can speed up CPU-bound test runs, it is
-mainly useful for IO-bound tests that spend most of their time waiting for
-data to arrive from someplace else and can benefit from cocncurrency.
-
-Unix only.
-"""
-
-import os
-import sys
-import traceback
-import unittest
-from itertools import cycle
-from multiprocessing import cpu_count
-
-from subunit import ProtocolTestCase, TestProtocolClient
-from subunit.test_results import AutoTimingTestResultDecorator
-
-from testtools import ConcurrentTestSuite, iterate_tests
-from testtools.content import TracebackContent, text_content
-
-
-_all__ = [
-    'ConcurrentTestSuite',
-    'fork_for_tests',
-    'partition_tests',
-]
-
-
-CPU_COUNT = cpu_count()
-
-
-class BufferingTestProtocolClient(TestProtocolClient):
-    """A TestProtocolClient which can buffer the test outputs
-
-    This class captures the stdout and stderr output streams of the
-    tests as it runs them, and includes the output texts in the subunit
-    stream as additional details.
-
-    Args:
-        stream: A file-like object to write a subunit stream to
-        buffer (bool): True to capture test stdout/stderr outputs and
-            include them in the test details
-    """
-    def __init__(self, stream, buffer=True):
-        super().__init__(stream)
-        self.buffer = buffer
-
-    def _addOutcome(self, outcome, test, error=None, details=None,
-            error_permitted=True):
-        """Report a test outcome to the subunit stream
-
-        The parent class uses this function as a common implementation
-        for various methods that report successes, errors, failures, etc.
-
-        This version automatically upgrades the error tracebacks to the
-        new 'details' format by wrapping them in a Content object, so
-        that we can include the captured test output in the test result
-        details.
-
-        Args:
-            outcome: A string describing the outcome - used as the
-                event name in the subunit stream.
-            test: The test case whose outcome is to be reported
-            error: Standard unittest positional argument form - an
-                exc_info tuple.
-            details: New Testing-in-python drafted API; a dict from
-                string to subunit.Content objects.
-            error_permitted: If True then one and only one of error or
-                details must be supplied. If False then error must not
-                be supplied and details is still optional.
-        """
-        if details is None:
-            details = {}
-
-        # Parent will raise an exception if error_permitted is False but
-        # error is not None. We want that exception in that case, so
-        # don't touch error when error_permitted is explicitly False.
-        if error_permitted and error is not None:
-            # Parent class prefers error over details
-            details['traceback'] = TracebackContent(error, test)
-            error_permitted = False
-            error = None
-
-        if self.buffer:
-            stdout = sys.stdout.getvalue()
-            if stdout:
-                details['stdout'] = text_content(stdout)
-
-            stderr = sys.stderr.getvalue()
-            if stderr:
-                details['stderr'] = text_content(stderr)
-
-        return super()._addOutcome(outcome, test, error=error,
-                details=details, error_permitted=error_permitted)
-
-
-def fork_for_tests(concurrency_num=CPU_COUNT, buffer=False):
-    """Implementation of `make_tests` used to construct `ConcurrentTestSuite`.
-
-    :param concurrency_num: number of processes to use.
-    """
-    if buffer:
-        test_protocol_client_class = BufferingTestProtocolClient
-    else:
-        test_protocol_client_class = TestProtocolClient
-
-    def do_fork(suite):
-        """Take suite and start up multiple runners by forking (Unix only).
-
-        :param suite: TestSuite object.
-
-        :return: An iterable of TestCase-like objects which can each have
-        run(result) called on them to feed tests to result.
-        """
-        result = []
-        test_blocks = partition_tests(suite, concurrency_num)
-        # Clear the tests from the original suite so it doesn't keep them alive
-        suite._tests[:] = []
-        for process_tests in test_blocks:
-            process_suite = unittest.TestSuite(process_tests)
-            # Also clear each split list so new suite has only reference
-            process_tests[:] = []
-            c2pread, c2pwrite = os.pipe()
-            pid = os.fork()
-            if pid == 0:
-                try:
-                    stream = os.fdopen(c2pwrite, 'wb')
-                    os.close(c2pread)
-                    # Leave stderr and stdout open so we can see test noise
-                    # Close stdin so that the child goes away if it decides to
-                    # read from stdin (otherwise its a roulette to see what
-                    # child actually gets keystrokes for pdb etc).
-                    sys.stdin.close()
-                    subunit_result = AutoTimingTestResultDecorator(
-                        test_protocol_client_class(stream)
-                    )
-                    process_suite.run(subunit_result)
-                except:
-                    # Try and report traceback on stream, but exit with error
-                    # even if stream couldn't be created or something else
-                    # goes wrong.  The traceback is formatted to a string and
-                    # written in one go to avoid interleaving lines from
-                    # multiple failing children.
-                    try:
-                        stream.write(traceback.format_exc())
-                    finally:
-                        os._exit(1)
-                os._exit(0)
-            else:
-                os.close(c2pwrite)
-                stream = os.fdopen(c2pread, 'rb')
-                # If we don't pass the second argument here, it defaults
-                # to sys.stdout.buffer down the line. But if we don't
-                # pass it *now*, it may be resolved after sys.stdout is
-                # replaced with a StringIO (to capture tests' outputs)
-                # which doesn't have a buffer attribute and can end up
-                # occasionally causing a 'broken-runner' error.
-                test = ProtocolTestCase(stream, sys.stdout.buffer)
-                result.append(test)
-        return result
-    return do_fork
-
-
-def partition_tests(suite, count):
-    """Partition suite into count lists of tests."""
-    # This just assigns tests in a round-robin fashion.  On one hand this
-    # splits up blocks of related tests that might run faster if they shared
-    # resources, but on the other it avoids assigning blocks of slow tests to
-    # just one partition.  So the slowest partition shouldn't be much slower
-    # than the fastest.
-    partitions = [list() for _ in range(count)]
-    tests = iterate_tests(suite)
-    for partition, test in zip(cycle(partitions), tests):
-        partition.append(test)
-    return partitions
-
-
-if __name__ == '__main__':
-    import time
-
-    class SampleTestCase(unittest.TestCase):
-        """Dummy tests that sleep for demo."""
-
-        def test_me_1(self):
-            time.sleep(0.5)
-
-        def test_me_2(self):
-            time.sleep(0.5)
-
-        def test_me_3(self):
-            time.sleep(0.5)
-
-        def test_me_4(self):
-            time.sleep(0.5)
-
-    # Load tests from SampleTestCase defined above
-    suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
-    runner = unittest.TextTestRunner()
-
-    # Run tests sequentially
-    runner.run(suite)
-
-    # Run same tests across 4 processes
-    suite = unittest.TestLoader().loadTestsFromTestCase(SampleTestCase)
-    concurrent_suite = ConcurrentTestSuite(suite, fork_for_tests(4))
-    runner.run(concurrent_suite)
diff --git a/tools/dtoc/README.rst b/tools/dtoc/README.rst
new file mode 100644
index 0000000..92b3975
--- /dev/null
+++ b/tools/dtoc/README.rst
@@ -0,0 +1,15 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Devicetree-to-C generator
+=========================
+
+This is a Python program and associated utilities, which supports converting
+devicetree files into C code. It generates header files containing struct
+definitions, as well as C files containing the data. It does not require any
+modification of the devicetree files.
+
+Some high-level libraries are provided for working with devicetree. These may
+be useful in other projects.
+
+This package also includes some U-Boot-specific features, such as creating
+`struct udevice` and `struct uclass` entries for devicetree nodes.
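Once the package is installed (together with pylibfdt), the devicetree library described above can also be used directly from Python. A minimal sketch, assuming the `FdtScan()`/`GetNode()` and `fdt_util` helper names keep their current form and that a compiled `test.dtb` file exists::

    # Hedged sketch of using dtoc's devicetree library on its own; the
    # node path and property names here are illustrative only.
    from dtoc import fdt
    from dtoc import fdt_util

    dtb = fdt.FdtScan('test.dtb')          # parse and scan the whole tree
    node = dtb.GetNode('/soc/serial@0')    # look up a node by path
    if node:
        # fdt_util helpers convert property values into Python types
        compat = fdt_util.GetString(node, 'compatible')
        freq = fdt_util.GetInt(node, 'clock-frequency', 0)
        print(compat, freq)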
diff --git a/tools/dtoc/fdt.py b/tools/dtoc/fdt.py
index d933972..a8e0534 100644
--- a/tools/dtoc/fdt.py
+++ b/tools/dtoc/fdt.py
@@ -12,7 +12,7 @@
 from dtoc import fdt_util
 import libfdt
 from libfdt import QUIET_NOTFOUND
-from patman import tools
+from u_boot_pylib import tools
 
 # This deals with a device tree, presenting it as an assortment of Node and
 # Prop objects, representing nodes and properties, respectively. This file
diff --git a/tools/dtoc/fdt_util.py b/tools/dtoc/fdt_util.py
index f343166..f1f7056 100644
--- a/tools/dtoc/fdt_util.py
+++ b/tools/dtoc/fdt_util.py
@@ -13,8 +13,8 @@
 import sys
 import tempfile
 
-from patman import command
-from patman import tools
+from u_boot_pylib import command
+from u_boot_pylib import tools
 
 def fdt32_to_cpu(val):
     """Convert a device tree cell to an integer
diff --git a/tools/dtoc/main.py b/tools/dtoc/main.py
index 5508759..6c91450 100755
--- a/tools/dtoc/main.py
+++ b/tools/dtoc/main.py
@@ -23,6 +23,7 @@
 
 from argparse import ArgumentParser
 import os
+import pathlib
 import sys
 
 # Bring in the patman libraries
@@ -35,7 +36,10 @@
                 '../../build-sandbox_spl/scripts/dtc/pylibfdt'))
 
 from dtoc import dtb_platdata
-from patman import test_util
+from u_boot_pylib import test_util
+
+DTOC_DIR = pathlib.Path(__file__).parent
+HAVE_TESTS = (DTOC_DIR / 'test_dtoc.py').exists()
 
 def run_tests(processes, args):
     """Run all the test we have for dtoc
@@ -61,54 +65,62 @@
     return (0 if result.wasSuccessful() else 1)
 
 
-def RunTestCoverage():
+def RunTestCoverage(build_dir):
     """Run the tests and check that we get 100% coverage"""
     sys.argv = [sys.argv[0]]
     test_util.run_test_coverage('tools/dtoc/dtoc', '/main.py',
-            ['tools/patman/*.py', '*/fdt*', '*test*'], args.build_dir)
+            ['tools/patman/*.py', 'tools/u_boot_pylib/*', '*/fdt*', '*test*'],
+            build_dir)
 
 
-if __name__ != '__main__':
-    sys.exit(1)
+def run_dtoc():
+    epilog = 'Generate C code from devicetree files. See of-plat.rst for details'
 
-epilog = '''Generate C code from devicetree files. See of-plat.rst for details'''
+    parser = ArgumentParser(epilog=epilog)
+    parser.add_argument('-B', '--build-dir', type=str, default='b',
+            help='Directory containing the build output')
+    parser.add_argument('-c', '--c-output-dir', action='store',
+                      help='Select output directory for C files')
+    parser.add_argument(
+        '-C', '--h-output-dir', action='store',
+        help='Select output directory for H files (defaults to --c-output-dir)')
+    parser.add_argument('-d', '--dtb-file', action='store',
+                      help='Specify the .dtb input file')
+    parser.add_argument(
+        '-i', '--instantiate', action='store_true', default=False,
+        help='Instantiate devices to avoid needing device_bind()')
+    parser.add_argument('--include-disabled', action='store_true',
+                      help='Include disabled nodes')
+    parser.add_argument('-o', '--output', action='store',
+                      help='Select output filename')
+    parser.add_argument(
+        '-p', '--phase', type=str,
+        help='set phase of U-Boot this invocation is for (spl/tpl)')
+    parser.add_argument('-P', '--processes', type=int,
+                      help='set number of processes to use for running tests')
+    if HAVE_TESTS:
+        parser.add_argument('-t', '--test', action='store_true', dest='test',
+                            default=False, help='run tests')
+        parser.add_argument(
+            '-T', '--test-coverage', action='store_true',
+            default=False, help='run tests and check for 100%% coverage')
+    parser.add_argument('files', nargs='*')
+    args = parser.parse_args()
 
-parser = ArgumentParser(epilog=epilog)
-parser.add_argument('-B', '--build-dir', type=str, default='b',
-        help='Directory containing the build output')
-parser.add_argument('-c', '--c-output-dir', action='store',
-                  help='Select output directory for C files')
-parser.add_argument('-C', '--h-output-dir', action='store',
-                  help='Select output directory for H files (defaults to --c-output-di)')
-parser.add_argument('-d', '--dtb-file', action='store',
-                  help='Specify the .dtb input file')
-parser.add_argument('-i', '--instantiate', action='store_true', default=False,
-                  help='Instantiate devices to avoid needing device_bind()')
-parser.add_argument('--include-disabled', action='store_true',
-                  help='Include disabled nodes')
-parser.add_argument('-o', '--output', action='store',
-                  help='Select output filename')
-parser.add_argument('-p', '--phase', type=str,
-                  help='set phase of U-Boot this invocation is for (spl/tpl)')
-parser.add_argument('-P', '--processes', type=int,
-                  help='set number of processes to use for running tests')
-parser.add_argument('-t', '--test', action='store_true', dest='test',
-                  default=False, help='run tests')
-parser.add_argument('-T', '--test-coverage', action='store_true',
-                default=False, help='run tests and check for 100%% coverage')
-parser.add_argument('files', nargs='*')
-args = parser.parse_args()
+    # Run our meagre tests
+    if HAVE_TESTS and args.test:
+        ret_code = run_tests(args.processes, args)
+        sys.exit(ret_code)
 
-# Run our meagre tests
-if args.test:
-    ret_code = run_tests(args.processes, args)
-    sys.exit(ret_code)
+    elif HAVE_TESTS and args.test_coverage:
+        RunTestCoverage(args.build_dir)
 
-elif args.test_coverage:
-    RunTestCoverage()
+    else:
+        dtb_platdata.run_steps(args.files, args.dtb_file, args.include_disabled,
+                               args.output,
+                               [args.c_output_dir, args.h_output_dir],
+                               args.phase, instantiate=args.instantiate)
 
-else:
-    dtb_platdata.run_steps(args.files, args.dtb_file, args.include_disabled,
-                           args.output,
-                           [args.c_output_dir, args.h_output_dir],
-                           args.phase, instantiate=args.instantiate)
+
+if __name__ == '__main__':
+    run_dtoc()
diff --git a/tools/dtoc/pyproject.toml b/tools/dtoc/pyproject.toml
new file mode 100644
index 0000000..77fe4da
--- /dev/null
+++ b/tools/dtoc/pyproject.toml
@@ -0,0 +1,26 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "dtoc"
+version = "0.0.2"
+authors = [
+  { name="Simon Glass", email="sjg@chromium.org" },
+]
+dependencies = ["pylibfdt", "u_boot_pylib"]
+description = "Devicetree-to-C generator"
+readme = "README.rst"
+requires-python = ">=3.7"
+classifiers = [
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
+    "Operating System :: OS Independent",
+]
+
+[project.urls]
+"Homepage" = "https://u-boot.readthedocs.io/en/latest/develop/driver-model/of-plat.html"
+"Bug Tracker" = "https://source.denx.de/groups/u-boot/-/issues"
+
+[project.scripts]
+dtoc = "dtoc.main:run_dtoc"
diff --git a/tools/dtoc/test_dtoc.py b/tools/dtoc/test_dtoc.py
index c62fcba..597c93e 100755
--- a/tools/dtoc/test_dtoc.py
+++ b/tools/dtoc/test_dtoc.py
@@ -13,6 +13,7 @@
 import copy
 import glob
 import os
+import pathlib
 import struct
 import unittest
 
@@ -25,10 +26,11 @@
 from dtoc.dtb_platdata import tab_to
 from dtoc.src_scan import conv_name_to_c
 from dtoc.src_scan import get_compat_name
-from patman import test_util
-from patman import tools
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
 
-OUR_PATH = os.path.dirname(os.path.realpath(__file__))
+DTOC_DIR = pathlib.Path(__file__).parent
+TEST_DATA_DIR = DTOC_DIR / 'test/'
 
 
 HEADER = '''/*
@@ -91,7 +93,7 @@
     Returns:
         str: Filename of compiled file in output directory
     """
-    return fdt_util.EnsureCompiled(os.path.join(OUR_PATH, 'test', dts_fname),
+    return fdt_util.EnsureCompiled(str(TEST_DATA_DIR / dts_fname),
                                    capture_stderr=capture_stderr)
 
 
diff --git a/tools/dtoc/test_fdt.py b/tools/dtoc/test_fdt.py
index dffa86f..32fa69c 100755
--- a/tools/dtoc/test_fdt.py
+++ b/tools/dtoc/test_fdt.py
@@ -30,8 +30,8 @@
 from dtoc.fdt_util import fdt32_to_cpu, fdt64_to_cpu
 from dtoc.fdt import Type, BytesToValue
 import libfdt
-from patman import test_util
-from patman import tools
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
 
 #pylint: disable=protected-access
 
@@ -814,7 +814,8 @@
         build_dir (str): Directory containing the build output
     """
     test_util.run_test_coverage('tools/dtoc/test_fdt.py', None,
-            ['tools/patman/*.py', '*test_fdt.py'], build_dir)
+            ['tools/patman/*.py', 'tools/u_boot_pylib/*', '*test_fdt.py'],
+            build_dir)
 
 
 def run_tests(names, processes):
diff --git a/tools/dtoc/test_src_scan.py b/tools/dtoc/test_src_scan.py
index f93cd7f..64b7408 100644
--- a/tools/dtoc/test_src_scan.py
+++ b/tools/dtoc/test_src_scan.py
@@ -15,8 +15,8 @@
 from unittest import mock
 
 from dtoc import src_scan
-from patman import test_util
-from patman import tools
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
 
 OUR_PATH = os.path.dirname(os.path.realpath(__file__))
 
diff --git a/tools/patman/__init__.py b/tools/patman/__init__.py
index 1b98ec7..08eeffd 100644
--- a/tools/patman/__init__.py
+++ b/tools/patman/__init__.py
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0+
 
-__all__ = ['checkpatch', 'command', 'commit', 'control', 'cros_subprocess',
-           'func_test', 'get_maintainer', 'gitutil', '__main__', 'patchstream',
-           'project', 'series', 'setup', 'settings', 'terminal',
-           'test_checkpatch', 'test_util', 'tools', 'tout']
+__all__ = ['checkpatch', 'commit', 'control', 'func_test', 'get_maintainer',
+           'gitutil', '__main__', 'patchstream', 'project', 'series',
+           'settings', 'setup', 'status', 'test_checkpatch', 'test_settings']
diff --git a/tools/patman/__main__.py b/tools/patman/__main__.py
index 749e634..48ffbc8 100755
--- a/tools/patman/__main__.py
+++ b/tools/patman/__main__.py
@@ -24,10 +24,9 @@
 from patman import gitutil
 from patman import project
 from patman import settings
-from patman import terminal
-from patman import test_util
-from patman import test_checkpatch
-from patman import tools
+from u_boot_pylib import terminal
+from u_boot_pylib import test_util
+from u_boot_pylib import tools
 
 epilog = '''Create patches from commits in a branch, check them and email them
 as specified by tags you place in the commits. Use -n to do a dry run first.'''
@@ -146,11 +145,12 @@
 # Run our meagre tests
 if args.cmd == 'test':
     from patman import func_test
+    from patman import test_checkpatch
 
     result = test_util.run_test_suites(
         'patman', False, False, False, None, None, None,
         [test_checkpatch.TestPatch, func_test.TestFunctional,
-         'gitutil', 'settings', 'terminal'])
+         'gitutil', 'settings'])
 
     sys.exit(0 if result.wasSuccessful() else 1)
 
diff --git a/tools/patman/checkpatch.py b/tools/patman/checkpatch.py
index d1b902d..e03cac1 100644
--- a/tools/patman/checkpatch.py
+++ b/tools/patman/checkpatch.py
@@ -3,13 +3,14 @@
 #
 
 import collections
+import concurrent.futures
 import os
 import re
 import sys
 
-from patman import command
 from patman import gitutil
-from patman import terminal
+from u_boot_pylib import command
+from u_boot_pylib import terminal
 
 EMACS_PREFIX = r'(?:[0-9]{4}.*\.patch:[0-9]+: )?'
 TYPE_NAME = r'([A-Z_]+:)?'
@@ -244,26 +245,31 @@
     error_count, warning_count, check_count = 0, 0, 0
     col = terminal.Color()
 
-    for fname in args:
-        result = check_patch(fname, verbose, use_tree=use_tree)
-        if not result.ok:
-            error_count += result.errors
-            warning_count += result.warnings
-            check_count += result.checks
-            print('%d errors, %d warnings, %d checks for %s:' % (result.errors,
-                    result.warnings, result.checks, col.build(col.BLUE, fname)))
-            if (len(result.problems) != result.errors + result.warnings +
-                    result.checks):
-                print("Internal error: some problems lost")
-            # Python seems to get confused by this
-            # pylint: disable=E1133
-            for item in result.problems:
-                sys.stderr.write(
-                    get_warning_msg(col, item.get('type', '<unknown>'),
-                        item.get('file', '<unknown>'),
-                        item.get('line', 0), item.get('msg', 'message')))
-            print
-            #print(stdout)
+    with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
+        futures = []
+        for fname in args:
+            f = executor.submit(check_patch, fname, verbose, use_tree=use_tree)
+            futures.append(f)
+
+        for fname, f in zip(args, futures):
+            result = f.result()
+            if not result.ok:
+                error_count += result.errors
+                warning_count += result.warnings
+                check_count += result.checks
+                print('%d errors, %d warnings, %d checks for %s:' % (result.errors,
+                        result.warnings, result.checks, col.build(col.BLUE, fname)))
+                if (len(result.problems) != result.errors + result.warnings +
+                        result.checks):
+                    print("Internal error: some problems lost")
+                # Python seems to get confused by this
+                # pylint: disable=E1133
+                for item in result.problems:
+                    sys.stderr.write(
+                        get_warning_msg(col, item.get('type', '<unknown>'),
+                            item.get('file', '<unknown>'),
+                            item.get('line', 0), item.get('msg', 'message')))
+                print()
     if error_count or warning_count or check_count:
         str = 'checkpatch.pl found %d error(s), %d warning(s), %d checks(s)'
         color = col.GREEN
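The checkpatch change above submits every file to a thread pool first and then collects the results in submission order, so the report stays deterministic even though the checks run in parallel. A self-contained sketch of the same idiom, with a hypothetical check() function standing in for the real checkpatch call::

    import concurrent.futures

    def check(fname):
        # stand-in for an expensive per-file check such as checkpatch.pl
        return len(fname)

    fnames = ['0001-first.patch', '0002-second.patch']
    with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
        futures = [executor.submit(check, fname) for fname in fnames]
        # zip() pairs each filename with its future, so results are
        # reported in the original order regardless of completion order
        for fname, future in zip(fnames, futures):
            print(fname, future.result())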
diff --git a/tools/patman/control.py b/tools/patman/control.py
index 38e98da..916ddf8 100644
--- a/tools/patman/control.py
+++ b/tools/patman/control.py
@@ -14,7 +14,7 @@
 from patman import checkpatch
 from patman import gitutil
 from patman import patchstream
-from patman import terminal
+from u_boot_pylib import terminal
 
 def setup():
     """Do required setup before doing anything"""
@@ -85,7 +85,7 @@
     # Do a few checks on the series
     series.DoChecks()
 
-    # Check the patches, and run them through 'git am' just to be sure
+    # Check the patches
     if run_checkpatch:
         ok = checkpatch.check_patches(verbose, patch_files, use_tree)
     else:
diff --git a/tools/patman/func_test.py b/tools/patman/func_test.py
index c25a47b..42ac4ed 100644
--- a/tools/patman/func_test.py
+++ b/tools/patman/func_test.py
@@ -23,9 +23,9 @@
 from patman.patchstream import PatchStream
 from patman.series import Series
 from patman import settings
-from patman import terminal
-from patman import tools
-from patman.test_util import capture_sys_output
+from u_boot_pylib import terminal
+from u_boot_pylib import tools
+from u_boot_pylib.test_util import capture_sys_output
 
 import pygit2
 from patman import status
@@ -240,6 +240,8 @@
         self.assertEqual('Change log missing for v3', next(lines))
         self.assertEqual('Change log for unknown version v4', next(lines))
         self.assertEqual("Alias 'pci' not found", next(lines))
+        while next(lines) != 'Cc processing complete':
+            pass
         self.assertIn('Dry run', next(lines))
         self.assertEqual('', next(lines))
         self.assertIn('Send a total of %d patches' % count, next(lines))
diff --git a/tools/patman/get_maintainer.py b/tools/patman/get_maintainer.py
index f7011be..8df3d12 100644
--- a/tools/patman/get_maintainer.py
+++ b/tools/patman/get_maintainer.py
@@ -7,8 +7,8 @@
 import shlex
 import shutil
 
-from patman import command
 from patman import gitutil
+from u_boot_pylib import command
 
 
 def find_get_maintainer(script_file_name):
diff --git a/tools/patman/gitutil.py b/tools/patman/gitutil.py
index 5e74210..6700057 100644
--- a/tools/patman/gitutil.py
+++ b/tools/patman/gitutil.py
@@ -5,9 +5,9 @@
 import os
 import sys
 
-from patman import command
 from patman import settings
-from patman import terminal
+from u_boot_pylib import command
+from u_boot_pylib import terminal
 
 # True to use --no-decorate - we check this in setup()
 use_no_decorate = True
diff --git a/tools/patman/patchstream.py b/tools/patman/patchstream.py
index fb6a603..f91669a 100644
--- a/tools/patman/patchstream.py
+++ b/tools/patman/patchstream.py
@@ -14,10 +14,10 @@
 import shutil
 import tempfile
 
-from patman import command
 from patman import commit
 from patman import gitutil
 from patman.series import Series
+from u_boot_pylib import command
 
 # Tags that we detect and remove
 RE_REMOVE = re.compile(r'^BUG=|^TEST=|^BRANCH=|^Review URL:'
diff --git a/tools/patman/patman.rst b/tools/patman/patman.rst
index 6113962..038b651 100644
--- a/tools/patman/patman.rst
+++ b/tools/patman/patman.rst
@@ -41,6 +41,18 @@
 patches automatically (unless you use -m to disable this).
 
 
+Installation
+------------
+
+You can install patman using::
+
+   pip install patch-manager
+
+The name patch-manager is used because 'patman' conflicts with an existing
+package on PyPI.
+
+If you are using patman within the U-Boot tree, it may be easiest to add a
+symlink from your local `~/.bin` directory to `/path/to/tools/patman/patman`.
+
 How to use this tool
 --------------------
 
diff --git a/tools/patman/pyproject.toml b/tools/patman/pyproject.toml
new file mode 100644
index 0000000..c5dc7c7
--- /dev/null
+++ b/tools/patman/pyproject.toml
@@ -0,0 +1,29 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "patch-manager"
+version = "0.0.2"
+authors = [
+  { name="Simon Glass", email="sjg@chromium.org" },
+]
+dependencies = ["u_boot_pylib"]
+description = "Patman patch manager"
+readme = "README.rst"
+requires-python = ">=3.7"
+classifiers = [
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
+    "Operating System :: OS Independent",
+]
+
+[project.urls]
+"Homepage" = "https://u-boot.readthedocs.io/en/latest/develop/patman.html"
+"Bug Tracker" = "https://source.denx.de/groups/u-boot/-/issues"
+
+[project.scripts]
+patman = "patman.__main__:run_patman"
+
+[tool.setuptools.package-data]
+patman = ["*.rst"]
diff --git a/tools/patman/series.py b/tools/patman/series.py
index 2eeeef7..6866e1d 100644
--- a/tools/patman/series.py
+++ b/tools/patman/series.py
@@ -5,14 +5,17 @@
 from __future__ import print_function
 
 import collections
+import concurrent.futures
 import itertools
 import os
+import sys
+import time
 
 from patman import get_maintainer
 from patman import gitutil
 from patman import settings
-from patman import terminal
-from patman import tools
+from u_boot_pylib import terminal
+from u_boot_pylib import tools
 
 # Series-xxx tags that we understand
 valid_series = ['to', 'cc', 'version', 'changes', 'prefix', 'notes', 'name',
@@ -234,6 +237,49 @@
             str = 'Change log exists, but no version is set'
             print(col.build(col.RED, str))
 
+    def GetCcForCommit(self, commit, process_tags, warn_on_error,
+                       add_maintainers, limit, get_maintainer_script,
+                       all_skips):
+        """Get the email CCs to use with a particular commit
+
+        Uses subject tags and get_maintainers.pl script to find people to cc
+        on a patch
+
+        Args:
+            commit (Commit): Commit to process
+            process_tags (bool): Process tags as if they were aliases
+            warn_on_error (bool): True to print a warning when an alias fails to
+                match, False to ignore it.
+            add_maintainers (bool or list of str): Either:
+                True/False to call the get_maintainers to CC maintainers
+                List of maintainers to include (for testing)
+            limit (int): Limit the length of the Cc list (None if no limit)
+            get_maintainer_script (str): The file name of the get_maintainer.pl
+                script (or compatible).
+            all_skips (set of str): Updated to include the set of bouncing email
+                addresses that were dropped from the output. This is essentially
+                a return value from this function.
+
+        Returns:
+            list of str: List of email addresses to cc
+        """
+        cc = []
+        if process_tags:
+            cc += gitutil.build_email_list(commit.tags,
+                                           warn_on_error=warn_on_error)
+        cc += gitutil.build_email_list(commit.cc_list,
+                                       warn_on_error=warn_on_error)
+        if type(add_maintainers) == type(cc):
+            cc += add_maintainers
+        elif add_maintainers:
+            cc += get_maintainer.get_maintainer(get_maintainer_script,
+                                                commit.patch)
+        all_skips |= set(cc) & set(settings.bounces)
+        cc = list(set(cc) - set(settings.bounces))
+        if limit is not None:
+            cc = cc[:limit]
+        return cc
+
     def MakeCcFile(self, process_tags, cover_fname, warn_on_error,
                    add_maintainers, limit, get_maintainer_script):
         """Make a cc file for us to use for per-commit Cc automation
@@ -241,15 +287,15 @@
         Also stores in self._generated_cc to make ShowActions() faster.
 
         Args:
-            process_tags: Process tags as if they were aliases
-            cover_fname: If non-None the name of the cover letter.
-            warn_on_error: True to print a warning when an alias fails to match,
-                False to ignore it.
-            add_maintainers: Either:
+            process_tags (bool): Process tags as if they were aliases
+            cover_fname (str): If non-None the name of the cover letter.
+            warn_on_error (bool): True to print a warning when an alias fails to
+                match, False to ignore it.
+            add_maintainers (bool or list of str): Either:
                 True/False to call the get_maintainers to CC maintainers
                 List of maintainers to include (for testing)
-            limit: Limit the length of the Cc list (None if no limit)
-            get_maintainer_script: The file name of the get_maintainer.pl
+            limit (int): Limit the length of the Cc list (None if no limit)
+            get_maintainer_script (str): The file name of the get_maintainer.pl
                 script (or compatible).
         Return:
             Filename of temp file created
@@ -259,28 +305,42 @@
         fname = '/tmp/patman.%d' % os.getpid()
         fd = open(fname, 'w', encoding='utf-8')
         all_ccs = []
-        for commit in self.commits:
-            cc = []
-            if process_tags:
-                cc += gitutil.build_email_list(commit.tags,
-                                               warn_on_error=warn_on_error)
-            cc += gitutil.build_email_list(commit.cc_list,
-                                           warn_on_error=warn_on_error)
-            if type(add_maintainers) == type(cc):
-                cc += add_maintainers
-            elif add_maintainers:
+        all_skips = set()
+        with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
+            for i, commit in enumerate(self.commits):
+                commit.seq = i
+                commit.future = executor.submit(
+                    self.GetCcForCommit, commit, process_tags, warn_on_error,
+                    add_maintainers, limit, get_maintainer_script, all_skips)
 
-                cc += get_maintainer.get_maintainer(get_maintainer_script,
-                                                    commit.patch)
-            for x in set(cc) & set(settings.bounces):
-                print(col.build(col.YELLOW, 'Skipping "%s"' % x))
-            cc = list(set(cc) - set(settings.bounces))
-            if limit is not None:
-                cc = cc[:limit]
+            # Show progress for any commits that are taking forever
+            lastlen = 0
+            while True:
+                left = [commit for commit in self.commits
+                        if not commit.future.done()]
+                if not left:
+                    break
+                names = ', '.join(f'{c.seq + 1}:{c.subject}'
+                                  for c in left[:2])
+                out = f'\r{len(left)} remaining: {names}'[:79]
+                spaces = ' ' * (lastlen - len(out))
+                if lastlen:  # Don't print anything the first time
+                    print(out, spaces, end='')
+                    sys.stdout.flush()
+                lastlen = len(out)
+                time.sleep(.25)
+            print(f'\rdone{" " * lastlen}\r', end='')
+            print('Cc processing complete')
+
+        for commit in self.commits:
+            cc = commit.future.result()
             all_ccs += cc
             print(commit.patch, '\0'.join(sorted(set(cc))), file=fd)
             self._generated_cc[commit.patch] = cc
 
+        for x in sorted(all_skips):
+            print(col.build(col.YELLOW, f'Skipping "{x}"'))
+
         if cover_fname:
             cover_cc = gitutil.build_email_list(self.get('cover_cc', ''))
             cover_cc = list(set(cover_cc + all_ccs))
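The MakeCcFile() change above polls the outstanding futures and rewrites a single status line using carriage returns. A reduced, self-contained sketch of that progress loop, with dummy sleep jobs in place of the real get_maintainer work::

    import concurrent.futures
    import sys
    import time

    def job(seconds):
        time.sleep(seconds)   # dummy stand-in for per-commit Cc lookup
        return seconds

    with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:
        futures = [executor.submit(job, n) for n in (0.2, 0.5, 1.0)]
        lastlen = 0
        while True:
            left = [f for f in futures if not f.done()]
            if not left:
                break
            out = f'\r{len(left)} remaining'
            # pad with spaces so a shorter line fully overwrites a longer one
            print(out + ' ' * max(0, lastlen - len(out)), end='')
            sys.stdout.flush()
            lastlen = len(out)
            time.sleep(.25)
        print(f'\rdone{" " * lastlen}')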
diff --git a/tools/patman/status.py b/tools/patman/status.py
index 47ed6d6..5fb436e 100644
--- a/tools/patman/status.py
+++ b/tools/patman/status.py
@@ -18,8 +18,8 @@
 
 from patman import patchstream
 from patman.patchstream import PatchStream
-from patman import terminal
-from patman import tout
+from u_boot_pylib import terminal
+from u_boot_pylib import tout
 
 # Patches which are part of a multi-patch series are shown with a prefix like
 # [prefix, version, sequence], for example '[RFC, v2, 3/5]'. All but the last
diff --git a/tools/patman/test_settings.py b/tools/patman/test_settings.py
index c768a2f..06b7cbc 100644
--- a/tools/patman/test_settings.py
+++ b/tools/patman/test_settings.py
@@ -10,7 +10,7 @@
 import tempfile
 
 from patman import settings
-from patman import tools
+from u_boot_pylib import tools
 
 
 @contextlib.contextmanager
diff --git a/tools/rmboard.py b/tools/rmboard.py
index ae25632..0c56b14 100755
--- a/tools/rmboard.py
+++ b/tools/rmboard.py
@@ -28,7 +28,7 @@
 import re
 import sys
 
-from patman import command
+from u_boot_pylib import command
 
 def rm_kconfig_include(path):
     """Remove a path from Kconfig files
diff --git a/tools/u_boot_pylib/LICENSE b/tools/u_boot_pylib/LICENSE
new file mode 100644
index 0000000..d159169
--- /dev/null
+++ b/tools/u_boot_pylib/LICENSE
@@ -0,0 +1,339 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/tools/u_boot_pylib/README.rst b/tools/u_boot_pylib/README.rst
new file mode 100644
index 0000000..93858f5
--- /dev/null
+++ b/tools/u_boot_pylib/README.rst
@@ -0,0 +1,15 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+U-Boot Python Library
+=====================
+
+This is a Python library used by various U-Boot tools, including patman,
+buildman and binman.
+
+The module can be installed with pip::
+
+   pip install u_boot_pylib
+
+or via setup.py::
+
+   ./setup.py install [--user]
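Once installed, the modules can be imported directly by other tools or scripts. A small sketch, assuming command.output(), terminal.Color().build() and the tools file helpers keep their current signatures::

    # Hedged example of using the shared library from a standalone script
    from u_boot_pylib import command
    from u_boot_pylib import terminal
    from u_boot_pylib import tools

    col = terminal.Color()
    print(col.build(col.GREEN, 'hello from u_boot_pylib'))

    # Run a shell command and capture its output
    print(command.output('echo', 'u-boot'))

    # tools provides simple file helpers
    tools.write_file('/tmp/u_boot_pylib_demo', b'data')
    print(tools.read_file('/tmp/u_boot_pylib_demo'))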
diff --git a/tools/u_boot_pylib/__init__.py b/tools/u_boot_pylib/__init__.py
new file mode 100644
index 0000000..63c88e8
--- /dev/null
+++ b/tools/u_boot_pylib/__init__.py
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+__all__ = ['command', 'cros_subprocess', 'terminal', 'test_util', 'tools',
+           'tout']
diff --git a/tools/u_boot_pylib/__main__.py b/tools/u_boot_pylib/__main__.py
new file mode 100755
index 0000000..8f98d7b
--- /dev/null
+++ b/tools/u_boot_pylib/__main__.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright 2023 Google LLC
+#
+
+import os
+import sys
+
+if __name__ == "__main__":
+    # Allow 'from u_boot_pylib import xxx' to work
+    our_path = os.path.dirname(os.path.realpath(__file__))
+    sys.path.append(os.path.join(our_path, '..'))
+
+    # Run tests
+    from u_boot_pylib import terminal
+    from u_boot_pylib import test_util
+
+    result = test_util.run_test_suites(
+        'u_boot_pylib', False, False, False, None, None, None,
+        ['terminal'])
+
+    sys.exit(0 if result.wasSuccessful() else 1)
diff --git a/tools/patman/command.py b/tools/u_boot_pylib/command.py
similarity index 98%
rename from tools/patman/command.py
rename to tools/u_boot_pylib/command.py
index 92c453b..9bbfc5b 100644
--- a/tools/patman/command.py
+++ b/tools/u_boot_pylib/command.py
@@ -4,7 +4,7 @@
 
 import os
 
-from patman import cros_subprocess
+from u_boot_pylib import cros_subprocess
 
 """Shell command ease-ups for Python."""
 
diff --git a/tools/patman/cros_subprocess.py b/tools/u_boot_pylib/cros_subprocess.py
similarity index 100%
rename from tools/patman/cros_subprocess.py
rename to tools/u_boot_pylib/cros_subprocess.py
diff --git a/tools/u_boot_pylib/pyproject.toml b/tools/u_boot_pylib/pyproject.toml
new file mode 100644
index 0000000..3f33caf
--- /dev/null
+++ b/tools/u_boot_pylib/pyproject.toml
@@ -0,0 +1,22 @@
+[build-system]
+requires = ["setuptools>=61.0"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "u_boot_pylib"
+version = "0.0.2"
+authors = [
+  { name="Simon Glass", email="sjg@chromium.org" },
+]
+description = "U-Boot Python library"
+readme = "README.rst"
+requires-python = ">=3.7"
+classifiers = [
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
+    "Operating System :: OS Independent",
+]
+
+[project.urls]
+"Homepage" = "https://u-boot.readthedocs.io"
+"Bug Tracker" = "https://source.denx.de/groups/u-boot/-/issues"
diff --git a/tools/patman/terminal.py b/tools/u_boot_pylib/terminal.py
similarity index 100%
rename from tools/patman/terminal.py
rename to tools/u_boot_pylib/terminal.py
diff --git a/tools/patman/test_util.py b/tools/u_boot_pylib/test_util.py
similarity index 85%
rename from tools/patman/test_util.py
rename to tools/u_boot_pylib/test_util.py
index 0f6d1aa..e7564e1 100644
--- a/tools/patman/test_util.py
+++ b/tools/u_boot_pylib/test_util.py
@@ -11,15 +11,14 @@
 import sys
 import unittest
 
-from patman import command
+from u_boot_pylib import command
 
 from io import StringIO
 
-buffer_outputs = True
 use_concurrent = True
 try:
-    from concurrencytest.concurrencytest import ConcurrentTestSuite
-    from concurrencytest.concurrencytest import fork_for_tests
+    from concurrencytest import ConcurrentTestSuite
+    from concurrencytest import fork_for_tests
 except:
     use_concurrent = False
 
@@ -120,7 +119,6 @@
             0: Print nothing
             1: Print a dot per test
             2: Print test names
-            3: Print test names, and buffered outputs for failing tests
     """
     def __init__(self, stream, descriptions, verbosity):
         self.verbosity = verbosity
@@ -140,39 +138,12 @@
         self.printErrorList('XFAIL', self.expectedFailures)
         self.printErrorList('XPASS', unexpected_successes)
 
-    def addError(self, test, err):
-        """Called when an error has occurred."""
-        super().addError(test, err)
-        self._mirrorOutput &= self.verbosity >= 3
-
-    def addFailure(self, test, err):
-        """Called when a test has failed."""
-        super().addFailure(test, err)
-        self._mirrorOutput &= self.verbosity >= 3
-
-    def addSubTest(self, test, subtest, err):
-        """Called at the end of a subtest."""
-        super().addSubTest(test, subtest, err)
-        self._mirrorOutput &= self.verbosity >= 3
-
-    def addSuccess(self, test):
-        """Called when a test has completed successfully"""
-        super().addSuccess(test)
-        # Don't print stdout/stderr for successful tests
-        self._mirrorOutput = False
-
     def addSkip(self, test, reason):
         """Called when a test is skipped."""
         # Add empty line to keep spacing consistent with other results
         if not reason.endswith('\n'):
             reason += '\n'
         super().addSkip(test, reason)
-        self._mirrorOutput &= self.verbosity >= 3
-
-    def addExpectedFailure(self, test, err):
-        """Called when an expected failure/error occurred."""
-        super().addExpectedFailure(test, err)
-        self._mirrorOutput &= self.verbosity >= 3
 
 
 def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
@@ -208,14 +179,12 @@
     runner = unittest.TextTestRunner(
         stream=sys.stdout,
         verbosity=(1 if verbosity is None else verbosity),
-        buffer=False if test_name else buffer_outputs,
         resultclass=FullTextTestResult,
     )
 
     if use_concurrent and processes != 1:
         suite = ConcurrentTestSuite(suite,
-                fork_for_tests(processes or multiprocessing.cpu_count(),
-                               buffer=False if test_name else buffer_outputs))
+                fork_for_tests(processes or multiprocessing.cpu_count()))
 
     for module in class_and_module_list:
         if isinstance(module, str) and (not test_name or test_name == module):
diff --git a/tools/patman/tools.py b/tools/u_boot_pylib/tools.py
similarity index 99%
rename from tools/patman/tools.py
rename to tools/u_boot_pylib/tools.py
index 2ac814d..187725b 100644
--- a/tools/patman/tools.py
+++ b/tools/u_boot_pylib/tools.py
@@ -11,8 +11,8 @@
 import tempfile
 import urllib.request
 
-from patman import command
-from patman import tout
+from u_boot_pylib import command
+from u_boot_pylib import tout
 
 # Output directly (generally this is temporary)
 outdir = None
diff --git a/tools/patman/tout.py b/tools/u_boot_pylib/tout.py
similarity index 98%
rename from tools/patman/tout.py
rename to tools/u_boot_pylib/tout.py
index ff0fd92..6bd2806 100644
--- a/tools/patman/tout.py
+++ b/tools/u_boot_pylib/tout.py
@@ -6,7 +6,7 @@
 
 import sys
 
-from patman import terminal
+from u_boot_pylib import terminal
 
 # Output verbosity levels that we support
 ERROR, WARNING, NOTICE, INFO, DETAIL, DEBUG = range(6)
diff --git a/tools/u_boot_pylib/u_boot_pylib b/tools/u_boot_pylib/u_boot_pylib
new file mode 120000
index 0000000..5a427d1
--- /dev/null
+++ b/tools/u_boot_pylib/u_boot_pylib
@@ -0,0 +1 @@
+__main__.py
\ No newline at end of file