Merge tag 'dm-pull-26jan22' of https://source.denx.de/u-boot/custodians/u-boot-dm

acpi refactoring to allow non-x86 use
binman support for bintools (binary tools)
minor tools improvements in preparation for FDT signing
various minor fixes and improvements
diff --git a/MAINTAINERS b/MAINTAINERS
index 9c2d6fe..dcdd99e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -50,6 +50,12 @@
 Maintainers List (try to look for most precise areas first)
 
 		-----------------------------------
+ACPI:
+M:	Simon Glass <sjg@chromium.org>
+S:	Maintained
+F:	cmd/acpi.c
+F:	lib/acpi/
+
 ANDROID AB
 M:	Igor Opaniuk <igor.opaniuk@gmail.com>
 R:	Sam Protsenko <joe.skb7@gmail.com>
diff --git a/Makefile b/Makefile
index ebc99d2..10879f1 100644
--- a/Makefile
+++ b/Makefile
@@ -1320,6 +1320,7 @@
                 --toolpath $(objtree)/tools \
 		$(if $(BINMAN_VERBOSE),-v$(BINMAN_VERBOSE)) \
 		build -u -d u-boot.dtb -O . -m --allow-missing \
+		--fake-ext-blobs \
 		-I . -I $(srctree) -I $(srctree)/board/$(BOARDDIR) \
 		-I arch/$(ARCH)/dts -a of-list=$(CONFIG_OF_LIST) \
 		$(foreach f,$(BINMAN_INDIRS),-I $(f)) \
@@ -1331,7 +1332,6 @@
 		-a tpl-bss-pad=$(if $(CONFIG_TPL_SEPARATE_BSS),,1) \
 		-a spl-dtb=$(CONFIG_SPL_OF_REAL) \
 		-a tpl-dtb=$(CONFIG_TPL_OF_REAL) \
-		$(if $(BINMAN_FAKE_EXT_BLOBS),--fake-ext-blobs) \
 		$(BINMAN_$(@F))
 
 OBJCOPYFLAGS_u-boot.ldr.hex := -I binary -O ihex
diff --git a/arch/Kconfig b/arch/Kconfig
index 343db9e..bea8ead 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -62,6 +62,7 @@
 	select ARCH_SUPPORTS_LTO
 	select CREATE_ARCH_SYMLINK
 	select HAVE_PRIVATE_LIBGCC if !ARM64
+	select SUPPORT_ACPI
 	select SUPPORT_OF_CONTROL
 
 config M68K
@@ -152,6 +153,7 @@
 	select SYS_CACHE_SHIFT_4
 	select IRQ
 	select SUPPORT_EXTENSION_SCAN
+	select SUPPORT_ACPI
 	imply BITREVERSE
 	select BLOBLIST
 	imply LTO
@@ -201,6 +203,7 @@
 	imply CMD_EXTENSION
 	imply KEYBOARD
 	imply PHYSMEM
+	imply GENERATE_ACPI_TABLE
 
 config SH
 	bool "SuperH architecture"
@@ -217,6 +220,7 @@
 	select HAVE_PRIVATE_LIBGCC
 	select OF_CONTROL
 	select PCI
+	select SUPPORT_ACPI
 	select SUPPORT_OF_CONTROL
 	select SYS_CACHE_SHIFT_6
 	select TIMER
@@ -254,7 +258,7 @@
 	imply PCH
 	imply PHYSMEM
 	imply RTC_MC146818
-	imply ACPIGEN if !QEMU
+	imply ACPIGEN if !QEMU && !EFI_APP
 	imply SYSINFO if GENERATE_SMBIOS_TABLE
 	imply SYSINFO_SMBIOS if GENERATE_SMBIOS_TABLE
 	imply TIMESTAMP
diff --git a/arch/sandbox/cpu/eth-raw-os.c b/arch/sandbox/cpu/eth-raw-os.c
index 6a8d809..e59b96b 100644
--- a/arch/sandbox/cpu/eth-raw-os.c
+++ b/arch/sandbox/cpu/eth-raw-os.c
@@ -4,6 +4,8 @@
  * Copyright (c) 2015-2018 Joe Hershberger <joe.hershberger@ni.com>
  */
 
+#define _GNU_SOURCE
+
 #include <asm/eth-raw-os.h>
 #include <errno.h>
 #include <fcntl.h>
diff --git a/arch/sandbox/include/asm/acpi_table.h b/arch/sandbox/include/asm/acpi_table.h
index 921c7f4..ae17f6c 100644
--- a/arch/sandbox/include/asm/acpi_table.h
+++ b/arch/sandbox/include/asm/acpi_table.h
@@ -6,4 +6,6 @@
 #ifndef __ASM_ACPI_TABLE_H__
 #define __ASM_ACPI_TABLE_H__
 
+ulong write_acpi_tables(ulong start);
+
 #endif /* __ASM_ACPI_TABLE_H__ */
diff --git a/arch/sandbox/include/asm/global_data.h b/arch/sandbox/include/asm/global_data.h
index f95ddb0..f4ce72d 100644
--- a/arch/sandbox/include/asm/global_data.h
+++ b/arch/sandbox/include/asm/global_data.h
@@ -13,7 +13,6 @@
 struct arch_global_data {
 	uint8_t		*ram_buf;	/* emulated RAM buffer */
 	void		*text_base;	/* pointer to base of text region */
-	ulong acpi_start;		/* Start address of ACPI tables */
 };
 
 #include <asm-generic/global_data.h>
diff --git a/arch/sandbox/include/asm/tables.h b/arch/sandbox/include/asm/tables.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/arch/sandbox/include/asm/tables.h
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b8d8ee3..7cbfd6c 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -770,15 +770,6 @@
 	  multiprocessing as well as symmetric I/O interrupt handling with
 	  the local APIC and I/O APIC.
 
-config GENERATE_ACPI_TABLE
-	bool "Generate an ACPI (Advanced Configuration and Power Interface) table"
-	select QFW if QEMU
-	help
-	  The Advanced Configuration and Power Interface (ACPI) specification
-	  provides an open standard for device configuration and management
-	  by the operating system. It defines platform-independent interfaces
-	  for configuration and power management monitoring.
-
 config ACPI_GNVS_EXTERNAL
 	bool
 	help
diff --git a/arch/x86/cpu/apollolake/acpi.c b/arch/x86/cpu/apollolake/acpi.c
index fd21c0b..a3dd737 100644
--- a/arch/x86/cpu/apollolake/acpi.c
+++ b/arch/x86/cpu/apollolake/acpi.c
@@ -146,16 +146,25 @@
 	fadt->x_pm_tmr_blk.addrl = IOMAP_ACPI_BASE + PM1_TMR;
 }
 
-void acpi_create_fadt(struct acpi_fadt *fadt, struct acpi_facs *facs,
-		      void *dsdt)
+static int apl_write_fadt(struct acpi_ctx *ctx, const struct acpi_writer *entry)
 {
-	struct acpi_table_header *header = &fadt->header;
+	struct acpi_table_header *header;
+	struct acpi_fadt *fadt;
 
-	acpi_fadt_common(fadt, facs, dsdt);
+	fadt = ctx->current;
+	acpi_fadt_common(fadt, ctx->facs, ctx->dsdt);
 	intel_acpi_fill_fadt(fadt);
 	fill_fadt(fadt);
+	header = &fadt->header;
 	header->checksum = table_compute_checksum(fadt, header->length);
+
+	acpi_add_table(ctx, fadt);
+
+	acpi_inc(ctx, sizeof(struct acpi_fadt));
+
+	return 0;
 }
+ACPI_WRITER(5fadt, "FACP", apl_write_fadt, 0);
 
 int apl_acpi_fill_dmar(struct acpi_ctx *ctx)
 {
diff --git a/arch/x86/cpu/baytrail/acpi.c b/arch/x86/cpu/baytrail/acpi.c
index 07757b8..59db2e2 100644
--- a/arch/x86/cpu/baytrail/acpi.c
+++ b/arch/x86/cpu/baytrail/acpi.c
@@ -15,20 +15,24 @@
 #include <asm/arch/iomap.h>
 #include <dm/uclass-internal.h>
 
-void acpi_create_fadt(struct acpi_fadt *fadt, struct acpi_facs *facs,
-		      void *dsdt)
+static int baytrail_write_fadt(struct acpi_ctx *ctx,
+			       const struct acpi_writer *entry)
 {
-	struct acpi_table_header *header = &(fadt->header);
+	struct acpi_table_header *header;
+	struct acpi_fadt *fadt;
+
+	fadt = ctx->current;
+	header = &fadt->header;
 	u16 pmbase = ACPI_BASE_ADDRESS;
 
-	memset((void *)fadt, 0, sizeof(struct acpi_fadt));
+	memset(fadt, '\0', sizeof(struct acpi_fadt));
 
 	acpi_fill_header(header, "FACP");
 	header->length = sizeof(struct acpi_fadt);
 	header->revision = 4;
 
-	fadt->firmware_ctrl = (u32)facs;
-	fadt->dsdt = (u32)dsdt;
+	fadt->firmware_ctrl = (u32)ctx->facs;
+	fadt->dsdt = (u32)ctx->dsdt;
 	fadt->preferred_pm_profile = ACPI_PM_MOBILE;
 	fadt->sci_int = 9;
 	fadt->smi_cmd = 0;
@@ -75,9 +79,9 @@
 	fadt->reset_reg.addrh = 0;
 	fadt->reset_value = SYS_RST | RST_CPU | FULL_RST;
 
-	fadt->x_firmware_ctl_l = (u32)facs;
+	fadt->x_firmware_ctl_l = (u32)ctx->facs;
 	fadt->x_firmware_ctl_h = 0;
-	fadt->x_dsdt_l = (u32)dsdt;
+	fadt->x_dsdt_l = (u32)ctx->dsdt;
 	fadt->x_dsdt_h = 0;
 
 	fadt->x_pm1a_evt_blk.space_id = ACPI_ADDRESS_SPACE_IO;
@@ -137,7 +141,14 @@
 	fadt->x_gpe1_blk.addrh = 0x0;
 
 	header->checksum = table_compute_checksum(fadt, header->length);
+
+	acpi_add_table(ctx, fadt);
+
+	acpi_inc(ctx, sizeof(struct acpi_fadt));
+
+	return 0;
 }
+ACPI_WRITER(5fadt, "FACP", baytrail_write_fadt, 0);
 
 int acpi_create_gnvs(struct acpi_global_nvs *gnvs)
 {
diff --git a/arch/x86/cpu/quark/acpi.c b/arch/x86/cpu/quark/acpi.c
index 82b776f..9ce9ee3 100644
--- a/arch/x86/cpu/quark/acpi.c
+++ b/arch/x86/cpu/quark/acpi.c
@@ -10,20 +10,24 @@
 #include <asm/arch/global_nvs.h>
 #include <asm/arch/iomap.h>
 
-void acpi_create_fadt(struct acpi_fadt *fadt, struct acpi_facs *facs,
-		      void *dsdt)
+static int quark_write_fadt(struct acpi_ctx *ctx,
+			    const struct acpi_writer *entry)
 {
-	struct acpi_table_header *header = &(fadt->header);
 	u16 pmbase = ACPI_PM1_BASE_ADDRESS;
+	struct acpi_table_header *header;
+	struct acpi_fadt *fadt;
 
-	memset((void *)fadt, 0, sizeof(struct acpi_fadt));
+	fadt = ctx->current;
+	header = &fadt->header;
+
+	memset(fadt, '\0', sizeof(struct acpi_fadt));
 
 	acpi_fill_header(header, "FACP");
 	header->length = sizeof(struct acpi_fadt);
 	header->revision = 4;
 
-	fadt->firmware_ctrl = (u32)facs;
-	fadt->dsdt = (u32)dsdt;
+	fadt->firmware_ctrl = (u32)ctx->facs;
+	fadt->dsdt = (u32)ctx->dsdt;
 	fadt->preferred_pm_profile = ACPI_PM_UNSPECIFIED;
 	fadt->sci_int = 9;
 	fadt->smi_cmd = 0;
@@ -70,9 +74,9 @@
 	fadt->reset_reg.addrh = 0;
 	fadt->reset_value = SYS_RST | RST_CPU | FULL_RST;
 
-	fadt->x_firmware_ctl_l = (u32)facs;
+	fadt->x_firmware_ctl_l = (u32)ctx->facs;
 	fadt->x_firmware_ctl_h = 0;
-	fadt->x_dsdt_l = (u32)dsdt;
+	fadt->x_dsdt_l = (u32)ctx->dsdt;
 	fadt->x_dsdt_h = 0;
 
 	fadt->x_pm1a_evt_blk.space_id = ACPI_ADDRESS_SPACE_IO;
@@ -132,7 +136,14 @@
 	fadt->x_gpe1_blk.addrh = 0x0;
 
 	header->checksum = table_compute_checksum(fadt, header->length);
+
+	acpi_add_table(ctx, fadt);
+
+	acpi_inc(ctx, sizeof(struct acpi_fadt));
+
+	return 0;
 }
+ACPI_WRITER(5fadt, "FACP", quark_write_fadt, 0);
 
 int acpi_create_gnvs(struct acpi_global_nvs *gnvs)
 {
diff --git a/arch/x86/cpu/tangier/acpi.c b/arch/x86/cpu/tangier/acpi.c
index 82f4ce5..12f9289 100644
--- a/arch/x86/cpu/tangier/acpi.c
+++ b/arch/x86/cpu/tangier/acpi.c
@@ -16,19 +16,23 @@
 #include <asm/arch/iomap.h>
 #include <dm/uclass-internal.h>
 
-void acpi_create_fadt(struct acpi_fadt *fadt, struct acpi_facs *facs,
-		      void *dsdt)
+static int tangier_write_fadt(struct acpi_ctx *ctx,
+			      const struct acpi_writer *entry)
 {
-	struct acpi_table_header *header = &(fadt->header);
+	struct acpi_table_header *header;
+	struct acpi_fadt *fadt;
 
-	memset((void *)fadt, 0, sizeof(struct acpi_fadt));
+	fadt = ctx->current;
+	header = &fadt->header;
+
+	memset(fadt, '\0', sizeof(struct acpi_fadt));
 
 	acpi_fill_header(header, "FACP");
 	header->length = sizeof(struct acpi_fadt);
 	header->revision = 6;
 
-	fadt->firmware_ctrl = (u32)facs;
-	fadt->dsdt = (u32)dsdt;
+	fadt->firmware_ctrl = (u32)ctx->facs;
+	fadt->dsdt = (u32)ctx->dsdt;
 	fadt->preferred_pm_profile = ACPI_PM_UNSPECIFIED;
 
 	fadt->iapc_boot_arch = ACPI_FADT_VGA_NOT_PRESENT |
@@ -41,13 +45,20 @@
 
 	fadt->minor_revision = 2;
 
-	fadt->x_firmware_ctl_l = (u32)facs;
+	fadt->x_firmware_ctl_l = (u32)ctx->facs;
 	fadt->x_firmware_ctl_h = 0;
-	fadt->x_dsdt_l = (u32)dsdt;
+	fadt->x_dsdt_l = (u32)ctx->dsdt;
 	fadt->x_dsdt_h = 0;
 
 	header->checksum = table_compute_checksum(fadt, header->length);
+
+	acpi_add_table(ctx, fadt);
+
+	acpi_inc(ctx, sizeof(struct acpi_fadt));
+
+	return 0;
 }
+ACPI_WRITER(5fadt, "FACP", tangier_write_fadt, 0);
 
 u32 acpi_fill_madt(u32 current)
 {
@@ -100,11 +111,14 @@
 	return grp->length;
 }
 
-u32 acpi_fill_csrt(u32 current)
+int acpi_fill_csrt(struct acpi_ctx *ctx)
 {
-	current += acpi_fill_csrt_dma((struct acpi_csrt_group *)current);
+	int size;
 
-	return current;
+	size = acpi_fill_csrt_dma(ctx->current);
+	acpi_inc(ctx, size);
+
+	return 0;
 }
 
 int acpi_create_gnvs(struct acpi_global_nvs *gnvs)
diff --git a/arch/x86/include/asm/acpi_table.h b/arch/x86/include/asm/acpi_table.h
index 55b1a3d..0d07f7c 100644
--- a/arch/x86/include/asm/acpi_table.h
+++ b/arch/x86/include/asm/acpi_table.h
@@ -24,9 +24,6 @@
 
 /* These can be used by the target port */
 
-void acpi_fill_header(struct acpi_table_header *header, char *signature);
-void acpi_create_fadt(struct acpi_fadt *fadt, struct acpi_facs *facs,
-		      void *dsdt);
 int acpi_create_madt_lapics(u32 current);
 int acpi_create_madt_ioapic(struct acpi_madt_ioapic *ioapic, u8 id,
 			    u32 addr, u32 gsi_base);
@@ -38,7 +35,6 @@
 int acpi_create_mcfg_mmconfig(struct acpi_mcfg_mmconfig *mmconfig, u32 base,
 			      u16 seg_nr, u8 start, u8 end);
 u32 acpi_fill_mcfg(u32 current);
-u32 acpi_fill_csrt(u32 current);
 
 /**
  * acpi_write_hpet() - Write out a HPET table
diff --git a/arch/x86/include/asm/global_data.h b/arch/x86/include/asm/global_data.h
index 3e40445..23693f8 100644
--- a/arch/x86/include/asm/global_data.h
+++ b/arch/x86/include/asm/global_data.h
@@ -122,7 +122,6 @@
 	struct fsp_header *fsp_s_hdr;	/* Pointer to FSP-S header */
 #endif
 	void *itss_priv;		/* Private ITSS data pointer */
-	ulong acpi_start;		/* Start address of ACPI tables */
 	ulong coreboot_table;		/* Address of coreboot table */
 };
 
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index e5235b7..a6f2244 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -41,7 +41,7 @@
 obj-$(CONFIG_HAVE_ACPI_RESUME) += acpi_s3.o
 ifndef CONFIG_QEMU
 obj-y += acpigen.o
-obj-$(CONFIG_GENERATE_ACPI_TABLE) += acpi_table.o
+obj-$(CONFIG_$(SPL_TPL_)GENERATE_ACPI_TABLE) += acpi_table.o
 endif
 obj-y	+= tables.o
 ifndef CONFIG_SPL_BUILD
diff --git a/arch/x86/lib/acpi_table.c b/arch/x86/lib/acpi_table.c
index 3f84771..c053434 100644
--- a/arch/x86/lib/acpi_table.c
+++ b/arch/x86/lib/acpi_table.c
@@ -29,30 +29,6 @@
 #include <dm/acpi.h>
 #include <linux/err.h>
 
-/*
- * IASL compiles the dsdt entries and writes the hex values
- * to a C array AmlCode[] (see dsdt.c).
- */
-extern const unsigned char AmlCode[];
-
-/* ACPI RSDP address to be used in boot parameters */
-static ulong acpi_rsdp_addr;
-
-static void acpi_create_facs(struct acpi_facs *facs)
-{
-	memset((void *)facs, 0, sizeof(struct acpi_facs));
-
-	memcpy(facs->signature, "FACS", 4);
-	facs->length = sizeof(struct acpi_facs);
-	facs->hardware_signature = 0;
-	facs->firmware_waking_vector = 0;
-	facs->global_lock = 0;
-	facs->flags = 0;
-	facs->x_firmware_waking_vector_l = 0;
-	facs->x_firmware_waking_vector_h = 0;
-	facs->version = 1;
-}
-
 static int acpi_create_madt_lapic(struct acpi_madt_lapic *lapic,
 				  u8 cpu, u8 apic)
 {
@@ -152,12 +128,16 @@
 	return current;
 }
 
-static void acpi_create_madt(struct acpi_madt *madt)
+int acpi_write_madt(struct acpi_ctx *ctx, const struct acpi_writer *entry)
 {
-	struct acpi_table_header *header = &(madt->header);
-	u32 current = (u32)madt + sizeof(struct acpi_madt);
+	struct acpi_table_header *header;
+	struct acpi_madt *madt;
+	u32 current;
 
-	memset((void *)madt, 0, sizeof(struct acpi_madt));
+	madt = ctx->current;
+
+	memset(madt, '\0', sizeof(struct acpi_madt));
+	header = &madt->header;
 
 	/* Fill out header fields */
 	acpi_fill_header(header, "APIC");
@@ -167,13 +147,19 @@
 	madt->lapic_addr = LAPIC_DEFAULT_BASE;
 	madt->flags = ACPI_MADT_PCAT_COMPAT;
 
+	current = (u32)madt + sizeof(struct acpi_madt);
 	current = acpi_fill_madt(current);
 
 	/* (Re)calculate length and checksum */
 	header->length = current - (u32)madt;
 
 	header->checksum = table_compute_checksum((void *)madt, header->length);
+	acpi_add_table(ctx, madt);
+	acpi_inc(ctx, madt->header.length);
+
+	return 0;
 }
+ACPI_WRITER(5x86, NULL, acpi_write_madt, 0);
 
 int acpi_create_mcfg_mmconfig(struct acpi_mcfg_mmconfig *mmconfig, u32 base,
 			      u16 seg_nr, u8 start, u8 end)
@@ -197,45 +183,29 @@
 	return current;
 }
 
-/* MCFG is defined in the PCI Firmware Specification 3.0 */
-static void acpi_create_mcfg(struct acpi_mcfg *mcfg)
-{
-	struct acpi_table_header *header = &(mcfg->header);
-	u32 current = (u32)mcfg + sizeof(struct acpi_mcfg);
-
-	memset((void *)mcfg, 0, sizeof(struct acpi_mcfg));
-
-	/* Fill out header fields */
-	acpi_fill_header(header, "MCFG");
-	header->length = sizeof(struct acpi_mcfg);
-	header->revision = 1;
-
-	current = acpi_fill_mcfg(current);
-
-	/* (Re)calculate length and checksum */
-	header->length = current - (u32)mcfg;
-	header->checksum = table_compute_checksum((void *)mcfg, header->length);
-}
-
 /**
  * acpi_create_tcpa() - Create a TCPA table
  *
- * @tcpa: Pointer to place to put table
- *
  * Trusted Computing Platform Alliance Capabilities Table
  * TCPA PC Specific Implementation SpecificationTCPA is defined in the PCI
  * Firmware Specification 3.0
  */
-static int acpi_create_tcpa(struct acpi_tcpa *tcpa)
+int acpi_write_tcpa(struct acpi_ctx *ctx, const struct acpi_writer *entry)
 {
-	struct acpi_table_header *header = &tcpa->header;
-	u32 current = (u32)tcpa + sizeof(struct acpi_tcpa);
+	struct acpi_table_header *header;
+	struct acpi_tcpa *tcpa;
+	u32 current;
 	int size = 0x10000;	/* Use this as the default size */
 	void *log;
 	int ret;
 
+	if (!IS_ENABLED(CONFIG_TPM_V1))
+		return -ENOENT;
 	if (!CONFIG_IS_ENABLED(BLOBLIST))
 		return -ENXIO;
+
+	tcpa = ctx->current;
+	header = &tcpa->header;
 	memset(tcpa, '\0', sizeof(struct acpi_tcpa));
 
 	/* Fill out header fields */
@@ -249,14 +219,19 @@
 
 	tcpa->platform_class = 0;
 	tcpa->laml = size;
-	tcpa->lasa = (ulong)log;
+	tcpa->lasa = map_to_sysmem(log);
 
 	/* (Re)calculate length and checksum */
+	current = (u32)tcpa + sizeof(struct acpi_tcpa);
 	header->length = current - (u32)tcpa;
-	header->checksum = table_compute_checksum((void *)tcpa, header->length);
+	header->checksum = table_compute_checksum(tcpa, header->length);
+
+	acpi_inc(ctx, tcpa->header.length);
+	acpi_add_table(ctx, tcpa);
 
 	return 0;
 }
+ACPI_WRITER(5tcpa, "TCPA", acpi_write_tcpa, 0);
 
 static int get_tpm2_log(void **ptrp, int *sizep)
 {
@@ -274,14 +249,21 @@
 	return 0;
 }
 
-static int acpi_create_tpm2(struct acpi_tpm2 *tpm2)
+static int acpi_write_tpm2(struct acpi_ctx *ctx,
+			   const struct acpi_writer *entry)
 {
-	struct acpi_table_header *header = &tpm2->header;
+	struct acpi_table_header *header;
+	struct acpi_tpm2 *tpm2;
 	int tpm2_log_len;
 	void *lasa;
 	int ret;
 
-	memset((void *)tpm2, 0, sizeof(struct acpi_tpm2));
+	if (!IS_ENABLED(CONFIG_TPM_V2))
+		return log_msg_ret("none", -ENOENT);
+
+	tpm2 = ctx->current;
+	header = &tpm2->header;
+	memset(tpm2, '\0', sizeof(struct acpi_tpm2));
 
 	/*
 	 * Some payloads like SeaBIOS depend on log area to use TPM2.
@@ -289,7 +271,7 @@
 	 */
 	ret = get_tpm2_log(&lasa, &tpm2_log_len);
 	if (ret)
-		return ret;
+		return log_msg_ret("log", ret);
 
 	/* Fill out header fields. */
 	acpi_fill_header(header, "TPM2");
@@ -298,7 +280,7 @@
 	header->length = sizeof(struct acpi_tpm2);
 	header->revision = acpi_get_table_revision(ACPITAB_TPM2);
 
-	/* Hard to detect for coreboot. Just set it to 0 */
+	/* Hard to detect for U-Boot. Just set it to 0 */
 	tpm2->platform_class = 0;
 
 	/* Must be set to 0 for FIFO-interface support */
@@ -308,49 +290,24 @@
 
 	/* Fill the log area size and start address fields. */
 	tpm2->laml = tpm2_log_len;
-	tpm2->lasa = (uintptr_t)lasa;
+	tpm2->lasa = map_to_sysmem(lasa);
 
 	/* Calculate checksum. */
-	header->checksum = table_compute_checksum((void *)tpm2, header->length);
+	header->checksum = table_compute_checksum(tpm2, header->length);
+
+	acpi_inc(ctx, tpm2->header.length);
+	acpi_add_table(ctx, tpm2);
 
 	return 0;
 }
+ACPI_WRITER(5tpm2, "TPM2", acpi_write_tpm2, 0);
 
-__weak u32 acpi_fill_csrt(u32 current)
+int acpi_write_spcr(struct acpi_ctx *ctx, const struct acpi_writer *entry)
 {
-	return 0;
-}
-
-static int acpi_create_csrt(struct acpi_csrt *csrt)
-{
-	struct acpi_table_header *header = &(csrt->header);
-	u32 current = (u32)csrt + sizeof(struct acpi_csrt);
-	uint ptr;
-
-	memset((void *)csrt, 0, sizeof(struct acpi_csrt));
-
-	/* Fill out header fields */
-	acpi_fill_header(header, "CSRT");
-	header->length = sizeof(struct acpi_csrt);
-	header->revision = 0;
-
-	ptr = acpi_fill_csrt(current);
-	if (!ptr)
-		return -ENOENT;
-	current = ptr;
-
-	/* (Re)calculate length and checksum */
-	header->length = current - (u32)csrt;
-	header->checksum = table_compute_checksum((void *)csrt, header->length);
-
-	return 0;
-}
-
-static void acpi_create_spcr(struct acpi_spcr *spcr)
-{
-	struct acpi_table_header *header = &(spcr->header);
 	struct serial_device_info serial_info = {0};
 	ulong serial_address, serial_offset;
+	struct acpi_table_header *header;
+	struct acpi_spcr *spcr;
 	struct udevice *dev;
 	uint serial_config;
 	uint serial_width;
@@ -358,7 +315,10 @@
 	int space_id;
 	int ret = -ENODEV;
 
-	memset((void *)spcr, 0, sizeof(struct acpi_spcr));
+	spcr = ctx->current;
+	header = &spcr->header;
+
+	memset(spcr, '\0', sizeof(struct acpi_spcr));
 
 	/* Fill out header fields */
 	acpi_fill_header(header, "SPCR");
@@ -468,225 +428,87 @@
 
 	/* Fix checksum */
 	header->checksum = table_compute_checksum((void *)spcr, header->length);
-}
 
-static int acpi_create_ssdt(struct acpi_ctx *ctx,
-			    struct acpi_table_header *ssdt,
-			    const char *oem_table_id)
-{
-	memset((void *)ssdt, '\0', sizeof(struct acpi_table_header));
-
-	acpi_fill_header(ssdt, "SSDT");
-	ssdt->revision = acpi_get_table_revision(ACPITAB_SSDT);
-	ssdt->aslc_revision = 1;
-	ssdt->length = sizeof(struct acpi_table_header);
-
-	acpi_inc(ctx, sizeof(struct acpi_table_header));
-
-	acpi_fill_ssdt(ctx);
-
-	/* (Re)calculate length and checksum */
-	ssdt->length = ctx->current - (void *)ssdt;
-	ssdt->checksum = table_compute_checksum((void *)ssdt, ssdt->length);
-	log_debug("SSDT at %p, length %x\n", ssdt, ssdt->length);
-
-	/* Drop the table if it is empty */
-	if (ssdt->length == sizeof(struct acpi_table_header)) {
-		ctx->current = ssdt;
-		return -ENOENT;
-	}
-	acpi_align(ctx);
+	acpi_add_table(ctx, spcr);
+	acpi_inc(ctx, spcr->header.length);
 
 	return 0;
 }
+ACPI_WRITER(5spcr, "SPCR", acpi_write_spcr, 0);
 
-/*
- * QEMU's version of write_acpi_tables is defined in drivers/misc/qfw.c
- */
-ulong write_acpi_tables(ulong start_addr)
+int acpi_write_gnvs(struct acpi_ctx *ctx, const struct acpi_writer *entry)
 {
-	const int thl = sizeof(struct acpi_table_header);
-	struct acpi_ctx *ctx;
-	struct acpi_facs *facs;
-	struct acpi_table_header *dsdt;
-	struct acpi_fadt *fadt;
-	struct acpi_table_header *ssdt;
-	struct acpi_mcfg *mcfg;
-	struct acpi_tcpa *tcpa;
-	struct acpi_madt *madt;
-	struct acpi_csrt *csrt;
-	struct acpi_spcr *spcr;
-	void *start;
-	int aml_len;
 	ulong addr;
-	int ret;
-	int i;
-
-	ctx = calloc(1, sizeof(*ctx));
-	if (!ctx)
-		return log_msg_ret("mem", -ENOMEM);
-	gd->acpi_ctx = ctx;
-
-	start = map_sysmem(start_addr, 0);
-
-	debug("ACPI: Writing ACPI tables at %lx\n", start_addr);
-
-	acpi_reset_items();
-	acpi_setup_base_tables(ctx, start);
-
-	debug("ACPI:    * FACS\n");
-	facs = ctx->current;
-	acpi_inc_align(ctx, sizeof(struct acpi_facs));
-
-	acpi_create_facs(facs);
-
-	debug("ACPI:    * DSDT\n");
-	dsdt = ctx->current;
-
-	/* Put the table header first */
-	memcpy(dsdt, &AmlCode, thl);
-	acpi_inc(ctx, thl);
-	log_debug("DSDT starts at %p, hdr ends at %p\n", dsdt, ctx->current);
-
-	/* If the table is not empty, allow devices to inject things */
-	aml_len = dsdt->length - thl;
-	if (aml_len) {
-		void *base = ctx->current;
-
-		acpi_inject_dsdt(ctx);
-		log_debug("Added %x bytes from inject_dsdt, now at %p\n",
-			  ctx->current - base, ctx->current);
-		log_debug("Copy AML code size %x to %p\n", aml_len,
-			  ctx->current);
-		memcpy(ctx->current, AmlCode + thl, aml_len);
-		acpi_inc(ctx, aml_len);
-	}
-
-	dsdt->length = ctx->current - (void *)dsdt;
-	acpi_align(ctx);
-	log_debug("Updated DSDT length to %x, total %x\n", dsdt->length,
-		  ctx->current - (void *)dsdt);
 
 	if (!IS_ENABLED(CONFIG_ACPI_GNVS_EXTERNAL)) {
+		int i;
+
+		/* We need the DSDT to be done */
+		if (!ctx->dsdt)
+			return log_msg_ret("dsdt", -EAGAIN);
+
 		/* Pack GNVS into the ACPI table area */
-		for (i = 0; i < dsdt->length; i++) {
-			u32 *gnvs = (u32 *)((u32)dsdt + i);
+		for (i = 0; i < ctx->dsdt->length; i++) {
+			u32 *gnvs = (u32 *)((u32)ctx->dsdt + i);
 
 			if (*gnvs == ACPI_GNVS_ADDR) {
 				*gnvs = map_to_sysmem(ctx->current);
-				debug("Fix up global NVS in DSDT to %#08x\n",
-				      *gnvs);
+				log_debug("Fix up global NVS in DSDT to %#08x\n",
+					  *gnvs);
 				break;
 			}
 		}
 
 		/*
-		 * Fill in platform-specific global NVS variables. If this fails
-		 * we cannot return the error but this should only happen while
-		 * debugging.
+		 * Recalculate the length and update the DSDT checksum since we
+		 * patched the GNVS address. Set the checksum to zero since it
+		 * is part of the region being checksummed.
 		 */
-		addr = acpi_create_gnvs(ctx->current);
-		if (IS_ERR_VALUE(addr))
-			printf("Error: Gailed to create GNVS\n");
-		acpi_inc_align(ctx, sizeof(struct acpi_global_nvs));
+		ctx->dsdt->checksum = 0;
+		ctx->dsdt->checksum = table_compute_checksum((void *)ctx->dsdt,
+							     ctx->dsdt->length);
 	}
 
-	/*
-	 * Recalculate the length and update the DSDT checksum since we patched
-	 * the GNVS address. Set the checksum to zero since it is part of the
-	 * region being checksummed.
-	 */
-	dsdt->checksum = 0;
-	dsdt->checksum = table_compute_checksum((void *)dsdt, dsdt->length);
-
-	/*
-	 * Fill in platform-specific global NVS variables. If this fails we
-	 * cannot return the error but this should only happen while debugging.
-	 */
+	/* Fill in platform-specific global NVS variables */
 	addr = acpi_create_gnvs(ctx->current);
 	if (IS_ERR_VALUE(addr))
-		printf("Error: Failed to create GNVS\n");
+		return log_msg_ret("gnvs", (int)addr);
 
 	acpi_inc_align(ctx, sizeof(struct acpi_global_nvs));
 
-	debug("ACPI:    * FADT\n");
-	fadt = ctx->current;
-	acpi_inc_align(ctx, sizeof(struct acpi_fadt));
-	acpi_create_fadt(fadt, facs, dsdt);
-	acpi_add_table(ctx, fadt);
+	return 0;
+}
+ACPI_WRITER(4gnvs, "GNVS", acpi_write_gnvs, 0);
 
-	debug("ACPI:     * SSDT\n");
-	ssdt = (struct acpi_table_header *)ctx->current;
-	if (!acpi_create_ssdt(ctx, ssdt, OEM_TABLE_ID))
-		acpi_add_table(ctx, ssdt);
+/* MCFG is defined in the PCI Firmware Specification 3.0 */
+int acpi_write_mcfg(struct acpi_ctx *ctx, const struct acpi_writer *entry)
+{
+	struct acpi_table_header *header;
+	struct acpi_mcfg *mcfg;
+	u32 current;
 
-	debug("ACPI:    * MCFG\n");
 	mcfg = ctx->current;
-	acpi_create_mcfg(mcfg);
-	acpi_inc_align(ctx, mcfg->header.length);
+	header = &mcfg->header;
+
+	current = (u32)mcfg + sizeof(struct acpi_mcfg);
+
+	memset(mcfg, '\0', sizeof(struct acpi_mcfg));
+
+	/* Fill out header fields */
+	acpi_fill_header(header, "MCFG");
+	header->length = sizeof(struct acpi_mcfg);
+	header->revision = 1;
+
+	/* (Re)calculate length and checksum */
+	header->length = current - (u32)mcfg;
+	header->checksum = table_compute_checksum(mcfg, header->length);
+
+	acpi_inc(ctx, mcfg->header.length);
 	acpi_add_table(ctx, mcfg);
 
-	if (IS_ENABLED(CONFIG_TPM_V2)) {
-		struct acpi_tpm2 *tpm2;
-
-		debug("ACPI:    * TPM2\n");
-		tpm2 = (struct acpi_tpm2 *)ctx->current;
-		ret = acpi_create_tpm2(tpm2);
-		if (!ret) {
-			acpi_inc_align(ctx, tpm2->header.length);
-			acpi_add_table(ctx, tpm2);
-		} else {
-			log_warning("TPM2 table creation failed\n");
-		}
-	}
-
-	debug("ACPI:    * MADT\n");
-	madt = ctx->current;
-	acpi_create_madt(madt);
-	acpi_inc_align(ctx, madt->header.length);
-	acpi_add_table(ctx, madt);
-
-	if (IS_ENABLED(CONFIG_TPM_V1)) {
-		debug("ACPI:    * TCPA\n");
-		tcpa = (struct acpi_tcpa *)ctx->current;
-		ret = acpi_create_tcpa(tcpa);
-		if (ret) {
-			log_warning("Failed to create TCPA table (err=%d)\n",
-				    ret);
-		} else {
-			acpi_inc_align(ctx, tcpa->header.length);
-			acpi_add_table(ctx, tcpa);
-		}
-	}
-
-	debug("ACPI:    * CSRT\n");
-	csrt = ctx->current;
-	if (!acpi_create_csrt(csrt)) {
-		acpi_inc_align(ctx, csrt->header.length);
-		acpi_add_table(ctx, csrt);
-	}
-
-	debug("ACPI:    * SPCR\n");
-	spcr = ctx->current;
-	acpi_create_spcr(spcr);
-	acpi_inc_align(ctx, spcr->header.length);
-	acpi_add_table(ctx, spcr);
-
-	acpi_write_dev_tables(ctx);
-
-	addr = map_to_sysmem(ctx->current);
-	debug("current = %lx\n", addr);
-
-	acpi_rsdp_addr = (unsigned long)ctx->rsdp;
-	debug("ACPI: done\n");
-
-	return addr;
+	return 0;
 }
-
-ulong acpi_get_rsdp_addr(void)
-{
-	return acpi_rsdp_addr;
-}
+ACPI_WRITER(5mcfg, "MCFG", acpi_write_mcfg, 0);
 
 /**
  * acpi_write_hpet() - Write out a HPET table
diff --git a/boot/image-fit-sig.c b/boot/image-fit-sig.c
index 495d776..a461d59 100644
--- a/boot/image-fit-sig.c
+++ b/boot/image-fit-sig.c
@@ -65,7 +65,8 @@
 
 static int fit_image_setup_verify(struct image_sign_info *info,
 				  const void *fit, int noffset,
-				  int required_keynode, char **err_msgp)
+				  const void *key_blob, int required_keynode,
+				  char **err_msgp)
 {
 	const char *algo_name;
 	const char *padding_name;
@@ -91,7 +92,7 @@
 	info->checksum = image_get_checksum_algo(algo_name);
 	info->crypto = image_get_crypto_algo(algo_name);
 	info->padding = image_get_padding_algo(padding_name);
-	info->fdt_blob = gd_fdt_blob();
+	info->fdt_blob = key_blob;
 	info->required_keynode = required_keynode;
 	printf("%s:%s", algo_name, info->keyname);
 
@@ -104,7 +105,8 @@
 }
 
 int fit_image_check_sig(const void *fit, int noffset, const void *data,
-			size_t size, int required_keynode, char **err_msgp)
+			size_t size, const void *key_blob, int required_keynode,
+			char **err_msgp)
 {
 	struct image_sign_info info;
 	struct image_region region;
@@ -112,8 +114,8 @@
 	int fit_value_len;
 
 	*err_msgp = NULL;
-	if (fit_image_setup_verify(&info, fit, noffset, required_keynode,
-				   err_msgp))
+	if (fit_image_setup_verify(&info, fit, noffset, key_blob,
+				   required_keynode, err_msgp))
 		return -1;
 
 	if (fit_image_hash_get_value(fit, noffset, &fit_value,
@@ -135,7 +137,7 @@
 
 static int fit_image_verify_sig(const void *fit, int image_noffset,
 				const char *data, size_t size,
-				const void *sig_blob, int sig_offset)
+				const void *key_blob, int key_offset)
 {
 	int noffset;
 	char *err_msg = "";
@@ -156,8 +158,8 @@
 		}
 		if (!strncmp(name, FIT_SIG_NODENAME,
 			     strlen(FIT_SIG_NODENAME))) {
-			ret = fit_image_check_sig(fit, noffset, data,
-						  size, -1, &err_msg);
+			ret = fit_image_check_sig(fit, noffset, data, size,
+						  key_blob, -1, &err_msg);
 			if (ret) {
 				puts("- ");
 			} else {
@@ -184,34 +186,34 @@
 
 int fit_image_verify_required_sigs(const void *fit, int image_noffset,
 				   const char *data, size_t size,
-				   const void *sig_blob, int *no_sigsp)
+				   const void *key_blob, int *no_sigsp)
 {
 	int verify_count = 0;
 	int noffset;
-	int sig_node;
+	int key_node;
 
 	/* Work out what we need to verify */
 	*no_sigsp = 1;
-	sig_node = fdt_subnode_offset(sig_blob, 0, FIT_SIG_NODENAME);
-	if (sig_node < 0) {
+	key_node = fdt_subnode_offset(key_blob, 0, FIT_SIG_NODENAME);
+	if (key_node < 0) {
 		debug("%s: No signature node found: %s\n", __func__,
-		      fdt_strerror(sig_node));
+		      fdt_strerror(key_node));
 		return 0;
 	}
 
-	fdt_for_each_subnode(noffset, sig_blob, sig_node) {
+	fdt_for_each_subnode(noffset, key_blob, key_node) {
 		const char *required;
 		int ret;
 
-		required = fdt_getprop(sig_blob, noffset, FIT_KEY_REQUIRED,
+		required = fdt_getprop(key_blob, noffset, FIT_KEY_REQUIRED,
 				       NULL);
 		if (!required || strcmp(required, "image"))
 			continue;
 		ret = fit_image_verify_sig(fit, image_noffset, data, size,
-					   sig_blob, noffset);
+					   key_blob, noffset);
 		if (ret) {
 			printf("Failed to verify required signature '%s'\n",
-			       fit_get_name(sig_blob, noffset, NULL));
+			       fit_get_name(key_blob, noffset, NULL));
 			return ret;
 		}
 		verify_count++;
@@ -226,20 +228,35 @@
 /**
  * fit_config_check_sig() - Check the signature of a config
  *
+ * Here we are looking at a particular signature that needs verification (here
+ * signature-1):
+ *
+ *	configurations {
+ *		default = "conf-1";
+ *		conf-1 {
+ *			kernel = "kernel-1";
+ *			fdt = "fdt-1";
+ *			signature-1 {
+ *				algo = "sha1,rsa2048";
+ *				value = <...conf 1 signature...>;
+ *			};
+ *		};
+ *
  * @fit: FIT to check
- * @noffset: Offset of configuration node (e.g. /configurations/conf-1)
- * @required_keynode:	Offset in the control FDT of the required key node,
+ * @noffset: Offset of the signature node being checked (e.g.
+ *	 /configurations/conf-1/signature-1)
+ * @conf_noffset: Offset of configuration node (e.g. /configurations/conf-1)
+ * @key_blob: Blob containing the keys to check against
+ * @required_keynode:	Offset in @key_blob of the required key node,
  *			if any. If this is given, then the configuration wil not
  *			pass verification unless that key is used. If this is
  *			-1 then any signature will do.
- * @conf_noffset: Offset of the configuration subnode being checked (e.g.
- *	 /configurations/conf-1/kernel)
  * @err_msgp:		In the event of an error, this will be pointed to a
  *			help error string to display to the user.
  * Return: 0 if all verified ok, <0 on error
  */
-static int fit_config_check_sig(const void *fit, int noffset,
-				int required_keynode, int conf_noffset,
+static int fit_config_check_sig(const void *fit, int noffset, int conf_noffset,
+				const void *key_blob, int required_keynode,
 				char **err_msgp)
 {
 	static char * const exc_prop[] = {
@@ -262,12 +279,12 @@
 	int count;
 
 	config_name = fit_get_name(fit, conf_noffset, NULL);
-	debug("%s: fdt=%p, conf='%s', sig='%s'\n", __func__, gd_fdt_blob(),
+	debug("%s: fdt=%p, conf='%s', sig='%s'\n", __func__, key_blob,
 	      fit_get_name(fit, noffset, NULL),
-	      fit_get_name(gd_fdt_blob(), required_keynode, NULL));
+	      fit_get_name(key_blob, required_keynode, NULL));
 	*err_msgp = NULL;
-	if (fit_image_setup_verify(&info, fit, noffset, required_keynode,
-				   err_msgp))
+	if (fit_image_setup_verify(&info, fit, noffset, key_blob,
+				   required_keynode, err_msgp))
 		return -1;
 
 	if (fit_image_hash_get_value(fit, noffset, &fit_value,
@@ -368,8 +385,35 @@
 	return 0;
 }
 
-static int fit_config_verify_sig(const void *fit, int conf_noffset,
-				 const void *sig_blob, int sig_offset)
+/**
+ * fit_config_verify_key() - Verify that a configuration is signed with a key
+ *
+ * Here we are looking at a particular configuration that needs verification:
+ *
+ *	configurations {
+ *		default = "conf-1";
+ *		conf-1 {
+ *			kernel = "kernel-1";
+ *			fdt = "fdt-1";
+ *			signature-1 {
+ *				algo = "sha1,rsa2048";
+ *				value = <...conf 1 signature...>;
+ *			};
+ *		};
+ *
+ * We must check each of the signature subnodes of conf-1. Hopefully one of them
+ * will match the key at key_offset.
+ *
+ * @fit: FIT to check
+ * @conf_noffset: Offset of the configuration node to check (e.g.
+ *	/configurations/conf-1)
+ * @key_blob: Blob containing the keys to check against
+ * @key_offset: Offset of the key to check within @key_blob
+ * @return 0 if OK, -EPERM if any signatures did not verify, or the
+ *	configuration node has an invalid name
+ */
+static int fit_config_verify_key(const void *fit, int conf_noffset,
+				 const void *key_blob, int key_offset)
 {
 	int noffset;
 	char *err_msg = "No 'signature' subnode found";
@@ -382,8 +426,9 @@
 
 		if (!strncmp(name, FIT_SIG_NODENAME,
 			     strlen(FIT_SIG_NODENAME))) {
-			ret = fit_config_check_sig(fit, noffset, sig_offset,
-						   conf_noffset, &err_msg);
+			ret = fit_config_check_sig(fit, noffset, conf_noffset,
+						   key_blob, key_offset,
+						   &err_msg);
 			if (ret) {
 				puts("- ");
 			} else {
@@ -409,12 +454,25 @@
 	return -EPERM;
 }
 
-static int fit_config_verify_required_sigs(const void *fit, int conf_noffset,
-					   const void *sig_blob)
+/**
+ * fit_config_verify_required_keys() - verify any required signatures for config
+ *
+ * This looks through all the signatures we expect and verifies that at least
+ * all the required ones are valid signatures for the configuration
+ *
+ * @fit: FIT to check
+ * @conf_noffset: Offset of the configuration node to check (e.g.
+ *	/configurations/conf-1)
+ * @key_blob: Blob containing the keys to check against
+ * @return 0 if OK, -EPERM if any signatures did not verify, or the
+ *	configuration node has an invalid name
+ */
+static int fit_config_verify_required_keys(const void *fit, int conf_noffset,
+					   const void *key_blob)
 {
 	const char *name = fit_get_name(fit, conf_noffset, NULL);
 	int noffset;
-	int sig_node;
+	int key_node;
 	int verified = 0;
 	int reqd_sigs = 0;
 	bool reqd_policy_all = true;
@@ -430,38 +488,45 @@
 	}
 
 	/* Work out what we need to verify */
-	sig_node = fdt_subnode_offset(sig_blob, 0, FIT_SIG_NODENAME);
-	if (sig_node < 0) {
+	key_node = fdt_subnode_offset(key_blob, 0, FIT_SIG_NODENAME);
+	if (key_node < 0) {
 		debug("%s: No signature node found: %s\n", __func__,
-		      fdt_strerror(sig_node));
+		      fdt_strerror(key_node));
 		return 0;
 	}
 
 	/* Get required-mode policy property from DTB */
-	reqd_mode = fdt_getprop(sig_blob, sig_node, "required-mode", NULL);
+	reqd_mode = fdt_getprop(key_blob, key_node, "required-mode", NULL);
 	if (reqd_mode && !strcmp(reqd_mode, "any"))
 		reqd_policy_all = false;
 
 	debug("%s: required-mode policy set to '%s'\n", __func__,
 	      reqd_policy_all ? "all" : "any");
 
-	fdt_for_each_subnode(noffset, sig_blob, sig_node) {
+	/*
+	 * The algorithm here is a little convoluted due to how we want it to
+	 * work. Here we work through each of the signature nodes in the
+	 * public-key area. These are in the U-Boot control devicetree. Each
+	 * node was created by signing a configuration, so we check if it is
+	 * 'required' and if so, request that it be verified.
+	 */
+	fdt_for_each_subnode(noffset, key_blob, key_node) {
 		const char *required;
 		int ret;
 
-		required = fdt_getprop(sig_blob, noffset, FIT_KEY_REQUIRED,
+		required = fdt_getprop(key_blob, noffset, FIT_KEY_REQUIRED,
 				       NULL);
 		if (!required || strcmp(required, "conf"))
 			continue;
 
 		reqd_sigs++;
 
-		ret = fit_config_verify_sig(fit, conf_noffset, sig_blob,
+		ret = fit_config_verify_key(fit, conf_noffset, key_blob,
 					    noffset);
 		if (ret) {
 			if (reqd_policy_all) {
 				printf("Failed to verify required signature '%s'\n",
-				       fit_get_name(sig_blob, noffset, NULL));
+				       fit_get_name(key_blob, noffset, NULL));
 				return ret;
 			}
 		} else {
@@ -481,6 +546,6 @@
 
 int fit_config_verify(const void *fit, int conf_noffset)
 {
-	return fit_config_verify_required_sigs(fit, conf_noffset,
+	return fit_config_verify_required_keys(fit, conf_noffset,
 					       gd_fdt_blob());
 }
diff --git a/boot/image-fit.c b/boot/image-fit.c
index 85a6f22..f01cafe 100644
--- a/boot/image-fit.c
+++ b/boot/image-fit.c
@@ -1309,7 +1309,8 @@
 }
 
 int fit_image_verify_with_data(const void *fit, int image_noffset,
-			       const void *data, size_t size)
+			       const void *key_blob, const void *data,
+			       size_t size)
 {
 	int		noffset = 0;
 	char		*err_msg = "";
@@ -1319,7 +1320,7 @@
 	/* Verify all required signatures */
 	if (FIT_IMAGE_ENABLE_VERIFY &&
 	    fit_image_verify_required_sigs(fit, image_noffset, data, size,
-					   gd_fdt_blob(), &verify_all)) {
+					   key_blob, &verify_all)) {
 		err_msg = "Unable to verify required signature";
 		goto error;
 	}
@@ -1342,8 +1343,8 @@
 		} else if (FIT_IMAGE_ENABLE_VERIFY && verify_all &&
 				!strncmp(name, FIT_SIG_NODENAME,
 					strlen(FIT_SIG_NODENAME))) {
-			ret = fit_image_check_sig(fit, noffset, data,
-							size, -1, &err_msg);
+			ret = fit_image_check_sig(fit, noffset, data, size,
+						  gd_fdt_blob(), -1, &err_msg);
 
 			/*
 			 * Show an indication on failure, but do not return
@@ -1406,7 +1407,8 @@
 		goto err;
 	}
 
-	return fit_image_verify_with_data(fit, image_noffset, data, size);
+	return fit_image_verify_with_data(fit, image_noffset, gd_fdt_blob(),
+					  data, size);
 
 err:
 	printf("error!\n%s in '%s' image node\n", err_msg,
diff --git a/cmd/acpi.c b/cmd/acpi.c
index 9c3462b..c543f1e 100644
--- a/cmd/acpi.c
+++ b/cmd/acpi.c
@@ -24,10 +24,10 @@
 {
 	bool has_hdr = memcmp(hdr->signature, "FACS", ACPI_NAME_LEN);
 
-	printf("%.*s %08lx %06x", ACPI_NAME_LEN, hdr->signature,
+	printf("%.*s  %08lx  %5x", ACPI_NAME_LEN, hdr->signature,
 	       (ulong)map_to_sysmem(hdr), hdr->length);
 	if (has_hdr) {
-		printf(" (v%02d %.6s %.8s %x %.4s %x)\n", hdr->revision,
+		printf("  v%02d %.6s %.8s %x %.4s %x\n", hdr->revision,
 		       hdr->oem_id, hdr->oem_table_id, hdr->oem_revision,
 		       hdr->aslc_id, hdr->aslc_revision);
 	} else {
@@ -47,7 +47,7 @@
 	struct acpi_rsdt *rsdt;
 	int len, i, count;
 
-	rsdp = map_sysmem(gd->arch.acpi_start, 0);
+	rsdp = map_sysmem(gd_acpi_start(), 0);
 	if (!rsdp)
 		return NULL;
 	rsdt = map_sysmem(rsdp->rsdt_address, 0);
@@ -129,7 +129,7 @@
 	struct acpi_rsdt *rsdt;
 	struct acpi_xsdt *xsdt;
 
-	printf("RSDP %08lx %06x (v%02d %.6s)\n", (ulong)map_to_sysmem(rsdp),
+	printf("RSDP  %08lx  %5x  v%02d %.6s\n", (ulong)map_to_sysmem(rsdp),
 	       rsdp->length, rsdp->revision, rsdp->oem_id);
 	rsdt = map_sysmem(rsdp->rsdt_address, 0);
 	xsdt = map_sysmem(rsdp->xsdt_address, 0);
@@ -143,12 +143,13 @@
 {
 	struct acpi_rsdp *rsdp;
 
-	rsdp = map_sysmem(gd->arch.acpi_start, 0);
+	rsdp = map_sysmem(gd_acpi_start(), 0);
 	if (!rsdp) {
 		printf("No ACPI tables present\n");
 		return 0;
 	}
-	printf("ACPI tables start at %lx\n", gd->arch.acpi_start);
+	printf("Name      Base   Size  Detail\n");
+	printf("----  --------  -----  ------\n");
 	list_rsdp(rsdp);
 
 	return 0;
diff --git a/common/spl/spl_fit.c b/common/spl/spl_fit.c
index 774072b..1bbf824 100644
--- a/common/spl/spl_fit.c
+++ b/common/spl/spl_fit.c
@@ -321,7 +321,8 @@
 	if (CONFIG_IS_ENABLED(FIT_SIGNATURE)) {
 		printf("## Checking hash(es) for Image %s ... ",
 		       fit_get_name(fit, node, NULL));
-		if (!fit_image_verify_with_data(fit, node, src, length))
+		if (!fit_image_verify_with_data(fit, node, gd_fdt_blob(), src,
+						length))
 			return -EPERM;
 		puts("OK\n");
 	}
diff --git a/configs/cherryhill_defconfig b/configs/cherryhill_defconfig
index 73120b5..7bfbcf6 100644
--- a/configs/cherryhill_defconfig
+++ b/configs/cherryhill_defconfig
@@ -40,6 +40,7 @@
 CONFIG_TFTP_TSIZE=y
 CONFIG_REGMAP=y
 CONFIG_SYSCON=y
+# CONFIG_ACPIGEN is not set
 CONFIG_CPU=y
 CONFIG_RTL8169=y
 CONFIG_SPI=y
diff --git a/configs/chromebook_link64_defconfig b/configs/chromebook_link64_defconfig
index 7059be8..a575437 100644
--- a/configs/chromebook_link64_defconfig
+++ b/configs/chromebook_link64_defconfig
@@ -64,6 +64,7 @@
 CONFIG_TFTP_TSIZE=y
 CONFIG_REGMAP=y
 CONFIG_SYSCON=y
+# CONFIG_ACPIGEN is not set
 CONFIG_CPU=y
 CONFIG_DM_I2C=y
 CONFIG_SYS_I2C_INTEL=y
diff --git a/configs/chromebook_link_defconfig b/configs/chromebook_link_defconfig
index a6efb19..4bb52b6 100644
--- a/configs/chromebook_link_defconfig
+++ b/configs/chromebook_link_defconfig
@@ -55,6 +55,7 @@
 CONFIG_TFTP_TSIZE=y
 CONFIG_REGMAP=y
 CONFIG_SYSCON=y
+# CONFIG_ACPIGEN is not set
 CONFIG_CPU=y
 CONFIG_DM_I2C=y
 CONFIG_SYS_I2C_INTEL=y
diff --git a/configs/chromebook_samus_tpl_defconfig b/configs/chromebook_samus_tpl_defconfig
index e35ef47..6839d8c 100644
--- a/configs/chromebook_samus_tpl_defconfig
+++ b/configs/chromebook_samus_tpl_defconfig
@@ -74,6 +74,7 @@
 # CONFIG_NET is not set
 CONFIG_REGMAP=y
 CONFIG_SYSCON=y
+# CONFIG_ACPIGEN is not set
 CONFIG_CPU=y
 CONFIG_DM_I2C=y
 CONFIG_SYS_I2C_DW=y
diff --git a/configs/coreboot64_defconfig b/configs/coreboot64_defconfig
index 8146569..8b88a08 100644
--- a/configs/coreboot64_defconfig
+++ b/configs/coreboot64_defconfig
@@ -46,6 +46,7 @@
 CONFIG_TFTP_TSIZE=y
 CONFIG_REGMAP=y
 CONFIG_SYSCON=y
+# CONFIG_ACPIGEN is not set
 # CONFIG_PCI_PNP is not set
 CONFIG_SOUND=y
 CONFIG_SOUND_I8254=y
diff --git a/configs/coreboot_defconfig b/configs/coreboot_defconfig
index a12e4cd..621b736 100644
--- a/configs/coreboot_defconfig
+++ b/configs/coreboot_defconfig
@@ -41,6 +41,7 @@
 CONFIG_TFTP_TSIZE=y
 CONFIG_REGMAP=y
 CONFIG_SYSCON=y
+# CONFIG_ACPIGEN is not set
 # CONFIG_PCI_PNP is not set
 CONFIG_SOUND=y
 CONFIG_SOUND_I8254=y
diff --git a/configs/crownbay_defconfig b/configs/crownbay_defconfig
index 650d768..f7dc932 100644
--- a/configs/crownbay_defconfig
+++ b/configs/crownbay_defconfig
@@ -48,6 +48,7 @@
 CONFIG_TFTP_TSIZE=y
 CONFIG_REGMAP=y
 CONFIG_SYSCON=y
+# CONFIG_ACPIGEN is not set
 CONFIG_CPU=y
 CONFIG_E1000=y
 CONFIG_SOUND=y
diff --git a/configs/efi-x86_payload32_defconfig b/configs/efi-x86_payload32_defconfig
index d7be957..ed6fed2 100644
--- a/configs/efi-x86_payload32_defconfig
+++ b/configs/efi-x86_payload32_defconfig
@@ -39,6 +39,7 @@
 CONFIG_TFTP_TSIZE=y
 CONFIG_REGMAP=y
 CONFIG_SYSCON=y
+# CONFIG_ACPIGEN is not set
 # CONFIG_PCI_PNP is not set
 # CONFIG_GZIP is not set
 CONFIG_EFI=y
diff --git a/configs/efi-x86_payload64_defconfig b/configs/efi-x86_payload64_defconfig
index 36dd064..1d4d1f3 100644
--- a/configs/efi-x86_payload64_defconfig
+++ b/configs/efi-x86_payload64_defconfig
@@ -39,6 +39,7 @@
 CONFIG_TFTP_TSIZE=y
 CONFIG_REGMAP=y
 CONFIG_SYSCON=y
+# CONFIG_ACPIGEN is not set
 # CONFIG_PCI_PNP is not set
 # CONFIG_GZIP is not set
 CONFIG_EFI=y
diff --git a/configs/slimbootloader_defconfig b/configs/slimbootloader_defconfig
index fb2091b..1597616 100644
--- a/configs/slimbootloader_defconfig
+++ b/configs/slimbootloader_defconfig
@@ -23,6 +23,7 @@
 CONFIG_TFTP_TSIZE=y
 CONFIG_REGMAP=y
 CONFIG_SYSCON=y
+# CONFIG_ACPIGEN is not set
 # CONFIG_PCI_PNP is not set
 CONFIG_CONSOLE_SCROLL_LINES=5
 # CONFIG_GZIP is not set
diff --git a/configs/tools-only_defconfig b/configs/tools-only_defconfig
index abb0024..b78e509 100644
--- a/configs/tools-only_defconfig
+++ b/configs/tools-only_defconfig
@@ -18,6 +18,7 @@
 CONFIG_OF_CONTROL=y
 CONFIG_SYS_RELOC_GD_ENV_ADDR=y
 # CONFIG_NET is not set
+# CONFIG_GENERATE_ACPI_TABLE is not set
 # CONFIG_ACPIGEN is not set
 CONFIG_AXI=y
 CONFIG_AXI_SANDBOX=y
diff --git a/doc/develop/devicetree/control.rst b/doc/develop/devicetree/control.rst
index 0e6f85d..c71570d 100644
--- a/doc/develop/devicetree/control.rst
+++ b/doc/develop/devicetree/control.rst
@@ -182,6 +182,24 @@
 Only one of these is selected but of course you can #include another one within
 that file, to create a hierarchy of shared files.
 
+
+External .dtsi fragments
+------------------------
+
+Apart from describing the hardware present, U-Boot also uses its
+control dtb for various configuration purposes. For example, the
+public key(s) used for Verified Boot are embedded in a specific format
+in a /signature node.
+
+As mentioned above, the U-Boot build system automatically includes a
+`*-u-boot.dtsi` file, if found, containing U-Boot specific
+quirks. However, some data, such as the mentioned public keys, are not
+appropriate for upstream U-Boot but are better kept and maintained
+outside the U-Boot repository. You can use CONFIG_DEVICE_TREE_INCLUDES
+to specify a list of .dtsi files that will also be included when
+building .dtb files.
+
+
 Relocation, SPL and TPL
 -----------------------
 
diff --git a/doc/develop/package/bintools.rst b/doc/develop/package/bintools.rst
new file mode 120000
index 0000000..7ef3d75
--- /dev/null
+++ b/doc/develop/package/bintools.rst
@@ -0,0 +1 @@
+../../../tools/binman/bintools.rst
\ No newline at end of file
diff --git a/doc/usage/acpi.rst b/doc/usage/acpi.rst
new file mode 100644
index 0000000..14bafc8
--- /dev/null
+++ b/doc/usage/acpi.rst
@@ -0,0 +1,235 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+acpi command
+============
+
+Synopsis
+-------
+
+::
+
+    acpi list
+    acpi items [-d]
+    acpi dump <name>
+
+Description
+-----------
+
+The *acpi* command is used to dump the ACPI tables generated by U-Boot for passing
+to the operating systems.
+
+ACPI tables can be generated by various output functions and even devices can
+output material to include in the Differentiated System Description Table (DSDT)
+and SSDT tables (Secondary System Description Table). U-Boot keeps track of
+which device or table-writer produced each piece of the ACPI tables.
+
+The ACPI tables are stored contiguously in memory.
+
+
+acpi list
+~~~~~~~~~
+
+List the ACPI tables that have been generated. Each table has a 4-character
+table name (e.g. SSDT, FACS) and has a format defined by the
+`ACPI specification`_.
+
+U-Boot does not currently support decoding the tables. Unlike devicetree, ACPI
+tables have no regular schema and also some include bytecode, so decoding the
+tables requires a lot of code.
+
+The table shows the following information:
+
+Name
+    Table name, e.g. `MCFG`
+
+Base
+    Base address of table in memory
+
+Size
+    Size of table in bytes
+
+Detail
+    More information depending on the table type
+
+    Revision
+        Table revision number (two decimal digits)
+
+    OEM ID
+        ID for the Original Equipment Manufacturer. Typically this is "U-BOOT".
+
+    OEM Table ID
+        Table ID for the Original Equipment Manufacturer. Typically this is
+        "U-BOOTBL" (U-Boot bootloader)
+
+    OEM Revision
+        Revision string for the Original Equipment Manufacturer. Typically this
+        is the U-Boot release number, e.g. 20220101 (meaning v2022.01 since the
+        final 01 is not used). For DSDT, this is set by the source code in
+        the parameters of DefinitionBlock().
+
+    ACPI compiler-vendor ID
+        This is normally `INTL` for Intel
+
+    ACPI compiler revision
+        This is the compiler revision. It is set to the version string for the
+        DSDT table but other tables just use the value 0 or 1, since U-Boot does
+        not actually use the compiler in these cases. It generates the code
+        itself.
+
+acpi items
+~~~~~~~~~~
+
+List the ACPI data that was generated, broken down by item. An item is either
+an ACPI table generated by a writer function, or the part of a table that was
+generated by a particular device.
+
+The `-d` flag also shows a binary dump of the table.
+
+The table shows the following information about each item:
+
+Seq
+    Sequence number in hex
+
+Type
+    Type of item
+
+    =====  ============================================================
+    Type   Meaning
+    =====  ============================================================
+    dsdt   Fragment of a DSDT table, as generated by a device
+    ssdt   Fragment of a SSDT table, as generated by a device
+    other  A whole table of a particular type, as generated by a writer
+    =====  ============================================================
+
+Base
+    Base address of table in memory
+
+Size
+    Size of table in bytes
+
+Device / Writer
+    Name of device (for ssdt/dsdt) that wrote this fragment of the table, or
+    name of the registered writer function (otherwise) that wrote the table.
+
+acpi dump
+~~~~~~~~~
+
+Dump a particular ACPI table in binary format. This can be used to read the table
+if you have the specification handy.
+
+
+Example
+-------
+
+::
+
+    => acpi list
+    Name      Base   Size  Detail
+    ----  --------  -----  ------
+    RSDP  79925000     24  v02 U-BOOT
+    RSDT  79925030     48  v01 U-BOOT U-BOOTBL 20220101 INTL 0
+    XSDT  799250e0     6c  v01 U-BOOT U-BOOTBL 20220101 INTL 0
+    FACP  79929570     f4  v04 U-BOOT U-BOOTBL 20220101 INTL 1
+    DSDT  79925280   32ea  v02 U-BOOT U-BOOTBL 20110725 INTL 20180105
+    FACS  79925240     40
+    MCFG  79929670     2c  v01 U-BOOT U-BOOTBL 20220101 INTL 0
+    SPCR  799296a0     50  v02 U-BOOT U-BOOTBL 20220101 INTL 0
+    TPM2  799296f0     4c  v04 U-BOOT U-BOOTBL 20220101 INTL 0
+    APIC  79929740     6c  v02 U-BOOT U-BOOTBL 20220101 INTL 0
+    SSDT  799297b0   1523  v02 U-BOOT U-BOOTBL 20220101 INTL 1
+    NHLT  7992ace0    e60  v05 coral coral 3 INTL 0
+    DBG2  7992db40     61  v00 U-BOOT U-BOOTBL 20220101 INTL 0
+    HPET  7992dbb0     38  v01 U-BOOT U-BOOTBL 20220101 INTL 0
+    => acpi items
+    Seq  Type       Base   Size  Device/Writer
+    ---  -----  --------   ----  -------------
+      0  other  79925000    240  0base
+      1  other  79925240     40  1facs
+      2  dsdt   799252a4     58  board
+      3  dsdt   799252fc     10  lpc
+      4  other  79925280   32f0  3dsdt
+      5  other  79928570   1000  4gnvs
+      6  other  79929570    100  5fact
+      7  other  79929670     30  5mcfg
+      8  other  799296a0     50  5spcr
+      9  other  799296f0     50  5tpm2
+      a  other  79929740     70  5x86
+      b  ssdt   799297d4     fe  maxim-codec
+      c  ssdt   799298d2     28  i2c2@16,0
+      d  ssdt   799298fa    270  da-codec
+      e  ssdt   79929b6a     28  i2c2@16,1
+      f  ssdt   79929b92     28  i2c2@16,2
+     10  ssdt   79929bba     83  tpm@50
+     11  ssdt   79929c3d     28  i2c2@16,3
+     12  ssdt   79929c65    282  elan-touchscreen@10
+     13  ssdt   79929ee7    285  raydium-touchscreen@39
+     14  ssdt   7992a16c     28  i2c2@17,0
+     15  ssdt   7992a194     d8  elan-touchpad@15
+     16  ssdt   7992a26c    163  synaptics-touchpad@2c
+     17  ssdt   7992a3cf     28  i2c2@17,1
+     18  ssdt   7992a3f7    111  wacom-digitizer@9
+     19  ssdt   7992a508     8f  sdmmc@1b,0
+     1a  ssdt   7992a597     4b  wifi
+     1b  ssdt   7992a5e2    1a0  cpu@0
+     1c  ssdt   7992a782    1a0  cpu@1
+     1d  ssdt   7992a922    1a0  cpu@2
+     1e  ssdt   7992aac2    211  cpu@3
+     1f  other  799297b0   1530  6ssdt
+     20  other  7992ace0   2f10  8dev
+    => acpi dump mcfg
+    MCFG @ 79929670
+    00000000: 4d 43 46 47 2c 00 00 00 01 41 55 2d 42 4f 4f 54  MCFG,....AU-BOOT
+    00000010: 55 2d 42 4f 4f 54 42 4c 01 01 22 20 49 4e 54 4c  U-BOOTBL.." INTL
+    00000020: 00 00 00 00 00 00 00 00 00 00 00 00              ............
+    => acpi items -d
+    Seq  Type       Base   Size  Device/Writer
+    ---  -----  --------   ----  -------------
+      0  other  79925000    240  0base
+    00000000: 52 53 44 20 50 54 52 20 9e 55 2d 42 4f 4f 54 02  RSD PTR .U-BOOT.
+    00000010: 30 50 92 79 24 00 00 00 e0 50 92 79 00 00 00 00  0P.y$....P.y....
+    00000020: a1 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    00000030: 52 53 44 54 48 00 00 00 01 8b 55 2d 42 4f 4f 54  RSDTH.....U-BOOT
+    00000040: 55 2d 42 4f 4f 54 42 4c 01 01 22 20 49 4e 54 4c  U-BOOTBL.." INTL
+    00000050: 00 00 00 00 70 95 92 79 70 96 92 79 a0 96 92 79  ....p..yp..y...y
+    00000060: f0 96 92 79 40 97 92 79 b0 97 92 79 e0 ac 92 79  ...y@..y...y...y
+    00000070: 40 db 92 79 b0 db 92 79 00 00 00 00 00 00 00 00  @..y...y........
+    00000080: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    00000090: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    000000a0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    000000b0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    000000c0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    000000d0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    000000e0: 58 53 44 54 6c 00 00 00 01 61 55 2d 42 4f 4f 54  XSDTl....aU-BOOT
+    000000f0: 55 2d 42 4f 4f 54 42 4c 01 01 22 20 49 4e 54 4c  U-BOOTBL.." INTL
+    00000100: 00 00 00 00 70 95 92 79 00 00 00 00 70 96 92 79  ....p..y....p..y
+    00000110: 00 00 00 00 a0 96 92 79 00 00 00 00 f0 96 92 79  .......y.......y
+    00000120: 00 00 00 00 40 97 92 79 00 00 00 00 b0 97 92 79  ....@..y.......y
+    00000130: 00 00 00 00 e0 ac 92 79 00 00 00 00 40 db 92 79  .......y....@..y
+    00000140: 00 00 00 00 b0 db 92 79 00 00 00 00 00 00 00 00  .......y........
+    00000150: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    00000160: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    ...
+
+      1  other  79925240     40  1facs
+    00000000: 46 41 43 53 40 00 00 00 00 00 00 00 00 00 00 00  FACS@...........
+    00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    00000020: 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+    00000030: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
+
+      2  dsdt   799252a4     58  board
+    00000000: 10 87 05 00 5c 00 08 4f 49 50 47 12 8c 04 00 03  ....\..OIPG.....
+    00000010: 12 8b 01 00 04 01 01 0e ff ff ff ff ff ff ff ff  ................
+    00000020: 0d 49 4e 54 33 34 35 32 3a 30 31 00 12 85 01 00  .INT3452:01.....
+    00000030: 04 0a 03 01 0a 23 0d 49 4e 54 33 34 35 32 3a 30  .....#.INT3452:0
+    00000040: 31 00 12 85 01 00 04 0a 04 01 0a 0a 0d 49 4e 54  1............INT
+    00000050: 33 34 35 32 3a 30 30 00                          3452:00.
+
+      3  dsdt   799252fc     10  lpc
+    00000000: 10 8f 00 00 5c 00 08 4e 56 53 41 0c 10 50 93 79  ....\..NVSA..P.y
+
+      4  other  79925280   32f0  3dsdt
+    00000000: 44 53 44 54 ea 32 00 00 02 eb 55 2d 42 4f 4f 54  DSDT.2....U-BOOT
+    00000010: 55 2d 42 4f 4f 54 42 4c 25 07 11 20 49 4e 54 4c  U-BOOTBL%.. INTL
+
+
+.. _`ACPI specification`: https://uefi.org/sites/default/files/resources/ACPI_6_3_final_Jan30.pdf
diff --git a/doc/usage/index.rst b/doc/usage/index.rst
index 33761af..964d761 100644
--- a/doc/usage/index.rst
+++ b/doc/usage/index.rst
@@ -18,6 +18,7 @@
 .. toctree::
    :maxdepth: 1
 
+   acpi
    addrmap
    askenv
    base
diff --git a/drivers/core/acpi.c b/drivers/core/acpi.c
index e988a65..0df58db 100644
--- a/drivers/core/acpi.c
+++ b/drivers/core/acpi.c
@@ -12,6 +12,7 @@
 #include <dm.h>
 #include <log.h>
 #include <malloc.h>
+#include <mapmem.h>
 #include <acpi/acpi_device.h>
 #include <dm/acpi.h>
 #include <dm/device-internal.h>
@@ -19,11 +20,26 @@
 
 #define MAX_ACPI_ITEMS	100
 
-/* Type of table that we collected */
+/**
+ * Type of table that we collected
+ *
+ * @TYPE_NONE: Not yet known
+ * @TYPE_SSDT: Items in the Secondary System Description Table
+ * @TYPE_DSDT: Items in the Differentiated System Description Table
+ * @TYPE_OTHER: Other (whole)
+ */
 enum gen_type_t {
 	TYPE_NONE,
 	TYPE_SSDT,
 	TYPE_DSDT,
+	TYPE_OTHER,
+};
+
+const char *gen_type_str[] = {
+	"-",
+	"ssdt",
+	"dsdt",
+	"other",
 };
 
 /* Type of method to call */
@@ -42,12 +58,16 @@
  *
  * @dev: Device that generated this data
  * @type: Table type it refers to
- * @buf: Buffer containing the data
+ * @writer: Writer that wrote this table
+ * @base: Pointer to base of table in its original location
+ * @buf: Buffer allocated to contain the data (NULL if not allocated)
  * @size: Size of the data in bytes
  */
 struct acpi_item {
 	struct udevice *dev;
+	const struct acpi_writer *writer;
 	enum gen_type_t type;
+	const char *base;
 	char *buf;
 	int size;
 };
@@ -103,16 +123,18 @@
 }
 
 /**
- * acpi_add_item() - Add a new item to the list of data collected
+ * add_item() - Add a new item to the list of data collected
  *
  * @ctx: ACPI context
- * @dev: Device that generated the data
+ * @dev: Device that generated the data, if type != TYPE_OTHER
+ * @writer: Writer entry that generated the data, if type == TYPE_OTHER
  * @type: Table type it refers to
  * @start: The start of the data (the end is obtained from ctx->current)
  * Return: 0 if OK, -ENOSPC if too many items, -ENOMEM if out of memory
  */
-static int acpi_add_item(struct acpi_ctx *ctx, struct udevice *dev,
-			 enum gen_type_t type, void *start)
+static int add_item(struct acpi_ctx *ctx, struct udevice *dev,
+		    const struct acpi_writer *writer, enum gen_type_t type,
+		    void *start)
 {
 	struct acpi_item *item;
 	void *end = ctx->current;
@@ -124,14 +146,18 @@
 
 	item = &acpi_item[item_count];
 	item->dev = dev;
+	item->writer = writer;
 	item->type = type;
 	item->size = end - start;
+	item->base = start;
 	if (!item->size)
 		return 0;
-	item->buf = malloc(item->size);
-	if (!item->buf)
-		return log_msg_ret("mem", -ENOMEM);
-	memcpy(item->buf, start, item->size);
+	if (type != TYPE_OTHER) {
+		item->buf = malloc(item->size);
+		if (!item->buf)
+			return log_msg_ret("mem", -ENOMEM);
+		memcpy(item->buf, start, item->size);
+	}
 	item_count++;
 	log_debug("* %s: Added type %d, %p, size %x\n", dev->name, type, start,
 		  item->size);
@@ -139,17 +165,28 @@
 	return 0;
 }
 
+int acpi_add_other_item(struct acpi_ctx *ctx, const struct acpi_writer *writer,
+			void *start)
+{
+	return add_item(ctx, NULL, writer, TYPE_OTHER, start);
+}
+
 void acpi_dump_items(enum acpi_dump_option option)
 {
 	int i;
 
+	printf("Seq  Type       Base   Size  Device/Writer\n");
+	printf("---  -----  --------   ----  -------------\n");
 	for (i = 0; i < item_count; i++) {
 		struct acpi_item *item = &acpi_item[i];
 
-		printf("dev '%s', type %d, size %x\n", item->dev->name,
-		       item->type, item->size);
+		printf("%3x  %-5s  %8lx  %5x  %s\n", i,
+		       gen_type_str[item->type],
+		       (ulong)map_to_sysmem(item->base), item->size,
+		       item->dev ? item->dev->name : item->writer->name);
 		if (option == ACPI_DUMP_CONTENTS) {
-			print_buffer(0, item->buf, 1, item->size, 0);
+			print_buffer(0, item->buf ? item->buf : item->base, 1,
+				     item->size, 0);
 			printf("\n");
 		}
 	}
@@ -162,7 +199,7 @@
 	for (i = 0; i < item_count; i++) {
 		struct acpi_item *item = &acpi_item[i];
 
-		if (!strcmp(devname, item->dev->name))
+		if (item->dev && !strcmp(devname, item->dev->name))
 			return item;
 	}
 
@@ -266,19 +303,18 @@
 
 	func = acpi_get_method(parent, method);
 	if (func) {
-		void *start = ctx->current;
-
 		log_debug("- method %d, %s %p\n", method, parent->name, func);
 		ret = device_of_to_plat(parent);
 		if (ret)
 			return log_msg_ret("ofdata", ret);
+		ctx->tab_start = ctx->current;
 		ret = func(parent, ctx);
 		if (ret)
 			return log_msg_ret("func", ret);
 
 		/* Add the item to the internal list */
 		if (type != TYPE_NONE) {
-			ret = acpi_add_item(ctx, parent, type, start);
+			ret = add_item(ctx, parent, NULL, type, ctx->tab_start);
 			if (ret)
 				return log_msg_ret("add", ret);
 		}
diff --git a/drivers/misc/qfw.c b/drivers/misc/qfw.c
index ea00be8..677841a 100644
--- a/drivers/misc/qfw.c
+++ b/drivers/misc/qfw.c
@@ -14,11 +14,12 @@
 #include <qfw.h>
 #include <dm.h>
 #include <misc.h>
+#include <tables_csum.h>
 #ifdef CONFIG_GENERATE_ACPI_TABLE
 #include <asm/tables.h>
 #endif
 
-#ifdef CONFIG_GENERATE_ACPI_TABLE
+#if defined(CONFIG_GENERATE_ACPI_TABLE) && !defined(CONFIG_SANDBOX)
 /*
  * This function allocates memory for ACPI tables
  *
diff --git a/drivers/serial/sandbox.c b/drivers/serial/sandbox.c
index dbbcea5..0b1756f 100644
--- a/drivers/serial/sandbox.c
+++ b/drivers/serial/sandbox.c
@@ -97,7 +97,7 @@
 		return 0;
 
 	os_usleep(100);
-	if (!IS_ENABLED(CONFIG_SPL_BUILD))
+	if (IS_ENABLED(CONFIG_DM_VIDEO) && !IS_ENABLED(CONFIG_SPL_BUILD))
 		video_sync_all();
 	avail = membuff_putraw(&priv->buf, 100, false, &data);
 	if (!avail)
diff --git a/drivers/usb/host/usb-uclass.c b/drivers/usb/host/usb-uclass.c
index fd39c33..27e2fc6 100644
--- a/drivers/usb/host/usb-uclass.c
+++ b/drivers/usb/host/usb-uclass.c
@@ -396,7 +396,7 @@
 	int ret;
 
 	/* Find the old device and remove it */
-	ret = uclass_find_device_by_seq(UCLASS_USB, 0, &dev);
+	ret = uclass_find_first_device(UCLASS_USB, &dev);
 	if (ret)
 		return ret;
 	ret = device_remove(dev, DM_REMOVE_NORMAL);
@@ -419,7 +419,7 @@
 	int ret;
 
 	/* Find the old device and remove it */
-	ret = uclass_find_device_by_seq(UCLASS_USB, 0, &dev);
+	ret = uclass_find_first_device(UCLASS_USB, &dev);
 	if (ret)
 		return ret;
 	ret = device_remove(dev, DM_REMOVE_NORMAL);
diff --git a/dts/Kconfig b/dts/Kconfig
index fb7df53..4de1a70 100644
--- a/dts/Kconfig
+++ b/dts/Kconfig
@@ -147,6 +147,15 @@
 	  It can be overridden from the command line:
 	  $ make DEVICE_TREE=<device-tree-name>
 
+config DEVICE_TREE_INCLUDES
	string "Extra .dtsi files to include when building DT control"
+	depends on OF_CONTROL
+	help
+	  U-Boot's control .dtb is usually built from an in-tree .dts
+	  file, plus (if available) an in-tree U-Boot-specific .dtsi
+	  file. This option specifies a space-separated list of extra
+	  .dtsi files that will also be used.
+
 config OF_LIST
 	string "List of device tree files to include for DT control"
 	depends on SPL_LOAD_FIT || MULTI_DTB_FIT
diff --git a/include/acpi/acpi_table.h b/include/acpi/acpi_table.h
index dbfea3b..c98c874 100644
--- a/include/acpi/acpi_table.h
+++ b/include/acpi/acpi_table.h
@@ -162,6 +162,9 @@
 #define ACPI_FADT_HW_REDUCED_ACPI	BIT(20)
 #define ACPI_FADT_LOW_PWR_IDLE_S0	BIT(21)
 
+/* ARM boot flags */
+#define ACPI_ARM_PSCI_COMPLIANT		BIT(0)
+
 enum acpi_address_space_type {
 	ACPI_ADDRESS_SPACE_MEMORY = 0,	/* System memory */
 	ACPI_ADDRESS_SPACE_IO,		/* System I/O */
@@ -237,6 +240,9 @@
 	struct acpi_gen_regaddr x_pm_tmr_blk;
 	struct acpi_gen_regaddr x_gpe0_blk;
 	struct acpi_gen_regaddr x_gpe1_blk;
+	struct acpi_gen_regaddr sleep_control_reg;
+	struct acpi_gen_regaddr sleep_status_reg;
+	u64 hyp_vendor_id;
 };
 
 /* FADT TABLE Revision values - note these do not match the ACPI revision */
@@ -302,6 +308,8 @@
 	ACPI_APIC_PLATFORM_IRQ_SRC,	/* Platform interrupt sources */
 	ACPI_APIC_LX2APIC,		/* Processor local x2APIC */
 	ACPI_APIC_LX2APIC_NMI,		/* Local x2APIC NMI */
+	ACPI_APIC_GICC,			/* Generic Interrupt Ctlr CPU i/f */
+	ACPI_APIC_GICD			/* Generic Interrupt Ctlr Distributor */
 };
 
 /* MADT: Processor Local APIC Structure */
@@ -345,6 +353,57 @@
 	u8 lint;		/* Local APIC LINT# */
 };
 
+/* flags for acpi_madr_gicc flags word */
+enum {
+	ACPI_MADRF_ENABLED	= BIT(0),
+	ACPI_MADRF_PERF		= BIT(1),
+	ACPI_MADRF_VGIC		= BIT(2),
+};
+
+/**
+ * struct __packed acpi_madr_gicc - GIC CPU interface (type 0xb)
+ *
+ * This holds information about the Generic Interrupt Controller (GIC) CPU
+ * interface. See ACPI Spec v6.3 section 5.2.12.14
+ */
+struct __packed acpi_madr_gicc {
+	u8 type;
+	u8 length;
+	u16 reserved;
+	u32 cpu_if_num;
+	u32 processor_id;
+	u32 flags;
+	u32 parking_proto;
+	u32 perf_gsiv;
+	u64 parked_addr;
+	u64 phys_base;
+	u64 gicv;
+	u64 gich;
+	u32 vgic_maint_irq;
+	u64 gicr_base;
+	u64 mpidr;
+	u8 efficiency;
+	u8 reserved2;
+	u16 spi_overflow_irq;
+};
+
+/**
+ * struct __packed acpi_madr_gicd - GIC distributor (type 0xc)
+ *
+ * This holds information about the Generic Interrupt Controller (GIC)
+ * Distributor interface. See ACPI Spec v6.3 section 5.2.12.15
+ */
+struct __packed acpi_madr_gicd {
+	u8 type;
+	u8 length;
+	u16 reserved;
+	u32 gic_id;
+	u64 phys_base;
+	u32 reserved2;
+	u8 gic_version;
+	u8 reserved3[3];
+};
+
 /* MCFG (PCI Express MMIO config space BAR description table) */
 struct acpi_mcfg {
 	struct acpi_table_header header;
@@ -371,6 +430,19 @@
 	struct acpi_table_header header;
 };
 
+/**
+ * struct acpi_csrt_group - header for a group within the CSRT
+ *
+ * The CSRT consists of one or more groups and this is the header for each
+ *
+ * See Core System Resources Table (CSRT), March 13, 2017, Microsoft Corporation
+ * for details
+ *
+ * https://uefi.org/sites/default/files/resources/CSRT%20v2.pdf
+ *
+ * @shared_info_length indicates the number of shared-info bytes following this
+ * struct (which may be 0)
+ */
 struct acpi_csrt_group {
 	u32 length;
 	u32 vendor_id;
@@ -382,6 +454,25 @@
 	u32 shared_info_length;
 };
 
+/**
+ * struct acpi_csrt_descriptor - describes the information that follows
+ *
+ * See the spec as above for details
+ */
+struct acpi_csrt_descriptor {
+	u32 length;
+	u16 type;
+	u16 subtype;
+	u32 uid;
+};
+
+/**
+ * struct acpi_csrt_shared_info - shared info for Intel tangier
+ *
+ * This provides the shared info for this particular board. Notes that the CSRT
+ * does not describe the format of data, so this format may not be used by any
+ * other board.
+ */
 struct acpi_csrt_shared_info {
 	u16 major_version;
 	u16 minor_version;
@@ -559,6 +650,120 @@
 	u32 reserved2;
 };
 
+/**
+ * struct acpi_gtdt - Generic Timer Description Table (GTDT)
+ *
+ * See ACPI Spec v6.3 section 5.2.24 for details
+ */
+struct __packed acpi_gtdt {
+	struct acpi_table_header header;
+	u64 cnt_ctrl_base;
+	u32 reserved0;
+	u32 sec_el1_gsiv;
+	u32 sec_el1_flags;
+	u32 el1_gsiv;
+	u32 el1_flags;
+	u32 virt_el1_gsiv;
+	u32 virt_el1_flags;
+	u32 el2_gsiv;
+	u32 el2_flags;
+	u64 cnt_read_base;
+	u32 plat_timer_count;
+	u32 plat_timer_offset;
+	u32 virt_el2_gsiv;
+	u32 virt_el2_flags;
+};
+
+/**
+ * struct acpi_bgrt -  Boot Graphics Resource Table (BGRT)
+ *
+ * Optional table that provides a mechanism to indicate that an image was drawn
+ * on the screen during boot, and some information about the image.
+ *
+ * See ACPI Spec v6.3 section 5.2.22 for details
+ */
+struct __packed acpi_bgrt {
+	struct acpi_table_header header;
+	u16 version;
+	u8 status;
+	u8 image_type;
+	u64 addr;
+	u32 offset_x;
+	u32 offset_y;
+};
+
+/* Types for PPTT */
+#define ACPI_PPTT_TYPE_PROC		0
+#define ACPI_PPTT_TYPE_CACHE		1
+
+/* Flags for PPTT */
+#define ACPI_PPTT_PHYSICAL_PACKAGE	BIT(0)
+#define ACPI_PPTT_PROC_ID_VALID		BIT(1)
+#define ACPI_PPTT_PROC_IS_THREAD	BIT(2)
+#define ACPI_PPTT_NODE_IS_LEAF		BIT(3)
+#define ACPI_PPTT_CHILDREN_IDENTICAL	BIT(4)
+
+/**
+ * struct acpi_pptt_header - Processor Properties Topology Table (PPTT) header
+ *
+ * Describes the topological structure of processors and their shared resources,
+ * such as caches.
+ *
+ * See ACPI Spec v6.3 section 5.2.29 for details
+ */
+struct __packed acpi_pptt_header {
+	u8 type;	/* ACPI_PPTT_TYPE_... */
+	u8 length;
+	u16 reserved;
+};
+
+/**
+ * struct acpi_pptt_proc - a processor as described by PPTT
+ */
+struct __packed acpi_pptt_proc {
+	struct acpi_pptt_header hdr;
+	u32 flags;
+	u32 parent;
+	u32 proc_id;
+	u32 num_resources;
+};
+
+/* Cache flags for acpi_pptt_cache */
+#define ACPI_PPTT_SIZE_VALID		BIT(0)
+#define ACPI_PPTT_SETS_VALID		BIT(1)
+#define ACPI_PPTT_ASSOC_VALID		BIT(2)
+#define ACPI_PPTT_ALLOC_TYPE_VALID	BIT(3)
+#define ACPI_PPTT_CACHE_TYPE_VALID	BIT(4)
+#define ACPI_PPTT_WRITE_POLICY_VALID	BIT(5)
+#define ACPI_PPTT_LINE_SIZE_VALID	BIT(6)
+
+#define ACPI_PPTT_ALL_VALID		0x7f
+#define ACPI_PPTT_ALL_BUT_WRITE_POL	0x5f
+
+#define ACPI_PPTT_READ_ALLOC		BIT(0)
+#define ACPI_PPTT_WRITE_ALLOC		BIT(1)
+#define ACPI_PPTT_CACHE_TYPE_SHIFT	2
+#define ACPI_PPTT_CACHE_TYPE_MASK	(3 << ACPI_PPTT_CACHE_TYPE_SHIFT)
+#define ACPI_PPTT_CACHE_TYPE_DATA	0
+#define ACPI_PPTT_CACHE_TYPE_INSTR	1
+#define ACPI_PPTT_CACHE_TYPE_UNIFIED	2
+/* Write policy: bit 4 (0 = write-back, 1 = write-through) */
+#define ACPI_PPTT_WRITE_THROUGH		BIT(4)
+
+/**
+ * struct acpi_pptt_cache - a cache as described by PPTT
+ */
+struct __packed acpi_pptt_cache {
+	struct acpi_pptt_header hdr;
+	u32 flags;
+	u32 next_cache_level;
+	u32 size;
+	u32 sets;
+	u8 assoc;
+	u8 attributes;
+	u16 line_size;
+};
+
 /* Tables defined/reserved by ACPI and generated by U-Boot */
 enum acpi_tables {
 	ACPITAB_BERT,
@@ -679,16 +884,6 @@
 int acpi_add_table(struct acpi_ctx *ctx, void *table);
 
 /**
- * acpi_setup_base_tables() - Set up context along with RSDP, RSDT and XSDT
- *
- * Set up the context with the given start position. Some basic tables are
- * always needed, so set them up as well.
- *
- * @ctx: Context to set up
- */
-void acpi_setup_base_tables(struct acpi_ctx *ctx, void *start);
-
-/**
  * acpi_write_rsdp() - Write out an RSDP indicating where the ACPI tables are
  *
  * @rsdp: Address to write RSDP
@@ -698,6 +893,26 @@
 void acpi_write_rsdp(struct acpi_rsdp *rsdp, struct acpi_rsdt *rsdt,
 		     struct acpi_xsdt *xsdt);
 
+/**
+ * acpi_fill_header() - Set up a table header
+ *
+ * @header: Pointer to header to set up
+ * @signature: 4-character signature to use (e.g. "FACS")
+ */
+void acpi_fill_header(struct acpi_table_header *header, char *signature);
+
+/**
+ * acpi_fill_csrt() - Fill out the body of the CSRT
+ *
+ * This should write the contents of the Core System Resource Table (CSRT)
+ * to the context. The header (struct acpi_table_header) has already been
+ * written.
+ *
+ * @ctx: ACPI context to write to
+ * @return 0 if OK, -ve on error
+ */
+int acpi_fill_csrt(struct acpi_ctx *ctx);
+
 #endif /* !__ACPI__*/
 
 #include <asm/acpi_table.h>
diff --git a/include/asm-generic/global_data.h b/include/asm-generic/global_data.h
index 104282b..c2f8fad 100644
--- a/include/asm-generic/global_data.h
+++ b/include/asm-generic/global_data.h
@@ -456,6 +456,10 @@
 	 * @acpi_ctx: ACPI context pointer
 	 */
 	struct acpi_ctx *acpi_ctx;
+	/**
+	 * @acpi_start: Start address of ACPI tables
+	 */
+	ulong acpi_start;
 #endif
 #if CONFIG_IS_ENABLED(GENERATE_SMBIOS_TABLE)
 	/**
@@ -512,8 +516,12 @@
 
 #ifdef CONFIG_GENERATE_ACPI_TABLE
 #define gd_acpi_ctx()		gd->acpi_ctx
+#define gd_acpi_start()		gd->acpi_start
+#define gd_set_acpi_start(addr)	gd->acpi_start = addr
 #else
 #define gd_acpi_ctx()		NULL
+#define gd_acpi_start()		0UL
+#define gd_set_acpi_start(addr)
 #endif
 
 #if CONFIG_IS_ENABLED(MULTI_DTB_FIT)
diff --git a/include/dm/acpi.h b/include/dm/acpi.h
index 0fa239e..3adfe21 100644
--- a/include/dm/acpi.h
+++ b/include/dm/acpi.h
@@ -27,6 +27,8 @@
 
 #if !defined(__ACPI__)
 
+#include <linker_lists.h>
+
 struct nhlt;
 struct udevice;
 
@@ -43,10 +45,15 @@
  *
  * @base: Base address of ACPI tables
  * @current: Current address for writing
+ * @tab_start: Address of start of the table being written. This is set up
+ * before the writer or driver method is called. It must not be changed by the
+ * method
  * @rsdp: Pointer to the Root System Description Pointer, typically used when
  *	adding a new table. The RSDP holds pointers to the RSDT and XSDT.
  * @rsdt: Pointer to the Root System Description Table
  * @xsdt: Pointer to the Extended System Description Table
+ * @facs: Pointer to the Firmware ACPI Control Structure
+ * @dsdt: Pointer to the Differentiated System Description Table
  * @nhlt: Intel Non-High-Definition-Audio Link Table (NHLT) pointer, used to
  *	build up information that audio codecs need to provide in the NHLT ACPI
  *	table
@@ -56,15 +63,67 @@
 struct acpi_ctx {
 	void *base;
 	void *current;
+	void *tab_start;
 	struct acpi_rsdp *rsdp;
 	struct acpi_rsdt *rsdt;
 	struct acpi_xsdt *xsdt;
+	struct acpi_facs *facs;
+	struct acpi_table_header *dsdt;
 	struct nhlt *nhlt;
 	char *len_stack[ACPIGEN_LENSTACK_SIZE];
 	int ltop;
 };
 
 /**
+ * enum acpi_writer_flags_t - flags to use for the ACPI writers
+ *
+ * ACPIWF_ALIGN64 - align to 64 bytes after writing this one (default is 16)
+ */
+enum acpi_writer_flags_t {
+	ACPIWF_ALIGN64	= 1 << 0,
+};
+
+struct acpi_writer;
+
+/**
+ * acpi_writer_func() - Function that can write an ACPI table
+ *
+ * @ctx: ACPI context to use for writing
+ * @entry: Linker-list entry for this writer
+ * @return 0 if OK, -ve on error
+ */
+typedef int (*acpi_writer_func)(struct acpi_ctx *ctx,
+				const struct acpi_writer *entry);
+
+/**
+ * struct acpi_writer - an ACPI table that can be written
+ *
+ * @name: Name of the writer
+ * @table: Table name that is generated (e.g. "DSDT")
+ * @h_write: Writer function
+ * @flags: Flags for the writer (enum acpi_writer_flags_t)
+ */
+struct acpi_writer {
+	const char *name;
+	const char *table;
+	acpi_writer_func h_write;
+	int flags;
+};
+
+/* Declare a new ACPI-table writer */
+#define ACPI_WRITER(_name, _table, _write, _flags)					\
+	ll_entry_declare(struct acpi_writer, _name, acpi_writer) = {	\
+		.name = #_name,						\
+		.table = _table,					\
+		.h_write = _write,					\
+		.flags = _flags,					\
+	}
+
+/* Get a pointer to a given ACPI-table writer */
+#define ACPI_WRITER_GET(_name)						\
+	ll_entry_get(struct acpi_writer, _name, acpi_writer)
+
+/**
  * struct acpi_ops - ACPI operations supported by driver model
  */
 struct acpi_ops {
@@ -205,6 +263,19 @@
 int acpi_setup_nhlt(struct acpi_ctx *ctx, struct nhlt *nhlt);
 
 /**
+ * acpi_add_other_item() - Add a new table to the list of ACPI tables
+ *
+ * This adds an entry of type ACPIT_TYPE_OTHER
+ *
+ * @ctx: ACPI context
+ * @writer: Writer entry that generated the data
+ * @start: The start of the data (the end is obtained from ctx->current)
+ * @return 0 if OK, -ENOSPC if too many items, -ENOMEM if out of memory
+ */
+int acpi_add_other_item(struct acpi_ctx *ctx, const struct acpi_writer *writer,
+			void *start);
+
+/**
  * acpi_dump_items() - Dump out the collected ACPI items
  *
  * This lists the ACPI DSDT and SSDT items generated by the various U-Boot
@@ -236,6 +308,31 @@
  */
 void acpi_reset_items(void);
 
+/**
+ * acpi_write_one() - Call a single ACPI writer entry
+ *
+ * This handles aligning the context afterwards, if the entry flags indicate
+ * that.
+ *
+ * @ctx: ACPI context to use
+ * @entry: Entry to call
+ * @return 0 if OK, -ENOENT if this writer produced an empty entry, other -ve
+ * value on error
+ */
+int acpi_write_one(struct acpi_ctx *ctx, const struct acpi_writer *entry);
+
+/**
+ * acpi_setup_ctx() - Set up a new ACPI context
+ *
+ * This zeros the context and sets up the base and current pointers, ensuring
+ * that they are aligned. Then it writes the acpi_start and acpi_ctx values in
+ * global_data
+ *
+ * @ctx: ACPI context to set up
+ * @start: Start address for ACPI table
+ */
+void acpi_setup_ctx(struct acpi_ctx *ctx, ulong start);
+
 #endif /* __ACPI__ */
 
 #endif
diff --git a/include/fdtdec.h b/include/fdtdec.h
index 9a7b6a7..4b0b505 100644
--- a/include/fdtdec.h
+++ b/include/fdtdec.h
@@ -24,16 +24,19 @@
 typedef phys_addr_t fdt_addr_t;
 typedef phys_size_t fdt_size_t;
 
-#define FDT_ADDR_T_NONE (-1U)
 #define FDT_SIZE_T_NONE (-1U)
 
 #ifdef CONFIG_PHYS_64BIT
+#define FDT_ADDR_T_NONE ((ulong)(-1))
+
 #define fdt_addr_to_cpu(reg) be64_to_cpu(reg)
 #define fdt_size_to_cpu(reg) be64_to_cpu(reg)
 #define cpu_to_fdt_addr(reg) cpu_to_be64(reg)
 #define cpu_to_fdt_size(reg) cpu_to_be64(reg)
 typedef fdt64_t fdt_val_t;
 #else
+#define FDT_ADDR_T_NONE (-1U)
+
 #define fdt_addr_to_cpu(reg) be32_to_cpu(reg)
 #define fdt_size_to_cpu(reg) be32_to_cpu(reg)
 #define cpu_to_fdt_addr(reg) cpu_to_be32(reg)
diff --git a/include/image.h b/include/image.h
index fe13562..97e5f2e 100644
--- a/include/image.h
+++ b/include/image.h
@@ -1021,17 +1021,37 @@
 		    const char *comment, int require_keys,
 		    const char *engine_id, const char *cmdname);
 
+#define NODE_MAX_NAME_LEN	80
+
+/**
+ * struct image_summary  - Provides information about signing info added
+ *
+ * @sig_offset: Offset of the node in the blob devicetree where the signature
+ *	was written
+ * @sig_path: Path to @sig_offset
+ * @keydest_offset: Offset of the node in the keydest devicetree where the
+ *	public key was written (-1 if none)
+ * @keydest_path: Path to @keydest_offset
+ */
+struct image_summary {
+	int sig_offset;
+	char sig_path[NODE_MAX_NAME_LEN];
+	int keydest_offset;
+	char keydest_path[NODE_MAX_NAME_LEN];
+};
+
 /**
  * fit_add_verification_data() - add verification data to FIT image nodes
  *
  * @keydir:	Directory containing keys
- * @kwydest:	FDT blob to write public key information to
+ * @keydest:	FDT blob to write public key information to (NULL if none)
  * @fit:	Pointer to the FIT format image header
  * @comment:	Comment to add to signature nodes
  * @require_keys: Mark all keys as 'required'
  * @engine_id:	Engine to use for signing
  * @cmdname:	Command name used when reporting errors
  * @algo_name:	Algorithm name, or NULL if to be read from FIT
+ * @summary:	Returns information about what data was written
  *
  * Adds hash values for all component images in the FIT blob.
  * Hashes are calculated for all component images which have hash subnodes
@@ -1046,10 +1066,22 @@
 int fit_add_verification_data(const char *keydir, const char *keyfile,
 			      void *keydest, void *fit, const char *comment,
 			      int require_keys, const char *engine_id,
-			      const char *cmdname, const char *algo_name);
+			      const char *cmdname, const char *algo_name,
+			      struct image_summary *summary);
 
+/**
+ * fit_image_verify_with_data() - Verify an image with given data
+ *
+ * @fit:	Pointer to the FIT format image header
+ * @image_offset: Offset in @fit of image to verify
+ * @key_blob:	FDT containing public keys
+ * @data:	Image data to verify
+ * @size:	Size of image data
+ */
 int fit_image_verify_with_data(const void *fit, int image_noffset,
-			       const void *data, size_t size);
+			       const void *key_blob, const void *data,
+			       size_t size);
+
 int fit_image_verify(const void *fit, int noffset);
 int fit_config_verify(const void *fit, int conf_noffset);
 int fit_all_image_verify(const void *fit);
@@ -1232,7 +1264,8 @@
 	 *
 	 * @info:	Specifies key and FIT information
 	 * @keydest:	Destination FDT blob for public key data
-	 * @return: 0, on success, -ve on error
+	 * @return: node offset within the FDT blob where the data was written,
+	 *	or -ve on error
 	 */
 	int (*add_verify_data)(struct image_sign_info *info, void *keydest);
 
@@ -1297,7 +1330,7 @@
  * @image_noffset:	Offset of image node to check
  * @data:		Image data to check
  * @size:		Size of image data
- * @sig_blob:		FDT containing public keys
+ * @key_blob:		FDT containing public keys
  * @no_sigsp:		Returns 1 if no signatures were required, and
  *			therefore nothing was checked. The caller may wish
  *			to fall back to other mechanisms, or refuse to
@@ -1305,7 +1338,7 @@
  * Return: 0 if all verified ok, <0 on error
  */
 int fit_image_verify_required_sigs(const void *fit, int image_noffset,
-		const char *data, size_t size, const void *sig_blob,
+		const char *data, size_t size, const void *key_blob,
 		int *no_sigsp);
 
 /**
@@ -1315,7 +1348,8 @@
  * @noffset:		Offset of signature node to check
  * @data:		Image data to check
  * @size:		Size of image data
- * @required_keynode:	Offset in the control FDT of the required key node,
+ * @keyblob:		Key blob to check (typically the control FDT)
+ * @required_keynode:	Offset in the keyblob of the required key node,
  *			if any. If this is given, then the image wil not
  *			pass verification unless that key is used. If this is
  *			-1 then any signature will do.
@@ -1324,7 +1358,8 @@
  * Return: 0 if all verified ok, <0 on error
  */
 int fit_image_check_sig(const void *fit, int noffset, const void *data,
-		size_t size, int required_keynode, char **err_msgp);
+			size_t size, const void *key_blob, int required_keynode,
+			char **err_msgp);
 
 int fit_image_decrypt_data(const void *fit,
 			   int image_noffset, int cipher_noffset,
diff --git a/include/u-boot/ecdsa.h b/include/u-boot/ecdsa.h
index 0ceb0c1..6e0269e 100644
--- a/include/u-boot/ecdsa.h
+++ b/include/u-boot/ecdsa.h
@@ -44,8 +44,9 @@
  *
  * @info:	Specifies key and FIT information
  * @keydest:	Destination FDT blob for public key data
- * @return: 0, on success, -ENOSPC if the keydest FDT blob ran out of space,
- * other -ve value on error
+ * @return: node offset within the FDT blob where the data was written on
+ *	success, -ENOSPC if the keydest FDT blob ran out of space, other -ve
+ *	value on other error
  */
 int ecdsa_add_verify_data(struct image_sign_info *info, void *keydest);
 
diff --git a/include/u-boot/rsa.h b/include/u-boot/rsa.h
index 2ed2ac7..01b480d 100644
--- a/include/u-boot/rsa.h
+++ b/include/u-boot/rsa.h
@@ -61,8 +61,9 @@
  *
  * @info:	Specifies key and FIT information
  * @keydest:	Destination FDT blob for public key data
- * @return: 0, on success, -ENOSPC if the keydest FDT blob ran out of space,
-		other -ve value on error
+ * @return: node offset within the FDT blob where the data was written on
+ *	success, -ENOSPC if the keydest FDT blob ran out of space, other -ve
+ *	value on other error
 */
 int rsa_add_verify_data(struct image_sign_info *info, void *keydest);
 
diff --git a/lib/Kconfig b/lib/Kconfig
index 52d4b27..3c6fa99 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -202,6 +202,24 @@
 
 endchoice
 
+config SUPPORT_ACPI
+	bool
+	help
+	  Enable this if your arch or board can support generating ACPI
+	  (Advanced Configuration and Power Interface) tables. In this case
+	  U-Boot can generate these tables and pass them to the Operating
+	  System.
+
+config GENERATE_ACPI_TABLE
+	bool "Generate an ACPI (Advanced Configuration and Power Interface) table"
+	depends on SUPPORT_ACPI
+	select QFW if QEMU
+	help
+	  The Advanced Configuration and Power Interface (ACPI) specification
+	  provides an open standard for device configuration and management
+	  by the operating system. It defines platform-independent interfaces
+	  for configuration and power management monitoring.
+
 config SPL_TINY_MEMSET
 	bool "Use a very small memset() in SPL"
 	help
diff --git a/lib/Makefile b/lib/Makefile
index f223892..11b03d1 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -64,7 +64,7 @@
 obj-$(CONFIG_TPM_V2) += tpm-v2.o
 endif
 
-obj-$(CONFIG_$(SPL_)ACPIGEN) += acpi/
+obj-$(CONFIG_$(SPL_TPL_)GENERATE_ACPI_TABLE) += acpi/
 obj-$(CONFIG_$(SPL_)MD5) += md5.o
 obj-$(CONFIG_ECDSA) += ecdsa/
 obj-$(CONFIG_$(SPL_)RSA) += rsa/
diff --git a/lib/acpi/Makefile b/lib/acpi/Makefile
index 5c2f793..f9b5049 100644
--- a/lib/acpi/Makefile
+++ b/lib/acpi/Makefile
@@ -1,7 +1,22 @@
 # SPDX-License-Identifier: GPL-2.0+
 #
 
-obj-y += acpigen.o
-obj-y += acpi_device.o
-obj-y += acpi_dp.o
-obj-y += acpi_table.o
+obj-$(CONFIG_$(SPL_)ACPIGEN) += acpigen.o
+obj-$(CONFIG_$(SPL_)ACPIGEN) += acpi_device.o
+obj-$(CONFIG_$(SPL_)ACPIGEN) += acpi_dp.o
+obj-$(CONFIG_$(SPL_)ACPIGEN) += acpi_table.o
+obj-y += acpi_writer.o
+
+# With QEMU the ACPI tables come from there, not from U-Boot
+ifndef CONFIG_QEMU
+obj-y += base.o
+obj-y += csrt.o
+
+# Sandbox does not build a .asl file
+ifndef CONFIG_SANDBOX
+obj-y += dsdt.o
+endif
+
+obj-y += facs.o
+obj-y += ssdt.o
+endif
diff --git a/lib/acpi/acpi_table.c b/lib/acpi/acpi_table.c
index d168540..f8642f9 100644
--- a/lib/acpi/acpi_table.c
+++ b/lib/acpi/acpi_table.c
@@ -201,88 +201,6 @@
 	return 0;
 }
 
-void acpi_write_rsdp(struct acpi_rsdp *rsdp, struct acpi_rsdt *rsdt,
-		     struct acpi_xsdt *xsdt)
-{
-	memset(rsdp, 0, sizeof(struct acpi_rsdp));
-
-	memcpy(rsdp->signature, RSDP_SIG, 8);
-	memcpy(rsdp->oem_id, OEM_ID, 6);
-
-	rsdp->length = sizeof(struct acpi_rsdp);
-	rsdp->rsdt_address = map_to_sysmem(rsdt);
-
-	rsdp->xsdt_address = map_to_sysmem(xsdt);
-	rsdp->revision = ACPI_RSDP_REV_ACPI_2_0;
-
-	/* Calculate checksums */
-	rsdp->checksum = table_compute_checksum(rsdp, 20);
-	rsdp->ext_checksum = table_compute_checksum(rsdp,
-						    sizeof(struct acpi_rsdp));
-}
-
-static void acpi_write_rsdt(struct acpi_rsdt *rsdt)
-{
-	struct acpi_table_header *header = &rsdt->header;
-
-	/* Fill out header fields */
-	acpi_fill_header(header, "RSDT");
-	header->length = sizeof(struct acpi_rsdt);
-	header->revision = 1;
-
-	/* Entries are filled in later, we come with an empty set */
-
-	/* Fix checksum */
-	header->checksum = table_compute_checksum(rsdt,
-						  sizeof(struct acpi_rsdt));
-}
-
-static void acpi_write_xsdt(struct acpi_xsdt *xsdt)
-{
-	struct acpi_table_header *header = &xsdt->header;
-
-	/* Fill out header fields */
-	acpi_fill_header(header, "XSDT");
-	header->length = sizeof(struct acpi_xsdt);
-	header->revision = 1;
-
-	/* Entries are filled in later, we come with an empty set */
-
-	/* Fix checksum */
-	header->checksum = table_compute_checksum(xsdt,
-						  sizeof(struct acpi_xsdt));
-}
-
-void acpi_setup_base_tables(struct acpi_ctx *ctx, void *start)
-{
-	ctx->base = start;
-	ctx->current = start;
-
-	/* Align ACPI tables to 16 byte */
-	acpi_align(ctx);
-	gd->arch.acpi_start = map_to_sysmem(ctx->current);
-
-	/* We need at least an RSDP and an RSDT Table */
-	ctx->rsdp = ctx->current;
-	acpi_inc_align(ctx, sizeof(struct acpi_rsdp));
-	ctx->rsdt = ctx->current;
-	acpi_inc_align(ctx, sizeof(struct acpi_rsdt));
-	ctx->xsdt = ctx->current;
-	acpi_inc_align(ctx, sizeof(struct acpi_xsdt));
-
-	/* clear all table memory */
-	memset((void *)start, '\0', ctx->current - start);
-
-	acpi_write_rsdp(ctx->rsdp, ctx->rsdt, ctx->xsdt);
-	acpi_write_rsdt(ctx->rsdt);
-	acpi_write_xsdt(ctx->xsdt);
-	/*
-	 * Per ACPI spec, the FACS table address must be aligned to a 64 byte
-	 * boundary (Windows checks this, but Linux does not).
-	 */
-	acpi_align64(ctx);
-}
-
 void acpi_create_dbg2(struct acpi_dbg2_header *dbg2,
 		      int port_type, int port_subtype,
 		      struct acpi_gen_regaddr *address, u32 address_size,
diff --git a/lib/acpi/acpi_writer.c b/lib/acpi/acpi_writer.c
new file mode 100644
index 0000000..946f90e
--- /dev/null
+++ b/lib/acpi/acpi_writer.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Handles writing the declared ACPI tables
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#define LOG_CATEGORY LOGC_ACPI
+
+#include <common.h>
+#include <log.h>
+#include <malloc.h>
+#include <mapmem.h>
+#include <acpi/acpi_table.h>
+#include <asm/global_data.h>
+#include <dm/acpi.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+int acpi_write_one(struct acpi_ctx *ctx, const struct acpi_writer *entry)
+{
+	int ret;
+
+	log_debug("%s: writing table '%s'\n", entry->name,
+		  entry->table);
+	ctx->tab_start = ctx->current;
+	ret = entry->h_write(ctx, entry);
+	if (ret == -ENOENT) {
+		log_debug("%s: Omitted due to being empty\n",
+			  entry->name);
+		ret = 0;
+		ctx->current = ctx->tab_start;	/* drop the table */
+		return ret;
+	}
+	if (ret)
+		return log_msg_ret("write", ret);
+
+	if (entry->flags & ACPIWF_ALIGN64)
+		acpi_align64(ctx);
+	else
+		acpi_align(ctx);
+
+	/* Add the item to the internal list */
+	ret = acpi_add_other_item(ctx, entry, ctx->tab_start);
+	if (ret)
+		return log_msg_ret("add", ret);
+
+	return 0;
+}
+
+#ifndef CONFIG_QEMU
+static int acpi_write_all(struct acpi_ctx *ctx)
+{
+	const struct acpi_writer *writer =
+		 ll_entry_start(struct acpi_writer, acpi_writer);
+	const int n_ents = ll_entry_count(struct acpi_writer, acpi_writer);
+	const struct acpi_writer *entry;
+	int ret;
+
+	for (entry = writer; entry != writer + n_ents; entry++) {
+		ret = acpi_write_one(ctx, entry);
+		if (ret && ret != -ENOENT)
+			return log_msg_ret("one", ret);
+	}
+
+	return 0;
+}
+
+/*
+ * QEMU's version of write_acpi_tables is defined in drivers/misc/qfw.c
+ */
+ulong write_acpi_tables(ulong start_addr)
+{
+	struct acpi_ctx *ctx;
+	ulong addr;
+	int ret;
+
+	ctx = malloc(sizeof(*ctx));
+	if (!ctx)
+		return log_msg_ret("mem", -ENOMEM);
+
+	log_debug("ACPI: Writing ACPI tables at %lx\n", start_addr);
+
+	acpi_reset_items();
+	acpi_setup_ctx(ctx, start_addr);
+
+	ret = acpi_write_all(ctx);
+	if (ret) {
+		log_err("Failed to write ACPI tables (err=%d)\n", ret);
+		return log_msg_ret("write", ret);
+	}
+
+	addr = map_to_sysmem(ctx->current);
+	log_debug("ACPI current = %lx\n", addr);
+
+	return addr;
+}
+
+int write_dev_tables(struct acpi_ctx *ctx, const struct acpi_writer *entry)
+{
+	int ret;
+
+	ret = acpi_write_dev_tables(ctx);
+	if (ret)
+		return log_msg_ret("write", ret);
+
+	return 0;
+}
+ACPI_WRITER(8dev, NULL, write_dev_tables, 0);
+
+ulong acpi_get_rsdp_addr(void)
+{
+	if (!gd->acpi_ctx)
+		return 0;
+
+	return map_to_sysmem(gd->acpi_ctx->rsdp);
+}
+#endif /* QEMU */
+
+void acpi_setup_ctx(struct acpi_ctx *ctx, ulong start)
+{
+	gd->acpi_ctx = ctx;
+	memset(ctx, '\0', sizeof(*ctx));
+
+	/* Align ACPI tables to 16-byte boundary */
+	start = ALIGN(start, 16);
+	ctx->base = map_sysmem(start, 0);
+	ctx->current = ctx->base;
+
+	gd_set_acpi_start(start);
+}
diff --git a/lib/acpi/base.c b/lib/acpi/base.c
new file mode 100644
index 0000000..2057bd2
--- /dev/null
+++ b/lib/acpi/base.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Write base ACPI tables
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#define LOG_CATEGORY LOGC_ACPI
+
+#include <common.h>
+#include <acpi/acpi_table.h>
+#include <dm/acpi.h>
+#include <mapmem.h>
+#include <tables_csum.h>
+
+void acpi_write_rsdp(struct acpi_rsdp *rsdp, struct acpi_rsdt *rsdt,
+		     struct acpi_xsdt *xsdt)
+{
+	memset(rsdp, 0, sizeof(struct acpi_rsdp));
+
+	memcpy(rsdp->signature, RSDP_SIG, 8);
+	memcpy(rsdp->oem_id, OEM_ID, 6);
+
+	rsdp->length = sizeof(struct acpi_rsdp);
+	rsdp->rsdt_address = map_to_sysmem(rsdt);
+
+	rsdp->xsdt_address = map_to_sysmem(xsdt);
+	rsdp->revision = ACPI_RSDP_REV_ACPI_2_0;
+
+	/* Calculate checksums */
+	rsdp->checksum = table_compute_checksum(rsdp, 20);
+	rsdp->ext_checksum = table_compute_checksum(rsdp,
+						    sizeof(struct acpi_rsdp));
+}
+
+static void acpi_write_rsdt(struct acpi_rsdt *rsdt)
+{
+	struct acpi_table_header *header = &rsdt->header;
+
+	/* Fill out header fields */
+	acpi_fill_header(header, "RSDT");
+	header->length = sizeof(struct acpi_rsdt);
+	header->revision = 1;
+
+	/* Entries are filled in later, we come with an empty set */
+
+	/* Fix checksum */
+	header->checksum = table_compute_checksum(rsdt,
+						  sizeof(struct acpi_rsdt));
+}
+
+static void acpi_write_xsdt(struct acpi_xsdt *xsdt)
+{
+	struct acpi_table_header *header = &xsdt->header;
+
+	/* Fill out header fields */
+	acpi_fill_header(header, "XSDT");
+	header->length = sizeof(struct acpi_xsdt);
+	header->revision = 1;
+
+	/* Entries are filled in later, we come with an empty set */
+
+	/* Fix checksum */
+	header->checksum = table_compute_checksum(xsdt,
+						  sizeof(struct acpi_xsdt));
+}
+
+static int acpi_write_base(struct acpi_ctx *ctx,
+			   const struct acpi_writer *entry)
+{
+	/* We need at least an RSDP and an RSDT Table */
+	ctx->rsdp = ctx->current;
+	acpi_inc_align(ctx, sizeof(struct acpi_rsdp));
+	ctx->rsdt = ctx->current;
+	acpi_inc_align(ctx, sizeof(struct acpi_rsdt));
+	ctx->xsdt = ctx->current;
+	acpi_inc_align(ctx, sizeof(struct acpi_xsdt));
+
+	/* clear all table memory */
+	memset(ctx->base, '\0', ctx->current - ctx->base);
+
+	acpi_write_rsdp(ctx->rsdp, ctx->rsdt, ctx->xsdt);
+	acpi_write_rsdt(ctx->rsdt);
+	acpi_write_xsdt(ctx->xsdt);
+
+	return 0;
+}
+/*
+ * Per ACPI spec, the FACS table address must be aligned to a 64-byte boundary
+ * (Windows checks this, but Linux does not).
+ *
+ * Use the '0' prefix to put this one first
+ */
+ACPI_WRITER(0base, NULL, acpi_write_base, ACPIWF_ALIGN64);
diff --git a/lib/acpi/csrt.c b/lib/acpi/csrt.c
new file mode 100644
index 0000000..2ba86f2
--- /dev/null
+++ b/lib/acpi/csrt.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Write an ACPI Core System Resource Table (CSRT)
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#define LOG_CATEGORY LOGC_ACPI
+
+#include <common.h>
+#include <mapmem.h>
+#include <tables_csum.h>
+#include <acpi/acpi_table.h>
+#include <dm/acpi.h>
+
+__weak int acpi_fill_csrt(struct acpi_ctx *ctx)
+{
+	return 0;
+}
+
+int acpi_write_csrt(struct acpi_ctx *ctx, const struct acpi_writer *entry)
+{
+	struct acpi_table_header *header;
+	struct acpi_csrt *csrt;
+	int ret;
+
+	csrt = ctx->current;
+	header = &csrt->header;
+
+	memset(csrt, '\0', sizeof(struct acpi_csrt));
+
+	/* Fill out header fields */
+	acpi_fill_header(header, "CSRT");
+	header->revision = 0;
+	acpi_inc(ctx, sizeof(*header));
+
+	ret = acpi_fill_csrt(ctx);
+	if (ret)
+		return log_msg_ret("fill", ret);
+
+	/* (Re)calculate length and checksum */
+	header->length = (ulong)ctx->current - (ulong)csrt;
+	header->checksum = table_compute_checksum(csrt, header->length);
+
+	acpi_add_table(ctx, csrt);
+
+	return 0;
+}
+ACPI_WRITER(5csrt, "CSRT", acpi_write_csrt, 0);
diff --git a/lib/acpi/dsdt.c b/lib/acpi/dsdt.c
new file mode 100644
index 0000000..db98cc2
--- /dev/null
+++ b/lib/acpi/dsdt.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Write the ACPI Differentiated System Description Table (DSDT)
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#define LOG_CATEGORY LOGC_ACPI
+
+#include <common.h>
+#include <acpi/acpi_table.h>
+#include <dm/acpi.h>
+#include <tables_csum.h>
+
+/*
+ * IASL compiles the dsdt entries and writes the hex values
+ * to a C array AmlCode[] (see dsdt.c).
+ */
+extern const unsigned char AmlCode[];
+
+int acpi_write_dsdt(struct acpi_ctx *ctx, const struct acpi_writer *entry)
+{
+	const int thl = sizeof(struct acpi_table_header);
+	struct acpi_table_header *dsdt = ctx->current;
+	int aml_len;
+
+	/* Put the table header first */
+	memcpy(dsdt, &AmlCode, thl);
+	acpi_inc(ctx, thl);
+	log_debug("DSDT starts at %p, hdr ends at %p\n", dsdt, ctx->current);
+
+	/* If the table is not empty, allow devices to inject things */
+	aml_len = dsdt->length - thl;
+	if (aml_len) {
+		void *base = ctx->current;
+		int ret;
+
+		ret = acpi_inject_dsdt(ctx);
+		if (ret)
+			return log_msg_ret("inject", ret);
+		log_debug("Added %lx bytes from inject_dsdt, now at %p\n",
+			  (ulong)(ctx->current - base), ctx->current);
+		log_debug("Copy AML code size %x to %p\n", aml_len,
+			  ctx->current);
+		memcpy(ctx->current, AmlCode + thl, aml_len);
+		acpi_inc(ctx, aml_len);
+	}
+
+	ctx->dsdt = dsdt;
+	dsdt->length = ctx->current - (void *)dsdt;
+	log_debug("Updated DSDT length to %x\n", dsdt->length);
+
+	return 0;
+}
+ACPI_WRITER(3dsdt, "DSDT", acpi_write_dsdt, 0);
diff --git a/lib/acpi/facs.c b/lib/acpi/facs.c
new file mode 100644
index 0000000..e89f43c
--- /dev/null
+++ b/lib/acpi/facs.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Write an ACPI Firmware ACPI Control Structure (FACS) table
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#define LOG_CATEGORY LOGC_ACPI
+
+#include <common.h>
+#include <acpi/acpi_table.h>
+#include <dm/acpi.h>
+
+int acpi_write_facs(struct acpi_ctx *ctx, const struct acpi_writer *entry)
+{
+	struct acpi_facs *facs = ctx->current;
+
+	memset((void *)facs, '\0', sizeof(struct acpi_facs));
+
+	memcpy(facs->signature, "FACS", 4);
+	facs->length = sizeof(struct acpi_facs);
+	facs->hardware_signature = 0;
+	facs->firmware_waking_vector = 0;
+	facs->global_lock = 0;
+	facs->flags = 0;
+	facs->x_firmware_waking_vector_l = 0;
+	facs->x_firmware_waking_vector_h = 0;
+	facs->version = 1;
+
+	ctx->facs = facs;
+	acpi_inc(ctx, sizeof(struct acpi_facs));
+
+	return 0;
+}
+ACPI_WRITER(1facs, "FACS", acpi_write_facs, 0);
diff --git a/lib/acpi/ssdt.c b/lib/acpi/ssdt.c
new file mode 100644
index 0000000..659c1aa
--- /dev/null
+++ b/lib/acpi/ssdt.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Write an ACPI Secondary System Descriptor Table (SSDT) table
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#define LOG_CATEGORY LOGC_ACPI
+
+#include <common.h>
+#include <acpi/acpi_table.h>
+#include <dm/acpi.h>
+#include <tables_csum.h>
+
+int acpi_write_ssdt(struct acpi_ctx *ctx, const struct acpi_writer *entry)
+{
+	struct acpi_table_header *ssdt;
+	int ret;
+
+	ssdt = ctx->current;
+	memset((void *)ssdt, '\0', sizeof(struct acpi_table_header));
+
+	acpi_fill_header(ssdt, "SSDT");
+	memcpy(ssdt->oem_table_id, OEM_TABLE_ID, sizeof(ssdt->oem_table_id));
+	ssdt->revision = acpi_get_table_revision(ACPITAB_SSDT);
+	ssdt->aslc_revision = 1;
+	ssdt->length = sizeof(struct acpi_table_header);
+
+	acpi_inc(ctx, sizeof(struct acpi_table_header));
+
+	ret = acpi_fill_ssdt(ctx);
+	if (ret) {
+		ctx->current = ssdt;
+		return log_msg_ret("fill", ret);
+	}
+
+	/* (Re)calculate length and checksum */
+	ssdt->length = ctx->current - (void *)ssdt;
+	ssdt->checksum = table_compute_checksum((void *)ssdt, ssdt->length);
+	log_debug("SSDT at %p, length %x\n", ssdt, ssdt->length);
+
+	/* Drop the table if it is empty */
+	if (ssdt->length == sizeof(struct acpi_table_header))
+		return log_msg_ret("fill", -ENOENT);
+	acpi_add_table(ctx, ssdt);
+
+	return 0;
+}
+ACPI_WRITER(6ssdt, "SSDT", acpi_write_ssdt, 0);
diff --git a/lib/ecdsa/ecdsa-libcrypto.c b/lib/ecdsa/ecdsa-libcrypto.c
index ae6dfa0..d5939af 100644
--- a/lib/ecdsa/ecdsa-libcrypto.c
+++ b/lib/ecdsa/ecdsa-libcrypto.c
@@ -301,7 +301,7 @@
 	if (ret < 0)
 		return ret;
 
-	return 0;
+	return key_node;
 }
 
 int ecdsa_add_verify_data(struct image_sign_info *info, void *fdt)
@@ -313,7 +313,7 @@
 	fdt_key_name = info->keyname ? info->keyname : "default-key";
 	ret = prepare_ctx(&ctx, info);
 	if (ret >= 0)
-		do_add(&ctx, fdt, fdt_key_name);
+		ret = do_add(&ctx, fdt, fdt_key_name);
 
 	free_ctx(&ctx);
 	return ret;
diff --git a/lib/efi_loader/efi_acpi.c b/lib/efi_loader/efi_acpi.c
index 83f025e..2ddc350 100644
--- a/lib/efi_loader/efi_acpi.c
+++ b/lib/efi_loader/efi_acpi.c
@@ -8,6 +8,7 @@
 #include <common.h>
 #include <efi_loader.h>
 #include <log.h>
+#include <mapmem.h>
 #include <acpi/acpi_table.h>
 
 static const efi_guid_t acpi_guid = EFI_ACPI_TABLE_GUID;
@@ -22,6 +23,7 @@
 	/* Map within the low 32 bits, to allow for 32bit ACPI tables */
 	u64 acpi = U32_MAX;
 	efi_status_t ret;
+	ulong addr;
 
 	/* Reserve 64kiB page for ACPI */
 	ret = efi_allocate_pages(EFI_ALLOCATE_MAX_ADDRESS,
@@ -34,7 +36,8 @@
 	 * a 4k-aligned address, so it is safe to assume that
 	 * write_acpi_tables() will write the table at that address.
 	 */
-	write_acpi_tables(acpi);
+	addr = map_to_sysmem((void *)(ulong)acpi);
+	write_acpi_tables(addr);
 
 	/* And expose them to our EFI payload */
 	return efi_install_configuration_table(&acpi_guid,
diff --git a/lib/rsa/rsa-sign.c b/lib/rsa/rsa-sign.c
index a95a3d2..3e7b798 100644
--- a/lib/rsa/rsa-sign.c
+++ b/lib/rsa/rsa-sign.c
@@ -703,5 +703,8 @@
 	if (info->engine_id)
 		rsa_engine_remove(e);
 
-	return ret;
+	if (ret)
+		return ret;
+
+	return node;
 }
diff --git a/lib/rsa/rsa-verify.c b/lib/rsa/rsa-verify.c
index fbb2d35..32c7507 100644
--- a/lib/rsa/rsa-verify.c
+++ b/lib/rsa/rsa-verify.c
@@ -79,14 +79,14 @@
 	struct checksum_algo *checksum = info->checksum;
 	int ret, pad_len = msg_len - checksum->checksum_len;
 
-	/* Check pkcs1.5 padding bytes. */
+	/* Check pkcs1.5 padding bytes */
 	ret = rsa_verify_padding(msg, pad_len, checksum);
 	if (ret) {
 		debug("In RSAVerify(): Padding check failed!\n");
 		return -EINVAL;
 	}
 
-	/* Check hash. */
+	/* Check hash */
 	if (memcmp((uint8_t *)msg + pad_len, hash, msg_len - pad_len)) {
 		debug("In RSAVerify(): Hash check failed!\n");
 		return -EACCES;
@@ -502,7 +502,8 @@
 	if (CONFIG_IS_ENABLED(RSA_VERIFY_WITH_PKEY) && !info->fdt_blob) {
 		/* don't rely on fdt properties */
 		ret = rsa_verify_with_pkey(info, hash, sig, sig_len);
-
+		if (ret)
+			debug("%s: rsa_verify_with_pkey() failed\n", __func__);
 		return ret;
 	}
 
@@ -522,6 +523,9 @@
 		if (info->required_keynode != -1) {
 			ret = rsa_verify_with_keynode(info, hash, sig, sig_len,
 						      info->required_keynode);
+			if (ret)
+				debug("%s: Failed to verify required_keynode\n",
+				      __func__);
 			return ret;
 		}
 
@@ -531,6 +535,8 @@
 		ret = rsa_verify_with_keynode(info, hash, sig, sig_len, node);
 		if (!ret)
 			return ret;
+		debug("%s: Could not verify key '%s', trying all\n", __func__,
+		      name);
 
 		/* No luck, so try each of the keys in turn */
 		for (ndepth = 0, noffset = fdt_next_node(blob, sig_node,
@@ -546,6 +552,7 @@
 			}
 		}
 	}
+	debug("%s: Failed to verify by any means\n", __func__);
 
 	return ret;
 }
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 77ad282..93cb09a 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -320,8 +320,11 @@
 quiet_cmd_dtc = DTC     $@
 # Modified for U-Boot
 # Bring in any U-Boot-specific include at the end of the file
+# And finally any custom .dtsi fragments specified with CONFIG_DEVICE_TREE_INCLUDES
 cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \
 	(cat $<; $(if $(u_boot_dtsi),echo '$(pound)include "$(u_boot_dtsi)"')) > $(pre-tmp); \
+	$(foreach f,$(subst $(quote),,$(CONFIG_DEVICE_TREE_INCLUDES)), \
+	  echo '$(pound)include "$(f)"' >> $(pre-tmp);) \
 	$(HOSTCC) -E $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $(pre-tmp) ; \
 	$(DTC) -O dtb -o $@ -b 0 \
 		-i $(dir $<) $(DTC_FLAGS) \
@@ -461,7 +464,8 @@
 cmd_acpi_c_asl=         \
 	$(CPP) -x assembler-with-cpp -D__ASSEMBLY__ -D__ACPI__ \
 		-P $(UBOOTINCLUDE) -o $(ASL_TMP) $< && \
-	iasl -p $@ -tc $(ASL_TMP) $(if $(KBUILD_VERBOSE:1=), >/dev/null) && \
+	iasl -p $@ -I $(srctree)/board/$(BOARDDIR) -tc $(ASL_TMP) \
+		$(if $(KBUILD_VERBOSE:1=), >/dev/null) && \
 	mv $(patsubst %.c,%.hex,$@) $@
 
 $(obj)/dsdt.c:    $(src)/dsdt.asl
diff --git a/test/dm/acpi.c b/test/dm/acpi.c
index c51073c..edad913 100644
--- a/test/dm/acpi.c
+++ b/test/dm/acpi.c
@@ -45,6 +45,27 @@
 	bool no_name;
 };
 
+/**
+ * setup_ctx_and_base_tables() - Set up context along with RSDP, RSDT and XSDT
+ *
+ * Set up the context with the given start position. Some basic tables are
+ * always needed, so set them up as well.
+ *
+ * @ctx: Context to set up
+ */
+static int setup_ctx_and_base_tables(struct unit_test_state *uts,
+				     struct acpi_ctx *ctx, ulong start)
+{
+	struct acpi_writer *entry = ACPI_WRITER_GET(0base);
+
+	acpi_setup_ctx(ctx, start);
+
+	ctx->tab_start = ctx->current;
+	ut_assertok(acpi_write_one(ctx, entry));
+
+	return 0;
+}
+
 static int testacpi_write_tables(const struct udevice *dev,
 				 struct acpi_ctx *ctx)
 {
@@ -240,13 +261,15 @@
 {
 	struct acpi_dmar *dmar;
 	struct acpi_ctx ctx;
+	ulong addr;
 	void *buf;
 	int i;
 
 	buf = malloc(BUF_SIZE);
 	ut_assertnonnull(buf);
+	addr = map_to_sysmem(buf);
 
-	acpi_setup_base_tables(&ctx, buf);
+	ut_assertok(setup_ctx_and_base_tables(uts, &ctx, addr));
 	dmar = ctx.current;
 	ut_assertok(acpi_write_dev_tables(&ctx));
 
@@ -304,14 +327,15 @@
 }
 DM_TEST(dm_test_acpi_basic, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
 
-/* Test acpi_setup_base_tables */
-static int dm_test_acpi_setup_base_tables(struct unit_test_state *uts)
+/* Test setup_ctx_and_base_tables */
+static int dm_test_setup_ctx_and_base_tables(struct unit_test_state *uts)
 {
 	struct acpi_rsdp *rsdp;
 	struct acpi_rsdt *rsdt;
 	struct acpi_xsdt *xsdt;
 	struct acpi_ctx ctx;
 	void *buf, *end;
+	ulong addr;
 
 	/*
 	 * Use an unaligned address deliberately, by allocating an aligned
@@ -319,8 +343,9 @@
 	 */
 	buf = memalign(64, BUF_SIZE);
 	ut_assertnonnull(buf);
-	acpi_setup_base_tables(&ctx, buf + 4);
-	ut_asserteq(map_to_sysmem(PTR_ALIGN(buf + 4, 16)), gd->arch.acpi_start);
+	addr = map_to_sysmem(buf);
+	ut_assertok(setup_ctx_and_base_tables(uts, &ctx, addr + 4));
+	ut_asserteq(map_to_sysmem(PTR_ALIGN(buf + 4, 16)), gd_acpi_start());
 
 	rsdp = buf + 16;
 	ut_asserteq_ptr(rsdp, ctx.rsdp);
@@ -349,7 +374,7 @@
 
 	return 0;
 }
-DM_TEST(dm_test_acpi_setup_base_tables,
+DM_TEST(dm_test_setup_ctx_and_base_tables,
 	UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);
 
 /* Test 'acpi list' command */
@@ -361,32 +386,33 @@
 
 	buf = memalign(16, BUF_SIZE);
 	ut_assertnonnull(buf);
-	acpi_setup_base_tables(&ctx, buf);
+	addr = map_to_sysmem(buf);
+	ut_assertok(setup_ctx_and_base_tables(uts, &ctx, addr));
 
 	ut_assertok(acpi_write_dev_tables(&ctx));
 
 	console_record_reset();
 	run_command("acpi list", 0);
-	addr = (ulong)map_to_sysmem(buf);
-	ut_assert_nextline("ACPI tables start at %lx", addr);
-	ut_assert_nextline("RSDP %08lx %06zx (v02 U-BOOT)", addr,
+	ut_assert_nextline("Name      Base   Size  Detail");
+	ut_assert_nextline("----  --------  -----  ------");
+	ut_assert_nextline("RSDP  %08lx  %5zx  v02 U-BOOT", addr,
 			   sizeof(struct acpi_rsdp));
 	addr = ALIGN(addr + sizeof(struct acpi_rsdp), 16);
-	ut_assert_nextline("RSDT %08lx %06zx (v01 U-BOOT U-BOOTBL %x INTL 0)",
+	ut_assert_nextline("RSDT  %08lx  %5zx  v01 U-BOOT U-BOOTBL %x INTL 0",
 			   addr, sizeof(struct acpi_table_header) +
 			   3 * sizeof(u32), OEM_REVISION);
 	addr = ALIGN(addr + sizeof(struct acpi_rsdt), 16);
-	ut_assert_nextline("XSDT %08lx %06zx (v01 U-BOOT U-BOOTBL %x INTL 0)",
+	ut_assert_nextline("XSDT  %08lx  %5zx  v01 U-BOOT U-BOOTBL %x INTL 0",
 			   addr, sizeof(struct acpi_table_header) +
 			   3 * sizeof(u64), OEM_REVISION);
 	addr = ALIGN(addr + sizeof(struct acpi_xsdt), 64);
-	ut_assert_nextline("DMAR %08lx %06zx (v01 U-BOOT U-BOOTBL %x INTL 0)",
+	ut_assert_nextline("DMAR  %08lx  %5zx  v01 U-BOOT U-BOOTBL %x INTL 0",
 			   addr, sizeof(struct acpi_dmar), OEM_REVISION);
 	addr = ALIGN(addr + sizeof(struct acpi_dmar), 16);
-	ut_assert_nextline("DMAR %08lx %06zx (v01 U-BOOT U-BOOTBL %x INTL 0)",
+	ut_assert_nextline("DMAR  %08lx  %5zx  v01 U-BOOT U-BOOTBL %x INTL 0",
 			   addr, sizeof(struct acpi_dmar), OEM_REVISION);
 	addr = ALIGN(addr + sizeof(struct acpi_dmar), 16);
-	ut_assert_nextline("DMAR %08lx %06zx (v01 U-BOOT U-BOOTBL %x INTL 0)",
+	ut_assert_nextline("DMAR  %08lx  %5zx  v01 U-BOOT U-BOOTBL %x INTL 0",
 			   addr, sizeof(struct acpi_dmar), OEM_REVISION);
 	ut_assert_console_end();
 
@@ -403,7 +429,8 @@
 
 	buf = memalign(16, BUF_SIZE);
 	ut_assertnonnull(buf);
-	acpi_setup_base_tables(&ctx, buf);
+	addr = map_to_sysmem(buf);
+	ut_assertok(setup_ctx_and_base_tables(uts, &ctx, addr));
 
 	ut_assertok(acpi_write_dev_tables(&ctx));
 
@@ -540,18 +567,22 @@
 static int dm_test_acpi_cmd_items(struct unit_test_state *uts)
 {
 	struct acpi_ctx ctx;
+	ulong addr;
 	void *buf;
 
 	buf = malloc(BUF_SIZE);
 	ut_assertnonnull(buf);
+	addr = map_to_sysmem(buf);
 
 	acpi_reset_items();
 	ctx.current = buf;
 	ut_assertok(acpi_fill_ssdt(&ctx));
 	console_record_reset();
 	run_command("acpi items", 0);
-	ut_assert_nextline("dev 'acpi-test', type 1, size 2");
-	ut_assert_nextline("dev 'acpi-test2', type 1, size 2");
+	ut_assert_nextline("Seq  Type       Base   Size  Device/Writer");
+	ut_assert_nextline("---  -----  --------   ----  -------------");
+	ut_assert_nextline("  0  ssdt   %8lx      2  acpi-test", addr);
+	ut_assert_nextline("  1  ssdt   %8lx      2  acpi-test2", addr + 2);
 	ut_assert_console_end();
 
 	acpi_reset_items();
@@ -559,16 +590,20 @@
 	ut_assertok(acpi_inject_dsdt(&ctx));
 	console_record_reset();
 	run_command("acpi items", 0);
-	ut_assert_nextline("dev 'acpi-test', type 2, size 2");
-	ut_assert_nextline("dev 'acpi-test2', type 2, size 2");
+	ut_assert_nextlinen("Seq");
+	ut_assert_nextlinen("---");
+	ut_assert_nextline("  0  dsdt   %8lx      2  acpi-test", addr);
+	ut_assert_nextline("  1  dsdt   %8lx      2  acpi-test2", addr + 2);
 	ut_assert_console_end();
 
 	console_record_reset();
 	run_command("acpi items -d", 0);
-	ut_assert_nextline("dev 'acpi-test', type 2, size 2");
+	ut_assert_nextlinen("Seq");
+	ut_assert_nextlinen("---");
+	ut_assert_nextline("  0  dsdt   %8lx      2  acpi-test", addr);
 	ut_assert_nextlines_are_dump(2);
 	ut_assert_nextline("%s", "");
-	ut_assert_nextline("dev 'acpi-test2', type 2, size 2");
+	ut_assert_nextline("  1  dsdt   %8lx      2  acpi-test2", addr + 2);
 	ut_assert_nextlines_are_dump(2);
 	ut_assert_nextline("%s", "");
 	ut_assert_console_end();
diff --git a/test/dm/ofnode.c b/test/dm/ofnode.c
index 5e7c968..dab0480 100644
--- a/test/dm/ofnode.c
+++ b/test/dm/ofnode.c
@@ -286,7 +286,7 @@
 	ut_assert(ofnode_valid(node));
 	addr = ofnode_get_addr(node);
 	size = ofnode_get_size(node);
-	ut_asserteq(FDT_ADDR_T_NONE, addr);
+	ut_asserteq_64(FDT_ADDR_T_NONE, addr);
 	ut_asserteq(FDT_SIZE_T_NONE, size);
 
 	node = ofnode_path("/translation-test@8000/noxlatebus@3,300/dev@42");
diff --git a/test/dm/pci.c b/test/dm/pci.c
index fa2e4a8..00e4440 100644
--- a/test/dm/pci.c
+++ b/test/dm/pci.c
@@ -331,10 +331,10 @@
 	struct udevice *swap1f, *swap1;
 
 	ut_assertok(dm_pci_bus_find_bdf(PCI_BDF(0, 0x1f, 0), &swap1f));
-	ut_asserteq(FDT_ADDR_T_NONE, dev_read_addr_pci(swap1f));
+	ut_asserteq_64(FDT_ADDR_T_NONE, dev_read_addr_pci(swap1f));
 
 	ut_assertok(dm_pci_bus_find_bdf(PCI_BDF(0, 0x1, 0), &swap1));
-	ut_asserteq(FDT_ADDR_T_NONE, dev_read_addr_pci(swap1));
+	ut_asserteq_64(FDT_ADDR_T_NONE, dev_read_addr_pci(swap1));
 
 	return 0;
 }
diff --git a/test/dm/test-fdt.c b/test/dm/test-fdt.c
index 8866d4d..e1de066 100644
--- a/test/dm/test-fdt.c
+++ b/test/dm/test-fdt.c
@@ -768,7 +768,7 @@
 	/* Test setting generic properties */
 
 	/* Non-existent in DTB */
-	ut_asserteq(FDT_ADDR_T_NONE, dev_read_addr(dev));
+	ut_asserteq_64(FDT_ADDR_T_NONE, dev_read_addr(dev));
 	/* reg = 0x42, size = 0x100 */
 	ut_assertok(ofnode_write_prop(node, "reg", 8,
 				      "\x00\x00\x00\x42\x00\x00\x01\x00"));
diff --git a/tools/binman/binman.rst b/tools/binman/binman.rst
index 3e063d1..ab5a5e0 100644
--- a/tools/binman/binman.rst
+++ b/tools/binman/binman.rst
@@ -185,14 +185,37 @@
 the configuration of the Intel-format descriptor.
 
 
-Running binman
---------------
+Installing binman
+-----------------
 
 First install prerequisites, e.g::
 
     sudo apt-get install python-pyelftools python3-pyelftools lzma-alone \
         liblz4-tool
 
+You can run binman directly if you put it on your PATH. But if you want to
+install into your `~/.local` Python directory, use::
+
+    pip install tools/patman tools/dtoc tools/binman
+
+Note that binman makes use of libraries from patman and dtoc, which is why these
+need to be installed. Also you need `libfdt` and `pylibfdt` which can be
+installed like this::
+
+   git clone git://git.kernel.org/pub/scm/utils/dtc/dtc.git
+   cd dtc
+   pip install .
+   make NO_PYTHON=1 install
+
+This installs the `libfdt.so` library into `~/lib` so you can use
+`LD_LIBRARY_PATH=~/lib` when running binman. If you want to install it in the
+system-library directory, replace the last line with::
+
+   make NO_PYTHON=1 PREFIX=/ install
+
+Running binman
+--------------
+
 Type::
 
     binman build -b <board_name>
@@ -707,7 +730,7 @@
 U-Boot executable and can be updated separately by binman as needed. It can be
 disabled with the --no-expanded flag if required.
 
-The same applies for u-boot-spl and u-boot-spl. In those cases, the expansion
+The same applies for u-boot-spl and u-boot-tpl. In those cases, the expansion
 includes the BSS padding, so for example::
 
     spl {
@@ -1004,6 +1027,77 @@
 You can use BINMAN_VERBOSE=5 (for example) when building to select this.
 
 
+Bintools
+========
+
+`Bintool` is the name binman gives to a binary tool which it uses to create and
+manipulate binaries that binman cannot handle itself. Bintools are often
+necessary since Binman only supports a subset of the available file formats
+natively.
+
+Many SoC vendors invent ways to load code into their SoC using new file formats,
+sometimes changing the format with successive SoC generations. Sometimes the
+tool is available as Open Source. Sometimes it is a pre-compiled binary that
+must be downloaded from the vendor's website. Sometimes it is available in
+source form but difficult or slow to build.
+
+Even for images that use bintools, binman still assembles the image from its
+image description. It may handle parts of the image natively and part with
+various bintools.
+
+Binman relies on these tools so provides various features to manage them:
+
+- Determining whether the tool is currently installed
+- Downloading or building the tool
+- Determining the version of the tool that is installed
+- Deciding which tools are needed to build an image
+
+The Bintool class is an interface to the tool, a thin level of abstraction, using
+Python functions to run the tool for each purpose (e.g. creating a new
+structure, adding a file to an existing structure) rather than just lists of
+string arguments.
+
+As with external blobs, bintools (which are like 'external' tools) can be
+missing. When building an image requires a bintool and it is not installed,
+binman detects this and reports the problem, but continues to build an image.
+This is useful in CI systems which want to check that everything is correct but
+don't have access to the bintools.
+
+To make this work, all calls to bintools (e.g. with Bintool.run_cmd()) must cope
+with the tool being missing, i.e. when None is returned, by:
+
+- Calling self.record_missing_bintool()
+- Setting up some fake contents so binman can continue
+
+Of course the image will not work, but binman reports which bintools are needed
+and also provides a way to fetch them.
+
+To see the available bintools, use::
+
+    binman tool --list
+
+To fetch tools which are missing, use::
+
+    binman tool --fetch missing
+
+You can also use `--fetch all` to fetch all tools or `--fetch <tool>` to fetch
+a particular tool. Some tools are built from source code, in which case you will
+need to have at least the `build-essential` and `git` packages installed.
+
+Bintool Documentation
+=====================
+
+To provide details on the various bintools supported by binman, bintools.rst is
+generated from the source code using::
+
+    binman bintool-docs >tools/binman/bintools.rst
+
+.. toctree::
+   :maxdepth: 2
+
+   bintools
+
+
 Technical details
 =================
 
@@ -1138,6 +1232,35 @@
    $ sudo apt-get install python-coverage python3-coverage python-pytest
 
 
+Error messages
+--------------
+
+This section provides some guidance for some of the less obvious error messages
+produced by binman.
+
+
+Expected __bss_size symbol
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Example::
+
+   binman: Node '/binman/u-boot-spl-ddr/u-boot-spl/u-boot-spl-bss-pad':
+      Expected __bss_size symbol in spl/u-boot-spl
+
+This indicates that binman needs the `__bss_size` symbol to be defined in the
+SPL binary, where `spl/u-boot-spl` is the ELF file containing the symbols. The
+symbol tells binman the size of the BSS region, in bytes. It needs this to be
+able to pad the image so that the following entries do not overlap the BSS,
+which would cause them to be overwritten by variable access in SPL.
+
+This symbol is normally defined in the linker script, immediately after
+__bss_start and __bss_end are defined, like this::
+
+    __bss_size = __bss_end - __bss_start;
+
+You may need to add it to your linker script if you get this error.
+
+
 Concurrent tests
 ----------------
 
diff --git a/tools/binman/bintool.py b/tools/binman/bintool.py
new file mode 100644
index 0000000..e2e5660
--- /dev/null
+++ b/tools/binman/bintool.py
@@ -0,0 +1,466 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Base class for all bintools
+
+This defines the common functionality for all bintools, including running
+the tool, checking its version and fetching it if needed.
+"""
+
+import collections
+import glob
+import importlib
+import multiprocessing
+import os
+import shutil
+import tempfile
+import urllib.error
+
+from patman import command
+from patman import terminal
+from patman import tools
+from patman import tout
+
+BINMAN_DIR = os.path.dirname(os.path.realpath(__file__))
+
+# Format string for listing bintools, see also the header in list_all()
+FORMAT = '%-16.16s %-12.12s %-26.26s %s'
+
+# List of known modules, to avoid importing the module multiple times
+modules = {}
+
+# Possible ways of fetching a tool (FETCH_COUNT is number of ways)
+FETCH_ANY, FETCH_BIN, FETCH_BUILD, FETCH_COUNT = range(4)
+
+FETCH_NAMES = {
+    FETCH_ANY: 'any method',
+    FETCH_BIN: 'binary download',
+    FETCH_BUILD: 'build from source'
+    }
+
+# Status of tool fetching
+FETCHED, FAIL, PRESENT, STATUS_COUNT = range(4)
+
+DOWNLOAD_DESTDIR = os.path.join(os.getenv('HOME'), 'bin')
+
+class Bintool:
+    """Tool which operates on binaries to help produce entry contents
+
+    This is the base class for all bintools
+    """
+    # List of bintools to regard as missing
+    missing_list = []
+
+    def __init__(self, name, desc):
+        self.name = name
+        self.desc = desc
+
+    @staticmethod
+    def find_bintool_class(btype):
+        """Look up the bintool class for bintool
+
+        Args:
+            btype: Bintool to use, e.g. 'mkimage'
+
+        Returns:
+            The bintool class object if found, else a tuple:
+                module name that could not be found
+                exception received
+        """
+        # Convert something like 'u-boot' to 'u_boot' since we are only
+        # interested in the type.
+        module_name = btype.replace('-', '_')
+        module = modules.get(module_name)
+
+        # Import the module if we have not already done so
+        if not module:
+            try:
+                module = importlib.import_module('binman.btool.' + module_name)
+            except ImportError as exc:
+                return module_name, exc
+            modules[module_name] = module
+
+        # Look up the expected class name
+        return getattr(module, 'Bintool%s' % module_name)
+
+    @staticmethod
+    def create(name):
+        """Create a new bintool object
+
+        Args:
+            name (str): Bintool to create, e.g. 'mkimage'
+
+        Returns:
+            A new object of the correct type (a subclass of Bintool)
+        """
+        cls = Bintool.find_bintool_class(name)
+        if isinstance(cls, tuple):
+            raise ValueError("Cannot import bintool module '%s': %s" % cls)
+
+        # Call its constructor to get the object we want.
+        obj = cls(name)
+        return obj
+
+    def show(self):
+        """Show a line of information about a bintool"""
+        if self.is_present():
+            version = self.version()
+        else:
+            version = '-'
+        print(FORMAT % (self.name, version, self.desc,
+                        self.get_path() or '(not found)'))
+
+    @classmethod
+    def set_missing_list(cls, missing_list):
+        cls.missing_list = missing_list or []
+
+    @staticmethod
+    def get_tool_list(include_testing=False):
+        """Get a list of the known tools
+
+        Returns:
+            list of str: names of all tools known to binman
+        """
+        files = glob.glob(os.path.join(BINMAN_DIR, 'btool/*'))
+        names = [os.path.splitext(os.path.basename(fname))[0]
+                 for fname in files]
+        names = [name for name in names if name[0] != '_']
+        if include_testing:
+            names.append('_testing')
+        return sorted(names)
+
+    @staticmethod
+    def list_all():
+        """List all the bintools known to binman"""
+        names = Bintool.get_tool_list()
+        print(FORMAT % ('Name', 'Version', 'Description', 'Path'))
+        print(FORMAT % ('-' * 15,'-' * 11, '-' * 25, '-' * 30))
+        for name in names:
+            btool = Bintool.create(name)
+            btool.show()
+
+    def is_present(self):
+        """Check if a bintool is available on the system
+
+        Returns:
+            bool: True if available, False if not
+        """
+        if self.name in self.missing_list:
+            return False
+        return bool(self.get_path())
+
+    def get_path(self):
+        """Get the path of a bintool
+
+        Returns:
+            str: Path to the tool, if available, else None
+        """
+        return tools.tool_find(self.name)
+
+    def fetch_tool(self, method, col, skip_present):
+        """Fetch a single tool
+
+        Args:
+            method (FETCH_...): Method to use
+            col (terminal.Color): Color terminal object
+            skip_present (bool): Skip fetching if it is already present
+
+        Returns:
+            int: Result of fetch either FETCHED, FAIL, PRESENT
+        """
+        def try_fetch(meth):
+            res = None
+            try:
+                res = self.fetch(meth)
+            except urllib.error.URLError as uerr:
+                message = uerr.reason
+                print(col.Color(col.RED, f'- {message}'))
+
+            except ValueError as exc:
+                print(f'Exception: {exc}')
+            return res
+
+        if skip_present and self.is_present():
+            return PRESENT
+        print(col.Color(col.YELLOW, 'Fetch: %s' % self.name))
+        if method == FETCH_ANY:
+            for try_method in range(1, FETCH_COUNT):
+                print(f'- trying method: {FETCH_NAMES[try_method]}')
+                result = try_fetch(try_method)
+                if result:
+                    break
+        else:
+            result = try_fetch(method)
+        if not result:
+            return FAIL
+        if result is not True:
+            fname, tmpdir = result
+            dest = os.path.join(DOWNLOAD_DESTDIR, self.name)
+            print(f"- writing to '{dest}'")
+            shutil.move(fname, dest)
+            if tmpdir:
+                shutil.rmtree(tmpdir)
+        return FETCHED
+
+    @staticmethod
+    def fetch_tools(method, names_to_fetch):
+        """Fetch bintools from a suitable place
+
+        This fetches or builds the requested bintools so that they can be used
+        by binman
+
+        Args:
+            names_to_fetch (list of str): names of bintools to fetch
+
+        Returns:
+            True on success, False on failure
+        """
+        def show_status(color, prompt, names):
+            print(col.Color(
+                color, f'{prompt}:%s{len(names):2}: %s' %
+                (' ' * (16 - len(prompt)), ' '.join(names))))
+
+        col = terminal.Color()
+        skip_present = False
+        name_list = names_to_fetch
+        if len(names_to_fetch) == 1 and names_to_fetch[0] in ['all', 'missing']:
+            name_list = Bintool.get_tool_list()
+            if names_to_fetch[0] == 'missing':
+                skip_present = True
+            print(col.Color(col.YELLOW,
+                            'Fetching tools:      %s' % ' '.join(name_list)))
+        status = collections.defaultdict(list)
+        for name in name_list:
+            btool = Bintool.create(name)
+            result = btool.fetch_tool(method, col, skip_present)
+            status[result].append(name)
+            if result == FAIL:
+                if method == FETCH_ANY:
+                    print('- failed to fetch with all methods')
+                else:
+                    print(f"- method '{FETCH_NAMES[method]}' is not supported")
+
+        if len(name_list) > 1:
+            if skip_present:
+                show_status(col.GREEN, 'Already present', status[PRESENT])
+            show_status(col.GREEN, 'Tools fetched', status[FETCHED])
+            if status[FAIL]:
+                show_status(col.RED, 'Failures', status[FAIL])
+        return not status[FAIL]
+
+    def run_cmd_result(self, *args, binary=False, raise_on_error=True):
+        """Run the bintool using command-line arguments
+
+        Args:
+            args (list of str): Arguments to provide, in addition to the bintool
+                name
+            binary (bool): True to return output as bytes instead of str
+            raise_on_error (bool): True to raise a ValueError exception if the
+                tool returns a non-zero return code
+
+        Returns:
+            CommandResult: Resulting output from the bintool, or None if the
+                tool is not present
+        """
+        if self.name in self.missing_list:
+            return None
+        name = os.path.expanduser(self.name)  # Expand paths containing ~
+        all_args = (name,) + args
+        env = tools.get_env_with_path()
+        tout.Detail(f"bintool: {' '.join(all_args)}")
+        result = command.RunPipe(
+            [all_args], capture=True, capture_stderr=True, env=env,
+            raise_on_error=False, binary=binary)
+
+        if result.return_code:
+            # Return None if the tool was not found. In this case there is no
+            # output from the tool and it does not appear on the path. We still
+            # try to run it (as above) since RunPipe() allows faking the tool's
+            # output
+            if not any([result.stdout, result.stderr, tools.tool_find(name)]):
+                tout.Info(f"bintool '{name}' not found")
+                return None
+            if raise_on_error:
+                tout.Info(f"bintool '{name}' failed")
+                raise ValueError("Error %d running '%s': %s" %
+                                (result.return_code, ' '.join(all_args),
+                                result.stderr or result.stdout))
+        if result.stdout:
+            tout.Debug(result.stdout)
+        if result.stderr:
+            tout.Debug(result.stderr)
+        return result
+
+    def run_cmd(self, *args, binary=False):
+        """Run the bintool using command-line arguments
+
+        Args:
+            args (list of str): Arguments to provide, in addition to the bintool
+                name
+            binary (bool): True to return output as bytes instead of str
+
+        Returns:
+            str or bytes: Resulting stdout from the bintool
+        """
+        result = self.run_cmd_result(*args, binary=binary)
+        if result:
+            return result.stdout
+
+    @classmethod
+    def build_from_git(cls, git_repo, make_target, bintool_path):
+        """Build a bintool from a git repo
+
+        This clones the repo in a temporary directory, builds it with 'make',
+        then returns the filename of the resulting executable bintool
+
+        Args:
+            git_repo (str): URL of git repo
+            make_target (str): Target to pass to 'make' to build the tool
+            bintool_path (str): Relative path of the tool in the repo, after
+                build is complete
+
+        Returns:
+            tuple:
+                str: Filename of fetched file to copy to a suitable directory
+                str: Name of temp directory to remove, or None
+            or None on error
+        """
+        tmpdir = tempfile.mkdtemp(prefix='binmanf.')
+        print(f"- clone git repo '{git_repo}' to '{tmpdir}'")
+        tools.Run('git', 'clone', '--depth', '1', git_repo, tmpdir)
+        print(f"- build target '{make_target}'")
+        tools.Run('make', '-C', tmpdir, '-j', f'{multiprocessing.cpu_count()}',
+                  make_target)
+        fname = os.path.join(tmpdir, bintool_path)
+        if not os.path.exists(fname):
+            print(f"- File '{fname}' was not produced")
+            return None
+        return fname, tmpdir
+
+    @classmethod
+    def fetch_from_url(cls, url):
+        """Fetch a bintool from a URL
+
+        Args:
+            url (str): URL to fetch from
+
+        Returns:
+            tuple:
+                str: Filename of fetched file to copy to a suitable directory
+                str: Name of temp directory to remove, or None
+        """
+        fname, tmpdir = tools.Download(url)
+        tools.Run('chmod', 'a+x', fname)
+        return fname, tmpdir
+
+    @classmethod
+    def fetch_from_drive(cls, drive_id):
+        """Fetch a bintool from Google drive
+
+        Args:
+            drive_id (str): ID of file to fetch. For a URL of the form
+            'https://drive.google.com/file/d/xxx/view?usp=sharing' the value
+            passed here should be 'xxx'
+
+        Returns:
+            tuple:
+                str: Filename of fetched file to copy to a suitable directory
+                str: Name of temp directory to remove, or None
+        """
+        url = f'https://drive.google.com/uc?export=download&id={drive_id}'
+        return cls.fetch_from_url(url)
+
+    @classmethod
+    def apt_install(cls, package):
+        """Install a bintool using the 'apt' tool
+
+        This requires use of sudo so may request a password
+
+        Args:
+            package (str): Name of package to install
+
+        Returns:
+            True, assuming it completes without error
+        """
+        args = ['sudo', 'apt', 'install', '-y', package]
+        print('- %s' % ' '.join(args))
+        tools.Run(*args)
+        return True
+
+    @staticmethod
+    def WriteDocs(modules, test_missing=None):
+        """Write out documentation about the various bintools to stdout
+
+        Args:
+            modules: List of modules to include
+            test_missing: Used for testing. This is a module to report
+                as missing
+        """
+        print('''.. SPDX-License-Identifier: GPL-2.0+
+
+Binman bintool Documentation
+============================
+
+This file describes the bintools (binary tools) supported by binman. Bintools
+are binman's name for external executables that it runs to generate or process
+binaries. It is fairly easy to create new bintools. Just add a new file to the
+'btool' directory. You can use existing bintools as examples.
+
+
+''')
+        modules = sorted(modules)
+        missing = []
+        for name in modules:
+            module = Bintool.find_bintool_class(name)
+            docs = getattr(module, '__doc__')
+            if test_missing == name:
+                docs = None
+            if docs:
+                lines = docs.splitlines()
+                first_line = lines[0]
+                rest = [line[4:] for line in lines[1:]]
+                hdr = 'Bintool: %s: %s' % (name, first_line)
+                print(hdr)
+                print('-' * len(hdr))
+                print('\n'.join(rest))
+                print()
+                print()
+            else:
+                missing.append(name)
+
+        if missing:
+            raise ValueError('Documentation is missing for modules: %s' %
+                             ', '.join(missing))
+
+    # pylint: disable=W0613
+    def fetch(self, method):
+        """Fetch handler for a bintool
+
+        This should be implemented by the subclass
+
+        Args:
+            method (FETCH_...): Method to use
+
+        Returns:
+            tuple:
+                str: Filename of fetched file to copy to a suitable directory
+                str: Name of temp directory to remove, or None
+            or True if the file was fetched and already installed
+            or None if no fetch() implementation is available
+
+        Raises:
+            ValueError: Fetching could not be completed
+        """
+        print(f"No method to fetch bintool '{self.name}'")
+        return False
+
+    # pylint: disable=R0201
+    def version(self):
+        """Version handler for a bintool
+
+        This should be implemented by the subclass
+
+        Returns:
+            str: Version string for this bintool
+        """
+        return 'unknown'
diff --git a/tools/binman/bintool_test.py b/tools/binman/bintool_test.py
new file mode 100644
index 0000000..3d6bcda
--- /dev/null
+++ b/tools/binman/bintool_test.py
@@ -0,0 +1,353 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+
+"""Tests for the Bintool class"""
+
+import collections
+import os
+import shutil
+import tempfile
+import unittest
+import unittest.mock
+import urllib.error
+
+from binman import bintool
+from binman.bintool import Bintool
+
+from patman import command
+from patman import terminal
+from patman import test_util
+from patman import tools
+
+# pylint: disable=R0904
+class TestBintool(unittest.TestCase):
+    """Tests for the Bintool class"""
+    def setUp(self):
+        # Create a temporary directory for test files
+        self._indir = tempfile.mkdtemp(prefix='bintool.')
+        self.seq = None
+        self.count = None
+        self.fname = None
+        self.btools = None
+
+    def tearDown(self):
+        """Remove the temporary input directory and its contents"""
+        if self._indir:
+            shutil.rmtree(self._indir)
+        self._indir = None
+
+    def test_missing_btype(self):
+        """Test that unknown bintool types are detected"""
+        with self.assertRaises(ValueError) as exc:
+            Bintool.create('missing')
+        self.assertIn("No module named 'binman.btool.missing'",
+                      str(exc.exception))
+
+    def test_fresh_bintool(self):
+        """Check that the _testing bintool is not cached"""
+        btest = Bintool.create('_testing')
+        btest.present = True
+        btest2 = Bintool.create('_testing')
+        self.assertFalse(btest2.present)
+
+    def test_version(self):
+        """Check handling of a tool being present or absent"""
+        btest = Bintool.create('_testing')
+        with test_util.capture_sys_output() as (stdout, _):
+            btest.show()
+        self.assertFalse(btest.is_present())
+        self.assertIn('-', stdout.getvalue())
+        btest.present = True
+        self.assertTrue(btest.is_present())
+        self.assertEqual('123', btest.version())
+        with test_util.capture_sys_output() as (stdout, _):
+            btest.show()
+        self.assertIn('123', stdout.getvalue())
+
+    def test_fetch_present(self):
+        """Test fetching of a tool"""
+        btest = Bintool.create('_testing')
+        btest.present = True
+        col = terminal.Color()
+        self.assertEqual(bintool.PRESENT,
+                         btest.fetch_tool(bintool.FETCH_ANY, col, True))
+
+    @classmethod
+    def check_fetch_url(cls, fake_download, method):
+        """Check the output from fetching a tool
+
+        Args:
+            fake_download (function): Function to call instead of
+                tools.Download()
+            method (bintool.FETCH_...): Fetch method to use
+
+        Returns:
+            str: Contents of stdout
+        """
+        btest = Bintool.create('_testing')
+        col = terminal.Color()
+        with unittest.mock.patch.object(tools, 'Download',
+                                        side_effect=fake_download):
+            with test_util.capture_sys_output() as (stdout, _):
+                btest.fetch_tool(method, col, False)
+        return stdout.getvalue()
+
+    def test_fetch_url_err(self):
+        """Test an error while fetching a tool from a URL"""
+        def fail_download(url):
+            """Fake the tools.Download() function by raising an exception"""
+            raise urllib.error.URLError('my error')
+
+        stdout = self.check_fetch_url(fail_download, bintool.FETCH_ANY)
+        self.assertIn('my error', stdout)
+
+    def test_fetch_url_exception(self):
+        """Test an exception while fetching a tool from a URL"""
+        def cause_exc(url):
+            raise ValueError('exc error')
+
+        stdout = self.check_fetch_url(cause_exc, bintool.FETCH_ANY)
+        self.assertIn('exc error', stdout)
+
+    def test_fetch_method(self):
+        """Test fetching using a particular method"""
+        def fail_download(url):
+            """Fake the tools.Download() function by raising an exception"""
+            raise urllib.error.URLError('my error')
+
+        stdout = self.check_fetch_url(fail_download, bintool.FETCH_BIN)
+        self.assertIn('my error', stdout)
+
+    def test_fetch_pass_fail(self):
+        """Test fetching multiple tools with some passing and some failing"""
+        def handle_download(_):
+            """Fake the tools.Download() function by writing a file"""
+            if self.seq:
+                raise urllib.error.URLError('not found')
+            self.seq += 1
+            tools.WriteFile(fname, expected)
+            return fname, dirname
+
+        expected = b'this is a test'
+        dirname = os.path.join(self._indir, 'download_dir')
+        os.mkdir(dirname)
+        fname = os.path.join(dirname, 'downloaded')
+        destdir = os.path.join(self._indir, 'dest_dir')
+        os.mkdir(destdir)
+        dest_fname = os.path.join(destdir, '_testing')
+        self.seq = 0
+
+        with unittest.mock.patch.object(bintool, 'DOWNLOAD_DESTDIR', destdir):
+            with unittest.mock.patch.object(tools, 'Download',
+                                            side_effect=handle_download):
+                with test_util.capture_sys_output() as (stdout, _):
+                    Bintool.fetch_tools(bintool.FETCH_ANY, ['_testing'] * 2)
+        self.assertTrue(os.path.exists(dest_fname))
+        data = tools.ReadFile(dest_fname)
+        self.assertEqual(expected, data)
+
+        lines = stdout.getvalue().splitlines()
+        self.assertTrue(len(lines) > 2)
+        self.assertEqual('Tools fetched:    1: _testing', lines[-2])
+        self.assertEqual('Failures:         1: _testing', lines[-1])
+
+    def test_tool_list(self):
+        """Test listing available tools"""
+        self.assertGreater(len(Bintool.get_tool_list()), 3)
+
+    def check_fetch_all(self, method):
+        """Helper to check the operation of fetching all tools"""
+
+        # pylint: disable=W0613
+        def fake_fetch(method, col, skip_present):
+            """Fakes the Binutils.fetch() function
+
+            Returns FETCHED and FAIL on alternate calls
+            """
+            self.seq += 1
+            result = bintool.FETCHED if self.seq & 1 else bintool.FAIL
+            self.count[result] += 1
+            return result
+
+        self.seq = 0
+        self.count = collections.defaultdict(int)
+        with unittest.mock.patch.object(bintool.Bintool, 'fetch_tool',
+                                        side_effect=fake_fetch):
+            with test_util.capture_sys_output() as (stdout, _):
+                Bintool.fetch_tools(method, ['all'])
+        lines = stdout.getvalue().splitlines()
+        self.assertIn(f'{self.count[bintool.FETCHED]}: ', lines[-2])
+        self.assertIn(f'{self.count[bintool.FAIL]}: ', lines[-1])
+
+    def test_fetch_all(self):
+        """Test fetching all tools"""
+        self.check_fetch_all(bintool.FETCH_ANY)
+
+    def test_fetch_all_specific(self):
+        """Test fetching all tools with a specific method"""
+        self.check_fetch_all(bintool.FETCH_BIN)
+
+    def test_fetch_missing(self):
+        """Test fetching missing tools"""
+        # pylint: disable=W0613
+        def fake_fetch2(method, col, skip_present):
+            """Fakes the Binutils.fetch() function
+
+            Returns PRESENT only for the '_testing' bintool
+            """
+            btool = list(self.btools.values())[self.seq]
+            self.seq += 1
+            print('fetch', btool.name)
+            if btool.name == '_testing':
+                return bintool.PRESENT
+            return bintool.FETCHED
+
+        # Preload a list of tools to return when get_tool_list() and create()
+        # are called
+        all_tools = Bintool.get_tool_list(True)
+        self.btools = collections.OrderedDict()
+        for name in all_tools:
+            self.btools[name] = Bintool.create(name)
+        self.seq = 0
+        with unittest.mock.patch.object(bintool.Bintool, 'fetch_tool',
+                                        side_effect=fake_fetch2):
+            with unittest.mock.patch.object(bintool.Bintool,
+                                            'get_tool_list',
+                                            side_effect=[all_tools]):
+                with unittest.mock.patch.object(bintool.Bintool, 'create',
+                                                side_effect=self.btools.values()):
+                    with test_util.capture_sys_output() as (stdout, _):
+                        Bintool.fetch_tools(bintool.FETCH_ANY, ['missing'])
+        lines = stdout.getvalue().splitlines()
+        num_tools = len(self.btools)
+        fetched = [line for line in lines if 'Tools fetched:' in line].pop()
+        present = [line for line in lines if 'Already present:' in line].pop()
+        self.assertIn(f'{num_tools - 1}: ', fetched)
+        self.assertIn('1: ', present)
+
+    def check_build_method(self, write_file):
+        """Check the output from fetching using the BUILD method
+
+        Args:
+            write_file (bool): True to write the output file when 'make' is
+                called
+
+        Returns:
+            tuple:
+                str: Filename of written file (or missing 'make' output)
+                str: Contents of stdout
+        """
+        def fake_run(*cmd):
+            if cmd[0] == 'make':
+                # See Bintool.build_from_git()
+                tmpdir = cmd[2]
+                self.fname = os.path.join(tmpdir, 'pathname')
+                if write_file:
+                    tools.WriteFile(self.fname, b'hello')
+
+        btest = Bintool.create('_testing')
+        col = terminal.Color()
+        self.fname = None
+        with unittest.mock.patch.object(bintool, 'DOWNLOAD_DESTDIR',
+                                        self._indir):
+            with unittest.mock.patch.object(tools, 'Run', side_effect=fake_run):
+                with test_util.capture_sys_output() as (stdout, _):
+                    btest.fetch_tool(bintool.FETCH_BUILD, col, False)
+        fname = os.path.join(self._indir, '_testing')
+        return fname if write_file else self.fname, stdout.getvalue()
+
+    def test_build_method(self):
+        """Test fetching using the build method"""
+        fname, stdout = self.check_build_method(write_file=True)
+        self.assertTrue(os.path.exists(fname))
+        self.assertIn(f"writing to '{fname}", stdout)
+
+    def test_build_method_fail(self):
+        """Test fetching using the build method when no file is produced"""
+        fname, stdout = self.check_build_method(write_file=False)
+        self.assertFalse(os.path.exists(fname))
+        self.assertIn(f"File '{fname}' was not produced", stdout)
+
+    def test_install(self):
+        """Test fetching using the install method"""
+        btest = Bintool.create('_testing')
+        btest.install = True
+        col = terminal.Color()
+        with unittest.mock.patch.object(tools, 'Run', return_value=None):
+            with test_util.capture_sys_output() as _:
+                result = btest.fetch_tool(bintool.FETCH_BIN, col, False)
+        self.assertEqual(bintool.FETCHED, result)
+
+    def test_no_fetch(self):
+        """Test fetching when there is no method"""
+        btest = Bintool.create('_testing')
+        btest.disable = True
+        col = terminal.Color()
+        with test_util.capture_sys_output() as _:
+            result = btest.fetch_tool(bintool.FETCH_BIN, col, False)
+        self.assertEqual(bintool.FAIL, result)
+
+    def test_all_bintools(self):
+        """Test that all bintools can handle all available fetch types"""
+        def handle_download(_):
+            """Fake the tools.Download() function by writing a file"""
+            tools.WriteFile(fname, expected)
+            return fname, dirname
+
+        def fake_run(*cmd):
+            if cmd[0] == 'make':
+                # See Bintool.build_from_git()
+                tmpdir = cmd[2]
+                self.fname = os.path.join(tmpdir, 'pathname')
+                tools.WriteFile(self.fname, b'hello')
+
+        expected = b'this is a test'
+        dirname = os.path.join(self._indir, 'download_dir')
+        os.mkdir(dirname)
+        fname = os.path.join(dirname, 'downloaded')
+
+        with unittest.mock.patch.object(tools, 'Run', side_effect=fake_run):
+            with unittest.mock.patch.object(tools, 'Download',
+                                            side_effect=handle_download):
+                with test_util.capture_sys_output() as _:
+                    for name in Bintool.get_tool_list():
+                        btool = Bintool.create(name)
+                        for method in range(bintool.FETCH_COUNT):
+                            result = btool.fetch(method)
+                            self.assertTrue(result is not False)
+                            if result is not True and result is not None:
+                                result_fname, _ = result
+                                self.assertTrue(os.path.exists(result_fname))
+                                data = tools.ReadFile(result_fname)
+                                self.assertEqual(expected, data)
+                                os.remove(result_fname)
+
+    def test_all_bintool_versions(self):
+        """Test handling of bintool version when it cannot be run"""
+        all_tools = Bintool.get_tool_list()
+        for name in all_tools:
+            btool = Bintool.create(name)
+            with unittest.mock.patch.object(
+                btool, 'run_cmd_result', return_value=command.CommandResult()):
+                self.assertEqual('unknown', btool.version())
+
+    def test_force_missing(self):
+        btool = Bintool.create('_testing')
+        btool.present = True
+        self.assertTrue(btool.is_present())
+
+        btool.present = None
+        Bintool.set_missing_list(['_testing'])
+        self.assertFalse(btool.is_present())
+
+    def test_failed_command(self):
+        """Check that running a command that does not exist returns None"""
+        btool = Bintool.create('_testing')
+        result = btool.run_cmd_result('fred')
+        self.assertIsNone(result)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tools/binman/bintools.rst b/tools/binman/bintools.rst
new file mode 100644
index 0000000..edb373a
--- /dev/null
+++ b/tools/binman/bintools.rst
@@ -0,0 +1,115 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+Binman bintool Documentation
+============================
+
+This file describes the bintools (binary tools) supported by binman. Bintools
+are binman's name for external executables that it runs to generate or process
+binaries. It is fairly easy to create new bintools. Just add a new file to the
+'btool' directory. You can use existing bintools as examples.
+
+
+
+Bintool: cbfstool: Coreboot filesystem (CBFS) tool
+--------------------------------------------------
+
+This bintool supports creating new CBFS images and adding files to an
+existing image, i.e. the features needed by binman.
+
+It also supports fetching a binary cbfstool, since building it from source
+is fairly slow.
+
+Documentation about CBFS is at https://www.coreboot.org/CBFS
+
+
+
+Bintool: fiptool: Image generation for ARM Trusted Firmware
+-----------------------------------------------------------
+
+This bintool supports running `fiptool` with some basic parameters as
+needed by binman.
+
+It also supports building fiptool from source.
+
+fiptool provides a way to package firmware in an ARM Trusted Firmware
+Firmware Image Package (ATF FIP) format. It is used with Trusted Firmware A,
+for example.
+
+See `TF-A FIP tool documentation`_ for more information.
+
+.. _`TF-A FIP tool documentation`:
+    https://trustedfirmware-a.readthedocs.io/en/latest/getting_started/tools-build.html?highlight=fiptool#building-and-using-the-fip-tool
+
+
+
+Bintool: futility: Handles the 'futility' tool
+----------------------------------------------
+
+futility (flash utility) is a tool for working with Chromium OS flash
+images. This Bintool implements just the features used by Binman, related to
+GBB creation and firmware signing.
+
+A binary version of the tool can be fetched.
+
+See `Chromium OS vboot documentation`_ for more information.
+
+.. _`Chromium OS vboot documentation`:
+    https://chromium.googlesource.com/chromiumos/platform/vboot/+/refs/heads/main/_vboot_reference/README
+
+
+
+Bintool: ifwitool: Handles the 'ifwitool' tool
+----------------------------------------------
+
+This bintool supports running `ifwitool` with some basic parameters as
+needed by binman. It includes creating a file from a FIT as well as adding,
+replacing, deleting and extracting subparts.
+
+The tool is built as part of U-Boot, but a binary version can be fetched if
+required.
+
+ifwitool provides a way to package firmware in an Intel Firmware Image
+(IFWI) file on some Intel SoCs, e.g. Apollo Lake.
+
+
+
+Bintool: lz4: Compression/decompression using the LZ4 algorithm
+---------------------------------------------------------------
+
+This bintool supports running `lz4` to compress and decompress data, as
+used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+    man lz4
+
+
+
+Bintool: lzma_alone: Compression/decompression using the LZMA algorithm
+-----------------------------------------------------------------------
+
+This bintool supports running `lzma_alone` to compress and decompress data,
+as used by binman.
+
+It is also possible to fetch the tool, which uses `apt` to install it.
+
+Documentation is available via::
+
+    man lzma_alone
+
+
+
+Bintool: mkimage: Image generation for U-Boot
+---------------------------------------------
+
+This bintool supports running `mkimage` with some basic parameters as
+needed by binman.
+
+Normally binman uses the mkimage built by U-Boot. But when run outside the
+U-Boot build system, binman can use the version installed in your system.
+Support is provided for fetching this on Debian-like systems, using apt.
+
+
+
diff --git a/tools/binman/btool/_testing.py b/tools/binman/btool/_testing.py
new file mode 100644
index 0000000..4005e8a
--- /dev/null
+++ b/tools/binman/btool/_testing.py
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool used for testing
+
+This is not a real bintool, just one used for testing"""
+
+from binman import bintool
+
+# pylint: disable=C0103
+class Bintool_testing(bintool.Bintool):
+    """Bintool used for testing"""
+    def __init__(self, name):
+        super().__init__(name, 'testing')
+        self.present = False
+        self.install = False
+        self.disable = False
+
+    def is_present(self):
+        if self.present is None:
+            return super().is_present()
+        return self.present
+
+    def version(self):
+        return '123'
+
+    def fetch(self, method):
+        if self.disable:
+            return super().fetch(method)
+        if method == bintool.FETCH_BIN:
+            if self.install:
+                return self.apt_install('package')
+            return self.fetch_from_drive('junk')
+        if method == bintool.FETCH_BUILD:
+            return self.build_from_git('url', 'target', 'pathname')
+        return None
diff --git a/tools/binman/btool/cbfstool.py b/tools/binman/btool/cbfstool.py
new file mode 100644
index 0000000..29be2d8
--- /dev/null
+++ b/tools/binman/btool/cbfstool.py
@@ -0,0 +1,219 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for cbfstool
+
+cbfstool provides a number of features useful with Coreboot Filesystem binaries.
+
+Documentation is at https://www.coreboot.org/CBFS
+
+Source code is at https://github.com/coreboot/coreboot/blob/master/util/cbfstool/cbfstool.c
+
+Here is the help:
+
+cbfstool: Management utility for CBFS formatted ROM images
+
+USAGE:
+ cbfstool [-h]
+ cbfstool FILE COMMAND [-v] [PARAMETERS]...
+
+OPTIONs:
+  -H header_offset Do not search for header; use this offset*
+  -T               Output top-aligned memory address
+  -u               Accept short data; fill upward/from bottom
+  -d               Accept short data; fill downward/from top
+  -F               Force action
+  -g               Generate position and alignment arguments
+  -U               Unprocessed; don't decompress or make ELF
+  -v               Provide verbose output
+  -h               Display this help message
+
+COMMANDs:
+ add [-r image,regions] -f FILE -n NAME -t TYPE [-A hash] \
+        [-c compression] [-b base-address | -a alignment] \
+        [-p padding size] [-y|--xip if TYPE is FSP]       \
+        [-j topswap-size] (Intel CPUs only) [--ibb]
+        Add a component
+        -j valid size: 0x10000 0x20000 0x40000 0x80000 0x100000
+ add-payload [-r image,regions] -f FILE -n NAME [-A hash] \
+        [-c compression] [-b base-address] \
+        (linux specific: [-C cmdline] [-I initrd])
+        Add a payload to the ROM
+ add-stage [-r image,regions] -f FILE -n NAME [-A hash] \
+        [-c compression] [-b base] [-S section-to-ignore] \
+        [-a alignment] [-y|--xip] [-P page-size] [--ibb]
+        Add a stage to the ROM
+ add-flat-binary [-r image,regions] -f FILE -n NAME \
+        [-A hash] -l load-address -e entry-point \
+        [-c compression] [-b base]
+        Add a 32bit flat mode binary
+ add-int [-r image,regions] -i INTEGER -n NAME [-b base]
+ Add a raw 64-bit integer value
+ add-master-header [-r image,regions] \
+        [-j topswap-size] (Intel CPUs only)
+        Add a legacy CBFS master header
+ remove [-r image,regions] -n NAME
+ Remove a component
+ compact -r image,regions
+ Defragment CBFS image.
+ copy -r image,regions -R source-region
+ Create a copy (duplicate) cbfs instance in fmap
+ create -m ARCH -s size [-b bootblock offset] \
+        [-o CBFS offset] [-H header offset] [-B bootblock]
+        Create a legacy ROM file with CBFS master header*
+ create -M flashmap [-r list,of,regions,containing,cbfses]
+ Create a new-style partitioned firmware image
+ locate [-r image,regions] -f FILE -n NAME [-P page-size] \
+        [-a align] [-T]
+        Find a place for a file of that size
+ layout [-w]
+ List mutable (or, with -w, readable) image regions
+ print [-r image,regions]
+ Show the contents of the ROM
+ extract [-r image,regions] [-m ARCH] -n NAME -f FILE [-U]
+ Extracts a file from ROM
+ write [-F] -r image,regions -f file [-u | -d] [-i int]
+ Write file into same-size [or larger] raw region
+ read [-r fmap-region] -f file
+ Extract raw region contents into binary file
+ truncate [-r fmap-region]
+ Truncate CBFS and print new size on stdout
+ expand [-r fmap-region]
+ Expand CBFS to span entire region
+OFFSETs:
+  Numbers accompanying -b, -H, and -o switches* may be provided
+  in two possible formats: if their value is greater than
+  0x80000000, they are interpreted as a top-aligned x86 memory
+  address; otherwise, they are treated as an offset into flash.
+ARCHes:
+  arm64, arm, mips, ppc64, power8, riscv, x86, unknown
+TYPEs:
+ bootblock, cbfs header, stage, simple elf, fit, optionrom, bootsplash, raw,
+ vsa, mbi, microcode, fsp, mrc, cmos_default, cmos_layout, spd,
+ mrc_cache, mma, efi, struct, deleted, null
+
+* Note that these actions and switches are only valid when
+  working with legacy images whose structure is described
+  primarily by a CBFS master header. New-style images, in
+  contrast, exclusively make use of an FMAP to describe their
+  layout: this must minimally contain an 'FMAP' section
+  specifying the location of this FMAP itself and a 'COREBOOT'
+  section describing the primary CBFS. It should also be noted
+  that, when working with such images, the -F and -r switches
+  default to 'COREBOOT' for convenience, and both the -b switch to
+  CBFS operations and the output of the locate action become
+  relative to the selected CBFS region's lowest address.
+  The one exception to this rule is the top-aligned address,
+  which is always relative to the end of the entire image
+  rather than relative to the local region; this is true for
+  for both input (sufficiently large) and output (-T) data.
+
+
+Since binman has a native implementation of CBFS (see cbfs_util.py), we don't
+actually need this tool, except for sanity checks in the tests.
+"""
+
+from binman import bintool
+
+class Bintoolcbfstool(bintool.Bintool):
+    """Coreboot filesystem (CBFS) tool
+
+    This bintool supports creating new CBFS images and adding files to an
+    existing image, i.e. the features needed by binman.
+
+    It also supports fetching a binary cbfstool, since building it from source
+    is fairly slow.
+
+    Documentation about CBFS is at https://www.coreboot.org/CBFS
+    """
+    def __init__(self, name):
+        super().__init__(name, 'Manipulate CBFS files')
+
+    def create_new(self, cbfs_fname, size, arch='x86'):
+        """Create a new CBFS
+
+        Args:
+            cbfs_fname (str): Filename of CBFS to create
+            size (int): Size of CBFS in bytes
+            arch (str): Architecture for which this CBFS is intended
+
+        Returns:
+            str: Tool output
+        """
+        args = [cbfs_fname, 'create', '-s', f'{size:#x}', '-m', arch]
+        return self.run_cmd(*args)
+
+    # pylint: disable=R0913
+    def add_raw(self, cbfs_fname, name, fname, compress=None, base=None):
+        """Add a raw file to the CBFS
+
+        Args:
+            cbfs_fname (str): Filename of CBFS to create
+            name (str): Name to use inside the CBFS
+            fname (str): Filename of file to add
+            compress (str): Compression to use (cbfs_util.COMPRESS_NAMES) or
+                None for None
+            base (int): Address to place the file, or None for anywhere
+
+        Returns:
+            str: Tool output
+        """
+        args = [cbfs_fname,
+                'add',
+                '-n', name,
+                '-t', 'raw',
+                '-f', fname,
+                '-c', compress or 'none']
+        if base:
+            args += ['-b', f'{base:#x}']
+        return self.run_cmd(*args)
+
+    def add_stage(self, cbfs_fname, name, fname):
+        """Add a stage file to the CBFS
+
+        Args:
+            cbfs_fname (str): Filename of CBFS to create
+            name (str): Name to use inside the CBFS
+            fname (str): Filename of file to add
+
+        Returns:
+            str: Tool output
+        """
+        args = [cbfs_fname,
+                'add-stage',
+                '-n', name,
+                '-f', fname
+            ]
+        return self.run_cmd(*args)
+
+    def fail(self):
+        """Run cbfstool with invalid arguments to check it reports failure
+
+        This is really just a sanity check
+
+        Returns:
+            CommandResult: Result from running the bad command
+        """
+        args = ['missing-file', 'bad-command']
+        return self.run_cmd_result(*args)
+
+    def fetch(self, method):
+        """Fetch handler for cbfstool
+
+        This installs cbfstool by downloading from Google Drive.
+
+        Args:
+            method (FETCH_...): Method to use
+
+        Returns:
+            True if the file was fetched and now installed, None if a method
+            other than FETCH_BIN was requested
+
+        Raises:
+            ValueError: Fetching could not be completed
+        """
+        if method != bintool.FETCH_BIN:
+            return None
+        fname, tmpdir = self.fetch_from_drive(
+            '1IOnE0Qvy97d-0WOCwF64xBGpKSY2sMtJ')
+        return fname, tmpdir
diff --git a/tools/binman/btool/fiptool.py b/tools/binman/btool/fiptool.py
new file mode 100644
index 0000000..c6d71ce
--- /dev/null
+++ b/tools/binman/btool/fiptool.py
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for fiptool
+
+fiptool provides a way to package firmware in an ARM Trusted Firmware Firmware
+Image Package (ATF FIP) format. It is used with Trusted Firmware A, for example.
+
+Documentation is at:
+https://trustedfirmware-a.readthedocs.io/en/latest/getting_started/tools-build.html?highlight=fiptool#building-and-using-the-fip-tool
+
+Source code is at:
+https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git
+
+Here is the help:
+
+usage: fiptool [--verbose] <command> [<args>]
+Global options supported:
+  --verbose	Enable verbose output for all commands.
+
+Commands supported:
+  info		List images contained in FIP.
+  create	Create a new FIP with the given images.
+  update	Update an existing FIP with the given images.
+  unpack	Unpack images from FIP.
+  remove	Remove images from FIP.
+  version	Show fiptool version.
+  help		Show help for given command.
+
+"""
+
+from binman import bintool
+
+class Bintoolfiptool(bintool.Bintool):
+    """Image generation for ARM Trusted Firmware
+
+    This bintool supports running `fiptool` with some basic parameters as
+    needed by binman.
+
+    It also supports building fiptool from source.
+
+    fiptool provides a way to package firmware in an ARM Trusted Firmware
+    Firmware Image Package (ATF FIP) format. It is used with Trusted Firmware A,
+    for example.
+
+    See `TF-A FIP tool documentation`_ for more information.
+
+    .. _`TF-A FIP tool documentation`:
+        https://trustedfirmware-a.readthedocs.io/en/latest/getting_started/tools-build.html?highlight=fiptool#building-and-using-the-fip-tool
+    """
+    def __init__(self, name):
+        super().__init__(name, 'Manipulate ATF FIP files')
+
+    def info(self, fname):
+        """Get info on a FIP image
+
+        Args:
+            fname (str): Filename to check
+
+        Returns:
+            str: Tool output
+        """
+        args = ['info', fname]
+        return self.run_cmd(*args)
+
+    # pylint: disable=R0913
+    def create_new(self, fname, align, plat_toc_flags, fwu, tb_fw, blob_uuid,
+                   blob_file):
+        """Create a new FIP
+
+        Args:
+            fname (str): Filename to write to
+            align (int): Alignment to use for entries
+            plat_toc_flags (int): Flags to use for the TOC header
+            fwu (str): Filename for the fwu entry
+            tb_fw (str): Filename for the tb_fw entry
+            blob_uuid (str): UUID for the blob entry
+            blob_file (str): Filename for the blob entry
+
+        Returns:
+            str: Tool output
+        """
+        args = [
+            'create',
+            '--align', f'{align:x}',
+            '--plat-toc-flags', f'{plat_toc_flags:#x}',
+            '--fwu', fwu,
+            '--tb-fw', tb_fw,
+            '--blob', f'uuid={blob_uuid},file={blob_file}',
+            fname]
+        return self.run_cmd(*args)
+
+    def create_bad(self):
+        """Run fiptool with invalid arguments"""
+        args = ['create', '--fred']
+        return self.run_cmd_result(*args)
+
+    def fetch(self, method):
+        """Fetch handler for fiptool
+
+        This builds the tool from source
+
+        Returns:
+            tuple:
+                str: Filename of fetched file to copy to a suitable directory
+                str: Name of temp directory to remove, or None
+        """
+        if method != bintool.FETCH_BUILD:
+            return None
+        result = self.build_from_git(
+            'https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git',
+            'fiptool',
+            'tools/fiptool/fiptool')
+        return result
+
+    def version(self):
+        """Version handler for fiptool
+
+        Returns:
+            str: Version number of fiptool
+        """
+        out = self.run_cmd('version').strip()
+        return out or super().version()
diff --git a/tools/binman/btool/futility.py b/tools/binman/btool/futility.py
new file mode 100644
index 0000000..614daaa
--- /dev/null
+++ b/tools/binman/btool/futility.py
@@ -0,0 +1,178 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for futility
+
+futility (flash utility) is a tool for working with Chromium OS flash images.
+This implements just the features used by Binman.
+
+Documentation is at:
+   https://chromium.googlesource.com/chromiumos/platform/vboot/+/refs/heads/main/_vboot_reference/README
+
+Source code:
+   https://chromium.googlesource.com/chromiumos/platform/vboot/+/refs/heads/master/_vboot_reference/futility
+
+Here is the help:
+Usage: futility [options] COMMAND [args...]
+
+This is the unified firmware utility, which will eventually replace
+most of the distinct verified boot tools formerly produced by the
+vboot_reference package.
+
+When symlinked under the name of one of those previous tools, it should
+fully implement the original behavior. It can also be invoked directly
+as futility, followed by the original name as the first argument.
+
+Global options:
+
+  --vb1        Use only vboot v1.0 binary formats
+  --vb21       Use only vboot v2.1 binary formats
+  --debug      Be noisy about what's going on
+
+The following commands are built-in:
+
+  bdb                  Common boot flow utility
+  create               Create a keypair from an RSA .pem file
+  dump_fmap            Display FMAP contents from a firmware image
+  dump_kernel_config   Prints the kernel command line
+  gbb                  Manipulate the Google Binary Block (GBB)
+  gbb_utility          Legacy name for `gbb` command
+  help                 Show a bit of help (you're looking at it)
+  load_fmap            Replace the contents of specified FMAP areas
+  pcr                  Simulate a TPM PCR extension operation
+  show                 Display the content of various binary components
+  sign                 Sign / resign various binary components
+  update               Update system firmware
+  validate_rec_mrc     Validates content of Recovery MRC cache
+  vbutil_firmware      Verified boot firmware utility
+  vbutil_kernel        Creates, signs, and verifies the kernel partition
+  vbutil_key           Wraps RSA keys with vboot headers
+  vbutil_keyblock      Creates, signs, and verifies a keyblock
+  verify               Verify the signatures of various binary components
+  version              Show the futility source revision and build date
+"""
+
+from binman import bintool
+
+class Bintoolfutility(bintool.Bintool):
+    """Handles the 'futility' tool
+
+    futility (flash utility) is a tool for working with Chromium OS flash
+    images. This Bintool implements just the features used by Binman, related to
+    GBB creation and firmware signing.
+
+    A binary version of the tool can be fetched.
+
+    See `Chromium OS vboot documentation`_ for more information.
+
+    .. _`Chromium OS vboot documentation`:
+        https://chromium.googlesource.com/chromiumos/platform/vboot/+/refs/heads/main/_vboot_reference/README
+    """
+    def __init__(self, name):
+        super().__init__(name, 'Chromium OS firmware utility')
+
+    def gbb_create(self, fname, sizes):
+        """Create a new Google Binary Block
+
+        Args:
+            fname (str): Filename to write to
+            sizes (list of int): Sizes of each region:
+               hwid_size, rootkey_size, bmpfv_size, recoverykey_size
+
+        Returns:
+            str: Tool output
+        """
+        args = [
+            'gbb_utility',
+            '-c',
+            ','.join(['%#x' % size for size in sizes]),
+            fname
+            ]
+        return self.run_cmd(*args)
+
+    # pylint: disable=R0913
+    def gbb_set(self, fname, hwid, rootkey, recoverykey, flags, bmpfv):
+        """Set the parameters in a Google Binary Block
+
+        Args:
+            fname (str): Filename to update
+            hwid (str): Hardware ID to use
+            rootkey (str): Filename of root key, e.g. 'root_key.vbpubk'
+            recoverykey (str): Filename of recovery key,
+                e.g. 'recovery_key.vbpubk'
+            flags (int): GBB flags to use
+            bmpfv (str): Filename of firmware bitmaps (bmpblk file)
+
+        Returns:
+            str: Tool output
+        """
+        args = ['gbb_utility'
+            '-s',
+            f'--hwid={hwid}',
+            f'--rootkey={rootkey}',
+            f'--recoverykey={recoverykey}',
+            f'--flags={flags}',
+            f'--bmpfv={bmpfv}',
+            fname
+            ]
+        return self.run_cmd(*args)
+
+    def sign_firmware(self, vblock, keyblock, signprivate, version, firmware,
+                      kernelkey, flags):
+        """Sign firmware to create a vblock file
+
+        Args:
+            vblock (str): Filename to write the vblock to
+            keyblock (str): Filename of keyblock file
+            signprivate (str): Filename of private key
+            version (int): Version number
+            firmware (str): Filename of firmware binary to sign
+            kernelkey (str): Filename of kernel key
+            flags (int): Preamble flags
+
+        Returns:
+            str: Tool output
+        """
+        args = [
+            'vbutil_firmware',
+            '--vblock', vblock,
+            '--keyblock', keyblock,
+            '--signprivate', signprivate,
+            '--version', version,
+            '--fw', firmware,
+            '--kernelkey', kernelkey,
+            '--flags', flags
+            ]
+        return self.run_cmd(*args)
+
+    def fetch(self, method):
+        """Fetch handler for futility
+
+        This installs futility using a binary download.
+
+        Args:
+            method (FETCH_...): Method to use
+
+        Returns:
+            True if the file was fetched, None if a method other than FETCH_BIN
+            was requested
+
+        Raises:
+            ValueError: Fetching could not be completed
+        """
+        if method != bintool.FETCH_BIN:
+            return None
+        fname, tmpdir = self.fetch_from_drive(
+            '1hdsInzsE4aJbmBeJ663kYgjOQyW1I-E0')
+        return fname, tmpdir
+
+    def version(self):
+        """Version handler for futility
+
+        Returns:
+            str: Version string for futility
+        """
+        out = self.run_cmd('version').strip()
+        if not out:
+            return super().version()
+        return out
diff --git a/tools/binman/btool/ifwitool.py b/tools/binman/btool/ifwitool.py
new file mode 100644
index 0000000..96778fc
--- /dev/null
+++ b/tools/binman/btool/ifwitool.py
@@ -0,0 +1,166 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for ifwitool
+
+ifwitool provides a way to package firmware in an Intel Firmware Image (IFWI)
+file on some Intel SoCs, e.g. Apollo Lake.
+
+Documentation is not really available so far as I can tell
+
+Source code is at tools/ifwitool.c which is a cleaned-up version of
+https://github.com/coreboot/coreboot/blob/master/util/cbfstool/ifwitool.c
+
+Here is the help:
+
+ifwitool: Utility for IFWI manipulation
+
+USAGE:
+ /tmp/b/sandbox/tools/ifwitool [-h]
+ /tmp/b/sandbox/tools/ifwitool FILE COMMAND [PARAMETERS]
+
+COMMANDs:
+ add -f FILE -n NAME [-d -e ENTRY]
+ create -f FILE
+ delete -n NAME
+ extract -f FILE -n NAME [-d -e ENTRY]
+ print [-d]
+ replace -f FILE -n NAME [-d -e ENTRY]
+OPTIONs:
+ -f FILE : File to read/write/create/extract
+ -d      : Perform directory operation
+ -e ENTRY: Name of directory entry to operate on
+ -v      : Verbose level
+ -h      : Help message
+ -n NAME : Name of sub-partition to operate on
+
+NAME should be one of:
+SMIP(SMIP)
+RBEP(CSE_RBE)
+FTPR(CSE_BUP)
+UCOD(Microcode)
+IBBP(Bootblock)
+S_BPDT(S-BPDT)
+OBBP(OEM boot block)
+NFTP(CSE_MAIN)
+ISHP(ISH)
+DLMP(CSE_IDLM)
+IFP_OVERRIDE(IFP_OVERRIDE)
+DEBUG_TOKENS(Debug Tokens)
+UFS_PHY(UFS Phy)
+UFS_GPP(UFS GPP)
+PMCP(PMC firmware)
+IUNP(IUNIT)
+NVM_CONFIG(NVM Config)
+UEP(UEP)
+UFS_RATE_B(UFS Rate B Config)
+"""
+
+from binman import bintool
+
+class Bintoolifwitool(bintool.Bintool):
+    """Handles the 'ifwitool' tool
+
+    This bintool supports running `ifwitool` with some basic parameters as
+    needed by binman. It includes creating a file from a FIT as well as adding,
+    replacing, deleting and extracting subparts.
+
+    The tool is built as part of U-Boot, but a binary version can be fetched if
+    required.
+
+    ifwitool provides a way to package firmware in an Intel Firmware Image
+    (IFWI) file on some Intel SoCs, e.g. Apollo Lake.
+    """
+    def __init__(self, name):
+        super().__init__(name, 'Manipulate Intel IFWI files')
+
+    def create_ifwi(self, intel_fit, ifwi_file):
+        """Create a new IFWI file, using an existing Intel FIT binary
+
+        Args:
+            intel_fit (str): Filename of existing Intel FIT file
+            ifwi_file (str): Output filename to write the new IFWI to
+
+        Returns:
+            str: Tool output
+        """
+        args = [intel_fit, 'create', '-f', ifwi_file]
+        return self.run_cmd(*args)
+
+    def delete_subpart(self, ifwi_file, subpart):
+        """Delete a subpart within the IFWI file
+
+        Args:
+            ifwi_file (str): IFWI filename to update
+            subpart (str): Name of subpart to delete, e.g. 'OBBP'
+
+        Returns:
+            str: Tool output
+        """
+        args = [ifwi_file, 'delete', '-n', subpart]
+        return self.run_cmd(*args)
+
+    # pylint: disable=R0913
+    def add_subpart(self, ifwi_file, subpart, entry_name, infile,
+                    replace=False):
+        """Add or replace a subpart within the IFWI file
+
+        Args:
+            ifwi_file (str): IFWI filename to update
+            subpart (str): Name of subpart to add/replace
+            entry_name (str): Name of entry to add/replace
+            replace (bool): True to replace the existing entry, False to add a
+                new one
+
+        Returns:
+            str: Tool output
+        """
+        args = [
+            ifwi_file,
+            'replace' if replace else 'add',
+            '-n', subpart,
+            '-d', '-e', entry_name,
+            '-f', infile,
+            ]
+        return self.run_cmd(*args)
+
+    def extract(self, ifwi_file, subpart, entry_name, outfile):
+        """Extract a subpart from the IFWI file
+
+        Args:
+            ifwi_file (str): IFWI filename to extract from
+            subpart (str): Name of subpart to extract
+            entry_name (str): Name of entry to extract
+
+        Returns:
+            str: Tool output
+        """
+        args = [
+            ifwi_file,
+            'extract',
+            '-n', subpart,
+            '-d', '-e', entry_name,
+            '-f', outfile,
+            ]
+        return self.run_cmd(*args)
+
+    def fetch(self, method):
+        """Fetch handler for ifwitool
+
+        This installs ifwitool using a binary download.
+
+        Args:
+            method (FETCH_...): Method to use
+
+        Returns:
+            True if the file was fetched, None if a method other than FETCH_BIN
+            was requested
+
+        Raises:
+            ValueError: Fetching could not be completed
+        """
+        if method != bintool.FETCH_BIN:
+            return None
+        fname, tmpdir = self.fetch_from_drive(
+            '18JDghOxlt2Hcc5jv51O1t6uNVHQ0XKJS')
+        return fname, tmpdir
diff --git a/tools/binman/btool/lz4.py b/tools/binman/btool/lz4.py
new file mode 100644
index 0000000..d165f52
--- /dev/null
+++ b/tools/binman/btool/lz4.py
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for lz4
+
+lz4 allows compression and decompression of files.
+
+Documentation is available via::
+
+   man lz4
+
+Here is the help:
+
+*** LZ4 command line interface 64-bits v1.9.3, by Yann Collet ***
+Usage :
+      lz4 [arg] [input] [output]
+
+input   : a filename
+          with no FILE, or when FILE is - or stdin, read standard input
+Arguments :
+ -1     : Fast compression (default)
+ -9     : High compression
+ -d     : decompression (default for .lz4 extension)
+ -z     : force compression
+ -D FILE: use FILE as dictionary
+ -f     : overwrite output without prompting
+ -k     : preserve source files(s)  (default)
+--rm    : remove source file(s) after successful de/compression
+ -h/-H  : display help/long help and exit
+
+Advanced arguments :
+ -V     : display Version number and exit
+ -v     : verbose mode
+ -q     : suppress warnings; specify twice to suppress errors too
+ -c     : force write to standard output, even if it is the console
+ -t     : test compressed file integrity
+ -m     : multiple input files (implies automatic output filenames)
+ -r     : operate recursively on directories (sets also -m)
+ -l     : compress using Legacy format (Linux kernel compression)
+ -B#    : cut file into blocks of size # bytes [32+]
+                     or predefined block size [4-7] (default: 7)
+ -BI    : Block Independence (default)
+ -BD    : Block dependency (improves compression ratio)
+ -BX    : enable block checksum (default:disabled)
+--no-frame-crc : disable stream checksum (default:enabled)
+--content-size : compressed frame includes original size (default:not present)
+--list FILE : lists information about .lz4 files (useful for files compressed
+    with --content-size flag)
+--[no-]sparse  : sparse mode (default:enabled on file, disabled on stdout)
+--favor-decSpeed: compressed files decompress faster, but are less compressed
+--fast[=#]: switch to ultra fast compression level (default: 1)
+--best  : same as -12
+Benchmark arguments :
+ -b#    : benchmark file(s), using # compression level (default : 1)
+ -e#    : test all compression levels from -bX to # (default : 1)
+ -i#    : minimum evaluation time in seconds (default : 3s)
+"""
+
+import re
+import tempfile
+
+from binman import bintool
+from patman import tools
+
+# pylint: disable=C0103
+class Bintoollz4(bintool.Bintool):
+    """Compression/decompression using the LZ4 algorithm
+
+    This bintool supports running `lz4` to compress and decompress data, as
+    used by binman.
+
+    It is also possible to fetch the tool, which uses `apt` to install it.
+
+    Documentation is available via::
+
+        man lz4
+    """
+    def __init__(self, name):
+        super().__init__(name, 'lz4 compression')
+
+    def compress(self, indata):
+        """Compress data with lz4
+
+        Args:
+            indata (bytes): Data to compress
+
+        Returns:
+            bytes: Compressed data
+        """
+        with tempfile.NamedTemporaryFile(prefix='comp.tmp',
+                                         dir=tools.GetOutputDir()) as tmp:
+            tools.WriteFile(tmp.name, indata)
+            args = ['--no-frame-crc', '-B4', '-5', '-c', tmp.name]
+            return self.run_cmd(*args, binary=True)
+
+    def decompress(self, indata):
+        """Decompress data with lz4
+
+        Args:
+            indata (bytes): Data to decompress
+
+        Returns:
+            bytes: Decompressed data
+        """
+        with tempfile.NamedTemporaryFile(prefix='decomp.tmp',
+                                         dir=tools.GetOutputDir()) as inf:
+            tools.WriteFile(inf.name, indata)
+            args = ['-cd', inf.name]
+            return self.run_cmd(*args, binary=True)
+
+    def fetch(self, method):
+        """Fetch handler for lz4
+
+        This installs the lz4 package using the apt utility.
+
+        Args:
+            method (FETCH_...): Method to use
+
+        Returns:
+            True if the file was fetched and now installed, None if a method
+            other than FETCH_BIN was requested
+
+        Raises:
+            ValueError: Fetching could not be completed
+        """
+        if method != bintool.FETCH_BIN:
+            return None
+        return self.apt_install('lz4')
+
+    def version(self):
+        """Version handler
+
+        Returns:
+            str: Version number of lz4
+        """
+        out = self.run_cmd('-V').strip()
+        if not out:
+            return super().version()
+        m_version = re.match(r'.* (v[0-9.]*),.*', out)
+        return m_version.group(1) if m_version else out
diff --git a/tools/binman/btool/lzma_alone.py b/tools/binman/btool/lzma_alone.py
new file mode 100644
index 0000000..d7c62df
--- /dev/null
+++ b/tools/binman/btool/lzma_alone.py
@@ -0,0 +1,126 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for lzma_alone
+
+lzma_alone allows compression and decompression of files, using an older version
+of lzma.
+
+Documentation is available via::
+
+   man lzma_alone
+
+Here is the help:
+
+LZMA 9.22 beta : Igor Pavlov : Public domain : 2011-04-18
+
+Usage:  LZMA <e|d> inputFile outputFile [<switches>...]
+  e: encode file
+  d: decode file
+  b: Benchmark
+<Switches>
+  -a{N}:  set compression mode - [0, 1], default: 1 (max)
+  -d{N}:  set dictionary size - [12, 30], default: 23 (8MB)
+  -fb{N}: set number of fast bytes - [5, 273], default: 128
+  -mc{N}: set number of cycles for match finder
+  -lc{N}: set number of literal context bits - [0, 8], default: 3
+  -lp{N}: set number of literal pos bits - [0, 4], default: 0
+  -pb{N}: set number of pos bits - [0, 4], default: 2
+  -mf{MF_ID}: set Match Finder: [bt2, bt3, bt4, hc4], default: bt4
+  -mt{N}: set number of CPU threads
+  -eos:   write End Of Stream marker
+  -si:    read data from stdin
+  -so:    write data to stdout
+"""
+
+import re
+import tempfile
+
+from binman import bintool
+from patman import tools
+
+# pylint: disable=C0103
+class Bintoollzma_alone(bintool.Bintool):
+    """Compression/decompression using the LZMA algorithm
+
+    This bintool supports running `lzma_alone` to compress and decompress data,
+    as used by binman.
+
+    It is also possible to fetch the tool, which uses `apt` to install it.
+
+    Documentation is available via::
+
+        man lzma_alone
+    """
+    def __init__(self, name):
+        super().__init__(name, 'lzma_alone compression')
+
+    def compress(self, indata):
+        """Compress data with lzma_alone
+
+        Args:
+            indata (bytes): Data to compress
+
+        Returns:
+            bytes: Compressed data
+        """
+        with tempfile.NamedTemporaryFile(prefix='comp.tmp',
+                                         dir=tools.GetOutputDir()) as inf:
+            tools.WriteFile(inf.name, indata)
+            with tempfile.NamedTemporaryFile(prefix='compo.otmp',
+                                             dir=tools.GetOutputDir()) as outf:
+                args = ['e', inf.name, outf.name, '-lc1', '-lp0', '-pb0', '-d8']
+                self.run_cmd(*args, binary=True)
+                return tools.ReadFile(outf.name)
+
+    def decompress(self, indata):
+        """Decompress data with lzma_alone
+
+        Args:
+            indata (bytes): Data to decompress
+
+        Returns:
+            bytes: Decompressed data
+        """
+        with tempfile.NamedTemporaryFile(prefix='decomp.tmp',
+                                         dir=tools.GetOutputDir()) as inf:
+            tools.WriteFile(inf.name, indata)
+            with tempfile.NamedTemporaryFile(prefix='compo.otmp',
+                                             dir=tools.GetOutputDir()) as outf:
+                args = ['d', inf.name, outf.name]
+                self.run_cmd(*args, binary=True)
+                return tools.ReadFile(outf.name, binary=True)
+
+    def fetch(self, method):
+        """Fetch handler for lzma_alone
+
+        This installs the lzma-alone package using the apt utility.
+
+        Args:
+            method (FETCH_...): Method to use
+
+        Returns:
+            True if the file was fetched and now installed, None if a method
+            other than FETCH_BIN was requested
+
+        Raises:
+            ValueError: Fetching could not be completed
+        """
+        if method != bintool.FETCH_BIN:
+            return None
+        return self.apt_install('lzma-alone')
+
+    def version(self):
+        """Version handler
+
+        Returns:
+            str: Version number of lzma_alone
+        """
+        out = self.run_cmd_result('', raise_on_error=False).stderr.strip()
+        lines = out.splitlines()
+        if not lines:
+            return super().version()
+        out = lines[0]
+        # e.g. LZMA 9.22 beta : Igor Pavlov : Public domain : 2011-04-18
+        m_version = re.match(r'LZMA ([^:]*).*', out)
+        return m_version.group(1).strip() if m_version else out
diff --git a/tools/binman/btool/mkimage.py b/tools/binman/btool/mkimage.py
new file mode 100644
index 0000000..c85bfe0
--- /dev/null
+++ b/tools/binman/btool/mkimage.py
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Bintool implementation for mkimage"""
+
+import re
+
+from binman import bintool
+
+class Bintoolmkimage(bintool.Bintool):
+    """Image generation for U-Boot
+
+    This bintool supports running `mkimage` with some basic parameters as
+    needed by binman.
+
+    Normally binman uses the mkimage built by U-Boot. But when run outside the
+    U-Boot build system, binman can use the version installed in your system.
+    Support is provided for fetching this on Debian-like systems, using apt.
+    """
+    def __init__(self, name):
+        super().__init__(name, 'Generate image for U-Boot')
+
+    # pylint: disable=R0913
+    def run(self, reset_timestamp=False, output_fname=None, external=False,
+            pad=None, version=False):
+        """Run mkimage
+
+        Args:
+            reset_timestamp: True to update the timestamp in the FIT
+            output_fname: Output filename to write to
+            external: True to create an 'external' FIT, where the binaries are
+                located outside the main data structure
+            pad: Bytes to use for padding the FIT devicetree output. This allows
+                other things to be easily added later, if required, such as
+                signatures
+            version: True to get the mkimage version
+        """
+        args = []
+        if external:
+            args.append('-E')
+        if pad:
+            args += ['-p', f'{pad:x}']
+        if reset_timestamp:
+            args.append('-t')
+        if output_fname:
+            args += ['-F', output_fname]
+        if version:
+            args.append('-V')
+        return self.run_cmd(*args)
+
+    def fetch(self, method):
+        """Fetch handler for mkimage
+
+        This installs mkimage using the apt utility.
+
+        Args:
+            method (FETCH_...): Method to use
+
+        Returns:
+            True if the file was fetched and now installed, None if a method
+            other than FETCH_BIN was requested
+
+        Raises:
+            ValueError: Fetching could not be completed
+        """
+        if method != bintool.FETCH_BIN:
+            return None
+        return self.apt_install('u-boot-tools')
+
+    def version(self):
+        """Version handler for mkimage
+
+        Returns:
+            str: Version string for mkimage
+        """
+        out = self.run(version=True).strip()
+        if not out:
+            return super().version()
+        m_version = re.match(r'mkimage version (.*)', out)
+        return m_version.group(1) if m_version else out
diff --git a/tools/binman/cbfs_util.py b/tools/binman/cbfs_util.py
index 3997337..eea7868 100644
--- a/tools/binman/cbfs_util.py
+++ b/tools/binman/cbfs_util.py
@@ -20,6 +20,7 @@
 import struct
 import sys
 
+from binman import comp_util
 from binman import elf
 from patman import command
 from patman import tools
@@ -240,9 +241,9 @@
         """Handle decompressing data if necessary"""
         indata = self.data
         if self.compress == COMPRESS_LZ4:
-            data = tools.Decompress(indata, 'lz4', with_header=False)
+            data = comp_util.decompress(indata, 'lz4', with_header=False)
         elif self.compress == COMPRESS_LZMA:
-            data = tools.Decompress(indata, 'lzma', with_header=False)
+            data = comp_util.decompress(indata, 'lzma', with_header=False)
         else:
             data = indata
         self.memlen = len(data)
@@ -361,9 +362,9 @@
         elif self.ftype == TYPE_RAW:
             orig_data = data
             if self.compress == COMPRESS_LZ4:
-                data = tools.Compress(orig_data, 'lz4', with_header=False)
+                data = comp_util.compress(orig_data, 'lz4', with_header=False)
             elif self.compress == COMPRESS_LZMA:
-                data = tools.Compress(orig_data, 'lzma', with_header=False)
+                data = comp_util.compress(orig_data, 'lzma', with_header=False)
             self.memlen = len(orig_data)
             self.data_len = len(data)
             attr = struct.pack(ATTR_COMPRESSION_FORMAT,
@@ -861,27 +862,3 @@
                 val += data[:pos]
                 break
         return val.decode('utf-8')
-
-
-def cbfstool(fname, *cbfs_args, **kwargs):
-    """Run cbfstool with provided arguments
-
-    If the tool fails then this function raises an exception and prints out the
-    output and stderr.
-
-    Args:
-        fname: Filename of CBFS
-        *cbfs_args: List of arguments to pass to cbfstool
-
-    Returns:
-        CommandResult object containing the results
-    """
-    args = ['cbfstool', fname] + list(cbfs_args)
-    if kwargs.get('base') is not None:
-        args += ['-b', '%#x' % kwargs['base']]
-    result = command.RunPipe([args], capture=not VERBOSE,
-                             capture_stderr=not VERBOSE, raise_on_error=False)
-    if result.return_code:
-        print(result.stderr, file=sys.stderr)
-        raise Exception("Failed to run (error %d): '%s'" %
-                        (result.return_code, ' '.join(args)))
diff --git a/tools/binman/cbfs_util_test.py b/tools/binman/cbfs_util_test.py
index 2c62c8a..494f614 100755
--- a/tools/binman/cbfs_util_test.py
+++ b/tools/binman/cbfs_util_test.py
@@ -16,8 +16,10 @@
 import tempfile
 import unittest
 
+from binman import bintool
 from binman import cbfs_util
 from binman.cbfs_util import CbfsWriter
+from binman import comp_util
 from binman import elf
 from patman import test_util
 from patman import tools
@@ -45,18 +47,10 @@
         # compressing files
         tools.PrepareOutputDir(None)
 
-        cls.have_cbfstool = True
-        try:
-            tools.Run('which', 'cbfstool')
-        except:
-            cls.have_cbfstool = False
+        cls.cbfstool = bintool.Bintool.create('cbfstool')
+        cls.have_cbfstool = cls.cbfstool.is_present()
 
-        cls.have_lz4 = True
-        try:
-            tools.Run('lz4', '--no-frame-crc', '-c',
-                      tools.GetInputFilename('u-boot.bin'), binary=True)
-        except:
-            cls.have_lz4 = False
+        cls.have_lz4 = comp_util.HAVE_LZ4
 
     @classmethod
     def tearDownClass(cls):
@@ -177,19 +171,19 @@
         if not self.have_cbfstool or not self.have_lz4:
             return None
         cbfs_fname = os.path.join(self._indir, 'test.cbfs')
-        cbfs_util.cbfstool(cbfs_fname, 'create', '-m', arch, '-s', '%#x' % size)
+        self.cbfstool.create_new(cbfs_fname, size, arch)
         if base:
             base = [(1 << 32) - size + b for b in base]
-        cbfs_util.cbfstool(cbfs_fname, 'add', '-n', 'u-boot', '-t', 'raw',
-                           '-c', compress and compress[0] or 'none',
-                           '-f', tools.GetInputFilename(
-                               compress and 'compress' or 'u-boot.bin'),
-                           base=base[0] if base else None)
-        cbfs_util.cbfstool(cbfs_fname, 'add', '-n', 'u-boot-dtb', '-t', 'raw',
-                           '-c', compress and compress[1] or 'none',
-                           '-f', tools.GetInputFilename(
-                               compress and 'compress' or 'u-boot.dtb'),
-                           base=base[1] if base else None)
+        self.cbfstool.add_raw(
+            cbfs_fname, 'u-boot',
+            tools.GetInputFilename(compress and 'compress' or 'u-boot.bin'),
+            compress[0] if compress else None,
+            base[0] if base else None)
+        self.cbfstool.add_raw(
+            cbfs_fname, 'u-boot-dtb',
+            tools.GetInputFilename(compress and 'compress' or 'u-boot.dtb'),
+            compress[1] if compress else None,
+            base[1] if base else None)
         return cbfs_fname
 
     def _compare_expected_cbfs(self, data, cbfstool_fname):
@@ -223,18 +217,9 @@
         """Test failure to run cbfstool"""
         if not self.have_cbfstool:
             self.skipTest('No cbfstool available')
-        try:
-            # In verbose mode this test fails since stderr is not captured. Fix
-            # this by turning off verbosity.
-            old_verbose = cbfs_util.VERBOSE
-            cbfs_util.VERBOSE = False
-            with test_util.capture_sys_output() as (_stdout, stderr):
-                with self.assertRaises(Exception) as e:
-                    cbfs_util.cbfstool('missing-file', 'bad-command')
-        finally:
-            cbfs_util.VERBOSE = old_verbose
-        self.assertIn('Unknown command', stderr.getvalue())
-        self.assertIn('Failed to run', str(e.exception))
+        with self.assertRaises(ValueError) as exc:
+            self.cbfstool.fail()
+        self.assertIn('cbfstool missing-file bad-command', str(exc.exception))
 
     def test_cbfs_raw(self):
         """Test base handling of a Coreboot Filesystem (CBFS)"""
@@ -515,10 +500,8 @@
         # Compare against what cbfstool creates
         if self.have_cbfstool:
             cbfs_fname = os.path.join(self._indir, 'test.cbfs')
-            cbfs_util.cbfstool(cbfs_fname, 'create', '-m', 'x86', '-s',
-                               '%#x' % size)
-            cbfs_util.cbfstool(cbfs_fname, 'add-stage', '-n', 'u-boot',
-                               '-f', elf_fname)
+            self.cbfstool.create_new(cbfs_fname, size)
+            self.cbfstool.add_stage(cbfs_fname, 'u-boot', elf_fname)
             self._compare_expected_cbfs(data, cbfs_fname)
 
     def test_cbfs_raw_compress(self):
diff --git a/tools/binman/cmdline.py b/tools/binman/cmdline.py
index 6c68595..0626b85 100644
--- a/tools/binman/cmdline.py
+++ b/tools/binman/cmdline.py
@@ -105,6 +105,8 @@
             help='Use fake device tree contents (for testing only)')
     build_parser.add_argument('--fake-ext-blobs', action='store_true',
             help='Create fake ext blobs with dummy content (for testing only)')
+    build_parser.add_argument('--force-missing-bintools', type=str,
+            help='Comma-separated list of bintools to consider missing (for testing)')
     build_parser.add_argument('-i', '--image', type=str, action='append',
             help='Image filename to build (if not specified, build all)')
     build_parser.add_argument('-I', '--indir', action='append',
@@ -128,6 +130,9 @@
         help='Update an ELF file with the output dtb: infile,outfile,begin_sym,end_sym')
 
     subparsers.add_parser(
+        'bintool-docs', help='Write out bintool documentation (see bintool.rst)')
+
+    subparsers.add_parser(
         'entry-docs', help='Write out entry documentation (see entries.rst)')
 
     list_parser = subparsers.add_parser('ls', help='List files in an image')
@@ -167,4 +172,11 @@
     test_parser.add_argument('tests', nargs='*',
                              help='Test names to run (omit for all)')
 
+    tool_parser = subparsers.add_parser('tool', help='Check bintools')
+    tool_parser.add_argument('-l', '--list', action='store_true',
+                             help='List all known bintools')
+    tool_parser.add_argument('-f', '--fetch', action='store_true',
+                             help='fetch a bintool from a known location (or: all/missing)')
+    tool_parser.add_argument('bintools', type=str, nargs='*')
+
     return parser.parse_args(argv)
diff --git a/tools/binman/comp_util.py b/tools/binman/comp_util.py
new file mode 100644
index 0000000..dc76ada
--- /dev/null
+++ b/tools/binman/comp_util.py
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2022 Google LLC
+#
+"""Utilities to compress and decompress data"""
+
+import struct
+import tempfile
+
+from binman import bintool
+from patman import tools
+
+LZ4 = bintool.Bintool.create('lz4')
+HAVE_LZ4 = LZ4.is_present()
+
+LZMA_ALONE = bintool.Bintool.create('lzma_alone')
+HAVE_LZMA_ALONE = LZMA_ALONE.is_present()
+
+
+def compress(indata, algo, with_header=True):
+    """Compress some data using a given algorithm
+
+    Note that for lzma this uses an old version of the algorithm, not that
+    provided by xz.
+
+    This requires 'lz4' and 'lzma_alone' tools. It also requires an output
+    directory to be previously set up, by calling PrepareOutputDir().
+
+    Args:
+        indata (bytes): Input data to compress
+        algo (str): Algorithm to use ('none', 'lz4' or 'lzma')
+
+    Returns:
+        bytes: Compressed data
+    """
+    if algo == 'none':
+        return indata
+    if algo == 'lz4':
+        data = LZ4.compress(indata)
+    # cbfstool uses a very old version of lzma
+    elif algo == 'lzma':
+        data = LZMA_ALONE.compress(indata)
+    else:
+        raise ValueError("Unknown algorithm '%s'" % algo)
+    if with_header:
+        hdr = struct.pack('<I', len(data))
+        data = hdr + data
+    return data
+
+def decompress(indata, algo, with_header=True):
+    """Decompress some data using a given algorithm
+
+    Note that for lzma this uses an old version of the algorithm, not that
+    provided by xz.
+
+    This requires 'lz4' and 'lzma_alone' tools. It also requires an output
+    directory to be previously set up, by calling PrepareOutputDir().
+
+    Args:
+        indata (bytes): Input data to decompress
+        algo (str): Algorithm to use ('none', 'lz4' or 'lzma')
+
+    Returns:
+        bytes: Decompressed data
+    """
+    if algo == 'none':
+        return indata
+    if with_header:
+        data_len = struct.unpack('<I', indata[:4])[0]
+        indata = indata[4:4 + data_len]
+    if algo == 'lz4':
+        data = LZ4.decompress(indata)
+    elif algo == 'lzma':
+        data = LZMA_ALONE.decompress(indata)
+    else:
+        raise ValueError("Unknown algorithm '%s'" % algo)
+    return data
diff --git a/tools/binman/control.py b/tools/binman/control.py
index f4c1fd0..2daad05 100644
--- a/tools/binman/control.py
+++ b/tools/binman/control.py
@@ -14,6 +14,7 @@
 import sys
 from patman import tools
 
+from binman import bintool
 from binman import cbfs_util
 from binman import elf
 from patman import command
@@ -139,7 +140,7 @@
 
     Args:
         modules: List of Module objects to get docs for
-        test_missing: Used for testing only, to force an entry's documeentation
+        test_missing: Used for testing only, to force an entry's documentation
             to show as missing even if it is present. Should be set to None in
             normal use.
     """
@@ -147,6 +148,18 @@
     Entry.WriteDocs(modules, test_missing)
 
 
+def write_bintool_docs(modules, test_missing=None):
+    """Write out documentation for all bintools
+
+    Args:
+        modules: List of Module objects to get docs for
+        test_missing: Used for testing only, to force a bintool's documentation
+            to show as missing even if it is present. Should be set to None in
+            normal use.
+    """
+    bintool.Bintool.WriteDocs(modules, test_missing)
+
+
 def ListEntries(image_fname, entry_paths):
     """List the entries in an image
 
@@ -487,6 +500,7 @@
     # without changing the device-tree size, thus ensuring that our
     # entry offsets remain the same.
     for image in images.values():
+        image.CollectBintools()
         image.ExpandEntries()
         if update_fdt:
             image.AddMissingProperties(True)
@@ -578,11 +592,17 @@
     image.CheckFakedBlobs(faked_list)
     if faked_list:
         tout.Warning(
-            "Image '%s:%s' has faked external blobs and is non-functional: %s" %
-            (image.name, image.image_name,
-             ' '.join([os.path.basename(e.GetDefaultFilename())
-                       for e in faked_list])))
-    return bool(missing_list) or bool(faked_list)
+            "Image '%s' has faked external blobs and is non-functional: %s" %
+            (image.name, ' '.join([os.path.basename(e.GetDefaultFilename())
+                                   for e in faked_list])))
+    missing_bintool_list = []
+    image.check_missing_bintools(missing_bintool_list)
+    if missing_bintool_list:
+        tout.Warning(
+            "Image '%s' has missing bintools and is non-functional: %s" %
+            (image.name, ' '.join([os.path.basename(bintool.name)
+                                   for bintool in missing_bintool_list])))
+    return any([missing_list, faked_list, missing_bintool_list])
 
 
 def Binman(args):
@@ -607,7 +627,7 @@
     from binman.image import Image
     from binman import state
 
-    if args.cmd in ['ls', 'extract', 'replace']:
+    if args.cmd in ['ls', 'extract', 'replace', 'tool']:
         try:
             tout.Init(args.verbosity)
             tools.PrepareOutputDir(None)
@@ -622,6 +642,19 @@
                 ReplaceEntries(args.image, args.filename, args.indir, args.paths,
                                do_compress=not args.compressed,
                                allow_resize=not args.fix_size, write_map=args.map)
+
+            if args.cmd == 'tool':
+                tools.SetToolPaths(args.toolpath)
+                if args.list:
+                    bintool.Bintool.list_all()
+                elif args.fetch:
+                    if not args.bintools:
+                        raise ValueError(
+                            "Please specify bintools to fetch or 'all' or 'missing'")
+                    bintool.Bintool.fetch_tools(bintool.FETCH_ANY,
+                                                args.bintools)
+                else:
+                    raise ValueError("Invalid arguments to 'tool' subcommand")
         except:
             raise
         finally:
@@ -674,6 +707,9 @@
                 # Set the first image to timeout, used in testThreadTimeout()
                 images[list(images.keys())[0]].test_section_timeout = True
             invalid = False
+            bintool.Bintool.set_missing_list(
+                args.force_missing_bintools.split(',') if
+                args.force_missing_bintools else None)
             for image in images.values():
                 invalid |= ProcessImage(image, args.update_fdt, args.map,
                                        allow_missing=args.allow_missing,
diff --git a/tools/binman/elf_test.py b/tools/binman/elf_test.py
index ac69a95..f727258 100644
--- a/tools/binman/elf_test.py
+++ b/tools/binman/elf_test.py
@@ -99,17 +99,17 @@
         """Test that we can obtain a symbol from the ELF file"""
         fname = self.ElfTestFile('u_boot_ucode_ptr')
         syms = elf.GetSymbols(fname, [])
-        self.assertIn('.ucode', syms)
+        self.assertIn('_dt_ucode_base_size', syms)
 
     def testRegexSymbols(self):
         """Test that we can obtain from the ELF file by regular expression"""
         fname = self.ElfTestFile('u_boot_ucode_ptr')
         syms = elf.GetSymbols(fname, ['ucode'])
-        self.assertIn('.ucode', syms)
+        self.assertIn('_dt_ucode_base_size', syms)
         syms = elf.GetSymbols(fname, ['missing'])
-        self.assertNotIn('.ucode', syms)
+        self.assertNotIn('_dt_ucode_base_size', syms)
         syms = elf.GetSymbols(fname, ['missing', 'ucode'])
-        self.assertIn('.ucode', syms)
+        self.assertIn('_dt_ucode_base_size', syms)
 
     def testMissingFile(self):
         """Test that a missing file is detected"""
diff --git a/tools/binman/entry.py b/tools/binman/entry.py
index bac90bb..08770ec 100644
--- a/tools/binman/entry.py
+++ b/tools/binman/entry.py
@@ -10,6 +10,8 @@
 import pathlib
 import sys
 
+from binman import bintool
+from binman import comp_util
 from dtoc import fdt_util
 from patman import tools
 from patman.tools import ToHex, ToHexSize
@@ -74,6 +76,8 @@
         allow_fake: Allow creating a dummy fake file if the blob file is not
             available. This is mainly used for testing.
         external: True if this entry contains an external binary blob
+        bintools: Bintools used by this entry (only populated for Image)
+        missing_bintools: List of missing bintools for this entry
     """
     def __init__(self, section, etype, node, name_prefix=''):
         # Put this here to allow entry-docs and help to work without libfdt
@@ -105,6 +109,8 @@
         self.external = False
         self.allow_missing = False
         self.allow_fake = False
+        self.bintools = {}
+        self.missing_bintools = []
 
     @staticmethod
     def FindEntryClass(etype, expanded):
@@ -960,7 +966,7 @@
         Args:
             allow_fake: True if allowed, False if not allowed
         """
-        pass
+        self.allow_fake = allow_fake
 
     def CheckMissing(self, missing_list):
         """Check if any entries in this section have missing external blobs
@@ -1011,6 +1017,24 @@
         """
         return self.allow_missing
 
+    def record_missing_bintool(self, bintool):
+        """Record a missing bintool that was needed to produce this entry
+
+        Args:
+            bintool (Bintool): Bintool that was missing
+        """
+        self.missing_bintools.append(bintool)
+
+    def check_missing_bintools(self, missing_list):
+        """Check if any entries in this section have missing bintools
+
+        If there are missing bintools, these are added to the list
+
+        Args:
+            missing_list: List of Bintool objects to be added to
+        """
+        missing_list += self.missing_bintools
+
     def GetHelpTags(self):
         """Get the tags use for missing-blob help
 
@@ -1031,7 +1055,7 @@
         self.uncomp_data = indata
         if self.compress != 'none':
             self.uncomp_size = len(indata)
-        data = tools.Compress(indata, self.compress)
+        data = comp_util.compress(indata, self.compress)
         return data
 
     @classmethod
@@ -1065,3 +1089,22 @@
                 value: Help text
         """
         pass
+
+    def AddBintools(self, tools):
+        """Add the bintools used by this entry type
+
+        Args:
+            tools (dict of Bintool):
+        """
+        pass
+
+    @classmethod
+    def AddBintool(self, tools, name):
+        """Add a new bintool to the tools used by this etype
+
+        Args:
+            name: Name of the tool
+        """
+        btool = bintool.Bintool.create(name)
+        tools[name] = btool
+        return btool
diff --git a/tools/binman/etype/blob_ext_list.py b/tools/binman/etype/blob_ext_list.py
index 136ae81..29c9092 100644
--- a/tools/binman/etype/blob_ext_list.py
+++ b/tools/binman/etype/blob_ext_list.py
@@ -37,6 +37,7 @@
         missing = False
         pathnames = []
         for fname in self._filenames:
+            fname = self.check_fake_fname(fname)
             pathname = tools.GetInputFilename(
                 fname, self.external and self.section.GetAllowMissing())
             # Allow the file to be missing
diff --git a/tools/binman/etype/fit.py b/tools/binman/etype/fit.py
index b41187d..6e5f020 100644
--- a/tools/binman/etype/fit.py
+++ b/tools/binman/etype/fit.py
@@ -134,6 +134,7 @@
                 self._fdts = fdts.split()
         self._fit_default_dt = self.GetEntryArgsOrProps([EntryArg('default-dt',
                                                                   str)])[0]
+        self.mkimage = None
 
     def ReadNode(self):
         self.ReadEntries()
@@ -250,13 +251,21 @@
         tools.WriteFile(input_fname, data)
         tools.WriteFile(output_fname, data)
 
-        args = []
+        args = {}
         ext_offset = self._fit_props.get('fit,external-offset')
         if ext_offset is not None:
-            args += ['-E', '-p', '%x' % fdt_util.fdt32_to_cpu(ext_offset.value)]
-        tools.Run('mkimage', '-t', '-F', output_fname, *args)
+            args = {
+                'external': True,
+                'pad': fdt_util.fdt32_to_cpu(ext_offset.value)
+                }
+        if self.mkimage.run(reset_timestamp=True, output_fname=output_fname,
+                            **args) is not None:
+            self.SetContents(tools.ReadFile(output_fname))
+        else:
+            # Bintool is missing; just use empty data as the output
+            self.record_missing_bintool(self.mkimage)
+            self.SetContents(tools.GetBytes(0, 1024))
 
-        self.SetContents(tools.ReadFile(output_fname))
         return True
 
     def _BuildInput(self, fdt):
@@ -295,3 +304,6 @@
     def SetAllowMissing(self, allow_missing):
         for section in self._fit_sections.values():
             section.SetAllowMissing(allow_missing)
+
+    def AddBintools(self, tools):
+        self.mkimage = self.AddBintool(tools, 'mkimage')
diff --git a/tools/binman/etype/gbb.py b/tools/binman/etype/gbb.py
index 41554eb..ca8af1b 100644
--- a/tools/binman/etype/gbb.py
+++ b/tools/binman/etype/gbb.py
@@ -77,20 +77,27 @@
         bmpfv_size = gbb_size - 0x2180
         if bmpfv_size < 0:
             self.Raise('GBB is too small (minimum 0x2180 bytes)')
-        sizes = [0x100, 0x1000, bmpfv_size, 0x1000]
-        sizes = ['%#x' % size for size in sizes]
         keydir = tools.GetInputFilename(self.keydir)
-        gbb_set_command = [
-            'gbb_utility', '-s',
-            '--hwid=%s' % self.hardware_id,
-            '--rootkey=%s/root_key.vbpubk' % keydir,
-            '--recoverykey=%s/recovery_key.vbpubk' % keydir,
-            '--flags=%d' % self.gbb_flags,
-            '--bmpfv=%s' % tools.GetInputFilename(self.bmpblk),
-            fname]
 
-        tools.Run('futility', 'gbb_utility', '-c', ','.join(sizes), fname)
-        tools.Run('futility', *gbb_set_command)
+        stdout = self.futility.gbb_create(
+            fname, [0x100, 0x1000, bmpfv_size, 0x1000])
+        if stdout is not None:
+            stdout = self.futility.gbb_set(
+                fname,
+                hwid=self.hardware_id,
+                rootkey='%s/root_key.vbpubk' % keydir,
+                recoverykey='%s/recovery_key.vbpubk' % keydir,
+                flags=self.gbb_flags,
+                bmpfv=tools.GetInputFilename(self.bmpblk))
 
-        self.SetContents(tools.ReadFile(fname))
+        if stdout is not None:
+            self.SetContents(tools.ReadFile(fname))
+        else:
+            # Bintool is missing; just use the required amount of zero data
+            self.record_missing_bintool(self.futility)
+            self.SetContents(tools.GetBytes(0, gbb_size))
+
         return True
+
+    def AddBintools(self, tools):
+        self.futility = self.AddBintool(tools, 'futility')
diff --git a/tools/binman/etype/intel_ifwi.py b/tools/binman/etype/intel_ifwi.py
index ecbd78d..ed14046 100644
--- a/tools/binman/etype/intel_ifwi.py
+++ b/tools/binman/etype/intel_ifwi.py
@@ -59,15 +59,23 @@
         if self._convert_fit:
             inname = self._pathname
             outname = tools.GetOutputFilename('ifwi.bin')
-            tools.RunIfwiTool(inname, tools.CMD_CREATE, outname)
+            if self.ifwitool.create_ifwi(inname, outname) is None:
+                # Bintool is missing; just create a zeroed ifwi.bin
+                self.record_missing_bintool(self.ifwitool)
+                self.SetContents(tools.GetBytes(0, 1024))
+
             self._filename = 'ifwi.bin'
             self._pathname = outname
         else:
             # Provide a different code path here to ensure we have test coverage
             outname = self._pathname
 
-        # Delete OBBP if it is there, then add the required new items.
-        tools.RunIfwiTool(outname, tools.CMD_DELETE, subpart='OBBP')
+        # Delete OBBP if it is there, then add the required new items
+        if self.ifwitool.delete_subpart(outname, 'OBBP') is None:
+            # Bintool is missing; just use zero data
+            self.record_missing_bintool(self.ifwitool)
+            self.SetContents(tools.GetBytes(0, 1024))
+            return True
 
         for entry in self._ifwi_entries.values():
             # First get the input data and put it in a file
@@ -76,9 +84,11 @@
             input_fname = tools.GetOutputFilename('input.%s' % uniq)
             tools.WriteFile(input_fname, data)
 
-            tools.RunIfwiTool(outname,
-                tools.CMD_REPLACE if entry._ifwi_replace else tools.CMD_ADD,
-                input_fname, entry._ifwi_subpart, entry._ifwi_entry_name)
+            # At this point we know that ifwitool is present, so we don't need
+            # to check for None here
+            self.ifwitool.add_subpart(
+                outname, entry._ifwi_subpart, entry._ifwi_entry_name,
+                input_fname, entry._ifwi_replace)
 
         self.ReadBlobContents()
         return True
@@ -132,3 +142,6 @@
         if not self.missing:
             for entry in self._ifwi_entries.values():
                 entry.WriteSymbols(self)
+
+    def AddBintools(self, tools):
+        self.ifwitool = self.AddBintool(tools, 'ifwitool')
diff --git a/tools/binman/etype/mkimage.py b/tools/binman/etype/mkimage.py
index 1903987..201ee4b 100644
--- a/tools/binman/etype/mkimage.py
+++ b/tools/binman/etype/mkimage.py
@@ -51,8 +51,14 @@
         input_fname = tools.GetOutputFilename('mkimage.%s' % uniq)
         tools.WriteFile(input_fname, data)
         output_fname = tools.GetOutputFilename('mkimage-out.%s' % uniq)
-        tools.Run('mkimage', '-d', input_fname, *self._args, output_fname)
-        self.SetContents(tools.ReadFile(output_fname))
+        if self.mkimage.run_cmd('-d', input_fname, *self._args,
+                                output_fname) is not None:
+            self.SetContents(tools.ReadFile(output_fname))
+        else:
+            # Bintool is missing; just use the input data as the output
+            self.record_missing_bintool(self.mkimage)
+            self.SetContents(data)
+
         return True
 
     def ReadEntries(self):
@@ -81,3 +87,6 @@
         """
         for entry in self._mkimage_entries.values():
             entry.CheckFakedBlobs(faked_blobs_list)
+
+    def AddBintools(self, tools):
+        self.mkimage = self.AddBintool(tools, 'mkimage')
diff --git a/tools/binman/etype/section.py b/tools/binman/etype/section.py
index 7a55d03..bb375e9 100644
--- a/tools/binman/etype/section.py
+++ b/tools/binman/etype/section.py
@@ -13,6 +13,7 @@
 import re
 import sys
 
+from binman import comp_util
 from binman.entry import Entry
 from binman import state
 from dtoc import fdt_util
@@ -775,7 +776,7 @@
         data = parent_data[offset:offset + child.size]
         if decomp:
             indata = data
-            data = tools.Decompress(indata, child.compress)
+            data = comp_util.decompress(indata, child.compress)
             if child.uncomp_size:
                 tout.Info("%s: Decompressing data size %#x with algo '%s' to data size %#x" %
                             (child.GetPath(), len(indata), child.compress,
@@ -805,6 +806,7 @@
         Args:
             allow_fake_blob: True if allowed, False if not allowed
         """
+        super().SetAllowFakeBlob(allow_fake)
         for entry in self._entries.values():
             entry.SetAllowFakeBlob(allow_fake)
 
@@ -830,6 +832,17 @@
         for entry in self._entries.values():
             entry.CheckFakedBlobs(faked_blobs_list)
 
+    def check_missing_bintools(self, missing_list):
+        """Check if any entries in this section have missing bintools
+
+        If there are missing bintools, these are added to the list
+
+        Args:
+            missing_list: List of Bintool objects to be added to
+        """
+        for entry in self._entries.values():
+            entry.check_missing_bintools(missing_list)
+
     def _CollectEntries(self, entries, entries_by_name, add_entry):
         """Collect all the entries in an section
 
@@ -879,3 +892,7 @@
     def CheckAltFormats(self, alt_formats):
         for entry in self._entries.values():
             entry.CheckAltFormats(alt_formats)
+
+    def AddBintools(self, tools):
+        for entry in self._entries.values():
+            entry.AddBintools(tools)
diff --git a/tools/binman/etype/vblock.py b/tools/binman/etype/vblock.py
index c0a6a28..8bbba27 100644
--- a/tools/binman/etype/vblock.py
+++ b/tools/binman/etype/vblock.py
@@ -38,6 +38,7 @@
     """
     def __init__(self, section, etype, node):
         super().__init__(section, etype, node)
+        self.futility = None
         (self.keydir, self.keyblock, self.signprivate, self.version,
          self.kernelkey, self.preamble_flags) = self.GetEntryArgsOrProps([
             EntryArg('keydir', str),
@@ -68,19 +69,21 @@
         input_fname = tools.GetOutputFilename('input.%s' % uniq)
         tools.WriteFile(input_fname, input_data)
         prefix = self.keydir + '/'
-        args = [
-            'vbutil_firmware',
-            '--vblock', output_fname,
-            '--keyblock', prefix + self.keyblock,
-            '--signprivate', prefix + self.signprivate,
-            '--version', '%d' % self.version,
-            '--fv', input_fname,
-            '--kernelkey', prefix + self.kernelkey,
-            '--flags', '%d' % self.preamble_flags,
-        ]
-        #out.Notice("Sign '%s' into %s" % (', '.join(self.value), self.label))
-        stdout = tools.Run('futility', *args)
-        return tools.ReadFile(output_fname)
+        stdout = self.futility.sign_firmware(
+            vblock=output_fname,
+            keyblock=prefix + self.keyblock,
+            signprivate=prefix + self.signprivate,
+            version=f'{self.version}',
+            firmware=input_fname,
+            kernelkey=prefix + self.kernelkey,
+            flags=f'{self.preamble_flags}')
+        if stdout is not None:
+            data = tools.ReadFile(output_fname)
+        else:
+            # Bintool is missing; just use 4KB of zero data
+            self.record_missing_bintool(self.futility)
+            data = tools.GetBytes(0, 4096)
+        return data
 
     def ObtainContents(self):
         data = self.GetVblock(False)
@@ -93,3 +96,6 @@
         # The blob may have changed due to WriteSymbols()
         data = self.GetVblock(True)
         return self.ProcessContentsUpdate(data)
+
+    def AddBintools(self, tools):
+        self.futility = self.AddBintool(tools, 'futility')
diff --git a/tools/binman/fip_util.py b/tools/binman/fip_util.py
index 5f7dbc0..868d0b6 100755
--- a/tools/binman/fip_util.py
+++ b/tools/binman/fip_util.py
@@ -623,31 +623,5 @@
     return 0
 
 
-def fiptool(fname, *fip_args):
-    """Run fiptool with provided arguments
-
-    If the tool fails then this function raises an exception and prints out the
-    output and stderr.
-
-    Args:
-        fname (str): Filename of FIP
-        *fip_args: List of arguments to pass to fiptool
-
-    Returns:
-        CommandResult: object containing the results
-
-    Raises:
-        ValueError: the tool failed to run
-    """
-    args = ['fiptool', fname] + list(fip_args)
-    result = command.RunPipe([args], capture=not VERBOSE,
-                             capture_stderr=not VERBOSE, raise_on_error=False)
-    if result.return_code:
-        print(result.stderr, file=sys.stderr)
-        raise ValueError("Failed to run (error %d): '%s'" %
-                         (result.return_code, ' '.join(args)))
-    return result
-
-
 if __name__ == "__main__":
     sys.exit(main(sys.argv[1:], OUR_FILE))  # pragma: no cover
diff --git a/tools/binman/fip_util_test.py b/tools/binman/fip_util_test.py
index 4839b29..4d2093b 100755
--- a/tools/binman/fip_util_test.py
+++ b/tools/binman/fip_util_test.py
@@ -22,13 +22,11 @@
 # pylint: disable=C0413
 from patman import test_util
 from patman import tools
-import fip_util
+from binman import bintool
+from binman import fip_util
 
-HAVE_FIPTOOL = True
-try:
-    tools.Run('which', 'fiptool')
-except ValueError:
-    HAVE_FIPTOOL = False
+FIPTOOL = bintool.Bintool.create('fiptool')
+HAVE_FIPTOOL = FIPTOOL.is_present()
 
 # pylint: disable=R0902,R0904
 class TestFip(unittest.TestCase):
@@ -286,13 +284,13 @@
         data = fip.get_data()
         fname = tools.GetOutputFilename('data.fip')
         tools.WriteFile(fname, data)
-        result = fip_util.fiptool('info', fname)
+        result = FIPTOOL.info(fname)
         self.assertEqual(
             '''Firmware Updater NS_BL2U: offset=0xB0, size=0x7, cmdline="--fwu"
 Trusted Boot Firmware BL2: offset=0xC0, size=0xE, cmdline="--tb-fw"
 00010203-0405-0607-0809-0A0B0C0D0E0F: offset=0xD0, size=0xE, cmdline="--blob"
 ''',
-            result.stdout)
+            result)
 
     fwu_data = b'my data'
     tb_fw_data = b'some more data'
@@ -315,11 +313,7 @@
 
         fname = tools.GetOutputFilename('data.fip')
         uuid = 'e3b78d9e-4a64-11ec-b45c-fba2b9b49788'
-        fip_util.fiptool('create', '--align', '8', '--plat-toc-flags', '0x123',
-                         '--fwu', fwu,
-                         '--tb-fw', tb_fw,
-                         '--blob', f'uuid={uuid},file={other_fw}',
-                          fname)
+        FIPTOOL.create_new(fname, 8, 0x123, fwu, tb_fw, uuid, other_fw)
 
         return fip_util.FipReader(tools.ReadFile(fname))
 
@@ -396,9 +390,8 @@
         """Check some error reporting from fiptool"""
         with self.assertRaises(Exception) as err:
             with test_util.capture_sys_output():
-                fip_util.fiptool('create', '--fred')
-        self.assertIn("Failed to run (error 1): 'fiptool create --fred'",
-                      str(err.exception))
+                FIPTOOL.create_bad()
+        self.assertIn("unrecognized option '--fred'", str(err.exception))
 
 
 if __name__ == '__main__':
diff --git a/tools/binman/ftest.py b/tools/binman/ftest.py
index f4ff7b6..ca200ae 100644
--- a/tools/binman/ftest.py
+++ b/tools/binman/ftest.py
@@ -17,9 +17,13 @@
 import sys
 import tempfile
 import unittest
+import unittest.mock
+import urllib.error
 
+from binman import bintool
 from binman import cbfs_util
 from binman import cmdline
+from binman import comp_util
 from binman import control
 from binman import elf
 from binman import elf_test
@@ -193,13 +197,7 @@
 
         TestFunctional._MakeInputFile('env.txt', ENV_DATA)
 
-        # Travis-CI may have an old lz4
-        cls.have_lz4 = True
-        try:
-            tools.Run('lz4', '--no-frame-crc', '-c',
-                      os.path.join(cls._indir, 'u-boot.bin'), binary=True)
-        except:
-            cls.have_lz4 = False
+        cls.have_lz4 = comp_util.HAVE_LZ4
 
     @classmethod
     def tearDownClass(cls):
@@ -312,7 +310,8 @@
                     entry_args=None, images=None, use_real_dtb=False,
                     use_expanded=False, verbosity=None, allow_missing=False,
                     allow_fake_blobs=False, extra_indirs=None, threads=None,
-                    test_section_timeout=False, update_fdt_in_elf=None):
+                    test_section_timeout=False, update_fdt_in_elf=None,
+                    force_missing_bintools=''):
         """Run binman with a given test file
 
         Args:
@@ -341,6 +340,8 @@
             test_section_timeout: True to force the first time to timeout, as
                 used in testThreadTimeout()
             update_fdt_in_elf: Value to pass with --update-fdt-in-elf=xxx
+            force_missing_tools (str): comma-separated list of bintools to
+                regard as missing
 
         Returns:
             int return code, 0 on success
@@ -375,6 +376,8 @@
             args.append('-M')
         if allow_fake_blobs:
             args.append('--fake-ext-blobs')
+        if force_missing_bintools:
+            args += ['--force-missing-bintools', force_missing_bintools]
         if update_fdt_in_elf:
             args += ['--update-fdt-in-elf', update_fdt_in_elf]
         if images:
@@ -1715,6 +1718,18 @@
         self.assertIn("Node '/binman/gbb': GBB must have a fixed size",
                       str(e.exception))
 
+    def testGbbMissing(self):
+        """Test that binman still produces an image if futility is missing"""
+        entry_args = {
+            'keydir': 'devkeys',
+        }
+        with test_util.capture_sys_output() as (_, stderr):
+            self._DoTestFile('071_gbb.dts', force_missing_bintools='futility',
+                             entry_args=entry_args)
+        err = stderr.getvalue()
+        self.assertRegex(err,
+                         "Image 'main-section'.*missing bintools.*: futility")
+
     def _HandleVblockCommand(self, pipe_list):
         """Fake calls to the futility utility
 
@@ -1800,6 +1815,19 @@
         expected_hashval = m.digest()
         self.assertEqual(expected_hashval, hashval)
 
+    def testVblockMissing(self):
+        """Test that binman still produces an image if futility is missing"""
+        entry_args = {
+            'keydir': 'devkeys',
+        }
+        with test_util.capture_sys_output() as (_, stderr):
+            self._DoTestFile('074_vblock.dts',
+                             force_missing_bintools='futility',
+                             entry_args=entry_args)
+        err = stderr.getvalue()
+        self.assertRegex(err,
+                         "Image 'main-section'.*missing bintools.*: futility")
+
     def testTpl(self):
         """Test that an image with TPL and its device tree can be created"""
         # ELF file with a '__bss_size' symbol
@@ -1923,7 +1951,7 @@
             self._ResetDtbs()
 
     def _decompress(self, data):
-        return tools.Decompress(data, 'lz4')
+        return comp_util.decompress(data, 'lz4')
 
     def testCompress(self):
         """Test compression of blobs"""
@@ -2311,8 +2339,8 @@
         # We expect to find the TPL wil in subpart IBBP entry IBBL
         image_fname = tools.GetOutputFilename('image.bin')
         tpl_fname = tools.GetOutputFilename('tpl.out')
-        tools.RunIfwiTool(image_fname, tools.CMD_EXTRACT, fname=tpl_fname,
-                          subpart='IBBP', entry_name='IBBL')
+        ifwitool = bintool.Bintool.create('ifwitool')
+        ifwitool.extract(image_fname, 'IBBP', 'IBBL', tpl_fname)
 
         tpl_data = tools.ReadFile(tpl_fname)
         self.assertEqual(U_BOOT_TPL_DATA, tpl_data[:len(U_BOOT_TPL_DATA)])
@@ -2337,6 +2365,16 @@
         self.assertIn('Could not complete processing of contents',
                       str(e.exception))
 
+    def testIfwiMissing(self):
+        """Test that binman still produces an image if ifwitool is missing"""
+        self._SetupIfwi('fitimage.bin')
+        with test_util.capture_sys_output() as (_, stderr):
+            self._DoTestFile('111_x86_rom_ifwi.dts',
+                             force_missing_bintools='ifwitool')
+        err = stderr.getvalue()
+        self.assertRegex(err,
+                         "Image 'main-section'.*missing bintools.*: ifwitool")
+
     def testCbfsOffset(self):
         """Test a CBFS with files at particular offsets
 
@@ -2802,7 +2840,7 @@
     def testExtractCbfsRaw(self):
         """Test extracting CBFS compressed data without decompressing it"""
         data = self._RunExtractCmd('section/cbfs/u-boot-dtb', decomp=False)
-        dtb = tools.Decompress(data, 'lzma', with_header=False)
+        dtb = comp_util.decompress(data, 'lzma', with_header=False)
         self.assertEqual(EXTRACT_DTB_SIZE, len(dtb))
 
     def testExtractBadEntry(self):
@@ -3616,6 +3654,15 @@
         # Just check that the data appears in the file somewhere
         self.assertIn(U_BOOT_SPL_DATA, data)
 
+    def testMkimageMissing(self):
+        """Test that binman still produces an image if mkimage is missing"""
+        with test_util.capture_sys_output() as (_, stderr):
+            self._DoTestFile('156_mkimage.dts',
+                             force_missing_bintools='mkimage')
+        err = stderr.getvalue()
+        self.assertRegex(err,
+                         "Image 'main-section'.*missing bintools.*: mkimage")
+
     def testExtblob(self):
         """Test an image with an external blob"""
         data = self._DoReadFile('157_blob_ext.dts')
@@ -3713,11 +3760,37 @@
         data = self._DoReadFile('162_fit_external.dts')
         fit_data = data[len(U_BOOT_DATA):-2]  # _testing is 2 bytes
 
+        # Size of the external-data region as set up by mkimage
+        external_data_size = len(U_BOOT_DATA) + 2
+        expected_size = (len(U_BOOT_DATA) + 0x400 +
+                         tools.Align(external_data_size, 4) +
+                         len(U_BOOT_NODTB_DATA))
+
         # The data should be outside the FIT
         dtb = fdt.Fdt.FromData(fit_data)
         dtb.Scan()
         fnode = dtb.GetNode('/images/kernel')
         self.assertNotIn('data', fnode.props)
+        self.assertEqual(len(U_BOOT_DATA),
+                         fdt_util.fdt32_to_cpu(fnode.props['data-size'].value))
+        fit_pos = 0x400;
+        self.assertEqual(
+            fit_pos,
+            fdt_util.fdt32_to_cpu(fnode.props['data-position'].value))
+
+        self.assertEquals(expected_size, len(data))
+        actual_pos = len(U_BOOT_DATA) + fit_pos
+        self.assertEqual(U_BOOT_DATA + b'aa',
+                         data[actual_pos:actual_pos + external_data_size])
+
+    def testFitMissing(self):
+        """Test that binman still produces a FIT image if mkimage is missing"""
+        with test_util.capture_sys_output() as (_, stderr):
+            self._DoTestFile('162_fit_external.dts',
+                             force_missing_bintools='mkimage')
+        err = stderr.getvalue()
+        self.assertRegex(err,
+                         "Image 'main-section'.*missing bintools.*: mkimage")
 
     def testSectionIgnoreHashSignature(self):
         """Test that sections ignore hash, signature nodes for its data"""
@@ -4212,13 +4285,13 @@
 
         # Check compressed data
         section1 = self._decompress(rest)
-        expect1 = tools.Compress(COMPRESS_DATA + U_BOOT_DATA, 'lz4')
+        expect1 = comp_util.compress(COMPRESS_DATA + U_BOOT_DATA, 'lz4')
         self.assertEquals(expect1, rest[:len(expect1)])
         self.assertEquals(COMPRESS_DATA + U_BOOT_DATA, section1)
         rest1 = rest[len(expect1):]
 
         section2 = self._decompress(rest1)
-        expect2 = tools.Compress(COMPRESS_DATA + COMPRESS_DATA, 'lz4')
+        expect2 = comp_util.compress(COMPRESS_DATA + COMPRESS_DATA, 'lz4')
         self.assertEquals(expect2, rest1[:len(expect2)])
         self.assertEquals(COMPRESS_DATA + COMPRESS_DATA, section2)
         rest2 = rest1[len(expect2):]
@@ -4965,6 +5038,68 @@
             err,
             "Image '.*' has faked external blobs and is non-functional: .*")
 
+    def testExtblobListFaked(self):
+        """Test an extblob with missing external blob that are faked"""
+        with test_util.capture_sys_output() as (stdout, stderr):
+            self._DoTestFile('216_blob_ext_list_missing.dts',
+                             allow_fake_blobs=True)
+        err = stderr.getvalue()
+        self.assertRegex(err, "Image 'main-section'.*faked.*: blob-ext-list")
+
+    def testListBintools(self):
+        args = ['tool', '--list']
+        with test_util.capture_sys_output() as (stdout, _):
+            self._DoBinman(*args)
+        out = stdout.getvalue().splitlines()
+        self.assertTrue(len(out) >= 2)
+
+    def testFetchBintools(self):
+        def fail_download(url):
+            """Fake the tools.Download() function by raising an exception"""
+            raise urllib.error.URLError('my error')
+
+        args = ['tool']
+        with self.assertRaises(ValueError) as e:
+            self._DoBinman(*args)
+        self.assertIn("Invalid arguments to 'tool' subcommand",
+                      str(e.exception))
+
+        args = ['tool', '--fetch']
+        with self.assertRaises(ValueError) as e:
+            self._DoBinman(*args)
+        self.assertIn('Please specify bintools to fetch', str(e.exception))
+
+        args = ['tool', '--fetch', '_testing']
+        with unittest.mock.patch.object(tools, 'Download',
+                                        side_effect=fail_download):
+            with test_util.capture_sys_output() as (stdout, _):
+                self._DoBinman(*args)
+        self.assertIn('failed to fetch with all methods', stdout.getvalue())
+
+    def testInvalidCompress(self):
+        with self.assertRaises(ValueError) as e:
+            comp_util.compress(b'', 'invalid')
+        self.assertIn("Unknown algorithm 'invalid'", str(e.exception))
+
+        with self.assertRaises(ValueError) as e:
+            comp_util.decompress(b'1234', 'invalid')
+        self.assertIn("Unknown algorithm 'invalid'", str(e.exception))
+
+    def testBintoolDocs(self):
+        """Test for creation of bintool documentation"""
+        with test_util.capture_sys_output() as (stdout, stderr):
+            control.write_bintool_docs(control.bintool.Bintool.get_tool_list())
+        self.assertTrue(len(stdout.getvalue()) > 0)
+
+    def testBintoolDocsMissing(self):
+        """Test handling of missing bintool documentation"""
+        with self.assertRaises(ValueError) as e:
+            with test_util.capture_sys_output() as (stdout, stderr):
+                control.write_bintool_docs(
+                    control.bintool.Bintool.get_tool_list(), 'mkimage')
+        self.assertIn('Documentation is missing for modules: mkimage',
+                      str(e.exception))
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/tools/binman/image.py b/tools/binman/image.py
index f0a7d65..0f0c1d2 100644
--- a/tools/binman/image.py
+++ b/tools/binman/image.py
@@ -82,6 +82,7 @@
         self.missing_etype = missing_etype
         self.use_expanded = use_expanded
         self.test_section_timeout = False
+        self.bintools = {}
         if not test:
             self.ReadNode()
 
@@ -394,3 +395,16 @@
         self._CollectEntries(entries, entries_by_name, self)
         return self.LookupSymbol(sym_name, optional, msg, base_addr,
                                  entries_by_name)
+
+    def CollectBintools(self):
+        """Collect all the bintools used by this image
+
+        Returns:
+            Dict of bintools:
+                key: name of tool
+                value: Bintool object
+        """
+        bintools = {}
+        super().AddBintools(bintools)
+        self.bintools = bintools
+        return bintools
diff --git a/tools/binman/main.py b/tools/binman/main.py
index 35944f3..03462e7 100755
--- a/tools/binman/main.py
+++ b/tools/binman/main.py
@@ -35,6 +35,7 @@
 # in PYTHONPATH)
 sys.path.insert(2, our1_path)
 
+from binman import bintool
 from patman import test_util
 
 # Bring in the libfdt module
@@ -68,6 +69,7 @@
             name to execute (as in 'binman test testSections', for example)
         toolpath: List of paths to use for tools
     """
+    from binman import bintool_test
     from binman import cbfs_util_test
     from binman import elf_test
     from binman import entry_test
@@ -85,9 +87,9 @@
     test_util.RunTestSuites(
         result, debug, verbosity, test_preserve_dirs, processes, test_name,
         toolpath,
-        [entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
-         elf_test.TestElf, image_test.TestImage, cbfs_util_test.TestCbfs,
-         fip_util_test.TestFip])
+        [bintool_test.TestBintool, entry_test.TestEntry, ftest.TestFunctional,
+         fdt_test.TestFdt, elf_test.TestElf, image_test.TestImage,
+         cbfs_util_test.TestCbfs, fip_util_test.TestFip])
 
     return test_util.ReportResult('binman', test_name, result)
 
@@ -128,6 +130,9 @@
                                 args.test_preserve_dirs, args.tests,
                                 args.toolpath)
 
+    elif args.cmd == 'bintool-docs':
+        control.write_bintool_docs(bintool.Bintool.get_tool_list())
+
     elif args.cmd == 'entry-docs':
         control.WriteEntryDocs(control.GetEntryModules())
 
diff --git a/tools/binman/test/162_fit_external.dts b/tools/binman/test/162_fit_external.dts
index 19518e0..6f2a629 100644
--- a/tools/binman/test/162_fit_external.dts
+++ b/tools/binman/test/162_fit_external.dts
@@ -10,7 +10,7 @@
 		u-boot {
 		};
 		fit {
-			fit,external-offset = <0>;
+			fit,external-offset = <0x400>;
 			description = "test-desc";
 			#address-cells = <1>;
 
diff --git a/tools/binman/test/218_blob_ext_list_fake.dts b/tools/binman/test/218_blob_ext_list_fake.dts
new file mode 100644
index 0000000..54ee54f
--- /dev/null
+++ b/tools/binman/test/218_blob_ext_list_fake.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		blob-ext-list {
+			filenames = "refcode.bin", "fake-file";
+		};
+	};
+};
diff --git a/tools/buildman/toolchain.py b/tools/buildman/toolchain.py
index 4e2471f..adc75a7 100644
--- a/tools/buildman/toolchain.py
+++ b/tools/buildman/toolchain.py
@@ -515,50 +515,6 @@
             return arch, links
         return None
 
-    def Download(self, url):
-        """Download a file to a temporary directory
-
-        Args:
-            url: URL to download
-        Returns:
-            Tuple:
-                Temporary directory name
-                Full path to the downloaded archive file in that directory,
-                    or None if there was an error while downloading
-        """
-        print('Downloading: %s' % url)
-        leaf = url.split('/')[-1]
-        tmpdir = tempfile.mkdtemp('.buildman')
-        response = urllib.request.urlopen(url)
-        fname = os.path.join(tmpdir, leaf)
-        fd = open(fname, 'wb')
-        meta = response.info()
-        size = int(meta.get('Content-Length'))
-        done = 0
-        block_size = 1 << 16
-        status = ''
-
-        # Read the file in chunks and show progress as we go
-        while True:
-            buffer = response.read(block_size)
-            if not buffer:
-                print(chr(8) * (len(status) + 1), '\r', end=' ')
-                break
-
-            done += len(buffer)
-            fd.write(buffer)
-            status = r'%10d MiB  [%3d%%]' % (done // 1024 // 1024,
-                                             done * 100 // size)
-            status = status + chr(8) * (len(status) + 1)
-            print(status, end=' ')
-            sys.stdout.flush()
-        fd.close()
-        if done != size:
-            print('Error, failed to download')
-            os.remove(fname)
-            fname = None
-        return tmpdir, fname
-
     def Unpack(self, fname, dest):
         """Unpack a tar file
 
@@ -615,7 +571,7 @@
             os.mkdir(dest)
 
         # Download the tar file for this toolchain and unpack it
-        tmpdir, tarfile = self.Download(url)
+        tarfile, tmpdir = tools.Download(url, '.buildman')
         if not tarfile:
             return 1
         print(col.Color(col.GREEN, 'Unpacking to: %s' % dest), end=' ')
diff --git a/tools/fdt_host.h b/tools/fdt_host.h
index 15c07c7..bc42306 100644
--- a/tools/fdt_host.h
+++ b/tools/fdt_host.h
@@ -27,6 +27,14 @@
  */
 int fdt_remove_unused_strings(const void *old, void *new);
 
+/**
+ * fit_check_sign() - Check a signature in a FIT
+ *
+ * @fit: FIT to check
+ * @key: Key FDT blob to check against
+ * @fit_uname_config: Name of configuration to check (NULL for default)
+ * @return 0 if OK, -ve if signature failed
+ */
 int fit_check_sign(const void *fit, const void *key,
 		   const char *fit_uname_config);
 
diff --git a/tools/fit_check_sign.c b/tools/fit_check_sign.c
index 5573842..3d1d33f 100644
--- a/tools/fit_check_sign.c
+++ b/tools/fit_check_sign.c
@@ -27,7 +27,7 @@
 {
 	fprintf(stderr, "Usage: %s -f fit file -k key file -c config name\n"
 			 "          -f ==> set fit file which should be checked'\n"
-			 "          -k ==> set key file which contains the key'\n"
+			 "          -k ==> set key .dtb file which contains the key'\n"
 			 "          -c ==> set the configuration name'\n",
 		cmdname);
 	exit(EXIT_FAILURE);
@@ -89,7 +89,7 @@
 		fprintf(stderr, "Signature check OK\n");
 	} else {
 		ret = EXIT_FAILURE;
-		fprintf(stderr, "Signature check Bad (error %d)\n", ret);
+		fprintf(stderr, "Signature check bad (error %d)\n", ret);
 	}
 
 	(void) munmap((void *)fit_blob, fsbuf.st_size);
diff --git a/tools/fit_common.c b/tools/fit_common.c
index 5c8920d..0164976 100644
--- a/tools/fit_common.c
+++ b/tools/fit_common.c
@@ -119,3 +119,72 @@
 
 	return -1;
 }
+
+int copyfile(const char *src, const char *dst)
+{
+	int fd_src = -1, fd_dst = -1;
+	void *buf = NULL;
+	ssize_t size;
+	size_t count;
+	int ret = -1;
+
+	fd_src = open(src, O_RDONLY);
+	if (fd_src < 0) {
+		printf("Can't open file %s (%s)\n", src, strerror(errno));
+		goto out;
+	}
+
+	fd_dst = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0666);
+	if (fd_dst < 0) {
+		printf("Can't open file %s (%s)\n", dst, strerror(errno));
+		goto out;
+	}
+
+	buf = calloc(1, 512);
+	if (!buf) {
+		printf("Can't allocate buffer to copy file\n");
+		goto out;
+	}
+
+	while (1) {
+		size = read(fd_src, buf, 512);
+		if (size < 0) {
+			printf("Can't read file %s\n", src);
+			goto out;
+		}
+		if (!size)
+			break;
+
+		count = size;
+		size = write(fd_dst, buf, count);
+		if (size < 0) {
+			printf("Can't write file %s\n", dst);
+			goto out;
+		}
+	}
+
+	ret = 0;
+
+ out:
+	if (fd_src >= 0)
+		close(fd_src);
+	if (fd_dst >= 0)
+		close(fd_dst);
+	if (buf)
+		free(buf);
+
+	return ret;
+}
+
+void summary_show(struct image_summary *summary, const char *imagefile,
+		  const char *keydest)
+{
+	if (summary->sig_offset) {
+		printf("Signature written to '%s', node '%s'\n", imagefile,
+		       summary->sig_path);
+		if (keydest) {
+			printf("Public key written to '%s', node '%s'\n",
+			       keydest, summary->keydest_path);
+		}
+	}
+}
diff --git a/tools/fit_common.h b/tools/fit_common.h
index 0e8ee79..920a16a 100644
--- a/tools/fit_common.h
+++ b/tools/fit_common.h
@@ -39,4 +39,27 @@
 	     void **blobp, struct stat *sbuf, bool delete_on_error,
 	     bool read_only);
 
+/**
+ * copyfile() - Copy a file
+ *
+ * This uses read()/write() to copy file @src to file @dst
+ *
+ * If @dst exists, it is overwritten and truncated to the correct size.
+ *
+ * @src: Filename to read from
+ * @dst: Filename to write to
+ * @return 0 if OK, -1 on error
+ */
+int copyfile(const char *src, const char *dst);
+
+/**
+ * summary_show() - Show summary information about the signing process
+ *
+ * @summary: Summary info to show
+ * @imagefile: Filename of the output image
+ * @keydest: Filename where the key information is written (NULL if none)
+ */
+void summary_show(struct image_summary *summary, const char *imagefile,
+		  const char *keydest);
+
 #endif /* _FIT_COMMON_H_ */
diff --git a/tools/fit_image.c b/tools/fit_image.c
index 8df95c4..15f7c82 100644
--- a/tools/fit_image.c
+++ b/tools/fit_image.c
@@ -74,7 +74,8 @@
 						params->require_keys,
 						params->engine_id,
 						params->cmdname,
-						params->algo_name);
+						params->algo_name,
+						&params->summary);
 	}
 
 	if (dest_blob) {
@@ -525,8 +526,9 @@
 	/* Check if an offset for the external data was set. */
 	if (params->external_offset > 0) {
 		if (params->external_offset < new_size) {
-			debug("External offset %x overlaps FIT length %x\n",
-			      params->external_offset, new_size);
+			fprintf(stderr,
+				"External offset %x overlaps FIT length %x\n",
+				params->external_offset, new_size);
 			ret = -EINVAL;
 			goto err;
 		}
@@ -655,62 +657,6 @@
 	return ret;
 }
 
-static int copyfile(const char *src, const char *dst)
-{
-	int fd_src = -1, fd_dst = -1;
-	void *buf = NULL;
-	ssize_t size;
-	size_t count;
-	int ret = -1;
-
-	fd_src = open(src, O_RDONLY);
-	if (fd_src < 0) {
-		printf("Can't open file %s (%s)\n", src, strerror(errno));
-		goto out;
-	}
-
-	fd_dst = open(dst, O_WRONLY | O_CREAT, 0666);
-	if (fd_dst < 0) {
-		printf("Can't open file %s (%s)\n", dst, strerror(errno));
-		goto out;
-	}
-
-	buf = calloc(1, 512);
-	if (!buf) {
-		printf("Can't allocate buffer to copy file\n");
-		goto out;
-	}
-
-	while (1) {
-		size = read(fd_src, buf, 512);
-		if (size < 0) {
-			printf("Can't read file %s\n", src);
-			goto out;
-		}
-		if (!size)
-			break;
-
-		count = size;
-		size = write(fd_dst, buf, count);
-		if (size < 0) {
-			printf("Can't write file %s\n", dst);
-			goto out;
-		}
-	}
-
-	ret = 0;
-
- out:
-	if (fd_src >= 0)
-		close(fd_src);
-	if (fd_dst >= 0)
-		close(fd_dst);
-	if (buf)
-		free(buf);
-
-	return ret;
-}
-
 /**
  * fit_handle_file - main FIT file processing function
  *
diff --git a/tools/image-host.c b/tools/image-host.c
index f86e1fb..eaeb765 100644
--- a/tools/image-host.c
+++ b/tools/image-host.c
@@ -48,10 +48,10 @@
  * fit_image_process_hash - Process a single subnode of the images/ node
  *
  * Check each subnode and process accordingly. For hash nodes we generate
- * a hash of the supplised data and store it in the node.
+ * a hash of the supplied data and store it in the node.
  *
  * @fit:	pointer to the FIT format image header
- * @image_name:	name of image being processes (used to display errors)
+ * @image_name:	name of image being processed (used to display errors)
  * @noffset:	subnode offset
  * @data:	data to process
  * @size:	size of data in bytes
@@ -200,19 +200,20 @@
  * fit_image_process_sig- Process a single subnode of the images/ node
  *
  * Check each subnode and process accordingly. For signature nodes we
- * generate a signed hash of the supplised data and store it in the node.
+ * generate a signed hash of the supplied data and store it in the node.
  *
  * @keydir:	Directory containing keys to use for signing
- * @keydest:	Destination FDT blob to write public keys into
+ * @keydest:	Destination FDT blob to write public keys into (NULL if none)
  * @fit:	pointer to the FIT format image header
- * @image_name:	name of image being processes (used to display errors)
+ * @image_name:	name of image being processed (used to display errors)
  * @noffset:	subnode offset
  * @data:	data to process
  * @size:	size of data in bytes
  * @comment:	Comment to add to signature nodes
  * @require_keys: Mark all keys as 'required'
  * @engine_id:	Engine to use for signing
- * Return: 0 if ok, -1 on error
+ * Return: keydest node if @keydest is non-NULL, else 0 if none; -ve error code
+ *	on failure
  */
 static int fit_image_process_sig(const char *keydir, const char *keyfile,
 		void *keydest, void *fit, const char *image_name,
@@ -267,11 +268,13 @@
 	 */
 	if (keydest) {
 		ret = info.crypto->add_verify_data(&info, keydest);
-		if (ret) {
+		if (ret < 0) {
 			printf("Failed to add verification data for '%s' signature node in '%s' image node\n",
 			       node_name, image_name);
 			return ret;
 		}
+		/* Return the node that was written to */
+		return ret;
 	}
 
 	return 0;
@@ -649,7 +652,7 @@
 				comment, require_keys, engine_id, cmdname,
 				algo_name);
 		}
-		if (ret)
+		if (ret < 0)
 			return ret;
 	}
 
@@ -689,14 +692,14 @@
 	return 0;
 }
 
-static const char *fit_config_get_image_list(void *fit, int noffset,
-		int *lenp, int *allow_missingp)
+static const char *fit_config_get_image_list(const void *fit, int noffset,
+					     int *lenp, int *allow_missingp)
 {
 	static const char default_list[] = FIT_KERNEL_PROP "\0"
 			FIT_FDT_PROP;
 	const char *prop;
 
-	/* If there is an "image" property, use that */
+	/* If there is a "sign-images" property, use that */
 	prop = fdt_getprop(fit, noffset, "sign-images", lenp);
 	if (prop) {
 		*allow_missingp = 0;
@@ -710,10 +713,26 @@
 	return default_list;
 }
 
-static int fit_config_add_hash(void *fit, const char *conf_name, const char *sig_name,
-			       struct strlist *node_inc, const char *iname, int image_noffset)
+/**
+ * fit_config_add_hash() - Add a list of nodes to hash for an image
+ *
+ * This adds a list of paths to image nodes (as referred to by a particular
+ * offset) that need to be hashed, to protect a configuration
+ *
+ * @fit:	Pointer to the FIT format image header
+ * @image_noffset: Offset of image to process (e.g. /images/kernel-1)
+ * @node_inc:	List of nodes to add to
+ * @conf_name:	Configuration-node name, child of /configurations node (only
+ *	used for error messages)
+ * @sig_name:	Signature-node name (only used for error messages)
+ * @iname:	Name of image being processed (e.g. "kernel-1") (only used
+ *	for error messages)
+ */
+static int fit_config_add_hash(const void *fit, int image_noffset,
+			       struct strlist *node_inc, const char *conf_name,
+			       const char *sig_name, const char *iname)
 {
-	char name[200], path[200];
+	char path[200];
 	int noffset;
 	int hash_count;
 	int ret;
@@ -724,9 +743,6 @@
 	if (strlist_add(node_inc, path))
 		goto err_mem;
 
-	snprintf(name, sizeof(name), "%s/%s", FIT_CONFS_PATH,
-		 conf_name);
-
 	/* Add all this image's hashes */
 	hash_count = 0;
 	for (noffset = fdt_first_subnode(fit, image_noffset);
@@ -781,7 +797,21 @@
 	return -ENOENT;
 }
 
-static int fit_config_get_hash_list(void *fit, int conf_noffset,
+/**
+ * fit_config_get_hash_list() - Get the regions to sign
+ *
+ * This calculates a list of nodes to hash for this particular configuration,
+ * returning it as a string list (struct strlist, not a devicetree string list)
+ *
+ * @fit:	Pointer to the FIT format image header
+ * @conf_noffset: Offset of configuration node to sign (child of
+ *	/configurations node)
+ * @sig_offset:	Offset of signature node containing info about how to sign it
+ *	(child of 'signatures' node)
+ * @return 0 if OK, -ENOENT if an image referred to by the configuration cannot
+ *	be found, -ENOMSG if there were no images in the configuration
+ */
+static int fit_config_get_hash_list(const void *fit, int conf_noffset,
 				    int sig_offset, struct strlist *node_inc)
 {
 	int allow_missing;
@@ -832,9 +862,8 @@
 				return -ENOENT;
 			}
 
-			ret = fit_config_add_hash(fit, conf_name,
-						  sig_name, node_inc,
-						  iname, image_noffset);
+			ret = fit_config_add_hash(fit, image_noffset, node_inc,
+						  conf_name, sig_name, iname);
 			if (ret < 0)
 				return ret;
 
@@ -856,9 +885,32 @@
 	return -ENOMEM;
 }
 
-static int fit_config_get_data(void *fit, int conf_noffset, int noffset,
-		struct image_region **regionp, int *region_countp,
-		char **region_propp, int *region_proplen)
+/**
+ * fit_config_get_regions() - Get the regions to sign
+ *
+ * This calculates a list of node to hash for this particular configuration,
+ * then finds which regions of the devicetree they correspond to.
+ *
+ * @fit:	Pointer to the FIT format image header
+ * @conf_noffset: Offset of configuration node to sign (child of
+ *	/configurations node)
+ * @sig_offset:	Offset of signature node containing info about how to sign it
+ *	(child of 'signatures' node)
+ * @regionp: Returns list of regions that need to be hashed (allocated; must be
+ *	freed by the caller)
+ * @region_count: Returns number of regions
+ * @region_propp: Returns string-list property containing the list of nodes
+ *	that correspond to the regions. Each entry is a full path to the node.
+ *	This is in devicetree format, i.e. a \0 between each string. This is
+ *	allocated and must be freed by the caller.
+ * @region_proplen: Returns length of *@region_propp in bytes
+ * @return 0 if OK, -ENOMEM if out of memory, -EIO if the regions to hash could
+ * not be found, -EINVAL if no regions were found to hash
+ */
+static int fit_config_get_regions(const void *fit, int conf_noffset,
+				  int sig_offset, struct image_region **regionp,
+				  int *region_countp, char **region_propp,
+				  int *region_proplen)
 {
 	char * const exc_prop[] = {"data"};
 	struct strlist node_inc;
@@ -871,11 +923,12 @@
 	int ret, len;
 
 	conf_name = fit_get_name(fit, conf_noffset, NULL);
-	sig_name = fit_get_name(fit, noffset, NULL);
+	sig_name = fit_get_name(fit, sig_offset, NULL);
 	debug("%s: conf='%s', sig='%s'\n", __func__, conf_name, sig_name);
 
 	/* Get a list of nodes we want to hash */
-	ret = fit_config_get_hash_list(fit, conf_noffset, noffset, &node_inc);
+	ret = fit_config_get_hash_list(fit, conf_noffset, sig_offset,
+				       &node_inc);
 	if (ret)
 		return ret;
 
@@ -928,8 +981,26 @@
 	return 0;
 }
 
+/**
+ * fit_config_process_sig - Process a single subnode of the configurations/ node
+ *
+ * Generate a signed hash of the supplied data and store it in the node.
+ *
+ * @keydir:	Directory containing keys to use for signing
+ * @keydest:	Destination FDT blob to write public keys into (NULL if none)
+ * @fit:	pointer to the FIT format image header
+ * @conf_name:	name of config being processed (used to display errors)
+ * @conf_noffset: Offset of configuration node, e.g. '/configurations/conf-1'
+ * @noffset:	subnode offset, e.g. '/configurations/conf-1/sig-1'
+ * @comment:	Comment to add to signature nodes
+ * @require_keys: Mark all keys as 'required'
+ * @engine_id:	Engine to use for signing
+ * @cmdname:	Command name used when reporting errors
+ * @return keydest node if @keydest is non-NULL, else 0 if none; -ve error code
+ *	on failure
+ */
 static int fit_config_process_sig(const char *keydir, const char *keyfile,
-		void *keydest,	void *fit, const char *conf_name,
+		void *keydest, void *fit, const char *conf_name,
 		int conf_noffset, int noffset, const char *comment,
 		int require_keys, const char *engine_id, const char *cmdname,
 		const char *algo_name)
@@ -945,8 +1016,9 @@
 	int ret;
 
 	node_name = fit_get_name(fit, noffset, NULL);
-	if (fit_config_get_data(fit, conf_noffset, noffset, &region,
-				&region_count, &region_prop, &region_proplen))
+	if (fit_config_get_regions(fit, conf_noffset, noffset, &region,
+				   &region_count, &region_prop,
+				   &region_proplen))
 		return -1;
 
 	if (fit_image_setup_sig(&info, keydir, keyfile, fit, conf_name, noffset,
@@ -986,7 +1058,7 @@
 	/* Write the public key into the supplied FDT file */
 	if (keydest) {
 		ret = info.crypto->add_verify_data(&info, keydest);
-		if (ret) {
+		if (ret < 0) {
 			printf("Failed to add verification data for '%s' signature node in '%s' configuration node\n",
 			       node_name, conf_name);
 		}
@@ -999,7 +1071,8 @@
 static int fit_config_add_verification_data(const char *keydir,
 		const char *keyfile, void *keydest, void *fit, int conf_noffset,
 		const char *comment, int require_keys, const char *engine_id,
-		const char *cmdname, const char *algo_name)
+		const char *cmdname, const char *algo_name,
+		struct image_summary *summary)
 {
 	const char *conf_name;
 	int noffset;
@@ -1019,9 +1092,20 @@
 			ret = fit_config_process_sig(keydir, keyfile, keydest,
 				fit, conf_name, conf_noffset, noffset, comment,
 				require_keys, engine_id, cmdname, algo_name);
+			if (ret < 0)
+				return ret;
+
+			summary->sig_offset = noffset;
+			fdt_get_path(fit, noffset, summary->sig_path,
+				     sizeof(summary->sig_path));
+
+			if (keydest) {
+				summary->keydest_offset = ret;
+				fdt_get_path(keydest, ret,
+					     summary->keydest_path,
+					     sizeof(summary->keydest_path));
+			}
 		}
-		if (ret)
-			return ret;
 	}
 
 	return 0;
@@ -1065,7 +1149,8 @@
 int fit_add_verification_data(const char *keydir, const char *keyfile,
 			      void *keydest, void *fit, const char *comment,
 			      int require_keys, const char *engine_id,
-			      const char *cmdname, const char *algo_name)
+			      const char *cmdname, const char *algo_name,
+			      struct image_summary *summary)
 {
 	int images_noffset, confs_noffset;
 	int noffset;
@@ -1114,7 +1199,7 @@
 						       fit, noffset, comment,
 						       require_keys,
 						       engine_id, cmdname,
-						       algo_name);
+						       algo_name, summary);
 		if (ret)
 			return ret;
 	}
diff --git a/tools/imagetool.h b/tools/imagetool.h
index b7ac3a2..413e97c 100644
--- a/tools/imagetool.h
+++ b/tools/imagetool.h
@@ -21,6 +21,8 @@
 #include <unistd.h>
 #include <u-boot/sha1.h>
 
+#include <image.h>
+
 #include "fdt_host.h"
 
 #define ARRAY_SIZE(x)		(sizeof(x) / sizeof((x)[0]))
@@ -84,6 +86,7 @@
 	int bl_len;		/* Block length in byte for external data */
 	const char *engine_id;	/* Engine to use for signing */
 	bool reset_timestamp;	/* Reset the timestamp on an existing image */
+	struct image_summary summary;	/* Results of signing process */
 };
 
 /*
diff --git a/tools/mkimage.c b/tools/mkimage.c
index 0ec28da..c8f4ecd 100644
--- a/tools/mkimage.c
+++ b/tools/mkimage.c
@@ -10,6 +10,7 @@
 #include "imagetool.h"
 #include "mkimage.h"
 #include "imximage.h"
+#include <fit_common.h>
 #include <image.h>
 #include <version.h>
 #ifdef __linux__
@@ -472,6 +473,9 @@
 
 		(void) munmap((void *)ptr, sbuf.st_size);
 		(void) close (ifd);
+		if (!retval)
+			summary_show(&params.summary, params.imagefile,
+				     params.keydest);
 
 		exit (retval);
 	}
diff --git a/tools/patman/tools.py b/tools/patman/tools.py
index 86c4f61..5dfecaf 100644
--- a/tools/patman/tools.py
+++ b/tools/patman/tools.py
@@ -7,9 +7,9 @@
 import os
 import shlex
 import shutil
-import struct
 import sys
 import tempfile
+import urllib.request
 
 from patman import command
 from patman import tout
@@ -313,6 +313,93 @@
         target_name = name
     return target_name, extra_args
 
+def get_env_with_path():
+    """Get an updated environment with the PATH variable set correctly
+
+    If there are any search paths set, these need to come first in the PATH so
+    that these override any other version of the tools.
+
+    Returns:
+        dict: New environment with PATH updated, or None if there are no
+            search paths
+    """
+    if tool_search_paths:
+        env = dict(os.environ)
+        env['PATH'] = ':'.join(tool_search_paths) + ':' + env['PATH']
+        return env
+
+def run_result(name, *args, **kwargs):
+    """Run a tool with some arguments
+
+    This runs a 'tool', which is a program used by binman to process files and
+    perhaps produce some output. Tools can be located on the PATH or in a
+    search path.
+
+    Args:
+        name: Command name to run
+        args: Arguments to the tool
+        for_host: True to resolve the command to the version for the host
+        for_target: False to run the command as-is, without resolving it
+                   to the version for the compile target
+        raise_on_error: Raise an error if the command fails (True by default)
+
+    Returns:
+        CommandResult object
+    """
+    try:
+        binary = kwargs.get('binary')
+        for_host = kwargs.get('for_host', False)
+        for_target = kwargs.get('for_target', not for_host)
+        raise_on_error = kwargs.get('raise_on_error', True)
+        env = get_env_with_path()
+        if for_target:
+            name, extra_args = GetTargetCompileTool(name)
+            args = tuple(extra_args) + args
+        elif for_host:
+            name, extra_args = GetHostCompileTool(name)
+            args = tuple(extra_args) + args
+        name = os.path.expanduser(name)  # Expand paths containing ~
+        all_args = (name,) + args
+        result = command.RunPipe([all_args], capture=True, capture_stderr=True,
+                                 env=env, raise_on_error=False, binary=binary)
+        if result.return_code:
+            if raise_on_error:
+                raise ValueError("Error %d running '%s': %s" %
+                                 (result.return_code,' '.join(all_args),
+                                  result.stderr or result.stdout))
+        return result
+    except ValueError:
+        if env and not PathHasFile(env['PATH'], name):
+            msg = "Please install tool '%s'" % name
+            package = packages.get(name)
+            if package:
+                 msg += " (e.g. from package '%s')" % package
+            raise ValueError(msg)
+        raise
+
+def tool_find(name):
+    """Search the current path for a tool
+
+    This uses both PATH and any value from SetToolPaths() to search for a tool
+
+    Args:
+        name (str): Name of tool to locate
+
+    Returns:
+        str: Full path to tool if found, else None
+    """
+    name = os.path.expanduser(name)  # Expand paths containing ~
+    paths = []
+    pathvar = os.environ.get('PATH')
+    if pathvar:
+        paths = pathvar.split(':')
+    if tool_search_paths:
+        paths += tool_search_paths
+    for path in paths:
+        fname = os.path.join(path, name)
+        if os.path.isfile(fname) and os.access(fname, os.X_OK):
+            return fname
+
 def Run(name, *args, **kwargs):
     """Run a tool with some arguments
 
@@ -330,37 +417,9 @@
     Returns:
         CommandResult object
     """
-    try:
-        binary = kwargs.get('binary')
-        for_host = kwargs.get('for_host', False)
-        for_target = kwargs.get('for_target', not for_host)
-        env = None
-        if tool_search_paths:
-            env = dict(os.environ)
-            env['PATH'] = ':'.join(tool_search_paths) + ':' + env['PATH']
-        if for_target:
-            name, extra_args = GetTargetCompileTool(name)
-            args = tuple(extra_args) + args
-        elif for_host:
-            name, extra_args = GetHostCompileTool(name)
-            args = tuple(extra_args) + args
-        name = os.path.expanduser(name)  # Expand paths containing ~
-        all_args = (name,) + args
-        result = command.RunPipe([all_args], capture=True, capture_stderr=True,
-                                 env=env, raise_on_error=False, binary=binary)
-        if result.return_code:
-            raise ValueError("Error %d running '%s': %s" %
-               (result.return_code,' '.join(all_args),
-                result.stderr))
+    result = run_result(name, *args, **kwargs)
+    if result is not None:
         return result.stdout
-    except:
-        if env and not PathHasFile(env['PATH'], name):
-            msg = "Please install tool '%s'" % name
-            package = packages.get(name)
-            if package:
-                 msg += " (e.g. from package '%s')" % package
-            raise ValueError(msg)
-        raise
 
 def Filename(fname):
     """Resolve a file path to an absolute path.
@@ -458,115 +517,6 @@
     """
     return bval.decode('utf-8')
 
-def Compress(indata, algo, with_header=True):
-    """Compress some data using a given algorithm
-
-    Note that for lzma this uses an old version of the algorithm, not that
-    provided by xz.
-
-    This requires 'lz4' and 'lzma_alone' tools. It also requires an output
-    directory to be previously set up, by calling PrepareOutputDir().
-
-    Care is taken to use unique temporary files so that this function can be
-    called from multiple threads.
-
-    Args:
-        indata: Input data to compress
-        algo: Algorithm to use ('none', 'gzip', 'lz4' or 'lzma')
-
-    Returns:
-        Compressed data
-    """
-    if algo == 'none':
-        return indata
-    fname = tempfile.NamedTemporaryFile(prefix='%s.comp.tmp' % algo,
-                                        dir=outdir).name
-    WriteFile(fname, indata)
-    if algo == 'lz4':
-        data = Run('lz4', '--no-frame-crc', '-B4', '-5', '-c', fname,
-                   binary=True)
-    # cbfstool uses a very old version of lzma
-    elif algo == 'lzma':
-        outfname = tempfile.NamedTemporaryFile(prefix='%s.comp.otmp' % algo,
-                                               dir=outdir).name
-        Run('lzma_alone', 'e', fname, outfname, '-lc1', '-lp0', '-pb0', '-d8')
-        data = ReadFile(outfname)
-    elif algo == 'gzip':
-        data = Run('gzip', '-c', fname, binary=True)
-    else:
-        raise ValueError("Unknown algorithm '%s'" % algo)
-    if with_header:
-        hdr = struct.pack('<I', len(data))
-        data = hdr + data
-    return data
-
-def Decompress(indata, algo, with_header=True):
-    """Decompress some data using a given algorithm
-
-    Note that for lzma this uses an old version of the algorithm, not that
-    provided by xz.
-
-    This requires 'lz4' and 'lzma_alone' tools. It also requires an output
-    directory to be previously set up, by calling PrepareOutputDir().
-
-    Args:
-        indata: Input data to decompress
-        algo: Algorithm to use ('none', 'gzip', 'lz4' or 'lzma')
-
-    Returns:
-        Compressed data
-    """
-    if algo == 'none':
-        return indata
-    if with_header:
-        data_len = struct.unpack('<I', indata[:4])[0]
-        indata = indata[4:4 + data_len]
-    fname = GetOutputFilename('%s.decomp.tmp' % algo)
-    with open(fname, 'wb') as fd:
-        fd.write(indata)
-    if algo == 'lz4':
-        data = Run('lz4', '-dc', fname, binary=True)
-    elif algo == 'lzma':
-        outfname = GetOutputFilename('%s.decomp.otmp' % algo)
-        Run('lzma_alone', 'd', fname, outfname)
-        data = ReadFile(outfname, binary=True)
-    elif algo == 'gzip':
-        data = Run('gzip', '-cd', fname, binary=True)
-    else:
-        raise ValueError("Unknown algorithm '%s'" % algo)
-    return data
-
-CMD_CREATE, CMD_DELETE, CMD_ADD, CMD_REPLACE, CMD_EXTRACT = range(5)
-
-IFWITOOL_CMDS = {
-    CMD_CREATE: 'create',
-    CMD_DELETE: 'delete',
-    CMD_ADD: 'add',
-    CMD_REPLACE: 'replace',
-    CMD_EXTRACT: 'extract',
-    }
-
-def RunIfwiTool(ifwi_file, cmd, fname=None, subpart=None, entry_name=None):
-    """Run ifwitool with the given arguments:
-
-    Args:
-        ifwi_file: IFWI file to operation on
-        cmd: Command to execute (CMD_...)
-        fname: Filename of file to add/replace/extract/create (None for
-            CMD_DELETE)
-        subpart: Name of sub-partition to operation on (None for CMD_CREATE)
-        entry_name: Name of directory entry to operate on, or None if none
-    """
-    args = ['ifwitool', ifwi_file]
-    args.append(IFWITOOL_CMDS[cmd])
-    if fname:
-        args += ['-f', fname]
-    if subpart:
-        args += ['-n', subpart]
-    if entry_name:
-        args += ['-d', '-e', entry_name]
-    Run(*args)
-
 def ToHex(val):
     """Convert an integer value (or None) to a string
 
@@ -596,3 +546,51 @@
     if not pager:
         pager = ['more']
     command.Run(*pager, fname)
+
+def Download(url, tmpdir_pattern='.patman'):
+    """Download a file to a temporary directory
+
+    Args:
+        url (str): URL to download
+        tmpdir_pattern (str): suffix to use for the temporary-directory name
+
+    Returns:
+        Tuple:
+            Full path to the downloaded archive file in that directory,
+                or None if there was an error while downloading
+            Temporary directory name
+    """
+    print('- downloading: %s' % url)
+    leaf = url.split('/')[-1]
+    tmpdir = tempfile.mkdtemp(tmpdir_pattern)
+    response = urllib.request.urlopen(url)
+    fname = os.path.join(tmpdir, leaf)
+    fd = open(fname, 'wb')
+    meta = response.info()
+    size = int(meta.get('Content-Length'))
+    done = 0
+    block_size = 1 << 16
+    status = ''
+
+    # Read the file in chunks and show progress as we go
+    while True:
+        buffer = response.read(block_size)
+        if not buffer:
+            print(chr(8) * (len(status) + 1), '\r', end=' ')
+            break
+
+        done += len(buffer)
+        fd.write(buffer)
+        status = r'%10d MiB  [%3d%%]' % (done // 1024 // 1024,
+                                            done * 100 // size)
+        status = status + chr(8) * (len(status) + 1)
+        print(status, end=' ')
+        sys.stdout.flush()
+    print('\r', end='')
+    sys.stdout.flush()
+    fd.close()
+    if done != size:
+        print('Error, failed to download')
+        os.remove(fname)
+        fname = None
+    return fname, tmpdir