Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/Kconfig        18
-rw-r--r--  drivers/block/Makefile        1
-rw-r--r--  drivers/block/blk-uclass.c    1
-rw-r--r--  drivers/block/blkmap.c      519
-rw-r--r--  drivers/fastboot/fb_mmc.c    19
5 files changed, 552 insertions, 6 deletions
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e95da48bdc..5a1aeb3d2b 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -67,6 +67,24 @@ config BLOCK_CACHE
it will prevent repeated reads from directory structures and other
filesystem data structures.
+config BLKMAP
+ bool "Composable virtual block devices (blkmap)"
+ depends on BLK
+ help
+ Create virtual block devices that are backed by various sources,
+ e.g. RAM, or parts of an existing block device. Though much more
+ rudimentary, it borrows a lot of ideas from Linux's device mapper
+ subsystem.
+
+ Example use-cases:
+ - Treat a region of RAM as a block device, i.e. a RAM disk. This lets
+ you extract files from filesystem images stored in RAM (perhaps as a
+ result of a TFTP transfer).
+ - Create a virtual partition on an existing device. This lets you
+ access filesystems that aren't stored at an exact partition
+ boundary. A common example is a filesystem image embedded in a FIT
+ image.
+
config SPL_BLOCK_CACHE
bool "Use block device cache in SPL"
depends on SPL_BLK
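
The use-cases listed in the help text above map onto the small C API that the new drivers/block/blkmap.c below exports (its declarations live in <blkmap.h>, which sits outside this drivers-only diffstat). As a minimal sketch of the RAM-disk case -- the function name, label, and error handling here are illustrative, not part of the patch -- a filesystem image already loaded into memory could be exposed as a 512-byte-block device like this:

#include <blk.h>
#include <blkmap.h>
#include <dm.h>

/* Sketch: expose "blkcnt" 512-byte blocks of physical memory at
 * "paddr" as a virtual block device labelled "ramdisk".
 */
static int ramdisk_sketch(phys_addr_t paddr, lbaint_t blkcnt)
{
	struct udevice *dev;
	int err;

	err = blkmap_create("ramdisk", &dev);
	if (err)
		return err;

	/* Map the memory region at block 0 of the blkmap */
	err = blkmap_map_pmem(dev, 0, blkcnt, paddr);
	if (err) {
		blkmap_destroy(dev);
		return err;
	}

	/* Filesystem code can now read the device like any other
	 * block device.
	 */
	return 0;
}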
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index f12447d78d..a161d145fd 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_IDE) += ide.o
endif
obj-$(CONFIG_SANDBOX) += sandbox.o host-uclass.o host_dev.o
obj-$(CONFIG_$(SPL_TPL_)BLOCK_CACHE) += blkcache.o
+obj-$(CONFIG_BLKMAP) += blkmap.o
obj-$(CONFIG_EFI_MEDIA) += efi-media-uclass.o
obj-$(CONFIG_EFI_MEDIA_SANDBOX) += sb_efi_media.o
diff --git a/drivers/block/blk-uclass.c b/drivers/block/blk-uclass.c
index c69fc4d518..cb73faaeda 100644
--- a/drivers/block/blk-uclass.c
+++ b/drivers/block/blk-uclass.c
@@ -32,6 +32,7 @@ static struct {
{ UCLASS_EFI_LOADER, "efiloader" },
{ UCLASS_VIRTIO, "virtio" },
{ UCLASS_PVBLOCK, "pvblock" },
+ { UCLASS_BLKMAP, "blkmap" },
};
static enum uclass_id uclass_name_to_iftype(const char *uclass_idname)
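
The new table entry is what lets the generic interface-name lookups (uclass_name_to_iftype() above iterates this table) resolve "blkmap" to UCLASS_BLKMAP. A minimal sketch, assuming the existing blk_get_device_by_str() helper from disk/part.c; the device number is illustrative:

#include <blk.h>
#include <part.h>

/* Sketch: look up blkmap device 0 by interface name, the same way
 * "mmc 0" or "virtio 0" are resolved.
 */
static int find_blkmap0_sketch(struct blk_desc **descp)
{
	int devnum;

	devnum = blk_get_device_by_str("blkmap", "0", descp);
	if (devnum < 0)
		return devnum;

	return 0;
}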
diff --git a/drivers/block/blkmap.c b/drivers/block/blkmap.c
new file mode 100644
index 0000000000..2bb0acc20f
--- /dev/null
+++ b/drivers/block/blkmap.c
@@ -0,0 +1,519 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2023 Addiva Elektronik
+ * Author: Tobias Waldekranz <tobias@waldekranz.com>
+ */
+
+#include <common.h>
+#include <blk.h>
+#include <blkmap.h>
+#include <dm.h>
+#include <malloc.h>
+#include <mapmem.h>
+#include <part.h>
+#include <dm/device-internal.h>
+#include <dm/lists.h>
+#include <dm/root.h>
+
+struct blkmap;
+
+/**
+ * struct blkmap_slice - Region mapped to a blkmap
+ *
+ * Common data for a region mapped to a blkmap, specialized by each
+ * map type.
+ *
+ * @node: List node used to associate this slice with a blkmap
+ * @blknr: Start block number of the mapping
+ * @blkcnt: Number of blocks covered by this mapping
+ */
+struct blkmap_slice {
+ struct list_head node;
+
+ lbaint_t blknr;
+ lbaint_t blkcnt;
+
+ /**
+ * @read: - Read from slice
+ *
+ * @read.bm: Blkmap to which this slice belongs
+ * @read.bms: This slice
+ * @read.blknr: Start block number to read from
+ * @read.blkcnt: Number of blocks to read
+ * @read.buffer: Buffer to store read data to
+ */
+ ulong (*read)(struct blkmap *bm, struct blkmap_slice *bms,
+ lbaint_t blknr, lbaint_t blkcnt, void *buffer);
+
+ /**
+ * @write: - Write to slice
+ *
+ * @write.bm: Blkmap to which this slice belongs
+ * @write.bms: This slice
+ * @write.blknr: Start block number to write to
+ * @write.blkcnt: Number of blocks to write
+ * @write.buffer: Data to be written
+ */
+ ulong (*write)(struct blkmap *bm, struct blkmap_slice *bms,
+ lbaint_t blknr, lbaint_t blkcnt, const void *buffer);
+
+ /**
+ * @destroy: - Tear down slice
+ *
+ * @destroy.bm: Blkmap to which this slice belongs
+ * @destroy.bms: This slice
+ */
+ void (*destroy)(struct blkmap *bm, struct blkmap_slice *bms);
+};
+
+/**
+ * struct blkmap - Block map
+ *
+ * Data associated with a blkmap.
+ *
+ * @label: Human readable name of this blkmap
+ * @blk: Underlying block device
+ * @slices: List of slices associated with this blkmap
+ */
+struct blkmap {
+ char *label;
+ struct udevice *blk;
+ struct list_head slices;
+};
+
+static bool blkmap_slice_contains(struct blkmap_slice *bms, lbaint_t blknr)
+{
+ return (blknr >= bms->blknr) && (blknr < (bms->blknr + bms->blkcnt));
+}
+
+static bool blkmap_slice_available(struct blkmap *bm, struct blkmap_slice *new)
+{
+ struct blkmap_slice *bms;
+ lbaint_t first, last;
+
+ first = new->blknr;
+ last = new->blknr + new->blkcnt - 1;
+
+ list_for_each_entry(bms, &bm->slices, node) {
+ if (blkmap_slice_contains(bms, first) ||
+ blkmap_slice_contains(bms, last) ||
+ blkmap_slice_contains(new, bms->blknr) ||
+ blkmap_slice_contains(new, bms->blknr + bms->blkcnt - 1))
+ return false;
+ }
+
+ return true;
+}
+
+static int blkmap_slice_add(struct blkmap *bm, struct blkmap_slice *new)
+{
+ struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
+ struct list_head *insert = &bm->slices;
+ struct blkmap_slice *bms;
+
+ if (!blkmap_slice_available(bm, new))
+ return -EBUSY;
+
+ list_for_each_entry(bms, &bm->slices, node) {
+ if (bms->blknr < new->blknr)
+ continue;
+
+ insert = &bms->node;
+ break;
+ }
+
+ list_add_tail(&new->node, insert);
+
+ /* Disk might have grown, update the size */
+ bms = list_last_entry(&bm->slices, struct blkmap_slice, node);
+ bd->lba = bms->blknr + bms->blkcnt;
+ return 0;
+}
+
+/**
+ * struct blkmap_linear - Linear mapping to other block device
+ *
+ * @slice: Common map data
+ * @blk: Target block device of this mapping
+ * @blknr: Start block number of the target device
+ */
+struct blkmap_linear {
+ struct blkmap_slice slice;
+
+ struct udevice *blk;
+ lbaint_t blknr;
+};
+
+static ulong blkmap_linear_read(struct blkmap *bm, struct blkmap_slice *bms,
+ lbaint_t blknr, lbaint_t blkcnt, void *buffer)
+{
+ struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);
+
+ return blk_read(bml->blk, bml->blknr + blknr, blkcnt, buffer);
+}
+
+static ulong blkmap_linear_write(struct blkmap *bm, struct blkmap_slice *bms,
+ lbaint_t blknr, lbaint_t blkcnt,
+ const void *buffer)
+{
+ struct blkmap_linear *bml = container_of(bms, struct blkmap_linear, slice);
+
+ return blk_write(bml->blk, bml->blknr + blknr, blkcnt, buffer);
+}
+
+int blkmap_map_linear(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
+ struct udevice *lblk, lbaint_t lblknr)
+{
+ struct blkmap *bm = dev_get_plat(dev);
+ struct blkmap_linear *linear;
+ struct blk_desc *bd, *lbd;
+ int err;
+
+ bd = dev_get_uclass_plat(bm->blk);
+ lbd = dev_get_uclass_plat(lblk);
+ if (lbd->blksz != bd->blksz)
+ /* We could support block size translation, but we
+ * don't yet.
+ */
+ return -EINVAL;
+
+ linear = malloc(sizeof(*linear));
+ if (!linear)
+ return -ENOMEM;
+
+ *linear = (struct blkmap_linear) {
+ .slice = {
+ .blknr = blknr,
+ .blkcnt = blkcnt,
+
+ .read = blkmap_linear_read,
+ .write = blkmap_linear_write,
+ },
+
+ .blk = lblk,
+ .blknr = lblknr,
+ };
+
+ err = blkmap_slice_add(bm, &linear->slice);
+ if (err)
+ free(linear);
+
+ return err;
+}
+
+/**
+ * struct blkmap_mem - Memory mapping
+ *
+ * @slice: Common map data
+ * @addr: Target memory region of this mapping
+ * @remapped: True if @addr is backed by a physical to virtual memory
+ * mapping that must be torn down at the end of this mapping's
+ * lifetime.
+ */
+struct blkmap_mem {
+ struct blkmap_slice slice;
+ void *addr;
+ bool remapped;
+};
+
+static ulong blkmap_mem_read(struct blkmap *bm, struct blkmap_slice *bms,
+ lbaint_t blknr, lbaint_t blkcnt, void *buffer)
+{
+ struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
+ struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
+ char *src;
+
+ src = bmm->addr + (blknr << bd->log2blksz);
+ memcpy(buffer, src, blkcnt << bd->log2blksz);
+ return blkcnt;
+}
+
+static ulong blkmap_mem_write(struct blkmap *bm, struct blkmap_slice *bms,
+ lbaint_t blknr, lbaint_t blkcnt,
+ const void *buffer)
+{
+ struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
+ struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
+ char *dst;
+
+ dst = bmm->addr + (blknr << bd->log2blksz);
+ memcpy(dst, buffer, blkcnt << bd->log2blksz);
+ return blkcnt;
+}
+
+static void blkmap_mem_destroy(struct blkmap *bm, struct blkmap_slice *bms)
+{
+ struct blkmap_mem *bmm = container_of(bms, struct blkmap_mem, slice);
+
+ if (bmm->remapped)
+ unmap_sysmem(bmm->addr);
+}
+
+int __blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
+ void *addr, bool remapped)
+{
+ struct blkmap *bm = dev_get_plat(dev);
+ struct blkmap_mem *bmm;
+ int err;
+
+ bmm = malloc(sizeof(*bmm));
+ if (!bmm)
+ return -ENOMEM;
+
+ *bmm = (struct blkmap_mem) {
+ .slice = {
+ .blknr = blknr,
+ .blkcnt = blkcnt,
+
+ .read = blkmap_mem_read,
+ .write = blkmap_mem_write,
+ .destroy = blkmap_mem_destroy,
+ },
+
+ .addr = addr,
+ .remapped = remapped,
+ };
+
+ err = blkmap_slice_add(bm, &bmm->slice);
+ if (err)
+ free(bmm);
+
+ return err;
+}
+
+int blkmap_map_mem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
+ void *addr)
+{
+ return __blkmap_map_mem(dev, blknr, blkcnt, addr, false);
+}
+
+int blkmap_map_pmem(struct udevice *dev, lbaint_t blknr, lbaint_t blkcnt,
+ phys_addr_t paddr)
+{
+ struct blkmap *bm = dev_get_plat(dev);
+ struct blk_desc *bd = dev_get_uclass_plat(bm->blk);
+ void *addr;
+ int err;
+
+ addr = map_sysmem(paddr, blkcnt << bd->log2blksz);
+ if (!addr)
+ return -ENOMEM;
+
+ err = __blkmap_map_mem(dev, blknr, blkcnt, addr, true);
+ if (err)
+ unmap_sysmem(addr);
+
+ return err;
+}
+
+static ulong blkmap_blk_read_slice(struct blkmap *bm, struct blkmap_slice *bms,
+ lbaint_t blknr, lbaint_t blkcnt,
+ void *buffer)
+{
+ lbaint_t nr, cnt;
+
+ nr = blknr - bms->blknr;
+ cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
+ return bms->read(bm, bms, nr, cnt, buffer);
+}
+
+static ulong blkmap_blk_read(struct udevice *dev, lbaint_t blknr,
+ lbaint_t blkcnt, void *buffer)
+{
+ struct blk_desc *bd = dev_get_uclass_plat(dev);
+ struct blkmap *bm = dev_get_plat(dev->parent);
+ struct blkmap_slice *bms;
+ lbaint_t cnt, total = 0;
+
+ list_for_each_entry(bms, &bm->slices, node) {
+ if (!blkmap_slice_contains(bms, blknr))
+ continue;
+
+ cnt = blkmap_blk_read_slice(bm, bms, blknr, blkcnt, buffer);
+ blknr += cnt;
+ blkcnt -= cnt;
+ buffer += cnt << bd->log2blksz;
+ total += cnt;
+ }
+
+ return total;
+}
+
+static ulong blkmap_blk_write_slice(struct blkmap *bm, struct blkmap_slice *bms,
+ lbaint_t blknr, lbaint_t blkcnt,
+ const void *buffer)
+{
+ lbaint_t nr, cnt;
+
+ nr = blknr - bms->blknr;
+ cnt = (blkcnt < bms->blkcnt) ? blkcnt : bms->blkcnt;
+ return bms->write(bm, bms, nr, cnt, buffer);
+}
+
+static ulong blkmap_blk_write(struct udevice *dev, lbaint_t blknr,
+ lbaint_t blkcnt, const void *buffer)
+{
+ struct blk_desc *bd = dev_get_uclass_plat(dev);
+ struct blkmap *bm = dev_get_plat(dev->parent);
+ struct blkmap_slice *bms;
+ lbaint_t cnt, total = 0;
+
+ list_for_each_entry(bms, &bm->slices, node) {
+ if (!blkmap_slice_contains(bms, blknr))
+ continue;
+
+ cnt = blkmap_blk_write_slice(bm, bms, blknr, blkcnt, buffer);
+ blknr += cnt;
+ blkcnt -= cnt;
+ buffer += cnt << bd->log2blksz;
+ total += cnt;
+ }
+
+ return total;
+}
+
+static const struct blk_ops blkmap_blk_ops = {
+ .read = blkmap_blk_read,
+ .write = blkmap_blk_write,
+};
+
+U_BOOT_DRIVER(blkmap_blk) = {
+ .name = "blkmap_blk",
+ .id = UCLASS_BLK,
+ .ops = &blkmap_blk_ops,
+};
+
+int blkmap_dev_bind(struct udevice *dev)
+{
+ struct blkmap *bm = dev_get_plat(dev);
+ struct blk_desc *bd;
+ int err;
+
+ err = blk_create_devicef(dev, "blkmap_blk", "blk", UCLASS_BLKMAP,
+ dev_seq(dev), 512, 0, &bm->blk);
+ if (err)
+ return log_msg_ret("blk", err);
+
+ INIT_LIST_HEAD(&bm->slices);
+
+ bd = dev_get_uclass_plat(bm->blk);
+ snprintf(bd->vendor, BLK_VEN_SIZE, "U-Boot");
+ snprintf(bd->product, BLK_PRD_SIZE, "blkmap");
+ snprintf(bd->revision, BLK_REV_SIZE, "1.0");
+
+ /* EFI core isn't keen on zero-sized disks, so we lie. This is
+ * updated with the correct size once the user adds a
+ * mapping.
+ */
+ bd->lba = 1;
+
+ return 0;
+}
+
+int blkmap_dev_unbind(struct udevice *dev)
+{
+ struct blkmap *bm = dev_get_plat(dev);
+ struct blkmap_slice *bms, *tmp;
+ int err;
+
+ list_for_each_entry_safe(bms, tmp, &bm->slices, node) {
+ list_del(&bms->node);
+ free(bms);
+ }
+
+ err = device_remove(bm->blk, DM_REMOVE_NORMAL);
+ if (err)
+ return err;
+
+ return device_unbind(bm->blk);
+}
+
+U_BOOT_DRIVER(blkmap_root) = {
+ .name = "blkmap_dev",
+ .id = UCLASS_BLKMAP,
+ .bind = blkmap_dev_bind,
+ .unbind = blkmap_dev_unbind,
+ .plat_auto = sizeof(struct blkmap),
+};
+
+struct udevice *blkmap_from_label(const char *label)
+{
+ struct udevice *dev;
+ struct uclass *uc;
+ struct blkmap *bm;
+
+ uclass_id_foreach_dev(UCLASS_BLKMAP, dev, uc) {
+ bm = dev_get_plat(dev);
+ if (bm->label && !strcmp(label, bm->label))
+ return dev;
+ }
+
+ return NULL;
+}
+
+int blkmap_create(const char *label, struct udevice **devp)
+{
+ char *hname, *hlabel;
+ struct udevice *dev;
+ struct blkmap *bm;
+ size_t namelen;
+ int err;
+
+ dev = blkmap_from_label(label);
+ if (dev) {
+ err = -EBUSY;
+ goto err;
+ }
+
+ hlabel = strdup(label);
+ if (!hlabel) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ namelen = strlen("blkmap-") + strlen(label) + 1;
+ hname = malloc(namelen);
+ if (!hname) {
+ err = -ENOMEM;
+ goto err_free_hlabel;
+ }
+
+ strlcpy(hname, "blkmap-", namelen);
+ strlcat(hname, label, namelen);
+
+ err = device_bind_driver(dm_root(), "blkmap_dev", hname, &dev);
+ if (err)
+ goto err_free_hname;
+
+ device_set_name_alloced(dev);
+ bm = dev_get_plat(dev);
+ bm->label = hlabel;
+
+ if (devp)
+ *devp = dev;
+
+ return 0;
+
+err_free_hname:
+ free(hname);
+err_free_hlabel:
+ free(hlabel);
+err:
+ return err;
+}
+
+int blkmap_destroy(struct udevice *dev)
+{
+ int err;
+
+ err = device_remove(dev, DM_REMOVE_NORMAL);
+ if (err)
+ return err;
+
+ return device_unbind(dev);
+}
+
+UCLASS_DRIVER(blkmap) = {
+ .id = UCLASS_BLKMAP,
+ .name = "blkmap",
+};
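
For the second use-case from the Kconfig help, a filesystem that is not aligned to a partition boundary (for example one embedded in a FIT image) can be exposed through a linear mapping onto the backing device. A minimal sketch, assuming the backing device's UCLASS_BLK udevice has already been looked up elsewhere and that its block size matches the blkmap's 512 bytes (blkmap_map_linear() returns -EINVAL otherwise); label and error handling are illustrative only:

#include <blk.h>
#include <blkmap.h>
#include <dm.h>

/* Sketch: present blocks offset..offset+blkcnt-1 of an existing
 * block device as blocks 0..blkcnt-1 of a new blkmap.
 */
static int embedded_fs_sketch(struct udevice *backing_blk,
			      lbaint_t offset, lbaint_t blkcnt)
{
	struct udevice *dev;
	int err;

	err = blkmap_create("fit-fs", &dev);
	if (err)
		return err;

	/* Reads and writes on the blkmap pass straight through to the
	 * mapped window of the backing device.
	 */
	err = blkmap_map_linear(dev, 0, blkcnt, backing_blk, offset);
	if (err)
		blkmap_destroy(dev);

	return err;
}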
diff --git a/drivers/fastboot/fb_mmc.c b/drivers/fastboot/fb_mmc.c
index a06c590234..9d25c40202 100644
--- a/drivers/fastboot/fb_mmc.c
+++ b/drivers/fastboot/fb_mmc.c
@@ -287,7 +287,7 @@ static void fb_mmc_boot_ops(struct blk_desc *dev_desc, void *buffer,
*/
static lbaint_t fb_mmc_get_boot_header(struct blk_desc *dev_desc,
struct disk_partition *info,
- struct andr_img_hdr *hdr,
+ struct andr_boot_img_hdr_v0 *hdr,
char *response)
{
ulong sector_size; /* boot partition sector size */
@@ -296,7 +296,7 @@ static lbaint_t fb_mmc_get_boot_header(struct blk_desc *dev_desc,
/* Calculate boot image sectors count */
sector_size = info->blksz;
- hdr_sectors = DIV_ROUND_UP(sizeof(struct andr_img_hdr), sector_size);
+ hdr_sectors = DIV_ROUND_UP(sizeof(struct andr_boot_img_hdr_v0), sector_size);
if (hdr_sectors == 0) {
pr_err("invalid number of boot sectors: 0\n");
fastboot_fail("invalid number of boot sectors: 0", response);
@@ -313,8 +313,7 @@ static lbaint_t fb_mmc_get_boot_header(struct blk_desc *dev_desc,
}
/* Check boot header magic string */
- res = android_image_check_header(hdr);
- if (res != 0) {
+ if (!is_android_boot_image_header(hdr)) {
pr_err("bad boot image magic\n");
fastboot_fail("boot partition not initialized", response);
return 0;
@@ -338,7 +337,7 @@ static int fb_mmc_update_zimage(struct blk_desc *dev_desc,
char *response)
{
uintptr_t hdr_addr; /* boot image header address */
- struct andr_img_hdr *hdr; /* boot image header */
+ struct andr_boot_img_hdr_v0 *hdr; /* boot image header */
lbaint_t hdr_sectors; /* boot image header sectors */
u8 *ramdisk_buffer;
u32 ramdisk_sector_start;
@@ -361,7 +360,7 @@ static int fb_mmc_update_zimage(struct blk_desc *dev_desc,
/* Put boot image header in fastboot buffer after downloaded zImage */
hdr_addr = (uintptr_t)download_buffer + ALIGN(download_bytes, PAGE_SIZE);
- hdr = (struct andr_img_hdr *)hdr_addr;
+ hdr = (struct andr_boot_img_hdr_v0 *)hdr_addr;
/* Read boot image header */
hdr_sectors = fb_mmc_get_boot_header(dev_desc, &info, hdr, response);
@@ -371,6 +370,14 @@ static int fb_mmc_update_zimage(struct blk_desc *dev_desc,
return -1;
}
+ /* Check if boot image header version is 2 or less */
+ if (hdr->header_version > 2) {
+ pr_err("zImage flashing supported only for boot images v2 and less\n");
+ fastboot_fail("zImage flashing supported only for boot images v2 and less",
+ response);
+ return -EOPNOTSUPP;
+ }
+
/* Check if boot image has second stage in it (we don't support it) */
if (hdr->second_size > 0) {
pr_err("moving second stage is not supported yet\n");