// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2000-2004
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * (C) Copyright 2011
 * Texas Instruments, <www.ti.com>
 *
 * Matt Porter <mporter@ti.com>
 */
#include <common.h>
#include <gzip.h>
#include <image.h>
#include <log.h>
#include <spl.h>
#include <xyzModem.h>
#include <asm/u-boot.h>
#include <linux/libfdt.h>

#define BUF_SIZE	1024

/*
 * Information required to load an image using ymodem.
 *
 * @image_read: Number of bytes read from the image.
 * @buf: pointer to the previous read block.
 */
struct ymodem_fit_info {
	int image_read;
	char *buf;
};

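/*
 * Console poll helper passed to xyzModem_stream_terminate(): returns the
 * next console character if one is pending, or -1 otherwise.
 */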
static int getcymodem(void)
{
	if (tstc())
		return getchar();
	return -1;
}

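/*
 * spl_load_info read callback used when streaming a FIT over ymodem.
 *
 * The modem stream cannot seek, so bytes before @offset are read and
 * discarded; @info->image_read tracks the total number of bytes consumed
 * from the stream, and @info->buf holds the most recently received block so
 * a range that starts inside an already-read block can still be served.
 */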
static ulong ymodem_read_fit(struct spl_load_info *load, ulong offset,
			     ulong size, void *addr)
{
	int res, err, buf_offset;
	struct ymodem_fit_info *info = load->priv;
	char *buf = info->buf;
	ulong copy_size = size;

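	/* Drain blocks until the stream position reaches @offset. */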
	while (info->image_read < offset) {
		res = xyzModem_stream_read(buf, BUF_SIZE, &err);
		if (res <= 0)
			break;

		info->image_read += res;
	}

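	/*
	 * The last block read may have overshot @offset: buf_offset is the
	 * number of valid bytes in that block, of which the trailing res
	 * bytes lie at or beyond @offset, so copy that tail out (capped at
	 * copy_size).
	 */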
	if (info->image_read > offset) {
		res = info->image_read - offset;
		if (info->image_read % BUF_SIZE)
			buf_offset = (info->image_read % BUF_SIZE);
		else
			buf_offset = BUF_SIZE;

		if (res > copy_size) {
			memcpy(addr, &buf[buf_offset - res], copy_size);
			goto done;
		}
		memcpy(addr, &buf[buf_offset - res], res);
		addr = addr + res;
		copy_size -= res;
	}

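	/* Read and copy blocks until the range is filled or the stream ends. */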
	while (info->image_read < offset + size) {
		res = xyzModem_stream_read(buf, BUF_SIZE, &err);
		if (res <= 0)
			break;

		info->image_read += res;
		if (res > copy_size) {
			memcpy(addr, buf, copy_size);
			goto done;
		}
		memcpy(addr, buf, res);
		addr += res;
		copy_size -= res;
	}

done:
	return size;
}

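/*
 * Load an image over ymodem on the console UART.
 *
 * The first BUF_SIZE block decides how the rest of the transfer is handled:
 * a FIT is either buffered whole (CONFIG_SPL_LOAD_FIT_FULL) or streamed
 * through spl_load_simple_fit() (CONFIG_SPL_LOAD_FIT); anything else is
 * treated as a legacy uImage, optionally gunzip'd after the transfer.
 */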
int spl_ymodem_load_image(struct spl_image_info *spl_image,
			  struct spl_boot_device *bootdev)
{
	ulong size = 0;
	int err;
	int res;
	int ret;
	connection_info_t info;
	char buf[BUF_SIZE];
	struct legacy_img_hdr *ih = NULL;
	ulong addr = 0;

	info.mode = xyzModem_ymodem;
	ret = xyzModem_stream_open(&info, &err);
	if (ret) {
		printf("spl: ymodem err - %s\n", xyzModem_error(err));
		return ret;
	}

	res = xyzModem_stream_read(buf, BUF_SIZE, &err);
	if (res <= 0)
		goto end_stream;

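	/*
	 * Full FIT: copy the entire image to CONFIG_SYS_LOAD_ADDR as it
	 * arrives, then parse the header from the buffered copy.
	 */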
	if (IS_ENABLED(CONFIG_SPL_LOAD_FIT_FULL) &&
	    image_get_magic((struct legacy_img_hdr *)buf) == FDT_MAGIC) {
		addr = CONFIG_SYS_LOAD_ADDR;
		ih = (struct legacy_img_hdr *)addr;

		memcpy((void *)addr, buf, res);
		size += res;
		addr += res;

		while ((res = xyzModem_stream_read(buf, BUF_SIZE, &err)) > 0) {
			memcpy((void *)addr, buf, res);
			size += res;
			addr += res;
		}

		ret = spl_parse_image_header(spl_image, bootdev, ih);
		if (ret)
			return ret;
	} else if (IS_ENABLED(CONFIG_SPL_LOAD_FIT) &&
		   image_get_magic((struct legacy_img_hdr *)buf) == FDT_MAGIC) {
		struct spl_load_info load;
		struct ymodem_fit_info info;

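		/*
		 * Streamed FIT: spl_load_simple_fit() pulls data on demand
		 * through ymodem_read_fit(); a block length of 1 keeps the
		 * requested offsets byte-granular.
		 */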
		debug("Found FIT\n");
		load.priv = (void *)&info;
		spl_set_bl_len(&load, 1);
		info.buf = buf;
		info.image_read = BUF_SIZE;
		load.read = ymodem_read_fit;
		ret = spl_load_simple_fit(spl_image, &load, 0, (void *)buf);
		size = info.image_read;

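		/*
		 * Consume whatever is left of the transfer so size reflects
		 * the full image.
		 */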
		while ((res = xyzModem_stream_read(buf, BUF_SIZE, &err)) > 0)
			size += res;
	} else {
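		/*
		 * Legacy uImage: parse the header from the first block, then
		 * copy the payload to its load address. A gzip'd image is
		 * staged at CONFIG_SYS_LOAD_ADDR and decompressed after the
		 * transfer completes.
		 */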
		ih = (struct legacy_img_hdr *)buf;
		ret = spl_parse_image_header(spl_image, bootdev, ih);
		if (ret)
			goto end_stream;
#ifdef CONFIG_SPL_GZIP
		if (ih->ih_comp == IH_COMP_GZIP)
			addr = CONFIG_SYS_LOAD_ADDR;
		else
#endif
			addr = spl_image->load_addr;
		memcpy((void *)addr, buf, res);
		ih = (struct legacy_img_hdr *)addr;
		size += res;
		addr += res;

		while ((res = xyzModem_stream_read(buf, BUF_SIZE, &err)) > 0) {
			memcpy((void *)addr, buf, res);
			size += res;
			addr += res;
		}
	}

end_stream:
	xyzModem_stream_close(&err);
	xyzModem_stream_terminate(false, &getcymodem);

	printf("Loaded %lu bytes\n", size);

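	/*
	 * If the image was not a FIT and is gzip-compressed, decompress it
	 * from the staging area at CONFIG_SYS_LOAD_ADDR to its load address,
	 * skipping the legacy header in both source and destination.
	 */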
#ifdef CONFIG_SPL_GZIP
	if (!(IS_ENABLED(CONFIG_SPL_LOAD_FIT) &&
	      image_get_magic((struct legacy_img_hdr *)buf) == FDT_MAGIC) &&
	    (ih->ih_comp == IH_COMP_GZIP)) {
		if (gunzip((void *)(spl_image->load_addr + sizeof(*ih)),
			   CONFIG_SYS_BOOTM_LEN,
			   (void *)(CONFIG_SYS_LOAD_ADDR + sizeof(*ih)),
			   &size)) {
			puts("Uncompressing error\n");
			return -EIO;
		}
	}
#endif

	return ret;
}
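/* Register this loader for BOOT_DEVICE_UART (boot method priority 0). */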
SPL_LOAD_IMAGE_METHOD("UART", 0, BOOT_DEVICE_UART, spl_ymodem_load_image);