// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014, Staubli Faverges
 * Pierre Aubert
 *
 * eMMC - Replay Protected Memory Block
 * According to JEDEC Standard No. 84-A441
 */

#include <config.h>
#include <common.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <sdhci.h>
#include <u-boot/sha256.h>
#include "mmc_private.h"

/* Request codes */
#define RPMB_REQ_KEY		1
#define RPMB_REQ_WCOUNTER	2
#define RPMB_REQ_WRITE_DATA	3
#define RPMB_REQ_READ_DATA	4
#define RPMB_REQ_STATUS		5

/* Response codes */
#define RPMB_RESP_KEY		0x0100
#define RPMB_RESP_WCOUNTER	0x0200
#define RPMB_RESP_WRITE_DATA	0x0300
#define RPMB_RESP_READ_DATA	0x0400

/* Error codes */
#define RPMB_OK			0
#define RPMB_ERR_GENERAL	1
#define RPMB_ERR_AUTH		2
#define RPMB_ERR_COUNTER	3
#define RPMB_ERR_ADDRESS	4
#define RPMB_ERR_WRITE		5
#define RPMB_ERR_READ		6
#define RPMB_ERR_KEY		7
#define RPMB_ERR_CNT_EXPIRED	0x80
#define RPMB_ERR_MSK		0x7

/* Sizes of RPMB data frame */
#define RPMB_SZ_STUFF		196
#define RPMB_SZ_MAC		32
#define RPMB_SZ_DATA		256
#define RPMB_SZ_NONCE		16

#define SHA256_BLOCK_SIZE	64

/* Error messages */
static const char * const rpmb_err_msg[] = {
	"",
	"General failure",
	"Authentication failure",
	"Counter failure",
	"Address failure",
	"Write failure",
	"Read failure",
	"Authentication key not yet programmed",
};

/* Structure of RPMB data frame. */
struct s_rpmb {
	unsigned char stuff[RPMB_SZ_STUFF];
	unsigned char mac[RPMB_SZ_MAC];
	unsigned char data[RPMB_SZ_DATA];
	unsigned char nonce[RPMB_SZ_NONCE];
	unsigned int write_counter;
	unsigned short address;
	unsigned short block_count;
	unsigned short result;
	unsigned short request;
};

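/*
 * Program the number of blocks for the following data transfer (CMD23).
 * Bit 31 of the argument requests a reliable write, as required for
 * authenticated RPMB write accesses.
 */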
static int mmc_set_blockcount(struct mmc *mmc, unsigned int blockcount,
			      bool is_rel_write)
{
	struct mmc_cmd cmd = {0};

	cmd.cmdidx = MMC_CMD_SET_BLOCK_COUNT;
	cmd.cmdarg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.cmdarg |= 1 << 31;
	cmd.resp_type = MMC_RSP_R1;

	return mmc_send_cmd(mmc, &cmd, NULL);
}
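
/*
 * Send a single RPMB request frame to the device: CMD23 to set the block
 * count (marked reliable for authenticated writes), followed by CMD25
 * carrying the 512-byte frame.
 */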
static int mmc_rpmb_request(struct mmc *mmc, const struct s_rpmb *s,
			    unsigned int count, bool is_rel_write)
{
	struct mmc_cmd cmd = {0};
	struct mmc_data data;
	struct sdhci_host *host = mmc->priv;
	int ret;

	ret = mmc_set_blockcount(mmc, count, is_rel_write);
	if (ret) {
#ifdef CONFIG_MMC_RPMB_TRACE
		printf("%s:mmc_set_blockcount-> %d\n", __func__, ret);
#endif
		return 1;
	}

	cmd.cmdidx = MMC_CMD_WRITE_MULTIPLE_BLOCK;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	if (host->quirks & SDHCI_QUIRK_BROKEN_R1B)
		cmd.resp_type = MMC_RSP_R1;

	data.src = (const char *)s;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_WRITE;

	ret = mmc_send_cmd(mmc, &cmd, &data);
	if (ret) {
#ifdef CONFIG_MMC_RPMB_TRACE
		printf("%s:mmc_send_cmd-> %d\n", __func__, ret);
#endif
		return 1;
	}
	return 0;
}
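
/*
 * Read back a single RPMB response frame (CMD23 + CMD18) and check that it
 * carries the expected response type and reports no error.
 */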
static int mmc_rpmb_response(struct mmc *mmc, struct s_rpmb *s,
			     unsigned short expected)
{
	struct mmc_cmd cmd = {0};
	struct mmc_data data;
	int ret;

	ret = mmc_set_blockcount(mmc, 1, false);
	if (ret) {
#ifdef CONFIG_MMC_RPMB_TRACE
		printf("%s:mmc_set_blockcount-> %d\n", __func__, ret);
#endif
		return -1;
	}
	cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (char *)s;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	ret = mmc_send_cmd(mmc, &cmd, &data);
	if (ret) {
#ifdef CONFIG_MMC_RPMB_TRACE
		printf("%s:mmc_send_cmd-> %d\n", __func__, ret);
#endif
		return -1;
	}
	/* Check the response and the status */
	if (be16_to_cpu(s->request) != expected) {
#ifdef CONFIG_MMC_RPMB_TRACE
		printf("%s:response= %x\n", __func__,
		       be16_to_cpu(s->request));
#endif
		return -1;
	}
	ret = be16_to_cpu(s->result);
	if (ret) {
		printf("%s %s\n", rpmb_err_msg[ret & RPMB_ERR_MSK],
		       (ret & RPMB_ERR_CNT_EXPIRED) ?
		       "Write counter has expired" : "");
	}

	/* Return the status of the command */
	return ret;
}
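
/* Send a result read request and return the operation status. */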
static int mmc_rpmb_status(struct mmc *mmc, unsigned short expected)
{
	ALLOC_CACHE_ALIGN_BUFFER(struct s_rpmb, rpmb_frame, 1);

	memset(rpmb_frame, 0, sizeof(struct s_rpmb));
	rpmb_frame->request = cpu_to_be16(RPMB_REQ_STATUS);
	if (mmc_rpmb_request(mmc, rpmb_frame, 1, false))
		return -1;

	/* Read the result */
	return mmc_rpmb_response(mmc, rpmb_frame, expected);
}
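
/*
 * Compute the HMAC-SHA256 of 'buff' with the 32-byte RPMB authentication
 * key and store the 32-byte MAC in 'output'.
 */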
static void rpmb_hmac(unsigned char *key, unsigned char *buff, int len,
		      unsigned char *output)
{
	sha256_context ctx;
	int i;
	unsigned char k_ipad[SHA256_BLOCK_SIZE];
	unsigned char k_opad[SHA256_BLOCK_SIZE];

	sha256_starts(&ctx);

	/*
	 * According to RFC 4634, the HMAC transform looks like:
	 *   SHA(K XOR opad, SHA(K XOR ipad, text))
	 *
	 * where K is an n-byte key,
	 * ipad is the byte 0x36 repeated blocksize times,
	 * opad is the byte 0x5c repeated blocksize times,
	 * and text is the data being protected.
	 */

	for (i = 0; i < RPMB_SZ_MAC; i++) {
		k_ipad[i] = key[i] ^ 0x36;
		k_opad[i] = key[i] ^ 0x5c;
	}
	/* remaining pad bytes are '\0' XOR'd with ipad and opad values */
	for ( ; i < SHA256_BLOCK_SIZE; i++) {
		k_ipad[i] = 0x36;
		k_opad[i] = 0x5c;
	}
	sha256_update(&ctx, k_ipad, SHA256_BLOCK_SIZE);
	sha256_update(&ctx, buff, len);
	sha256_finish(&ctx, output);

	/* Init context for second pass */
	sha256_starts(&ctx);

	/* start with outer pad */
	sha256_update(&ctx, k_opad, SHA256_BLOCK_SIZE);

	/* then results of 1st hash */
	sha256_update(&ctx, output, RPMB_SZ_MAC);

	/* finish up 2nd pass */
	sha256_finish(&ctx, output);
}
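
/* Read the current RPMB write counter into *pcounter. */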
int mmc_rpmb_get_counter(struct mmc *mmc, unsigned long *pcounter)
{
	int ret;
	ALLOC_CACHE_ALIGN_BUFFER(struct s_rpmb, rpmb_frame, 1);

	/* Fill the request */
	memset(rpmb_frame, 0, sizeof(struct s_rpmb));
	rpmb_frame->request = cpu_to_be16(RPMB_REQ_WCOUNTER);
	if (mmc_rpmb_request(mmc, rpmb_frame, 1, false))
		return -1;

	/* Read the result */
	ret = mmc_rpmb_response(mmc, rpmb_frame, RPMB_RESP_WCOUNTER);
	if (ret)
		return ret;

	*pcounter = be32_to_cpu(rpmb_frame->write_counter);
	return 0;
}
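
/*
 * Program the 32-byte RPMB authentication key. The key can be written
 * only once over the lifetime of the device.
 */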
int mmc_rpmb_set_key(struct mmc *mmc, void *key)
{
	ALLOC_CACHE_ALIGN_BUFFER(struct s_rpmb, rpmb_frame, 1);

	/* Fill the request */
	memset(rpmb_frame, 0, sizeof(struct s_rpmb));
	rpmb_frame->request = cpu_to_be16(RPMB_REQ_KEY);
	memcpy(rpmb_frame->mac, key, RPMB_SZ_MAC);

	if (mmc_rpmb_request(mmc, rpmb_frame, 1, true))
		return -1;

	/* read the operation status */
	return mmc_rpmb_status(mmc, RPMB_RESP_KEY);
}
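
/*
 * Read 'cnt' 256-byte data blocks starting at RPMB block 'blk' into 'addr'.
 * When a key is given, the MAC of each response frame is verified.
 * Returns the number of blocks read successfully.
 */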
int mmc_rpmb_read(struct mmc *mmc, void *addr, unsigned short blk,
		  unsigned short cnt, unsigned char *key)
{
	ALLOC_CACHE_ALIGN_BUFFER(struct s_rpmb, rpmb_frame, 1);
	int i;

	for (i = 0; i < cnt; i++) {
		/* Fill the request */
		memset(rpmb_frame, 0, sizeof(struct s_rpmb));
		rpmb_frame->address = cpu_to_be16(blk + i);
		rpmb_frame->request = cpu_to_be16(RPMB_REQ_READ_DATA);
		if (mmc_rpmb_request(mmc, rpmb_frame, 1, false))
			break;

		/* Read the result */
		if (mmc_rpmb_response(mmc, rpmb_frame, RPMB_RESP_READ_DATA))
			break;

		/* Check the HMAC if key is provided */
		if (key) {
			unsigned char ret_hmac[RPMB_SZ_MAC];

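			/*
			 * The MAC covers the frame from the data field
			 * through the request field (284 bytes).
			 */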
			rpmb_hmac(key, rpmb_frame->data, 284, ret_hmac);
			if (memcmp(ret_hmac, rpmb_frame->mac, RPMB_SZ_MAC)) {
				printf("MAC error on block #%d\n", i);
				break;
			}
		}
		/* Copy data */
		memcpy(addr + i * RPMB_SZ_DATA, rpmb_frame->data, RPMB_SZ_DATA);
	}
	return i;
}
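
/*
 * Write 'cnt' 256-byte data blocks from 'addr' starting at RPMB block 'blk'.
 * Each block is sent as an authenticated, reliable write using the current
 * write counter and the given key.
 * Returns the number of blocks written successfully.
 */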
int mmc_rpmb_write(struct mmc *mmc, void *addr, unsigned short blk,
		   unsigned short cnt, unsigned char *key)
{
	ALLOC_CACHE_ALIGN_BUFFER(struct s_rpmb, rpmb_frame, 1);
	unsigned long wcount;
	int i;

	for (i = 0; i < cnt; i++) {
		if (mmc_rpmb_get_counter(mmc, &wcount)) {
			printf("Cannot read RPMB write counter\n");
			break;
		}

		/* Fill the request */
		memset(rpmb_frame, 0, sizeof(struct s_rpmb));
		memcpy(rpmb_frame->data, addr + i * RPMB_SZ_DATA, RPMB_SZ_DATA);
		rpmb_frame->address = cpu_to_be16(blk + i);
		rpmb_frame->block_count = cpu_to_be16(1);
		rpmb_frame->write_counter = cpu_to_be32(wcount);
		rpmb_frame->request = cpu_to_be16(RPMB_REQ_WRITE_DATA);
		/* Compute the HMAC (covers data through request, 284 bytes) */
		rpmb_hmac(key, rpmb_frame->data, 284, rpmb_frame->mac);

		if (mmc_rpmb_request(mmc, rpmb_frame, 1, true))
			break;

		/* Get status */
		if (mmc_rpmb_status(mmc, RPMB_RESP_WRITE_DATA))
			break;
	}
	return i;
}

static int send_write_mult_block(struct mmc *mmc, const struct s_rpmb *frm,
				 unsigned short cnt)
{
	struct mmc_cmd cmd = {
		.cmdidx = MMC_CMD_WRITE_MULTIPLE_BLOCK,
		.resp_type = MMC_RSP_R1,
	};
	struct mmc_data data = {
		.src = (const void *)frm,
		.blocks = cnt,
		.blocksize = sizeof(*frm),
		.flags = MMC_DATA_WRITE,
	};

	return mmc_send_cmd(mmc, &cmd, &data);
}

static int send_read_mult_block(struct mmc *mmc, struct s_rpmb *frm,
				unsigned short cnt)
{
	struct mmc_cmd cmd = {
		.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK,
		.resp_type = MMC_RSP_R1,
	};
	struct mmc_data data = {
		.dest = (void *)frm,
		.blocks = cnt,
		.blocksize = sizeof(*frm),
		.flags = MMC_DATA_READ,
	};

	return mmc_send_cmd(mmc, &cmd, &data);
}
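
/*
 * Route a write-type request (program key or write data): send the request
 * frames, then fetch the operation result with a result read request.
 */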
static int rpmb_route_write_req(struct mmc *mmc, struct s_rpmb *req,
				unsigned short req_cnt, struct s_rpmb *rsp,
				unsigned short rsp_cnt)
{
	int ret;

	/*
	 * Send the write request.
	 */
	ret = mmc_set_blockcount(mmc, req_cnt, true);
	if (ret)
		return ret;

	ret = send_write_mult_block(mmc, req, req_cnt);
	if (ret)
		return ret;

	/*
	 * Read the result of the request.
	 */
	ret = mmc_set_blockcount(mmc, 1, false);
	if (ret)
		return ret;

	memset(rsp, 0, sizeof(*rsp));
	rsp->request = cpu_to_be16(RPMB_REQ_STATUS);
	ret = send_write_mult_block(mmc, rsp, 1);
	if (ret)
		return ret;

	ret = mmc_set_blockcount(mmc, 1, false);
	if (ret)
		return ret;

	return send_read_mult_block(mmc, rsp, 1);
}
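
/*
 * Route a read-type request (read counter or read data): send a single
 * request frame and read back 'rsp_cnt' response frames.
 */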
static int rpmb_route_read_req(struct mmc *mmc, struct s_rpmb *req,
			       unsigned short req_cnt, struct s_rpmb *rsp,
			       unsigned short rsp_cnt)
{
	int ret;

	/*
	 * Send the read request.
	 */
	ret = mmc_set_blockcount(mmc, 1, false);
	if (ret)
		return ret;

	ret = send_write_mult_block(mmc, req, 1);
	if (ret)
		return ret;

	/*
	 * Read the result of the request.
	 */
	ret = mmc_set_blockcount(mmc, rsp_cnt, false);
	if (ret)
		return ret;

	return send_read_mult_block(mmc, rsp, rsp_cnt);
}
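
/*
 * Dispatch pre-built RPMB frames according to the request type, after
 * validating the request and response frame counts.
 */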
static int rpmb_route_frames(struct mmc *mmc, struct s_rpmb *req,
			     unsigned short req_cnt, struct s_rpmb *rsp,
			     unsigned short rsp_cnt)
{
	unsigned short n;

	/*
	 * If multiple request frames are provided, make sure that all are
	 * of the same type.
	 */
	for (n = 1; n < req_cnt; n++)
		if (req[n].request != req->request)
			return -EINVAL;

	switch (be16_to_cpu(req->request)) {
	case RPMB_REQ_KEY:
		if (req_cnt != 1 || rsp_cnt != 1)
			return -EINVAL;
		return rpmb_route_write_req(mmc, req, req_cnt, rsp, rsp_cnt);

	case RPMB_REQ_WRITE_DATA:
		if (!req_cnt || rsp_cnt != 1)
			return -EINVAL;
		return rpmb_route_write_req(mmc, req, req_cnt, rsp, rsp_cnt);

	case RPMB_REQ_WCOUNTER:
		if (req_cnt != 1 || rsp_cnt != 1)
			return -EINVAL;
		return rpmb_route_read_req(mmc, req, req_cnt, rsp, rsp_cnt);

	case RPMB_REQ_READ_DATA:
		if (req_cnt != 1 || !rsp_cnt)
			return -EINVAL;
		return rpmb_route_read_req(mmc, req, req_cnt, rsp, rsp_cnt);

	default:
		debug("Unsupported message type: %d\n",
		      be16_to_cpu(req->request));
		return -EINVAL;
	}
}

int mmc_rpmb_route_frames(struct mmc *mmc, void *req, unsigned long reqlen,
			  void *rsp, unsigned long rsplen)
{
	/*
	 * Whoever crafted the data supplied to this function knows how to
	 * format the RPMB frames and which response is expected. If there
	 * is some unexpected mismatch, it is more helpful to report an
	 * error immediately than to guess the intention and possibly just
	 * delay an eventual error, which would be harder to track down.
	 */
	void *rpmb_data = NULL;
	int ret;

	if (reqlen % sizeof(struct s_rpmb) || rsplen % sizeof(struct s_rpmb))
		return -EINVAL;

	if (!IS_ALIGNED((uintptr_t)req, ARCH_DMA_MINALIGN)) {
		/* Memory alignment is required by the MMC driver */
		rpmb_data = malloc(reqlen);
		if (!rpmb_data)
			return -ENOMEM;

		memcpy(rpmb_data, req, reqlen);
		req = rpmb_data;
	}

	ret = rpmb_route_frames(mmc, req, reqlen / sizeof(struct s_rpmb),
				rsp, rsplen / sizeof(struct s_rpmb));
	free(rpmb_data);
	return ret;
}