// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2013 Google, Inc
 */

#include <common.h>
#include <dm.h>
#include <tpm-v1.h>
#include <asm/state.h>
#include <asm/unaligned.h>
#include <u-boot/crc.h>

/* TPM NVRAM location indices. */
#define FIRMWARE_NV_INDEX		0x1007
#define KERNEL_NV_INDEX			0x1008
#define BACKUP_NV_INDEX			0x1009
#define FWMP_NV_INDEX			0x100a
#define REC_HASH_NV_INDEX		0x100b
#define REC_HASH_NV_SIZE		VB2_SHA256_DIGEST_SIZE

#define NV_DATA_PUBLIC_PERMISSIONS_OFFSET	60

/* Kernel TPM space - KERNEL_NV_INDEX, locked with physical presence */
#define ROLLBACK_SPACE_KERNEL_VERSION	2
#define ROLLBACK_SPACE_KERNEL_UID	0x4752574C  /* 'GRWL' */

struct rollback_space_kernel {
	/* Struct version, for backwards compatibility */
	uint8_t struct_version;
	/* Unique ID to detect space redefinition */
	uint32_t uid;
	/* Kernel versions */
	uint32_t kernel_versions;
	/* Reserved for future expansion */
	uint8_t reserved[3];
	/* Checksum (v2 and later only) */
	uint8_t crc8;
} __packed rollback_space_kernel;

/*
 * These numbers derive from adding the sizes of command fields as shown in
 * the TPM commands manual.
 */
#define TPM_REQUEST_HEADER_LENGTH	10
#define TPM_RESPONSE_HEADER_LENGTH	10
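
/*
 * For reference, the TPM 1.2 layout these totals come from: a request
 * starts with a 16-bit tag, a 32-bit total length and a 32-bit ordinal
 * (2 + 4 + 4 = 10 bytes); a response starts with a 16-bit tag, a 32-bit
 * total length and a 32-bit return code, also 10 bytes.
 */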

/* These are the different non-volatile spaces that we emulate */
enum {
	NV_GLOBAL_LOCK,
	NV_SEQ_FIRMWARE,
	NV_SEQ_KERNEL,
	NV_SEQ_BACKUP,
	NV_SEQ_FWMP,
	NV_SEQ_REC_HASH,

	NV_SEQ_COUNT,
};

/* Size of each non-volatile space */
#define NV_DATA_SIZE		0x20

struct nvdata_state {
	bool present;
	u8 data[NV_DATA_SIZE];
};

/*
 * Information about our TPM emulation. This is preserved in the sandbox
 * state file if enabled.
 */
static struct tpm_state {
	bool valid;
	struct nvdata_state nvdata[NV_SEQ_COUNT];
} g_state;
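
/*
 * Illustrative sketch of the saved state (the property names follow
 * sandbox_tpm_write_state() below; the exact node layout in the state
 * file is an assumption shown only for orientation):
 *
 *	tpm {
 *		compatible = "google,sandbox-tpm";
 *		nvdata1 = [ ...NV_DATA_SIZE bytes... ];
 *		nvdata2 = [ ...NV_DATA_SIZE bytes... ];
 *	};
 */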

/**
 * sandbox_tpm_read_state() - read the sandbox TPM state from the state file
 *
 * If data is available, then blob and node will provide access to it. If
 * not, this function sets up an empty TPM.
 *
 * @blob: Pointer to device tree blob, or NULL if no data to read
 * @node: Node offset to read from
 */
static int sandbox_tpm_read_state(const void *blob, int node)
{
	const char *prop;
	int len;
	int i;

	if (!blob)
		return 0;

	for (i = 0; i < NV_SEQ_COUNT; i++) {
		char prop_name[20];

		sprintf(prop_name, "nvdata%d", i);
		prop = fdt_getprop(blob, node, prop_name, &len);
		if (prop && len == NV_DATA_SIZE) {
			memcpy(g_state.nvdata[i].data, prop, NV_DATA_SIZE);
			g_state.nvdata[i].present = true;
		}
	}
	g_state.valid = true;

	return 0;
}

/**
 * sandbox_tpm_write_state() - Write out our state to the state file
 *
 * The caller will ensure that there is a node ready for the state. The node
 * may already contain the old state, in which case it is overwritten.
 *
 * @blob: Device tree blob holding state
 * @node: Node to write our state into
 */
static int sandbox_tpm_write_state(void *blob, int node)
{
	int i;

	/*
	 * We are guaranteed enough space to write basic properties.
	 * We could use fdt_add_subnode() to put each set of data in its
	 * own node - perhaps useful if we add access information to each.
	 */
	for (i = 0; i < NV_SEQ_COUNT; i++) {
		char prop_name[20];

		if (g_state.nvdata[i].present) {
			sprintf(prop_name, "nvdata%d", i);
			fdt_setprop(blob, node, prop_name,
				    g_state.nvdata[i].data, NV_DATA_SIZE);
		}
	}

	return 0;
}

SANDBOX_STATE_IO(sandbox_tpm, "google,sandbox-tpm", sandbox_tpm_read_state,
		 sandbox_tpm_write_state);

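/**
 * index_to_seq() - convert a TPM NV index into an NV_SEQ_... sequence number
 *
 * @index: TPM NV index (e.g. KERNEL_NV_INDEX), or 0 for the global lock
 * @return sequence number (NV_SEQ_...) on success, -1 if the index is not
 *	one of the emulated spaces
 */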
static int index_to_seq(uint32_t index)
{
	switch (index) {
	case FIRMWARE_NV_INDEX:
		return NV_SEQ_FIRMWARE;
	case KERNEL_NV_INDEX:
		return NV_SEQ_KERNEL;
	case BACKUP_NV_INDEX:
		return NV_SEQ_BACKUP;
	case FWMP_NV_INDEX:
		return NV_SEQ_FWMP;
	case REC_HASH_NV_INDEX:
		return NV_SEQ_REC_HASH;
	case 0:
		return NV_GLOBAL_LOCK;
	}

	printf("Invalid nv index %#x\n", index);
	return -1;
}

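/**
 * handle_cap_flag_space() - emit capability data for an NV space
 *
 * Fills in a minimal tpm_nv_data_public structure (with the
 * TPM_NV_PER_PPWRITE permission attribute set), copies it into the response
 * buffer and advances the data pointer past it.
 *
 * @datap: pointer to the current output position in the response buffer;
 *	updated to point just after the data written
 * @index: NV index the capability data refers to
 */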
static void handle_cap_flag_space(u8 **datap, uint index)
{
	struct tpm_nv_data_public pub;

	/* TPM_NV_PER_PPWRITE */
	memset(&pub, '\0', sizeof(pub));
	pub.nv_index = __cpu_to_be32(index);
	pub.pcr_info_read.pcr_selection.size_of_select = __cpu_to_be16(
		sizeof(pub.pcr_info_read.pcr_selection.pcr_select));
	pub.permission.attributes = __cpu_to_be32(1);
	pub.pcr_info_write = pub.pcr_info_read;
	memcpy(*datap, &pub, sizeof(pub));
	*datap += sizeof(pub);
}

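/**
 * sandbox_tpm_xfer() - emulate a TPM command/response exchange
 *
 * Decodes the command ordinal from @sendbuf and emulates just enough of the
 * TPM 1.2 command set (get-capability, NV read/write, extend and a few
 * commands that simply return success) for verified-boot testing, writing
 * the reply to @recvbuf.
 *
 * @dev: TPM device
 * @sendbuf: command buffer to 'send' to the TPM
 * @send_size: size of the command buffer in bytes
 * @recvbuf: buffer to receive the response
 * @recv_len: updated to the number of response bytes placed in @recvbuf
 * @return 0 on success, -ENOSYS if the command or sub-command is not
 *	emulated, -EINVAL if the NV index is invalid
 */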
static int sandbox_tpm_xfer(struct udevice *dev, const uint8_t *sendbuf,
			    size_t send_size, uint8_t *recvbuf,
			    size_t *recv_len)
{
	struct tpm_state *tpm = dev_get_priv(dev);
	uint32_t code, index, length, type;
	uint8_t *data;
	int seq;

	code = get_unaligned_be32(sendbuf + sizeof(uint16_t) +
				  sizeof(uint32_t));
#ifdef DEBUG
	printf("tpm: %zd bytes, recv_len %zd, cmd = %x\n", send_size,
	       *recv_len, code);
	print_buffer(0, sendbuf, 1, send_size, 0);
#endif
	switch (code) {
	case TPM_CMD_GET_CAPABILITY:
		type = get_unaligned_be32(sendbuf + 14);
		switch (type) {
		case TPM_CAP_FLAG:
			index = get_unaligned_be32(sendbuf + 18);
			printf("Get flags index %#02x\n", index);
			*recv_len = 22;
			memset(recvbuf, '\0', *recv_len);
			data = recvbuf + TPM_RESPONSE_HEADER_LENGTH +
				sizeof(uint32_t);
			switch (index) {
			case FIRMWARE_NV_INDEX:
				break;
			case KERNEL_NV_INDEX:
				handle_cap_flag_space(&data, index);
				*recv_len = data - recvbuf -
					TPM_RESPONSE_HEADER_LENGTH -
					sizeof(uint32_t);
				break;
			case TPM_CAP_FLAG_PERMANENT: {
				struct tpm_permanent_flags *pflags;

				pflags = (struct tpm_permanent_flags *)data;
				memset(pflags, '\0', sizeof(*pflags));
				put_unaligned_be32(TPM_TAG_PERMANENT_FLAGS,
						   &pflags->tag);
				*recv_len = TPM_HEADER_SIZE + 4 +
						sizeof(*pflags);
				break;
			}
			default:
				printf(" ** Unknown flags index %x\n", index);
				return -ENOSYS;
			}
			put_unaligned_be32(*recv_len,
					   recvbuf +
					   TPM_RESPONSE_HEADER_LENGTH);
			break;
		case TPM_CAP_NV_INDEX:
			index = get_unaligned_be32(sendbuf + 18);
			printf("Get cap nv index %#02x\n", index);
			put_unaligned_be32(22, recvbuf +
					   TPM_RESPONSE_HEADER_LENGTH);
			break;
		default:
			printf(" ** Unknown 0x65 command type %#02x\n",
			       type);
			return -ENOSYS;
		}
		break;
	case TPM_CMD_NV_WRITE_VALUE:
		index = get_unaligned_be32(sendbuf + 10);
		length = get_unaligned_be32(sendbuf + 18);
		seq = index_to_seq(index);
		if (seq < 0)
			return -EINVAL;
		printf("tpm: nvwrite index=%#02x, len=%#02x\n", index, length);
		memcpy(&tpm->nvdata[seq].data, sendbuf + 22, length);
		tpm->nvdata[seq].present = true;
		*recv_len = 12;
		memset(recvbuf, '\0', *recv_len);
		break;
	case TPM_CMD_NV_READ_VALUE: /* nvread */
		index = get_unaligned_be32(sendbuf + 10);
		length = get_unaligned_be32(sendbuf + 18);
		seq = index_to_seq(index);
		if (seq < 0)
			return -EINVAL;
		printf("tpm: nvread index=%#02x, len=%#02x, seq=%#02x\n", index,
		       length, seq);
		*recv_len = TPM_RESPONSE_HEADER_LENGTH + sizeof(uint32_t) +
			length;
		memset(recvbuf, '\0', *recv_len);
		put_unaligned_be32(length, recvbuf +
				   TPM_RESPONSE_HEADER_LENGTH);
		if (seq == NV_SEQ_KERNEL) {
			struct rollback_space_kernel rsk;

			data = recvbuf + TPM_RESPONSE_HEADER_LENGTH +
				sizeof(uint32_t);
			memset(&rsk, 0, sizeof(struct rollback_space_kernel));
			rsk.struct_version = 2;
			rsk.uid = ROLLBACK_SPACE_KERNEL_UID;
			rsk.crc8 = crc8(0, (unsigned char *)&rsk,
					offsetof(struct rollback_space_kernel,
						 crc8));
			memcpy(data, &rsk, sizeof(rsk));
		} else if (!tpm->nvdata[seq].present) {
			put_unaligned_be32(TPM_BADINDEX, recvbuf +
					   sizeof(uint16_t) + sizeof(uint32_t));
		} else {
			memcpy(recvbuf + TPM_RESPONSE_HEADER_LENGTH +
			       sizeof(uint32_t), &tpm->nvdata[seq].data,
			       length);
		}
		break;
	case TPM_CMD_EXTEND:
		*recv_len = 30;
		memset(recvbuf, '\0', *recv_len);
		break;
	case TPM_CMD_NV_DEFINE_SPACE:
	case 0x15: /* pcr read */
	case 0x5d: /* force clear */
	case 0x6f: /* physical enable */
	case 0x72: /* physical set deactivated */
	case 0x99: /* startup */
	case 0x50: /* self test full */
	case 0x4000000a: /* assert physical presence */
		*recv_len = 12;
		memset(recvbuf, '\0', *recv_len);
		break;
	default:
		printf("Unknown tpm command %02x\n", code);
		return -ENOSYS;
	}
#ifdef DEBUG
	printf("tpm: rx recv_len %zd\n", *recv_len);
	print_buffer(0, recvbuf, 1, *recv_len, 0);
#endif

	return 0;
}

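/**
 * sandbox_tpm_get_desc() - return a short description of this TPM device
 *
 * @dev: TPM device
 * @buf: buffer to place the description in
 * @size: size of @buf; must be at least 15 bytes
 * @return number of characters written, or -ENOSPC if @size is too small
 */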
static int sandbox_tpm_get_desc(struct udevice *dev, char *buf, int size)
{
	if (size < 15)
		return -ENOSPC;

	return snprintf(buf, size, "sandbox TPM");
}

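/**
 * sandbox_tpm_probe() - set up the sandbox TPM emulation
 *
 * Copies the global state (read from the sandbox state file, if present)
 * into this device's private data so the emulation starts from any saved
 * NV contents.
 */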
static int sandbox_tpm_probe(struct udevice *dev)
{
	struct tpm_state *tpm = dev_get_priv(dev);

	memcpy(tpm, &g_state, sizeof(*tpm));

	return 0;
}

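/* The emulated TPM needs no hardware setup, so open/close do nothing */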
static int sandbox_tpm_open(struct udevice *dev)
{
	return 0;
}

static int sandbox_tpm_close(struct udevice *dev)
{
	return 0;
}

static const struct tpm_ops sandbox_tpm_ops = {
	.open		= sandbox_tpm_open,
	.close		= sandbox_tpm_close,
	.get_desc	= sandbox_tpm_get_desc,
	.xfer		= sandbox_tpm_xfer,
};

static const struct udevice_id sandbox_tpm_ids[] = {
	{ .compatible = "google,sandbox-tpm" },
	{ }
};

U_BOOT_DRIVER(google_sandbox_tpm) = {
	.name = "google_sandbox_tpm",
	.id = UCLASS_TPM,
	.of_match = sandbox_tpm_ids,
	.ops = &sandbox_tpm_ops,
	.probe	= sandbox_tpm_probe,
	.priv_auto_alloc_size = sizeof(struct tpm_state),
};